Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0121-4.19.22-all-fixes.patch



Revision 3400
Fri Aug 2 11:47:33 2019 UTC by niro
File size: 43355 byte(s)
-linux-4.19.22
1 niro 3400 diff --git a/Makefile b/Makefile
2     index ba5f14d38d8e..8cfcb01fcd7b 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 19
9     -SUBLEVEL = 21
10     +SUBLEVEL = 22
11     EXTRAVERSION =
12     NAME = "People's Front"
13    
14     diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
15     index 47aa53ba6b92..559659b399d0 100644
16     --- a/arch/arm/boot/dts/da850.dtsi
17     +++ b/arch/arm/boot/dts/da850.dtsi
18     @@ -476,7 +476,7 @@
19     clocksource: timer@20000 {
20     compatible = "ti,da830-timer";
21     reg = <0x20000 0x1000>;
22     - interrupts = <12>, <13>;
23     + interrupts = <21>, <22>;
24     interrupt-names = "tint12", "tint34";
25     clocks = <&pll0_auxclk>;
26     };
27     diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
28     index 3b73813c6b04..23e8c93515d4 100644
29     --- a/arch/arm/mach-iop32x/n2100.c
30     +++ b/arch/arm/mach-iop32x/n2100.c
31     @@ -75,8 +75,7 @@ void __init n2100_map_io(void)
32     /*
33     * N2100 PCI.
34     */
35     -static int __init
36     -n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
37     +static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
38     {
39     int irq;
40    
41     diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
42     index 028e50c6383f..a32c3b631484 100644
43     --- a/arch/arm/mach-tango/pm.c
44     +++ b/arch/arm/mach-tango/pm.c
45     @@ -3,6 +3,7 @@
46     #include <linux/suspend.h>
47     #include <asm/suspend.h>
48     #include "smc.h"
49     +#include "pm.h"
50    
51     static int tango_pm_powerdown(unsigned long arg)
52     {
53     @@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
54     .valid = suspend_valid_only_mem,
55     };
56    
57     -static int __init tango_pm_init(void)
58     +void __init tango_pm_init(void)
59     {
60     suspend_set_ops(&tango_pm_ops);
61     - return 0;
62     }
63     -
64     -late_initcall(tango_pm_init);
65     diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
66     new file mode 100644
67     index 000000000000..35ea705a0ee2
68     --- /dev/null
69     +++ b/arch/arm/mach-tango/pm.h
70     @@ -0,0 +1,7 @@
71     +/* SPDX-License-Identifier: GPL-2.0 */
72     +
73     +#ifdef CONFIG_SUSPEND
74     +void __init tango_pm_init(void);
75     +#else
76     +#define tango_pm_init NULL
77     +#endif
78     diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c
79     index 677dd7b5efd9..824f90737b04 100644
80     --- a/arch/arm/mach-tango/setup.c
81     +++ b/arch/arm/mach-tango/setup.c
82     @@ -2,6 +2,7 @@
83     #include <asm/mach/arch.h>
84     #include <asm/hardware/cache-l2x0.h>
85     #include "smc.h"
86     +#include "pm.h"
87    
88     static void tango_l2c_write(unsigned long val, unsigned int reg)
89     {
90     @@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
91     .dt_compat = tango_dt_compat,
92     .l2c_aux_mask = ~0,
93     .l2c_write_sec = tango_l2c_write,
94     + .init_late = tango_pm_init,
95     MACHINE_END
96     diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
97     index 8f5bd04f320a..7f3f136572de 100644
98     --- a/arch/mips/kernel/mips-cm.c
99     +++ b/arch/mips/kernel/mips-cm.c
100     @@ -457,5 +457,5 @@ void mips_cm_error_report(void)
101     }
102    
103     /* reprime cause register */
104     - write_gcr_error_cause(0);
105     + write_gcr_error_cause(cm_error);
106     }
107     diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c
108     index a60715e11306..b26892ce871c 100644
109     --- a/arch/mips/loongson64/common/reset.c
110     +++ b/arch/mips/loongson64/common/reset.c
111     @@ -59,7 +59,12 @@ static void loongson_poweroff(void)
112     {
113     #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
114     mach_prepare_shutdown();
115     - unreachable();
116     +
117     + /*
118     + * It needs a wait loop here, but mips/kernel/reset.c already calls
119     + * a generic delay loop, machine_hang(), so simply return.
120     + */
121     + return;
122     #else
123     void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
124    
125     diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
126     index 5017d5843c5a..fc29b85cfa92 100644
127     --- a/arch/mips/pci/pci-octeon.c
128     +++ b/arch/mips/pci/pci-octeon.c
129     @@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void)
130     if (octeon_has_feature(OCTEON_FEATURE_PCIE))
131     return 0;
132    
133     + if (!octeon_is_pci_host()) {
134     + pr_notice("Not in host mode, PCI Controller not initialized\n");
135     + return 0;
136     + }
137     +
138     /* Point pcibios_map_irq() to the PCI version of it */
139     octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
140    
141     @@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void)
142     else
143     octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
144    
145     - if (!octeon_is_pci_host()) {
146     - pr_notice("Not in host mode, PCI Controller not initialized\n");
147     - return 0;
148     - }
149     -
150     /* PCI I/O and PCI MEM values */
151     set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
152     ioport_resource.start = 0;
153     diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
154     index 34605ca21498..6f10312e0c76 100644
155     --- a/arch/mips/vdso/Makefile
156     +++ b/arch/mips/vdso/Makefile
157     @@ -8,6 +8,7 @@ ccflags-vdso := \
158     $(filter -E%,$(KBUILD_CFLAGS)) \
159     $(filter -mmicromips,$(KBUILD_CFLAGS)) \
160     $(filter -march=%,$(KBUILD_CFLAGS)) \
161     + $(filter -m%-float,$(KBUILD_CFLAGS)) \
162     -D__VDSO__
163    
164     ifeq ($(cc-name),clang)
165     @@ -128,7 +129,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
166     $(call cmd,force_checksrc)
167     $(call if_changed_rule,cc_o_c)
168    
169     -$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
170     +$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
171     $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
172     $(call if_changed_dep,cpp_lds_S)
173    
174     @@ -168,7 +169,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
175     $(call cmd,force_checksrc)
176     $(call if_changed_rule,cc_o_c)
177    
178     -$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
179     +$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
180     $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
181     $(call if_changed_dep,cpp_lds_S)
182    
183     diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
184     index 2a2486526d1f..855dbae6d351 100644
185     --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
186     +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
187     @@ -1234,21 +1234,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
188    
189     #define pmd_move_must_withdraw pmd_move_must_withdraw
190     struct spinlock;
191     -static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
192     - struct spinlock *old_pmd_ptl,
193     - struct vm_area_struct *vma)
194     -{
195     - if (radix_enabled())
196     - return false;
197     - /*
198     - * Archs like ppc64 use pgtable to store per pmd
199     - * specific information. So when we switch the pmd,
200     - * we should also withdraw and deposit the pgtable
201     - */
202     - return true;
203     -}
204     -
205     -
206     +extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
207     + struct spinlock *old_pmd_ptl,
208     + struct vm_area_struct *vma);
209     +/*
210     + * Hash translation mode use the deposited table to store hash pte
211     + * slot information.
212     + */
213     #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
214     static inline bool arch_needs_pgtable_deposit(void)
215     {
216     diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
217     index 01d7c0f7c4f0..297db665d953 100644
218     --- a/arch/powerpc/mm/pgtable-book3s64.c
219     +++ b/arch/powerpc/mm/pgtable-book3s64.c
220     @@ -477,3 +477,25 @@ void arch_report_meminfo(struct seq_file *m)
221     atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
222     }
223     #endif /* CONFIG_PROC_FS */
224     +
225     +/*
226     + * For hash translation mode, we use the deposited table to store hash slot
227     + * information and they are stored at PTRS_PER_PMD offset from related pmd
228     + * location. Hence a pmd move requires deposit and withdraw.
229     + *
230     + * For radix translation with split pmd ptl, we store the deposited table in the
231     + * pmd page. Hence if we have different pmd page we need to withdraw during pmd
232     + * move.
233     + *
234     + * With hash we use deposited table always irrespective of anon or not.
235     + * With radix we use deposited table only for anonymous mapping.
236     + */
237     +int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
238     + struct spinlock *old_pmd_ptl,
239     + struct vm_area_struct *vma)
240     +{
241     + if (radix_enabled())
242     + return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
243     +
244     + return true;
245     +}
246     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
247     index b8c3f9e6af89..adf28788cab5 100644
248     --- a/drivers/ata/libata-core.c
249     +++ b/drivers/ata/libata-core.c
250     @@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
251     { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
252     { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
253     { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
254     + { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
255    
256     /* devices that don't properly handle queued TRIM commands */
257     { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
258     diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
259     index 472c88ae1c0f..92f843eaf1e0 100644
260     --- a/drivers/firmware/arm_scmi/bus.c
261     +++ b/drivers/firmware/arm_scmi/bus.c
262     @@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver)
263     }
264     EXPORT_SYMBOL_GPL(scmi_driver_unregister);
265    
266     +static void scmi_device_release(struct device *dev)
267     +{
268     + kfree(to_scmi_dev(dev));
269     +}
270     +
271     struct scmi_device *
272     scmi_device_create(struct device_node *np, struct device *parent, int protocol)
273     {
274     @@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
275     scmi_dev->dev.parent = parent;
276     scmi_dev->dev.of_node = np;
277     scmi_dev->dev.bus = &scmi_bus_type;
278     + scmi_dev->dev.release = scmi_device_release;
279     dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
280    
281     retval = device_register(&scmi_dev->dev);
282     @@ -156,9 +162,8 @@ free_mem:
283     void scmi_device_destroy(struct scmi_device *scmi_dev)
284     {
285     scmi_handle_put(scmi_dev->handle);
286     - device_unregister(&scmi_dev->dev);
287     ida_simple_remove(&scmi_bus_id, scmi_dev->id);
288     - kfree(scmi_dev);
289     + device_unregister(&scmi_dev->dev);
290     }
291    
292     void scmi_set_handle(struct scmi_device *scmi_dev)
293     diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
294     index a63e00653324..1546bc49004f 100644
295     --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
296     +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
297     @@ -984,6 +984,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
298     break;
299     case amd_pp_dpp_clock:
300     pclk_vol_table = pinfo->vdd_dep_on_dppclk;
301     + break;
302     default:
303     return -EINVAL;
304     }
305     diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
306     index 02db9ac82d7a..a3104d79b48f 100644
307     --- a/drivers/gpu/drm/drm_modes.c
308     +++ b/drivers/gpu/drm/drm_modes.c
309     @@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
310     if (mode->hsync)
311     return mode->hsync;
312    
313     - if (mode->htotal < 0)
314     + if (mode->htotal <= 0)
315     return 0;
316    
317     calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
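The widened check matters because the hsync rate is derived by dividing the pixel clock by htotal, so a zero htotal would otherwise be a divide-by-zero. A rough stand-alone sketch of that calculation, using common 1080p60 timings (148500 kHz clock, 2200-pixel htotal) as an assumed example:

#include <stdio.h>

static int hsync_hz(int clock_khz, int htotal)
{
	if (htotal <= 0)	/* the fixed guard: reject 0 as well as negative values */
		return 0;

	return (clock_khz * 1000) / htotal;
}

int main(void)
{
	printf("%d Hz\n", hsync_hz(148500, 2200));	/* 67500 Hz, i.e. 67.5 kHz */
	printf("%d Hz\n", hsync_hz(148500, 0));		/* 0 instead of a divide-by-zero */
	return 0;
}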
318     diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
319     index c9af34861d9e..b4b1f9ca05b6 100644
320     --- a/drivers/gpu/drm/i915/intel_ddi.c
321     +++ b/drivers/gpu/drm/i915/intel_ddi.c
322     @@ -1085,7 +1085,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
323     return DDI_CLK_SEL_TBT_810;
324     default:
325     MISSING_CASE(clock);
326     - break;
327     + return DDI_CLK_SEL_NONE;
328     }
329     case DPLL_ID_ICL_MGPLL1:
330     case DPLL_ID_ICL_MGPLL2:
331     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
332     index bb6dbbe18835..c72b942f2bdf 100644
333     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
334     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
335     @@ -627,13 +627,16 @@ out_fixup:
336     static int vmw_dma_masks(struct vmw_private *dev_priv)
337     {
338     struct drm_device *dev = dev_priv->dev;
339     + int ret = 0;
340    
341     - if (intel_iommu_enabled &&
342     + ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
343     + if (dev_priv->map_mode != vmw_dma_phys &&
344     (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
345     DRM_INFO("Restricting DMA addresses to 44 bits.\n");
346     - return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
347     + return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
348     }
349     - return 0;
350     +
351     + return ret;
352     }
353     #else
354     static int vmw_dma_masks(struct vmw_private *dev_priv)
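For scale, DMA_BIT_MASK(44) above restricts addresses to the low 44 bits, a 16 TiB window. A small stand-alone check of that arithmetic; the macro is written out here to mirror the kernel definition and is only an illustration:

#include <stdio.h>

/* Written out to mirror the kernel's DMA_BIT_MASK(): all ones below bit n. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	unsigned long long mask = DMA_BIT_MASK(44);

	/* 0xfffffffffff: the highest addressable byte sits just under 16 TiB */
	printf("mask 0x%llx, window %llu TiB\n", mask, (mask + 1) >> 40);
	return 0;
}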
355     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
356     index f0ab6b2313bb..c3e2022bda5d 100644
357     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
358     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
359     @@ -3843,7 +3843,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
360     *p_fence = NULL;
361     }
362    
363     - return 0;
364     + return ret;
365     }
366    
367     /**
368     diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
369     index 031d568b4972..4e339cfd0c54 100644
370     --- a/drivers/iio/adc/axp288_adc.c
371     +++ b/drivers/iio/adc/axp288_adc.c
372     @@ -27,9 +27,18 @@
373     #include <linux/iio/machine.h>
374     #include <linux/iio/driver.h>
375    
376     -#define AXP288_ADC_EN_MASK 0xF1
377     -#define AXP288_ADC_TS_PIN_GPADC 0xF2
378     -#define AXP288_ADC_TS_PIN_ON 0xF3
379     +/*
380     + * This mask enables all ADCs except for the battery temp-sensor (TS), that is
381     + * left as-is to avoid breaking charging on devices without a temp-sensor.
382     + */
383     +#define AXP288_ADC_EN_MASK 0xF0
384     +#define AXP288_ADC_TS_ENABLE 0x01
385     +
386     +#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
387     +#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
388     +#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
389     +#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
390     +#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
391    
392     enum axp288_adc_id {
393     AXP288_ADC_TS,
394     @@ -44,6 +53,7 @@ enum axp288_adc_id {
395     struct axp288_adc_info {
396     int irq;
397     struct regmap *regmap;
398     + bool ts_enabled;
399     };
400    
401     static const struct iio_chan_spec axp288_adc_channels[] = {
402     @@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
403     return IIO_VAL_INT;
404     }
405    
406     -static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
407     - unsigned long address)
408     +/*
409     + * The current-source used for the battery temp-sensor (TS) is shared
410     + * with the GPADC. For proper fuel-gauge and charger operation the TS
411     + * current-source needs to be permanently on. But to read the GPADC we
412     + * need to temporary switch the TS current-source to ondemand, so that
413     + * the GPADC can use it, otherwise we will always read an all 0 value.
414     + */
415     +static int axp288_adc_set_ts(struct axp288_adc_info *info,
416     + unsigned int mode, unsigned long address)
417     {
418     int ret;
419    
420     - /* channels other than GPADC do not need to switch TS pin */
421     + /* No need to switch the current-source if the TS pin is disabled */
422     + if (!info->ts_enabled)
423     + return 0;
424     +
425     + /* Channels other than GPADC do not need the current source */
426     if (address != AXP288_GP_ADC_H)
427     return 0;
428    
429     - ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
430     + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
431     + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode);
432     if (ret)
433     return ret;
434    
435     /* When switching to the GPADC pin give things some time to settle */
436     - if (mode == AXP288_ADC_TS_PIN_GPADC)
437     + if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND)
438     usleep_range(6000, 10000);
439    
440     return 0;
441     @@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
442     mutex_lock(&indio_dev->mlock);
443     switch (mask) {
444     case IIO_CHAN_INFO_RAW:
445     - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
446     + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND,
447     chan->address)) {
448     dev_err(&indio_dev->dev, "GPADC mode\n");
449     ret = -EINVAL;
450     break;
451     }
452     ret = axp288_adc_read_channel(val, chan->address, info->regmap);
453     - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
454     + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON,
455     chan->address))
456     dev_err(&indio_dev->dev, "TS pin restore\n");
457     break;
458     @@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
459     return ret;
460     }
461    
462     -static int axp288_adc_set_state(struct regmap *regmap)
463     +static int axp288_adc_initialize(struct axp288_adc_info *info)
464     {
465     - /* ADC should be always enabled for internal FG to function */
466     - if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
467     - return -EIO;
468     + int ret, adc_enable_val;
469     +
470     + /*
471     + * Determine if the TS pin is enabled and set the TS current-source
472     + * accordingly.
473     + */
474     + ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val);
475     + if (ret)
476     + return ret;
477     +
478     + if (adc_enable_val & AXP288_ADC_TS_ENABLE) {
479     + info->ts_enabled = true;
480     + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
481     + AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
482     + AXP288_ADC_TS_CURRENT_ON);
483     + } else {
484     + info->ts_enabled = false;
485     + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
486     + AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
487     + AXP288_ADC_TS_CURRENT_OFF);
488     + }
489     + if (ret)
490     + return ret;
491    
492     - return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
493     + /* Turn on the ADC for all channels except TS, leave TS as is */
494     + return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
495     + AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
496     }
497    
498     static const struct iio_info axp288_adc_iio_info = {
499     @@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
500     * Set ADC to enabled state at all time, including system suspend.
501     * otherwise internal fuel gauge functionality may be affected.
502     */
503     - ret = axp288_adc_set_state(axp20x->regmap);
504     + ret = axp288_adc_initialize(info);
505     if (ret) {
506     dev_err(&pdev->dev, "unable to enable ADC device\n");
507     return ret;
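The switch from regmap_write() to regmap_update_bits() is the heart of this hunk: only the 2-bit TS current-source field (GENMASK(1, 0)) is rewritten, where the old full-register writes of 0xF2/0xF3 clobbered the other ADC control bits. A stand-alone model of that read-modify-write, with reg_read()/reg_write() as hypothetical stand-ins for the real bus accessors:

#include <stdint.h>
#include <stdio.h>

#define TS_CURRENT_MASK		0x3	/* GENMASK(1, 0) */
#define TS_CURRENT_ON_ONDEMAND	0x2	/* 2 << 0 */

static uint8_t fake_reg = 0xf3;		/* example: upper bits set, TS field = 3 */

static uint8_t reg_read(void)		{ return fake_reg; }
static void reg_write(uint8_t val)	{ fake_reg = val; }

/* Read-modify-write of only the masked bits, as regmap_update_bits() does. */
static void update_bits(uint8_t mask, uint8_t val)
{
	uint8_t old = reg_read();
	uint8_t new = (old & ~mask) | (val & mask);

	if (new != old)
		reg_write(new);
}

int main(void)
{
	update_bits(TS_CURRENT_MASK, TS_CURRENT_ON_ONDEMAND);
	/* Only bits 1:0 change; the other control bits (0xf0) survive. */
	printf("register now 0x%02x\n", fake_reg);	/* 0xf2 */
	return 0;
}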
508     diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
509     index 184d686ebd99..8b4568edd5cb 100644
510     --- a/drivers/iio/adc/ti-ads8688.c
511     +++ b/drivers/iio/adc/ti-ads8688.c
512     @@ -41,6 +41,7 @@
513    
514     #define ADS8688_VREF_MV 4096
515     #define ADS8688_REALBITS 16
516     +#define ADS8688_MAX_CHANNELS 8
517    
518     /*
519     * enum ads8688_range - ADS8688 reference voltage range
520     @@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
521     {
522     struct iio_poll_func *pf = p;
523     struct iio_dev *indio_dev = pf->indio_dev;
524     - u16 buffer[8];
525     + u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
526     int i, j = 0;
527    
528     for (i = 0; i < indio_dev->masklength; i++) {
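When timestamps are enabled, IIO triggered buffers append a 64-bit timestamp after the sample data, so the original u16 buffer[8] left no room for it; the new sizing reserves sizeof(s64)/sizeof(u16) = 4 extra 16-bit slots. A quick stand-alone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define ADS8688_MAX_CHANNELS 8

int main(void)
{
	/* 8 x 16-bit samples plus 4 more u16 slots for the appended s64 timestamp */
	uint16_t buffer[ADS8688_MAX_CHANNELS + sizeof(int64_t) / sizeof(uint16_t)];

	printf("%zu entries, %zu bytes\n",
	       sizeof(buffer) / sizeof(buffer[0]), sizeof(buffer));	/* 12, 24 */
	return 0;
}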
529     diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
530     index a406ad31b096..3a20cb5d9bff 100644
531     --- a/drivers/iio/chemical/atlas-ph-sensor.c
532     +++ b/drivers/iio/chemical/atlas-ph-sensor.c
533     @@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
534     case IIO_CHAN_INFO_SCALE:
535     switch (chan->type) {
536     case IIO_TEMP:
537     - *val = 1; /* 0.01 */
538     - *val2 = 100;
539     - break;
540     + *val = 10;
541     + return IIO_VAL_INT;
542     case IIO_PH:
543     *val = 1; /* 0.001 */
544     *val2 = 1000;
545     @@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
546     int val, int val2, long mask)
547     {
548     struct atlas_data *data = iio_priv(indio_dev);
549     - __be32 reg = cpu_to_be32(val);
550     + __be32 reg = cpu_to_be32(val / 10);
551    
552     if (val2 != 0 || val < 0 || val > 20000)
553     return -EINVAL;
554     diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
555     index 23739a60517f..bb1ee9834a02 100644
556     --- a/drivers/misc/mei/hw-me-regs.h
557     +++ b/drivers/misc/mei/hw-me-regs.h
558     @@ -139,6 +139,8 @@
559     #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
560     #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
561    
562     +#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
563     +
564     /*
565     * MEI HW Section
566     */
567     diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
568     index c8e21c894a5f..4299658d48d6 100644
569     --- a/drivers/misc/mei/pci-me.c
570     +++ b/drivers/misc/mei/pci-me.c
571     @@ -105,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
572     {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
573     {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
574    
575     + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
576     +
577     /* required last entry */
578     {0, }
579     };
580     diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
581     index 3633202e18f4..de7f035a176d 100644
582     --- a/drivers/misc/mic/vop/vop_main.c
583     +++ b/drivers/misc/mic/vop/vop_main.c
584     @@ -563,6 +563,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
585     int ret = -1;
586    
587     if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
588     + struct device *dev = get_device(&vdev->vdev.dev);
589     +
590     dev_dbg(&vpdev->dev,
591     "%s %d config_change %d type %d vdev %p\n",
592     __func__, __LINE__,
593     @@ -574,7 +576,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
594     iowrite8(-1, &dc->h2c_vdev_db);
595     if (status & VIRTIO_CONFIG_S_DRIVER_OK)
596     wait_for_completion(&vdev->reset_done);
597     - put_device(&vdev->vdev.dev);
598     + put_device(dev);
599     iowrite8(1, &dc->guest_ack);
600     dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
601     __func__, __LINE__, ioread8(&dc->guest_ack));
602     diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
603     index 6c3591cdf855..a3c6c773d9dc 100644
604     --- a/drivers/misc/vexpress-syscfg.c
605     +++ b/drivers/misc/vexpress-syscfg.c
606     @@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
607     int tries;
608     long timeout;
609    
610     - if (WARN_ON(index > func->num_templates))
611     + if (WARN_ON(index >= func->num_templates))
612     return -EINVAL;
613    
614     command = readl(syscfg->base + SYS_CFGCTRL);
615     diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
616     index 99c460facd5e..0bbb23b014f1 100644
617     --- a/drivers/mtd/mtdpart.c
618     +++ b/drivers/mtd/mtdpart.c
619     @@ -470,6 +470,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
620     /* let's register it anyway to preserve ordering */
621     slave->offset = 0;
622     slave->mtd.size = 0;
623     +
624     + /* Initialize ->erasesize to make add_mtd_device() happy. */
625     + slave->mtd.erasesize = parent->erasesize;
626     +
627     printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
628     part->name);
629     goto out_register;
630     diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
631     index 88ea2203e263..322a008290e5 100644
632     --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
633     +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
634     @@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this)
635    
636     /*
637     * Reset BCH here, too. We got failures otherwise :(
638     - * See later BCH reset for explanation of MX23 handling
639     + * See later BCH reset for explanation of MX23 and MX28 handling
640     */
641     - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
642     + ret = gpmi_reset_block(r->bch_regs,
643     + GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
644     if (ret)
645     goto err_out;
646    
647     @@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this)
648     /*
649     * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
650     * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
651     - * On the other hand, the MX28 needs the reset, because one case has been
652     - * seen where the BCH produced ECC errors constantly after 10000
653     - * consecutive reboots. The latter case has not been seen on the MX23
654     - * yet, still we don't know if it could happen there as well.
655     + * and MX28.
656     */
657     - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
658     + ret = gpmi_reset_block(r->bch_regs,
659     + GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
660     if (ret)
661     goto err_out;
662    
663     diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
664     index 30f83649c481..8c7bf91ce4e1 100644
665     --- a/drivers/mtd/nand/spi/core.c
666     +++ b/drivers/mtd/nand/spi/core.c
667     @@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
668     struct nand_device *nand = spinand_to_nand(spinand);
669     struct mtd_info *mtd = nanddev_to_mtd(nand);
670     struct nand_page_io_req adjreq = *req;
671     - unsigned int nbytes = 0;
672     - void *buf = NULL;
673     + void *buf = spinand->databuf;
674     + unsigned int nbytes;
675     u16 column = 0;
676     int ret;
677    
678     - memset(spinand->databuf, 0xff,
679     - nanddev_page_size(nand) +
680     - nanddev_per_page_oobsize(nand));
681     + /*
682     + * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
683     + * the cache content to 0xFF (depends on vendor implementation), so we
684     + * must fill the page cache entirely even if we only want to program
685     + * the data portion of the page, otherwise we might corrupt the BBM or
686     + * user data previously programmed in OOB area.
687     + */
688     + nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
689     + memset(spinand->databuf, 0xff, nbytes);
690     + adjreq.dataoffs = 0;
691     + adjreq.datalen = nanddev_page_size(nand);
692     + adjreq.databuf.out = spinand->databuf;
693     + adjreq.ooblen = nanddev_per_page_oobsize(nand);
694     + adjreq.ooboffs = 0;
695     + adjreq.oobbuf.out = spinand->oobbuf;
696    
697     - if (req->datalen) {
698     + if (req->datalen)
699     memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
700     req->datalen);
701     - adjreq.dataoffs = 0;
702     - adjreq.datalen = nanddev_page_size(nand);
703     - adjreq.databuf.out = spinand->databuf;
704     - nbytes = adjreq.datalen;
705     - buf = spinand->databuf;
706     - }
707    
708     if (req->ooblen) {
709     if (req->mode == MTD_OPS_AUTO_OOB)
710     @@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
711     else
712     memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
713     req->ooblen);
714     -
715     - adjreq.ooblen = nanddev_per_page_oobsize(nand);
716     - adjreq.ooboffs = 0;
717     - nbytes += nanddev_per_page_oobsize(nand);
718     - if (!buf) {
719     - buf = spinand->oobbuf;
720     - column = nanddev_page_size(nand);
721     - }
722     }
723    
724     spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
725     @@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
726    
727     /*
728     * We need to use the RANDOM LOAD CACHE operation if there's
729     - * more than one iteration, because the LOAD operation resets
730     - * the cache to 0xff.
731     + * more than one iteration, because the LOAD operation might
732     + * reset the cache to 0xff.
733     */
734     if (nbytes) {
735     column = op.addr.val;
736     @@ -1016,11 +1014,11 @@ static int spinand_init(struct spinand_device *spinand)
737     for (i = 0; i < nand->memorg.ntargets; i++) {
738     ret = spinand_select_target(spinand, i);
739     if (ret)
740     - goto err_free_bufs;
741     + goto err_manuf_cleanup;
742    
743     ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
744     if (ret)
745     - goto err_free_bufs;
746     + goto err_manuf_cleanup;
747     }
748    
749     ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
750     diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
751     index 6d31ad799987..b7e272d6ae81 100644
752     --- a/drivers/pinctrl/intel/pinctrl-cherryview.c
753     +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
754     @@ -1524,7 +1524,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
755     .matches = {
756     DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
757     DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
758     - DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
759     + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
760     },
761     },
762     {
763     @@ -1532,7 +1532,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
764     .matches = {
765     DMI_MATCH(DMI_SYS_VENDOR, "HP"),
766     DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
767     - DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
768     + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
769     },
770     },
771     {
772     @@ -1540,7 +1540,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
773     .matches = {
774     DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
775     DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
776     - DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
777     + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
778     },
779     },
780     {
781     @@ -1548,7 +1548,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
782     .matches = {
783     DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
784     DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
785     - DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
786     + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
787     },
788     },
789     {}
790     diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
791     index aa8b58125568..ef4268cc6227 100644
792     --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
793     +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
794     @@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };
795     static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
796     .pins = h6_pins,
797     .npins = ARRAY_SIZE(h6_pins),
798     - .irq_banks = 3,
799     + .irq_banks = 4,
800     .irq_bank_map = h6_irq_bank_map,
801     .irq_read_needs_mux = true,
802     };
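The bump from 3 to 4 matches the four entries of h6_irq_bank_map shown in the context above; a trivial stand-alone check of that count:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };

int main(void)
{
	/* four IRQ-capable banks, so .irq_banks must be 4 rather than 3 */
	printf("%zu banks\n", ARRAY_SIZE(h6_irq_bank_map));
	return 0;
}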
803     diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
804     index 13b01351dd1c..41ef452c1fcf 100644
805     --- a/fs/debugfs/inode.c
806     +++ b/fs/debugfs/inode.c
807     @@ -787,6 +787,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
808     struct dentry *dentry = NULL, *trap;
809     struct name_snapshot old_name;
810    
811     + if (IS_ERR(old_dir))
812     + return old_dir;
813     + if (IS_ERR(new_dir))
814     + return new_dir;
815     + if (IS_ERR_OR_NULL(old_dentry))
816     + return old_dentry;
817     +
818     trap = lock_rename(new_dir, old_dir);
819     /* Source or destination directories don't exist? */
820     if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
821     diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
822     index 712f00995390..5508baa11bb6 100644
823     --- a/fs/ext4/fsync.c
824     +++ b/fs/ext4/fsync.c
825     @@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
826     goto out;
827     }
828    
829     - ret = file_write_and_wait_range(file, start, end);
830     - if (ret)
831     - return ret;
832     -
833     if (!journal) {
834     - struct writeback_control wbc = {
835     - .sync_mode = WB_SYNC_ALL
836     - };
837     -
838     - ret = ext4_write_inode(inode, &wbc);
839     + ret = __generic_file_fsync(file, start, end, datasync);
840     if (!ret)
841     ret = ext4_sync_parent(inode);
842     if (test_opt(inode->i_sb, BARRIER))
843     @@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
844     goto out;
845     }
846    
847     + ret = file_write_and_wait_range(file, start, end);
848     + if (ret)
849     + return ret;
850     /*
851     * data=writeback,ordered:
852     * The caller's filemap_fdatawrite()/wait will sync the data.
853     diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
854     index 336fd1a19cca..f30bf500888d 100644
855     --- a/include/linux/sunrpc/xprt.h
856     +++ b/include/linux/sunrpc/xprt.h
857     @@ -443,6 +443,11 @@ static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
858     return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
859     }
860    
861     +static inline int xprt_close_wait(struct rpc_xprt *xprt)
862     +{
863     + return test_bit(XPRT_CLOSE_WAIT, &xprt->state);
864     +}
865     +
866     static inline void xprt_set_bound(struct rpc_xprt *xprt)
867     {
868     test_and_set_bit(XPRT_BOUND, &xprt->state);
869     diff --git a/kernel/signal.c b/kernel/signal.c
870     index edc28afc9fb4..c187def3dba6 100644
871     --- a/kernel/signal.c
872     +++ b/kernel/signal.c
873     @@ -681,6 +681,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
874     return signr;
875     }
876    
877     +static int dequeue_synchronous_signal(siginfo_t *info)
878     +{
879     + struct task_struct *tsk = current;
880     + struct sigpending *pending = &tsk->pending;
881     + struct sigqueue *q, *sync = NULL;
882     +
883     + /*
884     + * Might a synchronous signal be in the queue?
885     + */
886     + if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
887     + return 0;
888     +
889     + /*
890     + * Return the first synchronous signal in the queue.
891     + */
892     + list_for_each_entry(q, &pending->list, list) {
893     + /* Synchronous signals have a postive si_code */
894     + if ((q->info.si_code > SI_USER) &&
895     + (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
896     + sync = q;
897     + goto next;
898     + }
899     + }
900     + return 0;
901     +next:
902     + /*
903     + * Check if there is another siginfo for the same signal.
904     + */
905     + list_for_each_entry_continue(q, &pending->list, list) {
906     + if (q->info.si_signo == sync->info.si_signo)
907     + goto still_pending;
908     + }
909     +
910     + sigdelset(&pending->signal, sync->info.si_signo);
911     + recalc_sigpending();
912     +still_pending:
913     + list_del_init(&sync->list);
914     + copy_siginfo(info, &sync->info);
915     + __sigqueue_free(sync);
916     + return info->si_signo;
917     +}
918     +
919     /*
920     * Tell a process that it has a new active signal..
921     *
922     @@ -2390,6 +2432,11 @@ relock:
923     goto relock;
924     }
925    
926     + /* Has this task already been marked for death? */
927     + ksig->info.si_signo = signr = SIGKILL;
928     + if (signal_group_exit(signal))
929     + goto fatal;
930     +
931     for (;;) {
932     struct k_sigaction *ka;
933    
934     @@ -2403,7 +2450,15 @@ relock:
935     goto relock;
936     }
937    
938     - signr = dequeue_signal(current, &current->blocked, &ksig->info);
939     + /*
940     + * Signals generated by the execution of an instruction
941     + * need to be delivered before any other pending signals
942     + * so that the instruction pointer in the signal stack
943     + * frame points to the faulting instruction.
944     + */
945     + signr = dequeue_synchronous_signal(&ksig->info);
946     + if (!signr)
947     + signr = dequeue_signal(current, &current->blocked, &ksig->info);
948    
949     if (!signr)
950     break; /* will return 0 */
951     @@ -2485,6 +2540,7 @@ relock:
952     continue;
953     }
954    
955     + fatal:
956     spin_unlock_irq(&sighand->siglock);
957    
958     /*
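The new helper treats a pending signal as synchronous when its bit is set in SYNCHRONOUS_MASK. As a stand-alone sketch of that bit test: sig_bit() below mirrors the kernel's sigmask() (signal n maps to bit n - 1), and the mask is an assumed list of the fault-style signals (SIGSEGV, SIGBUS, SIGILL, SIGTRAP, SIGFPE, SIGSYS), not the header definition itself:

#include <signal.h>
#include <stdio.h>

/* Mirrors the kernel's sigmask(): signal n maps to bit n - 1. */
#define sig_bit(sig) (1UL << ((sig) - 1))

/* Assumed contents of SYNCHRONOUS_MASK: the fault-style signals. */
#define SYNC_MASK (sig_bit(SIGSEGV) | sig_bit(SIGBUS) | sig_bit(SIGILL) | \
		   sig_bit(SIGTRAP) | sig_bit(SIGFPE) | sig_bit(SIGSYS))

int main(void)
{
	int sig;

	for (sig = 1; sig < 32; sig++)
		if (sig_bit(sig) & SYNC_MASK)
			printf("signal %d is dequeued ahead of other pending signals\n", sig);
	return 0;
}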
959     diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
960     index e696667da29a..a6aebbc848fe 100644
961     --- a/kernel/trace/trace_uprobe.c
962     +++ b/kernel/trace/trace_uprobe.c
963     @@ -5,7 +5,7 @@
964     * Copyright (C) IBM Corporation, 2010-2012
965     * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
966     */
967     -#define pr_fmt(fmt) "trace_kprobe: " fmt
968     +#define pr_fmt(fmt) "trace_uprobe: " fmt
969    
970     #include <linux/module.h>
971     #include <linux/uaccess.h>
972     diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
973     index 2f0d42f2f913..08690d06b7be 100644
974     --- a/net/batman-adv/hard-interface.c
975     +++ b/net/batman-adv/hard-interface.c
976     @@ -20,7 +20,6 @@
977     #include "main.h"
978    
979     #include <linux/atomic.h>
980     -#include <linux/bug.h>
981     #include <linux/byteorder/generic.h>
982     #include <linux/errno.h>
983     #include <linux/gfp.h>
984     @@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
985     parent_dev = __dev_get_by_index((struct net *)parent_net,
986     dev_get_iflink(net_dev));
987     /* if we got a NULL parent_dev there is something broken.. */
988     - if (WARN(!parent_dev, "Cannot find parent device"))
989     + if (!parent_dev) {
990     + pr_err("Cannot find parent device\n");
991     return false;
992     + }
993    
994     if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
995     return false;
996     diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
997     index 626ddca332db..3899fa6e201d 100644
998     --- a/net/batman-adv/soft-interface.c
999     +++ b/net/batman-adv/soft-interface.c
1000     @@ -221,6 +221,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
1001    
1002     netif_trans_update(soft_iface);
1003     vid = batadv_get_vid(skb, 0);
1004     +
1005     + skb_reset_mac_header(skb);
1006     ethhdr = eth_hdr(skb);
1007    
1008     switch (ntohs(ethhdr->h_proto)) {
1009     diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
1010     index 9a1c27c61de8..a127d1442116 100644
1011     --- a/net/ceph/messenger.c
1012     +++ b/net/ceph/messenger.c
1013     @@ -3240,9 +3240,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
1014     dout("con_keepalive %p\n", con);
1015     mutex_lock(&con->mutex);
1016     clear_standby(con);
1017     + con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
1018     mutex_unlock(&con->mutex);
1019     - if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
1020     - con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
1021     +
1022     + if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
1023     queue_con(con);
1024     }
1025     EXPORT_SYMBOL(ceph_con_keepalive);
1026     diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
1027     index 995a491f73a9..c7ccd7b71b15 100644
1028     --- a/net/mac80211/tx.c
1029     +++ b/net/mac80211/tx.c
1030     @@ -1913,9 +1913,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1031     int head_need, bool may_encrypt)
1032     {
1033     struct ieee80211_local *local = sdata->local;
1034     + struct ieee80211_hdr *hdr;
1035     + bool enc_tailroom;
1036     int tail_need = 0;
1037    
1038     - if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
1039     + hdr = (struct ieee80211_hdr *) skb->data;
1040     + enc_tailroom = may_encrypt &&
1041     + (sdata->crypto_tx_tailroom_needed_cnt ||
1042     + ieee80211_is_mgmt(hdr->frame_control));
1043     +
1044     + if (enc_tailroom) {
1045     tail_need = IEEE80211_ENCRYPT_TAILROOM;
1046     tail_need -= skb_tailroom(skb);
1047     tail_need = max_t(int, tail_need, 0);
1048     @@ -1923,8 +1930,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1049    
1050     if (skb_cloned(skb) &&
1051     (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
1052     - !skb_clone_writable(skb, ETH_HLEN) ||
1053     - (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
1054     + !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
1055     I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1056     else if (head_need || tail_need)
1057     I802_DEBUG_INC(local->tx_expand_skb_head);
1058     diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
1059     index 8ea2f5fadd96..1fc812ba9871 100644
1060     --- a/net/sunrpc/clnt.c
1061     +++ b/net/sunrpc/clnt.c
1062     @@ -1992,13 +1992,15 @@ call_transmit(struct rpc_task *task)
1063     static void
1064     call_transmit_status(struct rpc_task *task)
1065     {
1066     + struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1067     task->tk_action = call_status;
1068    
1069     /*
1070     * Common case: success. Force the compiler to put this
1071     - * test first.
1072     + * test first. Or, if any error and xprt_close_wait,
1073     + * release the xprt lock so the socket can close.
1074     */
1075     - if (task->tk_status == 0) {
1076     + if (task->tk_status == 0 || xprt_close_wait(xprt)) {
1077     xprt_end_transmit(task);
1078     rpc_task_force_reencode(task);
1079     return;
1080     diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
1081     index 8602a5f1b515..e8ad7ddf347a 100644
1082     --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
1083     +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
1084     @@ -563,6 +563,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
1085     DMA_TO_DEVICE);
1086     }
1087    
1088     +/* If the xdr_buf has more elements than the device can
1089     + * transmit in a single RDMA Send, then the reply will
1090     + * have to be copied into a bounce buffer.
1091     + */
1092     +static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
1093     + struct xdr_buf *xdr,
1094     + __be32 *wr_lst)
1095     +{
1096     + int elements;
1097     +
1098     + /* xdr->head */
1099     + elements = 1;
1100     +
1101     + /* xdr->pages */
1102     + if (!wr_lst) {
1103     + unsigned int remaining;
1104     + unsigned long pageoff;
1105     +
1106     + pageoff = xdr->page_base & ~PAGE_MASK;
1107     + remaining = xdr->page_len;
1108     + while (remaining) {
1109     + ++elements;
1110     + remaining -= min_t(u32, PAGE_SIZE - pageoff,
1111     + remaining);
1112     + pageoff = 0;
1113     + }
1114     + }
1115     +
1116     + /* xdr->tail */
1117     + if (xdr->tail[0].iov_len)
1118     + ++elements;
1119     +
1120     + /* assume 1 SGE is needed for the transport header */
1121     + return elements >= rdma->sc_max_send_sges;
1122     +}
1123     +
1124     +/* The device is not capable of sending the reply directly.
1125     + * Assemble the elements of @xdr into the transport header
1126     + * buffer.
1127     + */
1128     +static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
1129     + struct svc_rdma_send_ctxt *ctxt,
1130     + struct xdr_buf *xdr, __be32 *wr_lst)
1131     +{
1132     + unsigned char *dst, *tailbase;
1133     + unsigned int taillen;
1134     +
1135     + dst = ctxt->sc_xprt_buf;
1136     + dst += ctxt->sc_sges[0].length;
1137     +
1138     + memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
1139     + dst += xdr->head[0].iov_len;
1140     +
1141     + tailbase = xdr->tail[0].iov_base;
1142     + taillen = xdr->tail[0].iov_len;
1143     + if (wr_lst) {
1144     + u32 xdrpad;
1145     +
1146     + xdrpad = xdr_padsize(xdr->page_len);
1147     + if (taillen && xdrpad) {
1148     + tailbase += xdrpad;
1149     + taillen -= xdrpad;
1150     + }
1151     + } else {
1152     + unsigned int len, remaining;
1153     + unsigned long pageoff;
1154     + struct page **ppages;
1155     +
1156     + ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
1157     + pageoff = xdr->page_base & ~PAGE_MASK;
1158     + remaining = xdr->page_len;
1159     + while (remaining) {
1160     + len = min_t(u32, PAGE_SIZE - pageoff, remaining);
1161     +
1162     + memcpy(dst, page_address(*ppages), len);
1163     + remaining -= len;
1164     + dst += len;
1165     + pageoff = 0;
1166     + }
1167     + }
1168     +
1169     + if (taillen)
1170     + memcpy(dst, tailbase, taillen);
1171     +
1172     + ctxt->sc_sges[0].length += xdr->len;
1173     + ib_dma_sync_single_for_device(rdma->sc_pd->device,
1174     + ctxt->sc_sges[0].addr,
1175     + ctxt->sc_sges[0].length,
1176     + DMA_TO_DEVICE);
1177     +
1178     + return 0;
1179     +}
1180     +
1181     /* svc_rdma_map_reply_msg - Map the buffer holding RPC message
1182     * @rdma: controlling transport
1183     * @ctxt: send_ctxt for the Send WR
1184     @@ -585,8 +678,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
1185     u32 xdr_pad;
1186     int ret;
1187    
1188     - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
1189     - return -EIO;
1190     + if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
1191     + return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
1192     +
1193     + ++ctxt->sc_cur_sge_no;
1194     ret = svc_rdma_dma_map_buf(rdma, ctxt,
1195     xdr->head[0].iov_base,
1196     xdr->head[0].iov_len);
1197     @@ -617,8 +712,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
1198     while (remaining) {
1199     len = min_t(u32, PAGE_SIZE - page_off, remaining);
1200    
1201     - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
1202     - return -EIO;
1203     + ++ctxt->sc_cur_sge_no;
1204     ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
1205     page_off, len);
1206     if (ret < 0)
1207     @@ -632,8 +726,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
1208     len = xdr->tail[0].iov_len;
1209     tail:
1210     if (len) {
1211     - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
1212     - return -EIO;
1213     + ++ctxt->sc_cur_sge_no;
1214     ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
1215     if (ret < 0)
1216     return ret;
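svc_rdma_pull_up_needed() above estimates how many SGEs the reply would consume: one for xdr->head, one per page touched by the page list (the first page may start mid-page), one for a non-empty tail, plus one assumed for the transport header; only if that exceeds sc_max_send_sges is the reply pulled up into the bounce buffer. A stand-alone model of that count, with made-up example sizes:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stand-alone model of the element count in svc_rdma_pull_up_needed(). */
static int xdr_sge_estimate(unsigned long page_base, unsigned int page_len,
			    unsigned int tail_len)
{
	unsigned long pageoff = page_base & (PAGE_SIZE - 1);
	unsigned int remaining = page_len;
	int elements = 1;			/* xdr->head */

	while (remaining) {			/* one SGE per page touched */
		unsigned int chunk = PAGE_SIZE - pageoff;

		if (chunk > remaining)
			chunk = remaining;
		remaining -= chunk;
		pageoff = 0;
		elements++;
	}

	if (tail_len)				/* xdr->tail */
		elements++;

	return elements;
}

int main(void)
{
	/* 9000 bytes of page data starting 100 bytes into the first page
	 * spans three pages (3996 + 4096 + 908), so: head + 3 + tail = 5.
	 */
	printf("%d SGEs for the xdr_buf, plus one assumed for the transport header\n",
	       xdr_sge_estimate(100, 9000, 4));
	return 0;
}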
1217     diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
1218     index 2848cafd4a17..ce5c610b49c7 100644
1219     --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
1220     +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
1221     @@ -475,13 +475,12 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
1222    
1223     /* Qualify the transport resource defaults with the
1224     * capabilities of this particular device */
1225     - newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
1226     - /* transport hdr, head iovec, one page list entry, tail iovec */
1227     - if (newxprt->sc_max_send_sges < 4) {
1228     - pr_err("svcrdma: too few Send SGEs available (%d)\n",
1229     - newxprt->sc_max_send_sges);
1230     - goto errout;
1231     - }
1232     + /* Transport header, head iovec, tail iovec */
1233     + newxprt->sc_max_send_sges = 3;
1234     + /* Add one SGE per page list entry */
1235     + newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
1236     + if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
1237     + newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
1238     newxprt->sc_max_req_size = svcrdma_max_req_size;
1239     newxprt->sc_max_requests = svcrdma_max_requests;
1240     newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
1241     diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
1242     index 119a427d9b2b..6ea8036fcdbe 100644
1243     --- a/net/xfrm/xfrm_policy.c
1244     +++ b/net/xfrm/xfrm_policy.c
1245     @@ -1628,7 +1628,10 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1246     dst_copy_metrics(dst1, dst);
1247    
1248     if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1249     - __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
1250     + __u32 mark = 0;
1251     +
1252     + if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
1253     + mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
1254    
1255     family = xfrm[i]->props.family;
1256     dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
1257     diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
1258     index 566919838d5e..ab557827aac0 100644
1259     --- a/net/xfrm/xfrm_user.c
1260     +++ b/net/xfrm/xfrm_user.c
1261     @@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1262     if (!ut[i].family)
1263     ut[i].family = family;
1264    
1265     - if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
1266     - (ut[i].family != prev_family))
1267     - return -EINVAL;
1268     -
1269     + switch (ut[i].mode) {
1270     + case XFRM_MODE_TUNNEL:
1271     + case XFRM_MODE_BEET:
1272     + break;
1273     + default:
1274     + if (ut[i].family != prev_family)
1275     + return -EINVAL;
1276     + break;
1277     + }
1278     if (ut[i].mode >= XFRM_MODE_MAX)
1279     return -EINVAL;
1280    
1281     diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
1282     index 57d0d871dcf7..bb9988914a56 100644
1283     --- a/samples/mei/mei-amt-version.c
1284     +++ b/samples/mei/mei-amt-version.c
1285     @@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
1286    
1287     me->verbose = verbose;
1288    
1289     - me->fd = open("/dev/mei", O_RDWR);
1290     + me->fd = open("/dev/mei0", O_RDWR);
1291     if (me->fd == -1) {
1292     mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
1293     goto err;
1294     diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
1295     index 3040830d7797..84545666a09c 100644
1296     --- a/tools/iio/iio_generic_buffer.c
1297     +++ b/tools/iio/iio_generic_buffer.c
1298     @@ -330,7 +330,7 @@ static const struct option longopts[] = {
1299    
1300     int main(int argc, char **argv)
1301     {
1302     - unsigned long long num_loops = 2;
1303     + long long num_loops = 2;
1304     unsigned long timedelay = 1000000;
1305     unsigned long buf_len = 128;
1306