Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.20/0108-4.20.9-all-fixes.patch



Revision 3285
Mon Mar 4 10:35:55 2019 UTC by niro
File size: 51215 bytes
linux-4.20.9
1 niro 3285 diff --git a/Makefile b/Makefile
2     index d7d190781010..c9b831f5e873 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 20
9     -SUBLEVEL = 8
10     +SUBLEVEL = 9
11     EXTRAVERSION =
12     NAME = Shy Crocodile
13    
14     diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
15     index 47aa53ba6b92..559659b399d0 100644
16     --- a/arch/arm/boot/dts/da850.dtsi
17     +++ b/arch/arm/boot/dts/da850.dtsi
18     @@ -476,7 +476,7 @@
19     clocksource: timer@20000 {
20     compatible = "ti,da830-timer";
21     reg = <0x20000 0x1000>;
22     - interrupts = <12>, <13>;
23     + interrupts = <21>, <22>;
24     interrupt-names = "tint12", "tint34";
25     clocks = <&pll0_auxclk>;
26     };
27     diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
28     index 3b73813c6b04..23e8c93515d4 100644
29     --- a/arch/arm/mach-iop32x/n2100.c
30     +++ b/arch/arm/mach-iop32x/n2100.c
31     @@ -75,8 +75,7 @@ void __init n2100_map_io(void)
32     /*
33     * N2100 PCI.
34     */
35     -static int __init
36     -n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
37     +static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
38     {
39     int irq;
40    
41     diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
42     index 028e50c6383f..a32c3b631484 100644
43     --- a/arch/arm/mach-tango/pm.c
44     +++ b/arch/arm/mach-tango/pm.c
45     @@ -3,6 +3,7 @@
46     #include <linux/suspend.h>
47     #include <asm/suspend.h>
48     #include "smc.h"
49     +#include "pm.h"
50    
51     static int tango_pm_powerdown(unsigned long arg)
52     {
53     @@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
54     .valid = suspend_valid_only_mem,
55     };
56    
57     -static int __init tango_pm_init(void)
58     +void __init tango_pm_init(void)
59     {
60     suspend_set_ops(&tango_pm_ops);
61     - return 0;
62     }
63     -
64     -late_initcall(tango_pm_init);
65     diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
66     new file mode 100644
67     index 000000000000..35ea705a0ee2
68     --- /dev/null
69     +++ b/arch/arm/mach-tango/pm.h
70     @@ -0,0 +1,7 @@
71     +/* SPDX-License-Identifier: GPL-2.0 */
72     +
73     +#ifdef CONFIG_SUSPEND
74     +void __init tango_pm_init(void);
75     +#else
76     +#define tango_pm_init NULL
77     +#endif
78     diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c
79     index 677dd7b5efd9..824f90737b04 100644
80     --- a/arch/arm/mach-tango/setup.c
81     +++ b/arch/arm/mach-tango/setup.c
82     @@ -2,6 +2,7 @@
83     #include <asm/mach/arch.h>
84     #include <asm/hardware/cache-l2x0.h>
85     #include "smc.h"
86     +#include "pm.h"
87    
88     static void tango_l2c_write(unsigned long val, unsigned int reg)
89     {
90     @@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
91     .dt_compat = tango_dt_compat,
92     .l2c_aux_mask = ~0,
93     .l2c_write_sec = tango_l2c_write,
94     + .init_late = tango_pm_init,
95     MACHINE_END
96     diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
97     index 2152b7ba65fb..cc8dbea0911f 100644
98     --- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
99     +++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
100     @@ -90,11 +90,11 @@
101     interrupts = <0>;
102     };
103    
104     - axi_i2c: i2c@10A00000 {
105     + axi_i2c: i2c@10a00000 {
106     compatible = "xlnx,xps-iic-2.00.a";
107     interrupt-parent = <&axi_intc>;
108     interrupts = <4>;
109     - reg = < 0x10A00000 0x10000 >;
110     + reg = < 0x10a00000 0x10000 >;
111     clocks = <&ext>;
112     xlnx,clk-freq = <0x5f5e100>;
113     xlnx,family = "Artix7";
114     @@ -106,9 +106,9 @@
115     #address-cells = <1>;
116     #size-cells = <0>;
117    
118     - ad7420@4B {
119     + ad7420@4b {
120     compatible = "adi,adt7420";
121     - reg = <0x4B>;
122     + reg = <0x4b>;
123     };
124     } ;
125     };
126     diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
127     index 8f5bd04f320a..7f3f136572de 100644
128     --- a/arch/mips/kernel/mips-cm.c
129     +++ b/arch/mips/kernel/mips-cm.c
130     @@ -457,5 +457,5 @@ void mips_cm_error_report(void)
131     }
132    
133     /* reprime cause register */
134     - write_gcr_error_cause(0);
135     + write_gcr_error_cause(cm_error);
136     }
137     diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c
138     index a60715e11306..b26892ce871c 100644
139     --- a/arch/mips/loongson64/common/reset.c
140     +++ b/arch/mips/loongson64/common/reset.c
141     @@ -59,7 +59,12 @@ static void loongson_poweroff(void)
142     {
143     #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
144     mach_prepare_shutdown();
145     - unreachable();
146     +
147     + /*
148     + * It needs a wait loop here, but mips/kernel/reset.c already calls
149     + * a generic delay loop, machine_hang(), so simply return.
150     + */
151     + return;
152     #else
153     void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
154    
155     diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
156     index 5017d5843c5a..fc29b85cfa92 100644
157     --- a/arch/mips/pci/pci-octeon.c
158     +++ b/arch/mips/pci/pci-octeon.c
159     @@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void)
160     if (octeon_has_feature(OCTEON_FEATURE_PCIE))
161     return 0;
162    
163     + if (!octeon_is_pci_host()) {
164     + pr_notice("Not in host mode, PCI Controller not initialized\n");
165     + return 0;
166     + }
167     +
168     /* Point pcibios_map_irq() to the PCI version of it */
169     octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
170    
171     @@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void)
172     else
173     octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
174    
175     - if (!octeon_is_pci_host()) {
176     - pr_notice("Not in host mode, PCI Controller not initialized\n");
177     - return 0;
178     - }
179     -
180     /* PCI I/O and PCI MEM values */
181     set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
182     ioport_resource.start = 0;
183     diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
184     index 58a0315ad743..67e44466d5a4 100644
185     --- a/arch/mips/vdso/Makefile
186     +++ b/arch/mips/vdso/Makefile
187     @@ -8,6 +8,7 @@ ccflags-vdso := \
188     $(filter -E%,$(KBUILD_CFLAGS)) \
189     $(filter -mmicromips,$(KBUILD_CFLAGS)) \
190     $(filter -march=%,$(KBUILD_CFLAGS)) \
191     + $(filter -m%-float,$(KBUILD_CFLAGS)) \
192     -D__VDSO__
193    
194     ifdef CONFIG_CC_IS_CLANG
195     @@ -128,7 +129,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
196     $(call cmd,force_checksrc)
197     $(call if_changed_rule,cc_o_c)
198    
199     -$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
200     +$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
201     $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
202     $(call if_changed_dep,cpp_lds_S)
203    
204     @@ -168,7 +169,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
205     $(call cmd,force_checksrc)
206     $(call if_changed_rule,cc_o_c)
207    
208     -$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
209     +$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
210     $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
211     $(call if_changed_dep,cpp_lds_S)
212    
213     diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
214     index 6c99e846a8c9..db706ffc4ca9 100644
215     --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
216     +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
217     @@ -1258,21 +1258,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
218    
219     #define pmd_move_must_withdraw pmd_move_must_withdraw
220     struct spinlock;
221     -static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
222     - struct spinlock *old_pmd_ptl,
223     - struct vm_area_struct *vma)
224     -{
225     - if (radix_enabled())
226     - return false;
227     - /*
228     - * Archs like ppc64 use pgtable to store per pmd
229     - * specific information. So when we switch the pmd,
230     - * we should also withdraw and deposit the pgtable
231     - */
232     - return true;
233     -}
234     -
235     -
236     +extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
237     + struct spinlock *old_pmd_ptl,
238     + struct vm_area_struct *vma);
239     +/*
240     + * Hash translation mode use the deposited table to store hash pte
241     + * slot information.
242     + */
243     #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
244     static inline bool arch_needs_pgtable_deposit(void)
245     {
246     diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
247     index 9f93c9f985c5..30d89a37fe62 100644
248     --- a/arch/powerpc/mm/pgtable-book3s64.c
249     +++ b/arch/powerpc/mm/pgtable-book3s64.c
250     @@ -482,3 +482,25 @@ void arch_report_meminfo(struct seq_file *m)
251     atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
252     }
253     #endif /* CONFIG_PROC_FS */
254     +
255     +/*
256     + * For hash translation mode, we use the deposited table to store hash slot
257     + * information and they are stored at PTRS_PER_PMD offset from related pmd
258     + * location. Hence a pmd move requires deposit and withdraw.
259     + *
260     + * For radix translation with split pmd ptl, we store the deposited table in the
261     + * pmd page. Hence if we have different pmd page we need to withdraw during pmd
262     + * move.
263     + *
264     + * With hash we use deposited table always irrespective of anon or not.
265     + * With radix we use deposited table only for anonymous mapping.
266     + */
267     +int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
268     + struct spinlock *old_pmd_ptl,
269     + struct vm_area_struct *vma)
270     +{
271     + if (radix_enabled())
272     + return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
273     +
274     + return true;
275     +}
276     diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
277     index 7d6457ab5d34..bba281b1fe1b 100644
278     --- a/arch/powerpc/platforms/pseries/papr_scm.c
279     +++ b/arch/powerpc/platforms/pseries/papr_scm.c
280     @@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
281     {
282     unsigned long ret[PLPAR_HCALL_BUFSIZE];
283     uint64_t rc, token;
284     + uint64_t saved = 0;
285    
286     /*
287     * When the hypervisor cannot map all the requested memory in a single
288     @@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
289     rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
290     p->blocks, BIND_ANY_ADDR, token);
291     token = ret[0];
292     + if (!saved)
293     + saved = ret[1];
294     cond_resched();
295     } while (rc == H_BUSY);
296    
297     @@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
298     return -ENXIO;
299     }
300    
301     - p->bound_addr = ret[1];
302     + p->bound_addr = saved;
303    
304     dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
305    
306     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
307     index b8c3f9e6af89..adf28788cab5 100644
308     --- a/drivers/ata/libata-core.c
309     +++ b/drivers/ata/libata-core.c
310     @@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
311     { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
312     { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
313     { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
314     + { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
315    
316     /* devices that don't properly handle queued TRIM commands */
317     { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
318     diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
319     index 472c88ae1c0f..92f843eaf1e0 100644
320     --- a/drivers/firmware/arm_scmi/bus.c
321     +++ b/drivers/firmware/arm_scmi/bus.c
322     @@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver)
323     }
324     EXPORT_SYMBOL_GPL(scmi_driver_unregister);
325    
326     +static void scmi_device_release(struct device *dev)
327     +{
328     + kfree(to_scmi_dev(dev));
329     +}
330     +
331     struct scmi_device *
332     scmi_device_create(struct device_node *np, struct device *parent, int protocol)
333     {
334     @@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
335     scmi_dev->dev.parent = parent;
336     scmi_dev->dev.of_node = np;
337     scmi_dev->dev.bus = &scmi_bus_type;
338     + scmi_dev->dev.release = scmi_device_release;
339     dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
340    
341     retval = device_register(&scmi_dev->dev);
342     @@ -156,9 +162,8 @@ free_mem:
343     void scmi_device_destroy(struct scmi_device *scmi_dev)
344     {
345     scmi_handle_put(scmi_dev->handle);
346     - device_unregister(&scmi_dev->dev);
347     ida_simple_remove(&scmi_bus_id, scmi_dev->id);
348     - kfree(scmi_dev);
349     + device_unregister(&scmi_dev->dev);
350     }
351    
352     void scmi_set_handle(struct scmi_device *scmi_dev)
353     diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
354     index dd18cb710391..0b945d0fd732 100644
355     --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
356     +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
357     @@ -1005,6 +1005,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
358     break;
359     case amd_pp_dpp_clock:
360     pclk_vol_table = pinfo->vdd_dep_on_dppclk;
361     + break;
362     default:
363     return -EINVAL;
364     }
365     diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
366     index 02db9ac82d7a..a3104d79b48f 100644
367     --- a/drivers/gpu/drm/drm_modes.c
368     +++ b/drivers/gpu/drm/drm_modes.c
369     @@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
370     if (mode->hsync)
371     return mode->hsync;
372    
373     - if (mode->htotal < 0)
374     + if (mode->htotal <= 0)
375     return 0;
376    
377     calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
378     diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
379     index 5186cd7075f9..372f30d286e3 100644
380     --- a/drivers/gpu/drm/i915/intel_ddi.c
381     +++ b/drivers/gpu/drm/i915/intel_ddi.c
382     @@ -1085,7 +1085,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
383     return DDI_CLK_SEL_TBT_810;
384     default:
385     MISSING_CASE(clock);
386     - break;
387     + return DDI_CLK_SEL_NONE;
388     }
389     case DPLL_ID_ICL_MGPLL1:
390     case DPLL_ID_ICL_MGPLL2:
391     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
392     index c9878dd1f7cd..a8293a7bab8f 100644
393     --- a/drivers/gpu/drm/i915/intel_display.c
394     +++ b/drivers/gpu/drm/i915/intel_display.c
395     @@ -15684,15 +15684,44 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
396     }
397     }
398    
399     +static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
400     +{
401     + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
402     +
403     + /*
404     + * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
405     + * the hardware when a high res displays plugged in. DPLL P
406     + * divider is zero, and the pipe timings are bonkers. We'll
407     + * try to disable everything in that case.
408     + *
409     + * FIXME would be nice to be able to sanitize this state
410     + * without several WARNs, but for now let's take the easy
411     + * road.
412     + */
413     + return IS_GEN6(dev_priv) &&
414     + crtc_state->base.active &&
415     + crtc_state->shared_dpll &&
416     + crtc_state->port_clock == 0;
417     +}
418     +
419     static void intel_sanitize_encoder(struct intel_encoder *encoder)
420     {
421     struct intel_connector *connector;
422     + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
423     + struct intel_crtc_state *crtc_state = crtc ?
424     + to_intel_crtc_state(crtc->base.state) : NULL;
425    
426     /* We need to check both for a crtc link (meaning that the
427     * encoder is active and trying to read from a pipe) and the
428     * pipe itself being active. */
429     - bool has_active_crtc = encoder->base.crtc &&
430     - to_intel_crtc(encoder->base.crtc)->active;
431     + bool has_active_crtc = crtc_state &&
432     + crtc_state->base.active;
433     +
434     + if (crtc_state && has_bogus_dpll_config(crtc_state)) {
435     + DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
436     + pipe_name(crtc->pipe));
437     + has_active_crtc = false;
438     + }
439    
440     connector = intel_encoder_find_connector(encoder);
441     if (connector && !has_active_crtc) {
442     @@ -15703,15 +15732,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
443     /* Connector is active, but has no active pipe. This is
444     * fallout from our resume register restoring. Disable
445     * the encoder manually again. */
446     - if (encoder->base.crtc) {
447     - struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
448     + if (crtc_state) {
449     + struct drm_encoder *best_encoder;
450    
451     DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
452     encoder->base.base.id,
453     encoder->base.name);
454     - encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
455     +
456     + /* avoid oopsing in case the hooks consult best_encoder */
457     + best_encoder = connector->base.state->best_encoder;
458     + connector->base.state->best_encoder = &encoder->base;
459     +
460     + if (encoder->disable)
461     + encoder->disable(encoder, crtc_state,
462     + connector->base.state);
463     if (encoder->post_disable)
464     - encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
465     + encoder->post_disable(encoder, crtc_state,
466     + connector->base.state);
467     +
468     + connector->base.state->best_encoder = best_encoder;
469     }
470     encoder->base.crtc = NULL;
471    
472     diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
473     index 37f93022a106..c0351abf83a3 100644
474     --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
475     +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
476     @@ -1,17 +1,8 @@
477     -//SPDX-License-Identifier: GPL-2.0+
478     +// SPDX-License-Identifier: GPL-2.0
479     /*
480     * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
481     * Author:
482     * Sandy Huang <hjc@rock-chips.com>
483     - *
484     - * This software is licensed under the terms of the GNU General Public
485     - * License version 2, as published by the Free Software Foundation, and
486     - * may be copied, distributed, and modified under those terms.
487     - *
488     - * This program is distributed in the hope that it will be useful,
489     - * but WITHOUT ANY WARRANTY; without even the implied warranty of
490     - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
491     - * GNU General Public License for more details.
492     */
493    
494     #include <drm/drmP.h>
495     diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
496     index 38b52e63b2b0..27b9635124bc 100644
497     --- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
498     +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
499     @@ -1,17 +1,8 @@
500     -//SPDX-License-Identifier: GPL-2.0+
501     +/* SPDX-License-Identifier: GPL-2.0 */
502     /*
503     * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
504     * Author:
505     * Sandy Huang <hjc@rock-chips.com>
506     - *
507     - * This software is licensed under the terms of the GNU General Public
508     - * License version 2, as published by the Free Software Foundation, and
509     - * may be copied, distributed, and modified under those terms.
510     - *
511     - * This program is distributed in the hope that it will be useful,
512     - * but WITHOUT ANY WARRANTY; without even the implied warranty of
513     - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
514     - * GNU General Public License for more details.
515     */
516    
517     #ifdef CONFIG_ROCKCHIP_RGB
518     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
519     index d7a2dfb8ee9b..ddf80935c4b9 100644
520     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
521     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
522     @@ -629,13 +629,16 @@ out_fixup:
523     static int vmw_dma_masks(struct vmw_private *dev_priv)
524     {
525     struct drm_device *dev = dev_priv->dev;
526     + int ret = 0;
527    
528     - if (intel_iommu_enabled &&
529     + ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
530     + if (dev_priv->map_mode != vmw_dma_phys &&
531     (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
532     DRM_INFO("Restricting DMA addresses to 44 bits.\n");
533     - return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
534     + return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
535     }
536     - return 0;
537     +
538     + return ret;
539     }
540     #else
541     static int vmw_dma_masks(struct vmw_private *dev_priv)
542     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
543     index f2d13a72c05d..88b8178d4687 100644
544     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
545     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
546     @@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
547     *p_fence = NULL;
548     }
549    
550     - return 0;
551     + return ret;
552     }
553    
554     /**
555     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
556     index dca04d4246ea..d59125c55dc2 100644
557     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
558     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
559     @@ -2592,8 +2592,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
560     user_fence_rep)
561     {
562     struct vmw_fence_obj *fence = NULL;
563     - uint32_t handle;
564     - int ret;
565     + uint32_t handle = 0;
566     + int ret = 0;
567    
568     if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
569     out_fence)
570     diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
571     index 031d568b4972..4e339cfd0c54 100644
572     --- a/drivers/iio/adc/axp288_adc.c
573     +++ b/drivers/iio/adc/axp288_adc.c
574     @@ -27,9 +27,18 @@
575     #include <linux/iio/machine.h>
576     #include <linux/iio/driver.h>
577    
578     -#define AXP288_ADC_EN_MASK 0xF1
579     -#define AXP288_ADC_TS_PIN_GPADC 0xF2
580     -#define AXP288_ADC_TS_PIN_ON 0xF3
581     +/*
582     + * This mask enables all ADCs except for the battery temp-sensor (TS), that is
583     + * left as-is to avoid breaking charging on devices without a temp-sensor.
584     + */
585     +#define AXP288_ADC_EN_MASK 0xF0
586     +#define AXP288_ADC_TS_ENABLE 0x01
587     +
588     +#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
589     +#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
590     +#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
591     +#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
592     +#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
593    
594     enum axp288_adc_id {
595     AXP288_ADC_TS,
596     @@ -44,6 +53,7 @@ enum axp288_adc_id {
597     struct axp288_adc_info {
598     int irq;
599     struct regmap *regmap;
600     + bool ts_enabled;
601     };
602    
603     static const struct iio_chan_spec axp288_adc_channels[] = {
604     @@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
605     return IIO_VAL_INT;
606     }
607    
608     -static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
609     - unsigned long address)
610     +/*
611     + * The current-source used for the battery temp-sensor (TS) is shared
612     + * with the GPADC. For proper fuel-gauge and charger operation the TS
613     + * current-source needs to be permanently on. But to read the GPADC we
614     + * need to temporary switch the TS current-source to ondemand, so that
615     + * the GPADC can use it, otherwise we will always read an all 0 value.
616     + */
617     +static int axp288_adc_set_ts(struct axp288_adc_info *info,
618     + unsigned int mode, unsigned long address)
619     {
620     int ret;
621    
622     - /* channels other than GPADC do not need to switch TS pin */
623     + /* No need to switch the current-source if the TS pin is disabled */
624     + if (!info->ts_enabled)
625     + return 0;
626     +
627     + /* Channels other than GPADC do not need the current source */
628     if (address != AXP288_GP_ADC_H)
629     return 0;
630    
631     - ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
632     + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
633     + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode);
634     if (ret)
635     return ret;
636    
637     /* When switching to the GPADC pin give things some time to settle */
638     - if (mode == AXP288_ADC_TS_PIN_GPADC)
639     + if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND)
640     usleep_range(6000, 10000);
641    
642     return 0;
643     @@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
644     mutex_lock(&indio_dev->mlock);
645     switch (mask) {
646     case IIO_CHAN_INFO_RAW:
647     - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
648     + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND,
649     chan->address)) {
650     dev_err(&indio_dev->dev, "GPADC mode\n");
651     ret = -EINVAL;
652     break;
653     }
654     ret = axp288_adc_read_channel(val, chan->address, info->regmap);
655     - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
656     + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON,
657     chan->address))
658     dev_err(&indio_dev->dev, "TS pin restore\n");
659     break;
660     @@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
661     return ret;
662     }
663    
664     -static int axp288_adc_set_state(struct regmap *regmap)
665     +static int axp288_adc_initialize(struct axp288_adc_info *info)
666     {
667     - /* ADC should be always enabled for internal FG to function */
668     - if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
669     - return -EIO;
670     + int ret, adc_enable_val;
671     +
672     + /*
673     + * Determine if the TS pin is enabled and set the TS current-source
674     + * accordingly.
675     + */
676     + ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val);
677     + if (ret)
678     + return ret;
679     +
680     + if (adc_enable_val & AXP288_ADC_TS_ENABLE) {
681     + info->ts_enabled = true;
682     + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
683     + AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
684     + AXP288_ADC_TS_CURRENT_ON);
685     + } else {
686     + info->ts_enabled = false;
687     + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
688     + AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
689     + AXP288_ADC_TS_CURRENT_OFF);
690     + }
691     + if (ret)
692     + return ret;
693    
694     - return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
695     + /* Turn on the ADC for all channels except TS, leave TS as is */
696     + return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
697     + AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
698     }
699    
700     static const struct iio_info axp288_adc_iio_info = {
701     @@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
702     * Set ADC to enabled state at all time, including system suspend.
703     * otherwise internal fuel gauge functionality may be affected.
704     */
705     - ret = axp288_adc_set_state(axp20x->regmap);
706     + ret = axp288_adc_initialize(info);
707     if (ret) {
708     dev_err(&pdev->dev, "unable to enable ADC device\n");
709     return ret;
710     diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
711     index 184d686ebd99..8b4568edd5cb 100644
712     --- a/drivers/iio/adc/ti-ads8688.c
713     +++ b/drivers/iio/adc/ti-ads8688.c
714     @@ -41,6 +41,7 @@
715    
716     #define ADS8688_VREF_MV 4096
717     #define ADS8688_REALBITS 16
718     +#define ADS8688_MAX_CHANNELS 8
719    
720     /*
721     * enum ads8688_range - ADS8688 reference voltage range
722     @@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
723     {
724     struct iio_poll_func *pf = p;
725     struct iio_dev *indio_dev = pf->indio_dev;
726     - u16 buffer[8];
727     + u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
728     int i, j = 0;
729    
730     for (i = 0; i < indio_dev->masklength; i++) {
731     diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
732     index a406ad31b096..3a20cb5d9bff 100644
733     --- a/drivers/iio/chemical/atlas-ph-sensor.c
734     +++ b/drivers/iio/chemical/atlas-ph-sensor.c
735     @@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
736     case IIO_CHAN_INFO_SCALE:
737     switch (chan->type) {
738     case IIO_TEMP:
739     - *val = 1; /* 0.01 */
740     - *val2 = 100;
741     - break;
742     + *val = 10;
743     + return IIO_VAL_INT;
744     case IIO_PH:
745     *val = 1; /* 0.001 */
746     *val2 = 1000;
747     @@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
748     int val, int val2, long mask)
749     {
750     struct atlas_data *data = iio_priv(indio_dev);
751     - __be32 reg = cpu_to_be32(val);
752     + __be32 reg = cpu_to_be32(val / 10);
753    
754     if (val2 != 0 || val < 0 || val > 20000)
755     return -EINVAL;
756     diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
757     index 23739a60517f..bb1ee9834a02 100644
758     --- a/drivers/misc/mei/hw-me-regs.h
759     +++ b/drivers/misc/mei/hw-me-regs.h
760     @@ -139,6 +139,8 @@
761     #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
762     #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
763    
764     +#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
765     +
766     /*
767     * MEI HW Section
768     */
769     diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
770     index c8e21c894a5f..4299658d48d6 100644
771     --- a/drivers/misc/mei/pci-me.c
772     +++ b/drivers/misc/mei/pci-me.c
773     @@ -105,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
774     {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
775     {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
776    
777     + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
778     +
779     /* required last entry */
780     {0, }
781     };
782     diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
783     index 02a9aba85368..17b6398cf66c 100644
784     --- a/drivers/misc/mic/vop/vop_main.c
785     +++ b/drivers/misc/mic/vop/vop_main.c
786     @@ -568,6 +568,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
787     int ret = -1;
788    
789     if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
790     + struct device *dev = get_device(&vdev->vdev.dev);
791     +
792     dev_dbg(&vpdev->dev,
793     "%s %d config_change %d type %d vdev %p\n",
794     __func__, __LINE__,
795     @@ -579,7 +581,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
796     iowrite8(-1, &dc->h2c_vdev_db);
797     if (status & VIRTIO_CONFIG_S_DRIVER_OK)
798     wait_for_completion(&vdev->reset_done);
799     - put_device(&vdev->vdev.dev);
800     + put_device(dev);
801     iowrite8(1, &dc->guest_ack);
802     dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
803     __func__, __LINE__, ioread8(&dc->guest_ack));
804     diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
805     index 6c3591cdf855..a3c6c773d9dc 100644
806     --- a/drivers/misc/vexpress-syscfg.c
807     +++ b/drivers/misc/vexpress-syscfg.c
808     @@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
809     int tries;
810     long timeout;
811    
812     - if (WARN_ON(index > func->num_templates))
813     + if (WARN_ON(index >= func->num_templates))
814     return -EINVAL;
815    
816     command = readl(syscfg->base + SYS_CFGCTRL);
817     diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
818     index 99c460facd5e..0bbb23b014f1 100644
819     --- a/drivers/mtd/mtdpart.c
820     +++ b/drivers/mtd/mtdpart.c
821     @@ -470,6 +470,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
822     /* let's register it anyway to preserve ordering */
823     slave->offset = 0;
824     slave->mtd.size = 0;
825     +
826     + /* Initialize ->erasesize to make add_mtd_device() happy. */
827     + slave->mtd.erasesize = parent->erasesize;
828     +
829     printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
830     part->name);
831     goto out_register;
832     diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
833     index bd4cfac6b5aa..a4768df5083f 100644
834     --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
835     +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
836     @@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this)
837    
838     /*
839     * Reset BCH here, too. We got failures otherwise :(
840     - * See later BCH reset for explanation of MX23 handling
841     + * See later BCH reset for explanation of MX23 and MX28 handling
842     */
843     - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
844     + ret = gpmi_reset_block(r->bch_regs,
845     + GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
846     if (ret)
847     goto err_out;
848    
849     @@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this)
850     /*
851     * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
852     * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
853     - * On the other hand, the MX28 needs the reset, because one case has been
854     - * seen where the BCH produced ECC errors constantly after 10000
855     - * consecutive reboots. The latter case has not been seen on the MX23
856     - * yet, still we don't know if it could happen there as well.
857     + * and MX28.
858     */
859     - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
860     + ret = gpmi_reset_block(r->bch_regs,
861     + GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
862     if (ret)
863     goto err_out;
864    
865     diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
866     index 30f83649c481..8c7bf91ce4e1 100644
867     --- a/drivers/mtd/nand/spi/core.c
868     +++ b/drivers/mtd/nand/spi/core.c
869     @@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
870     struct nand_device *nand = spinand_to_nand(spinand);
871     struct mtd_info *mtd = nanddev_to_mtd(nand);
872     struct nand_page_io_req adjreq = *req;
873     - unsigned int nbytes = 0;
874     - void *buf = NULL;
875     + void *buf = spinand->databuf;
876     + unsigned int nbytes;
877     u16 column = 0;
878     int ret;
879    
880     - memset(spinand->databuf, 0xff,
881     - nanddev_page_size(nand) +
882     - nanddev_per_page_oobsize(nand));
883     + /*
884     + * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
885     + * the cache content to 0xFF (depends on vendor implementation), so we
886     + * must fill the page cache entirely even if we only want to program
887     + * the data portion of the page, otherwise we might corrupt the BBM or
888     + * user data previously programmed in OOB area.
889     + */
890     + nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
891     + memset(spinand->databuf, 0xff, nbytes);
892     + adjreq.dataoffs = 0;
893     + adjreq.datalen = nanddev_page_size(nand);
894     + adjreq.databuf.out = spinand->databuf;
895     + adjreq.ooblen = nanddev_per_page_oobsize(nand);
896     + adjreq.ooboffs = 0;
897     + adjreq.oobbuf.out = spinand->oobbuf;
898    
899     - if (req->datalen) {
900     + if (req->datalen)
901     memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
902     req->datalen);
903     - adjreq.dataoffs = 0;
904     - adjreq.datalen = nanddev_page_size(nand);
905     - adjreq.databuf.out = spinand->databuf;
906     - nbytes = adjreq.datalen;
907     - buf = spinand->databuf;
908     - }
909    
910     if (req->ooblen) {
911     if (req->mode == MTD_OPS_AUTO_OOB)
912     @@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
913     else
914     memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
915     req->ooblen);
916     -
917     - adjreq.ooblen = nanddev_per_page_oobsize(nand);
918     - adjreq.ooboffs = 0;
919     - nbytes += nanddev_per_page_oobsize(nand);
920     - if (!buf) {
921     - buf = spinand->oobbuf;
922     - column = nanddev_page_size(nand);
923     - }
924     }
925    
926     spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
927     @@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
928    
929     /*
930     * We need to use the RANDOM LOAD CACHE operation if there's
931     - * more than one iteration, because the LOAD operation resets
932     - * the cache to 0xff.
933     + * more than one iteration, because the LOAD operation might
934     + * reset the cache to 0xff.
935     */
936     if (nbytes) {
937     column = op.addr.val;
938     @@ -1016,11 +1014,11 @@ static int spinand_init(struct spinand_device *spinand)
939     for (i = 0; i < nand->memorg.ntargets; i++) {
940     ret = spinand_select_target(spinand, i);
941     if (ret)
942     - goto err_free_bufs;
943     + goto err_manuf_cleanup;
944    
945     ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
946     if (ret)
947     - goto err_free_bufs;
948     + goto err_manuf_cleanup;
949     }
950    
951     ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
952     diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
953     index 9b0f4b9ef482..8efe8ea45602 100644
954     --- a/drivers/pinctrl/intel/pinctrl-cherryview.c
955     +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
956     @@ -1507,7 +1507,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
957     .matches = {
958     DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
959     DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
960     - DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
961     + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
962     },
963     },
964     {
965     @@ -1515,7 +1515,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
966     .matches = {
967     DMI_MATCH(DMI_SYS_VENDOR, "HP"),
968     DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
969     - DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
970     + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
971     },
972     },
973     {
974     @@ -1523,7 +1523,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
975     .matches = {
976     DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
977     DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
978     - DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
979     + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
980     },
981     },
982     {
983     @@ -1531,7 +1531,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
984     .matches = {
985     DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
986     DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
987     - DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
988     + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
989     },
990     },
991     {}
992     diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
993     index aa8b58125568..ef4268cc6227 100644
994     --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
995     +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
996     @@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };
997     static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
998     .pins = h6_pins,
999     .npins = ARRAY_SIZE(h6_pins),
1000     - .irq_banks = 3,
1001     + .irq_banks = 4,
1002     .irq_bank_map = h6_irq_bank_map,
1003     .irq_read_needs_mux = true,
1004     };
1005     diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
1006     index 13b01351dd1c..41ef452c1fcf 100644
1007     --- a/fs/debugfs/inode.c
1008     +++ b/fs/debugfs/inode.c
1009     @@ -787,6 +787,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
1010     struct dentry *dentry = NULL, *trap;
1011     struct name_snapshot old_name;
1012    
1013     + if (IS_ERR(old_dir))
1014     + return old_dir;
1015     + if (IS_ERR(new_dir))
1016     + return new_dir;
1017     + if (IS_ERR_OR_NULL(old_dentry))
1018     + return old_dentry;
1019     +
1020     trap = lock_rename(new_dir, old_dir);
1021     /* Source or destination directories don't exist? */
1022     if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
1023     diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
1024     index 712f00995390..5508baa11bb6 100644
1025     --- a/fs/ext4/fsync.c
1026     +++ b/fs/ext4/fsync.c
1027     @@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1028     goto out;
1029     }
1030    
1031     - ret = file_write_and_wait_range(file, start, end);
1032     - if (ret)
1033     - return ret;
1034     -
1035     if (!journal) {
1036     - struct writeback_control wbc = {
1037     - .sync_mode = WB_SYNC_ALL
1038     - };
1039     -
1040     - ret = ext4_write_inode(inode, &wbc);
1041     + ret = __generic_file_fsync(file, start, end, datasync);
1042     if (!ret)
1043     ret = ext4_sync_parent(inode);
1044     if (test_opt(inode->i_sb, BARRIER))
1045     @@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1046     goto out;
1047     }
1048    
1049     + ret = file_write_and_wait_range(file, start, end);
1050     + if (ret)
1051     + return ret;
1052     /*
1053     * data=writeback,ordered:
1054     * The caller's filemap_fdatawrite()/wait will sync the data.
1055     diff --git a/kernel/signal.c b/kernel/signal.c
1056     index 9a32bc2088c9..cf4cf68c3ea8 100644
1057     --- a/kernel/signal.c
1058     +++ b/kernel/signal.c
1059     @@ -688,6 +688,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
1060     }
1061     EXPORT_SYMBOL_GPL(dequeue_signal);
1062    
1063     +static int dequeue_synchronous_signal(kernel_siginfo_t *info)
1064     +{
1065     + struct task_struct *tsk = current;
1066     + struct sigpending *pending = &tsk->pending;
1067     + struct sigqueue *q, *sync = NULL;
1068     +
1069     + /*
1070     + * Might a synchronous signal be in the queue?
1071     + */
1072     + if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
1073     + return 0;
1074     +
1075     + /*
1076     + * Return the first synchronous signal in the queue.
1077     + */
1078     + list_for_each_entry(q, &pending->list, list) {
1079     + /* Synchronous signals have a postive si_code */
1080     + if ((q->info.si_code > SI_USER) &&
1081     + (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
1082     + sync = q;
1083     + goto next;
1084     + }
1085     + }
1086     + return 0;
1087     +next:
1088     + /*
1089     + * Check if there is another siginfo for the same signal.
1090     + */
1091     + list_for_each_entry_continue(q, &pending->list, list) {
1092     + if (q->info.si_signo == sync->info.si_signo)
1093     + goto still_pending;
1094     + }
1095     +
1096     + sigdelset(&pending->signal, sync->info.si_signo);
1097     + recalc_sigpending();
1098     +still_pending:
1099     + list_del_init(&sync->list);
1100     + copy_siginfo(info, &sync->info);
1101     + __sigqueue_free(sync);
1102     + return info->si_signo;
1103     +}
1104     +
1105     /*
1106     * Tell a process that it has a new active signal..
1107     *
1108     @@ -1057,10 +1099,9 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
1109    
1110     result = TRACE_SIGNAL_DELIVERED;
1111     /*
1112     - * Skip useless siginfo allocation for SIGKILL SIGSTOP,
1113     - * and kernel threads.
1114     + * Skip useless siginfo allocation for SIGKILL and kernel threads.
1115     */
1116     - if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD))
1117     + if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1118     goto out_set;
1119    
1120     /*
1121     @@ -2394,6 +2435,11 @@ relock:
1122     goto relock;
1123     }
1124    
1125     + /* Has this task already been marked for death? */
1126     + ksig->info.si_signo = signr = SIGKILL;
1127     + if (signal_group_exit(signal))
1128     + goto fatal;
1129     +
1130     for (;;) {
1131     struct k_sigaction *ka;
1132    
1133     @@ -2407,7 +2453,15 @@ relock:
1134     goto relock;
1135     }
1136    
1137     - signr = dequeue_signal(current, &current->blocked, &ksig->info);
1138     + /*
1139     + * Signals generated by the execution of an instruction
1140     + * need to be delivered before any other pending signals
1141     + * so that the instruction pointer in the signal stack
1142     + * frame points to the faulting instruction.
1143     + */
1144     + signr = dequeue_synchronous_signal(&ksig->info);
1145     + if (!signr)
1146     + signr = dequeue_signal(current, &current->blocked, &ksig->info);
1147    
1148     if (!signr)
1149     break; /* will return 0 */
1150     @@ -2489,6 +2543,7 @@ relock:
1151     continue;
1152     }
1153    
1154     + fatal:
1155     spin_unlock_irq(&sighand->siglock);
1156    
1157     /*
1158     diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
1159     index 31ea48eceda1..ec8332c5056a 100644
1160     --- a/kernel/trace/trace_uprobe.c
1161     +++ b/kernel/trace/trace_uprobe.c
1162     @@ -5,7 +5,7 @@
1163     * Copyright (C) IBM Corporation, 2010-2012
1164     * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
1165     */
1166     -#define pr_fmt(fmt) "trace_kprobe: " fmt
1167     +#define pr_fmt(fmt) "trace_uprobe: " fmt
1168    
1169     #include <linux/module.h>
1170     #include <linux/uaccess.h>
1171     @@ -127,6 +127,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
1172     if (ret >= 0) {
1173     if (ret == maxlen)
1174     dst[ret - 1] = '\0';
1175     + else
1176     + /*
1177     + * Include the terminating null byte. In this case it
1178     + * was copied by strncpy_from_user but not accounted
1179     + * for in ret.
1180     + */
1181     + ret++;
1182     *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
1183     }
1184    
1185     diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
1186     index 781c5b6e6e8e..41be60d54001 100644
1187     --- a/net/batman-adv/hard-interface.c
1188     +++ b/net/batman-adv/hard-interface.c
1189     @@ -20,7 +20,6 @@
1190     #include "main.h"
1191    
1192     #include <linux/atomic.h>
1193     -#include <linux/bug.h>
1194     #include <linux/byteorder/generic.h>
1195     #include <linux/errno.h>
1196     #include <linux/gfp.h>
1197     @@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
1198     parent_dev = __dev_get_by_index((struct net *)parent_net,
1199     dev_get_iflink(net_dev));
1200     /* if we got a NULL parent_dev there is something broken.. */
1201     - if (WARN(!parent_dev, "Cannot find parent device"))
1202     + if (!parent_dev) {
1203     + pr_err("Cannot find parent device\n");
1204     return false;
1205     + }
1206    
1207     if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
1208     return false;
1209     diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
1210     index 5db5a0a4c959..b85ca809e509 100644
1211     --- a/net/batman-adv/soft-interface.c
1212     +++ b/net/batman-adv/soft-interface.c
1213     @@ -221,6 +221,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
1214    
1215     netif_trans_update(soft_iface);
1216     vid = batadv_get_vid(skb, 0);
1217     +
1218     + skb_reset_mac_header(skb);
1219     ethhdr = eth_hdr(skb);
1220    
1221     switch (ntohs(ethhdr->h_proto)) {
1222     diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
1223     index 2f126eff275d..664f886f464d 100644
1224     --- a/net/ceph/messenger.c
1225     +++ b/net/ceph/messenger.c
1226     @@ -3219,9 +3219,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
1227     dout("con_keepalive %p\n", con);
1228     mutex_lock(&con->mutex);
1229     clear_standby(con);
1230     + con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
1231     mutex_unlock(&con->mutex);
1232     - if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
1233     - con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
1234     +
1235     + if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
1236     queue_con(con);
1237     }
1238     EXPORT_SYMBOL(ceph_con_keepalive);
1239     diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
1240     index 1f536ba573b4..65e511756e64 100644
1241     --- a/net/mac80211/tx.c
1242     +++ b/net/mac80211/tx.c
1243     @@ -1938,9 +1938,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1244     int head_need, bool may_encrypt)
1245     {
1246     struct ieee80211_local *local = sdata->local;
1247     + struct ieee80211_hdr *hdr;
1248     + bool enc_tailroom;
1249     int tail_need = 0;
1250    
1251     - if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
1252     + hdr = (struct ieee80211_hdr *) skb->data;
1253     + enc_tailroom = may_encrypt &&
1254     + (sdata->crypto_tx_tailroom_needed_cnt ||
1255     + ieee80211_is_mgmt(hdr->frame_control));
1256     +
1257     + if (enc_tailroom) {
1258     tail_need = IEEE80211_ENCRYPT_TAILROOM;
1259     tail_need -= skb_tailroom(skb);
1260     tail_need = max_t(int, tail_need, 0);
1261     @@ -1948,8 +1955,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1262    
1263     if (skb_cloned(skb) &&
1264     (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
1265     - !skb_clone_writable(skb, ETH_HLEN) ||
1266     - (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
1267     + !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
1268     I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1269     else if (head_need || tail_need)
1270     I802_DEBUG_INC(local->tx_expand_skb_head);
1271     diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
1272     index 8602a5f1b515..e8ad7ddf347a 100644
1273     --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
1274     +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
1275     @@ -563,6 +563,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
1276     DMA_TO_DEVICE);
1277     }
1278    
1279     +/* If the xdr_buf has more elements than the device can
1280     + * transmit in a single RDMA Send, then the reply will
1281     + * have to be copied into a bounce buffer.
1282     + */
1283     +static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
1284     + struct xdr_buf *xdr,
1285     + __be32 *wr_lst)
1286     +{
1287     + int elements;
1288     +
1289     + /* xdr->head */
1290     + elements = 1;
1291     +
1292     + /* xdr->pages */
1293     + if (!wr_lst) {
1294     + unsigned int remaining;
1295     + unsigned long pageoff;
1296     +
1297     + pageoff = xdr->page_base & ~PAGE_MASK;
1298     + remaining = xdr->page_len;
1299     + while (remaining) {
1300     + ++elements;
1301     + remaining -= min_t(u32, PAGE_SIZE - pageoff,
1302     + remaining);
1303     + pageoff = 0;
1304     + }
1305     + }
1306     +
1307     + /* xdr->tail */
1308     + if (xdr->tail[0].iov_len)
1309     + ++elements;
1310     +
1311     + /* assume 1 SGE is needed for the transport header */
1312     + return elements >= rdma->sc_max_send_sges;
1313     +}
1314     +
1315     +/* The device is not capable of sending the reply directly.
1316     + * Assemble the elements of @xdr into the transport header
1317     + * buffer.
1318     + */
1319     +static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
1320     + struct svc_rdma_send_ctxt *ctxt,
1321     + struct xdr_buf *xdr, __be32 *wr_lst)
1322     +{
1323     + unsigned char *dst, *tailbase;
1324     + unsigned int taillen;
1325     +
1326     + dst = ctxt->sc_xprt_buf;
1327     + dst += ctxt->sc_sges[0].length;
1328     +
1329     + memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
1330     + dst += xdr->head[0].iov_len;
1331     +
1332     + tailbase = xdr->tail[0].iov_base;
1333     + taillen = xdr->tail[0].iov_len;
1334     + if (wr_lst) {
1335     + u32 xdrpad;
1336     +
1337     + xdrpad = xdr_padsize(xdr->page_len);
1338     + if (taillen && xdrpad) {
1339     + tailbase += xdrpad;
1340     + taillen -= xdrpad;
1341     + }
1342     + } else {
1343     + unsigned int len, remaining;
1344     + unsigned long pageoff;
1345     + struct page **ppages;
1346     +
1347     + ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
1348     + pageoff = xdr->page_base & ~PAGE_MASK;
1349     + remaining = xdr->page_len;
1350     + while (remaining) {
1351     + len = min_t(u32, PAGE_SIZE - pageoff, remaining);
1352     +
1353     + memcpy(dst, page_address(*ppages), len);
1354     + remaining -= len;
1355     + dst += len;
1356     + pageoff = 0;
1357     + }
1358     + }
1359     +
1360     + if (taillen)
1361     + memcpy(dst, tailbase, taillen);
1362     +
1363     + ctxt->sc_sges[0].length += xdr->len;
1364     + ib_dma_sync_single_for_device(rdma->sc_pd->device,
1365     + ctxt->sc_sges[0].addr,
1366     + ctxt->sc_sges[0].length,
1367     + DMA_TO_DEVICE);
1368     +
1369     + return 0;
1370     +}
1371     +
1372     /* svc_rdma_map_reply_msg - Map the buffer holding RPC message
1373     * @rdma: controlling transport
1374     * @ctxt: send_ctxt for the Send WR
1375     @@ -585,8 +678,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
1376     u32 xdr_pad;
1377     int ret;
1378    
1379     - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
1380     - return -EIO;
1381     + if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
1382     + return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
1383     +
1384     + ++ctxt->sc_cur_sge_no;
1385     ret = svc_rdma_dma_map_buf(rdma, ctxt,
1386     xdr->head[0].iov_base,
1387     xdr->head[0].iov_len);
1388     @@ -617,8 +712,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
1389     while (remaining) {
1390     len = min_t(u32, PAGE_SIZE - page_off, remaining);
1391    
1392     - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
1393     - return -EIO;
1394     + ++ctxt->sc_cur_sge_no;
1395     ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
1396     page_off, len);
1397     if (ret < 0)
1398     @@ -632,8 +726,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
1399     len = xdr->tail[0].iov_len;
1400     tail:
1401     if (len) {
1402     - if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
1403     - return -EIO;
1404     + ++ctxt->sc_cur_sge_no;
1405     ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
1406     if (ret < 0)
1407     return ret;
1408     diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
1409     index 2f7ec8912f49..ce5c610b49c7 100644
1410     --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
1411     +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
1412     @@ -478,12 +478,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
1413     /* Transport header, head iovec, tail iovec */
1414     newxprt->sc_max_send_sges = 3;
1415     /* Add one SGE per page list entry */
1416     - newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
1417     - if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
1418     - pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
1419     - newxprt->sc_max_send_sges);
1420     - goto errout;
1421     - }
1422     + newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
1423     + if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
1424     + newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
1425     newxprt->sc_max_req_size = svcrdma_max_req_size;
1426     newxprt->sc_max_requests = svcrdma_max_requests;
1427     newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
1428     diff --git a/net/wireless/ap.c b/net/wireless/ap.c
1429     index 882d97bdc6bf..550ac9d827fe 100644
1430     --- a/net/wireless/ap.c
1431     +++ b/net/wireless/ap.c
1432     @@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
1433     cfg80211_sched_dfs_chan_update(rdev);
1434     }
1435    
1436     + schedule_work(&cfg80211_disconnect_work);
1437     +
1438     return err;
1439     }
1440    
1441     diff --git a/net/wireless/core.h b/net/wireless/core.h
1442     index c61dbba8bf47..7f4d5f2f9112 100644
1443     --- a/net/wireless/core.h
1444     +++ b/net/wireless/core.h
1445     @@ -444,6 +444,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
1446     bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
1447     u32 center_freq_khz, u32 bw_khz);
1448    
1449     +extern struct work_struct cfg80211_disconnect_work;
1450     +
1451     /**
1452     * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
1453     * @wiphy: the wiphy to validate against
1454     diff --git a/net/wireless/sme.c b/net/wireless/sme.c
1455     index f741d8376a46..7d34cb884840 100644
1456     --- a/net/wireless/sme.c
1457     +++ b/net/wireless/sme.c
1458     @@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work)
1459     rtnl_unlock();
1460     }
1461    
1462     -static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
1463     +DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
1464    
1465    
1466     /*
1467     diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
1468     index 119a427d9b2b..6ea8036fcdbe 100644
1469     --- a/net/xfrm/xfrm_policy.c
1470     +++ b/net/xfrm/xfrm_policy.c
1471     @@ -1628,7 +1628,10 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1472     dst_copy_metrics(dst1, dst);
1473    
1474     if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1475     - __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
1476     + __u32 mark = 0;
1477     +
1478     + if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
1479     + mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
1480    
1481     family = xfrm[i]->props.family;
1482     dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
1483     diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
1484     index 277c1c46fe94..c6d26afcf89d 100644
1485     --- a/net/xfrm/xfrm_user.c
1486     +++ b/net/xfrm/xfrm_user.c
1487     @@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1488     if (!ut[i].family)
1489     ut[i].family = family;
1490    
1491     - if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
1492     - (ut[i].family != prev_family))
1493     - return -EINVAL;
1494     -
1495     + switch (ut[i].mode) {
1496     + case XFRM_MODE_TUNNEL:
1497     + case XFRM_MODE_BEET:
1498     + break;
1499     + default:
1500     + if (ut[i].family != prev_family)
1501     + return -EINVAL;
1502     + break;
1503     + }
1504     if (ut[i].mode >= XFRM_MODE_MAX)
1505     return -EINVAL;
1506    
1507     diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
1508     index 33e67bd1dc34..32234481ad7d 100644
1509     --- a/samples/mei/mei-amt-version.c
1510     +++ b/samples/mei/mei-amt-version.c
1511     @@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
1512    
1513     me->verbose = verbose;
1514    
1515     - me->fd = open("/dev/mei", O_RDWR);
1516     + me->fd = open("/dev/mei0", O_RDWR);
1517     if (me->fd == -1) {
1518     mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
1519     goto err;
1520     diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
1521     index 3040830d7797..84545666a09c 100644
1522     --- a/tools/iio/iio_generic_buffer.c
1523     +++ b/tools/iio/iio_generic_buffer.c
1524     @@ -330,7 +330,7 @@ static const struct option longopts[] = {
1525    
1526     int main(int argc, char **argv)
1527     {
1528     - unsigned long long num_loops = 2;
1529     + long long num_loops = 2;
1530     unsigned long timedelay = 1000000;
1531     unsigned long buf_len = 128;
1532