Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0159-4.14.60-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 249369 bytes
-added up to patches-4.14.79
1 niro 3238 diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
2     index 9c67ee4890d7..bbcb255c3150 100644
3     --- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt
4     +++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
5     @@ -2,7 +2,10 @@
6    
7     Required properties:
8    
9     -- compatible: should be "qca,qca8337"
10     +- compatible: should be one of:
11     + "qca,qca8334"
12     + "qca,qca8337"
13     +
14     - #size-cells: must be 0
15     - #address-cells: must be 1
16    
17     @@ -14,6 +17,20 @@ port and PHY id, each subnode describing a port needs to have a valid phandle
18     referencing the internal PHY connected to it. The CPU port of this switch is
19     always port 0.
20    
21     +A CPU port node has the following optional node:
22     +
23     +- fixed-link : Fixed-link subnode describing a link to a non-MDIO
24     + managed entity. See
25     + Documentation/devicetree/bindings/net/fixed-link.txt
26     + for details.
27     +
28     +For QCA8K the 'fixed-link' sub-node supports only the following properties:
29     +
30     +- 'speed' (integer, mandatory), to indicate the link speed. Accepted
31     + values are 10, 100 and 1000
32     +- 'full-duplex' (boolean, optional), to indicate that full duplex is
33     + used. When absent, half duplex is assumed.
34     +
35     Example:
36    
37    
38     @@ -53,6 +70,10 @@ Example:
39     label = "cpu";
40     ethernet = <&gmac1>;
41     phy-mode = "rgmii";
42     + fixed-link {
43     + speed = 1000;
44     + full-duplex;
45     + };
46     };
47    
48     port@1 {
49     diff --git a/Documentation/devicetree/bindings/net/meson-dwmac.txt b/Documentation/devicetree/bindings/net/meson-dwmac.txt
50     index 354dd9896bb5..910187ebf1ce 100644
51     --- a/Documentation/devicetree/bindings/net/meson-dwmac.txt
52     +++ b/Documentation/devicetree/bindings/net/meson-dwmac.txt
53     @@ -10,6 +10,7 @@ Required properties on all platforms:
54     - "amlogic,meson6-dwmac"
55     - "amlogic,meson8b-dwmac"
56     - "amlogic,meson-gxbb-dwmac"
57     + - "amlogic,meson-axg-dwmac"
58     Additionally "snps,dwmac" and any applicable more
59     detailed version number described in net/stmmac.txt
60     should be used.
61     diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
62     index 2392557ede27..df77d394edc0 100644
63     --- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
64     +++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
65     @@ -3,8 +3,10 @@
66     Required properties for the root node:
67     - compatible: one of "amlogic,meson8-cbus-pinctrl"
68     "amlogic,meson8b-cbus-pinctrl"
69     + "amlogic,meson8m2-cbus-pinctrl"
70     "amlogic,meson8-aobus-pinctrl"
71     "amlogic,meson8b-aobus-pinctrl"
72     + "amlogic,meson8m2-aobus-pinctrl"
73     "amlogic,meson-gxbb-periphs-pinctrl"
74     "amlogic,meson-gxbb-aobus-pinctrl"
75     "amlogic,meson-gxl-periphs-pinctrl"
76     diff --git a/Documentation/vfio-mediated-device.txt b/Documentation/vfio-mediated-device.txt
77     index 1b3950346532..c3f69bcaf96e 100644
78     --- a/Documentation/vfio-mediated-device.txt
79     +++ b/Documentation/vfio-mediated-device.txt
80     @@ -145,6 +145,11 @@ The functions in the mdev_parent_ops structure are as follows:
81     * create: allocate basic resources in a driver for a mediated device
82     * remove: free resources in a driver when a mediated device is destroyed
83    
84     +(Note that mdev-core provides no implicit serialization of create/remove
85     +callbacks per mdev parent device, per mdev type, or any other categorization.
86     +Vendor drivers are expected to be fully asynchronous in this respect or
87     +provide their own internal resource protection.)
88     +
89     The callbacks in the mdev_parent_ops structure are as follows:
90    
91     * open: open callback of mediated device
92     diff --git a/Makefile b/Makefile
93     index 81b0e99dce80..5b48ec630990 100644
94     --- a/Makefile
95     +++ b/Makefile
96     @@ -1,7 +1,7 @@
97     # SPDX-License-Identifier: GPL-2.0
98     VERSION = 4
99     PATCHLEVEL = 14
100     -SUBLEVEL = 59
101     +SUBLEVEL = 60
102     EXTRAVERSION =
103     NAME = Petit Gorille
104    
105     diff --git a/arch/arm/boot/dts/emev2.dtsi b/arch/arm/boot/dts/emev2.dtsi
106     index 42ea246e71cb..fec1241b858f 100644
107     --- a/arch/arm/boot/dts/emev2.dtsi
108     +++ b/arch/arm/boot/dts/emev2.dtsi
109     @@ -31,13 +31,13 @@
110     #address-cells = <1>;
111     #size-cells = <0>;
112    
113     - cpu@0 {
114     + cpu0: cpu@0 {
115     device_type = "cpu";
116     compatible = "arm,cortex-a9";
117     reg = <0>;
118     clock-frequency = <533000000>;
119     };
120     - cpu@1 {
121     + cpu1: cpu@1 {
122     device_type = "cpu";
123     compatible = "arm,cortex-a9";
124     reg = <1>;
125     @@ -57,6 +57,7 @@
126     compatible = "arm,cortex-a9-pmu";
127     interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
128     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
129     + interrupt-affinity = <&cpu0>, <&cpu1>;
130     };
131    
132     clocks@e0110000 {
133     diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
134     index 4ea5c5a16c57..5fc24d4c2d5d 100644
135     --- a/arch/arm/boot/dts/sh73a0.dtsi
136     +++ b/arch/arm/boot/dts/sh73a0.dtsi
137     @@ -22,7 +22,7 @@
138     #address-cells = <1>;
139     #size-cells = <0>;
140    
141     - cpu@0 {
142     + cpu0: cpu@0 {
143     device_type = "cpu";
144     compatible = "arm,cortex-a9";
145     reg = <0>;
146     @@ -30,7 +30,7 @@
147     power-domains = <&pd_a2sl>;
148     next-level-cache = <&L2>;
149     };
150     - cpu@1 {
151     + cpu1: cpu@1 {
152     device_type = "cpu";
153     compatible = "arm,cortex-a9";
154     reg = <1>;
155     @@ -89,6 +89,7 @@
156     compatible = "arm,cortex-a9-pmu";
157     interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
158     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
159     + interrupt-affinity = <&cpu0>, <&cpu1>;
160     };
161    
162     cmt1: timer@e6138000 {
163     diff --git a/arch/arm/boot/dts/stih407-pinctrl.dtsi b/arch/arm/boot/dts/stih407-pinctrl.dtsi
164     index bd1a82e8fffe..fe501d32d059 100644
165     --- a/arch/arm/boot/dts/stih407-pinctrl.dtsi
166     +++ b/arch/arm/boot/dts/stih407-pinctrl.dtsi
167     @@ -52,7 +52,7 @@
168     st,syscfg = <&syscfg_sbc>;
169     reg = <0x0961f080 0x4>;
170     reg-names = "irqmux";
171     - interrupts = <GIC_SPI 188 IRQ_TYPE_NONE>;
172     + interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
173     interrupt-names = "irqmux";
174     ranges = <0 0x09610000 0x6000>;
175    
176     @@ -376,7 +376,7 @@
177     st,syscfg = <&syscfg_front>;
178     reg = <0x0920f080 0x4>;
179     reg-names = "irqmux";
180     - interrupts = <GIC_SPI 189 IRQ_TYPE_NONE>;
181     + interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
182     interrupt-names = "irqmux";
183     ranges = <0 0x09200000 0x10000>;
184    
185     @@ -936,7 +936,7 @@
186     st,syscfg = <&syscfg_front>;
187     reg = <0x0921f080 0x4>;
188     reg-names = "irqmux";
189     - interrupts = <GIC_SPI 190 IRQ_TYPE_NONE>;
190     + interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
191     interrupt-names = "irqmux";
192     ranges = <0 0x09210000 0x10000>;
193    
194     @@ -969,7 +969,7 @@
195     st,syscfg = <&syscfg_rear>;
196     reg = <0x0922f080 0x4>;
197     reg-names = "irqmux";
198     - interrupts = <GIC_SPI 191 IRQ_TYPE_NONE>;
199     + interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
200     interrupt-names = "irqmux";
201     ranges = <0 0x09220000 0x6000>;
202    
203     @@ -1164,7 +1164,7 @@
204     st,syscfg = <&syscfg_flash>;
205     reg = <0x0923f080 0x4>;
206     reg-names = "irqmux";
207     - interrupts = <GIC_SPI 192 IRQ_TYPE_NONE>;
208     + interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
209     interrupt-names = "irqmux";
210     ranges = <0 0x09230000 0x3000>;
211    
212     diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
213     index 323a4df59a6c..ece2d1d43724 100644
214     --- a/arch/arm/net/bpf_jit_32.c
215     +++ b/arch/arm/net/bpf_jit_32.c
216     @@ -718,7 +718,7 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
217     }
218    
219     /* dst = dst >> src */
220     -static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
221     +static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
222     bool sstk, struct jit_ctx *ctx) {
223     const u8 *tmp = bpf2a32[TMP_REG_1];
224     const u8 *tmp2 = bpf2a32[TMP_REG_2];
225     @@ -734,7 +734,7 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
226     emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
227     }
228    
229     - /* Do LSH operation */
230     + /* Do RSH operation */
231     emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
232     emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
233     emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
234     @@ -784,7 +784,7 @@ static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk,
235     }
236    
237     /* dst = dst >> val */
238     -static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk,
239     +static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk,
240     const u32 val, struct jit_ctx *ctx) {
241     const u8 *tmp = bpf2a32[TMP_REG_1];
242     const u8 *tmp2 = bpf2a32[TMP_REG_2];
243     @@ -1340,7 +1340,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
244     case BPF_ALU64 | BPF_RSH | BPF_K:
245     if (unlikely(imm > 63))
246     return -EINVAL;
247     - emit_a32_lsr_i64(dst, dstk, imm, ctx);
248     + emit_a32_rsh_i64(dst, dstk, imm, ctx);
249     break;
250     /* dst = dst << src */
251     case BPF_ALU64 | BPF_LSH | BPF_X:
252     @@ -1348,7 +1348,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
253     break;
254     /* dst = dst >> src */
255     case BPF_ALU64 | BPF_RSH | BPF_X:
256     - emit_a32_lsr_r64(dst, src, dstk, sstk, ctx);
257     + emit_a32_rsh_r64(dst, src, dstk, sstk, ctx);
258     break;
259     /* dst = dst >> src (signed) */
260     case BPF_ALU64 | BPF_ARSH | BPF_X:
261     diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
262     index 9eb11a8d9eda..26a978616071 100644
263     --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
264     +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
265     @@ -93,20 +93,12 @@
266     regulator-always-on;
267     };
268    
269     - rsnd_ak4613: sound {
270     - compatible = "simple-audio-card";
271     + sound_card: sound {
272     + compatible = "audio-graph-card";
273    
274     - simple-audio-card,format = "left_j";
275     - simple-audio-card,bitclock-master = <&sndcpu>;
276     - simple-audio-card,frame-master = <&sndcpu>;
277     + label = "rcar-sound";
278    
279     - sndcpu: simple-audio-card,cpu {
280     - sound-dai = <&rcar_sound>;
281     - };
282     -
283     - sndcodec: simple-audio-card,codec {
284     - sound-dai = <&ak4613>;
285     - };
286     + dais = <&rsnd_port0>;
287     };
288    
289     vbus0_usb2: regulator-vbus0-usb2 {
290     @@ -320,6 +312,12 @@
291     asahi-kasei,out4-single-end;
292     asahi-kasei,out5-single-end;
293     asahi-kasei,out6-single-end;
294     +
295     + port {
296     + ak4613_endpoint: endpoint {
297     + remote-endpoint = <&rsnd_endpoint0>;
298     + };
299     + };
300     };
301    
302     cs2000: clk_multiplier@4f {
303     @@ -538,10 +536,18 @@
304     <&audio_clk_c>,
305     <&cpg CPG_CORE CPG_AUDIO_CLK_I>;
306    
307     - rcar_sound,dai {
308     - dai0 {
309     - playback = <&ssi0 &src0 &dvc0>;
310     - capture = <&ssi1 &src1 &dvc1>;
311     + ports {
312     + rsnd_port0: port@0 {
313     + rsnd_endpoint0: endpoint {
314     + remote-endpoint = <&ak4613_endpoint>;
315     +
316     + dai-format = "left_j";
317     + bitclock-master = <&rsnd_endpoint0>;
318     + frame-master = <&rsnd_endpoint0>;
319     +
320     + playback = <&ssi0 &src0 &dvc0>;
321     + capture = <&ssi1 &src1 &dvc1>;
322     + };
323     };
324     };
325     };
326     diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
327     index 34480e9af2e7..b05796578e7a 100644
328     --- a/arch/arm64/configs/defconfig
329     +++ b/arch/arm64/configs/defconfig
330     @@ -302,6 +302,8 @@ CONFIG_GPIO_XGENE_SB=y
331     CONFIG_GPIO_PCA953X=y
332     CONFIG_GPIO_PCA953X_IRQ=y
333     CONFIG_GPIO_MAX77620=y
334     +CONFIG_POWER_AVS=y
335     +CONFIG_ROCKCHIP_IODOMAIN=y
336     CONFIG_POWER_RESET_MSM=y
337     CONFIG_POWER_RESET_XGENE=y
338     CONFIG_POWER_RESET_SYSCON=y
339     diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
340     index ae852add053d..0f2e1ab5e166 100644
341     --- a/arch/arm64/include/asm/cmpxchg.h
342     +++ b/arch/arm64/include/asm/cmpxchg.h
343     @@ -229,7 +229,9 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \
344     unsigned long tmp; \
345     \
346     asm volatile( \
347     - " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
348     + " sevl\n" \
349     + " wfe\n" \
350     + " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
351     " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
352     " cbnz %" #w "[tmp], 1f\n" \
353     " wfe\n" \
354     diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
355     index 00e7b900ca41..1190d90e01e6 100644
356     --- a/arch/arm64/mm/init.c
357     +++ b/arch/arm64/mm/init.c
358     @@ -651,11 +651,13 @@ void __init mem_init(void)
359     BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
360     #endif
361    
362     +#ifdef CONFIG_SPARSEMEM_VMEMMAP
363     /*
364     * Make sure we chose the upper bound of sizeof(struct page)
365     - * correctly.
366     + * correctly when sizing the VMEMMAP array.
367     */
368     BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
369     +#endif
370    
371     if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
372     extern int sysctl_overcommit_memory;
373     diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
374     index 47f94cc383b6..7c2f52d4a0e4 100644
375     --- a/arch/microblaze/boot/Makefile
376     +++ b/arch/microblaze/boot/Makefile
377     @@ -22,17 +22,19 @@ $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
378     quiet_cmd_cp = CP $< $@$2
379     cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
380    
381     -quiet_cmd_strip = STRIP $@
382     +quiet_cmd_strip = STRIP $< $@$2
383     cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
384     - -K _fdt_start vmlinux -o $@
385     + -K _fdt_start $< -o $@$2
386    
387     UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR)
388     +UIMAGE_IN = $@
389     +UIMAGE_OUT = $@.ub
390    
391     $(obj)/simpleImage.%: vmlinux FORCE
392     $(call if_changed,cp,.unstrip)
393     $(call if_changed,objcopy)
394     $(call if_changed,uimage)
395     - $(call if_changed,strip)
396     - @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
397     + $(call if_changed,strip,.strip)
398     + @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')'
399    
400     clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb
401     diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
402     index c7c63959ba91..e582d2c88092 100644
403     --- a/arch/powerpc/include/asm/barrier.h
404     +++ b/arch/powerpc/include/asm/barrier.h
405     @@ -76,6 +76,21 @@ do { \
406     ___p1; \
407     })
408    
409     +#ifdef CONFIG_PPC_BOOK3S_64
410     +/*
411     + * Prevent execution of subsequent instructions until preceding branches have
412     + * been fully resolved and are no longer executing speculatively.
413     + */
414     +#define barrier_nospec_asm ori 31,31,0
415     +
416     +// This also acts as a compiler barrier due to the memory clobber.
417     +#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
418     +
419     +#else /* !CONFIG_PPC_BOOK3S_64 */
420     +#define barrier_nospec_asm
421     +#define barrier_nospec()
422     +#endif
423     +
424     #include <asm-generic/barrier.h>
425    
426     #endif /* _ASM_POWERPC_BARRIER_H */
427     diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
428     index c1d257aa4c2d..66298461b640 100644
429     --- a/arch/powerpc/include/asm/cache.h
430     +++ b/arch/powerpc/include/asm/cache.h
431     @@ -9,11 +9,14 @@
432     #if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
433     #define L1_CACHE_SHIFT 4
434     #define MAX_COPY_PREFETCH 1
435     +#define IFETCH_ALIGN_SHIFT 2
436     #elif defined(CONFIG_PPC_E500MC)
437     #define L1_CACHE_SHIFT 6
438     #define MAX_COPY_PREFETCH 4
439     +#define IFETCH_ALIGN_SHIFT 3
440     #elif defined(CONFIG_PPC32)
441     #define MAX_COPY_PREFETCH 4
442     +#define IFETCH_ALIGN_SHIFT 3 /* 603 fetches 2 insn at a time */
443     #if defined(CONFIG_PPC_47x)
444     #define L1_CACHE_SHIFT 7
445     #else
446     diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
447     index ca2243df9cb2..470284f9e4f6 100644
448     --- a/arch/powerpc/kernel/eeh_driver.c
449     +++ b/arch/powerpc/kernel/eeh_driver.c
450     @@ -450,9 +450,11 @@ static void *eeh_add_virt_device(void *data, void *userdata)
451    
452     driver = eeh_pcid_get(dev);
453     if (driver) {
454     - eeh_pcid_put(dev);
455     - if (driver->err_handler)
456     + if (driver->err_handler) {
457     + eeh_pcid_put(dev);
458     return NULL;
459     + }
460     + eeh_pcid_put(dev);
461     }
462    
463     #ifdef CONFIG_PPC_POWERNV
464     @@ -489,17 +491,19 @@ static void *eeh_rmv_device(void *data, void *userdata)
465     if (eeh_dev_removed(edev))
466     return NULL;
467    
468     - driver = eeh_pcid_get(dev);
469     - if (driver) {
470     - eeh_pcid_put(dev);
471     - if (removed &&
472     - eeh_pe_passed(edev->pe))
473     - return NULL;
474     - if (removed &&
475     - driver->err_handler &&
476     - driver->err_handler->error_detected &&
477     - driver->err_handler->slot_reset)
478     + if (removed) {
479     + if (eeh_pe_passed(edev->pe))
480     return NULL;
481     + driver = eeh_pcid_get(dev);
482     + if (driver) {
483     + if (driver->err_handler &&
484     + driver->err_handler->error_detected &&
485     + driver->err_handler->slot_reset) {
486     + eeh_pcid_put(dev);
487     + return NULL;
488     + }
489     + eeh_pcid_put(dev);
490     + }
491     }
492    
493     /* Remove it from PCI subsystem */
494     diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
495     index 4fee00d414e8..2d0d89e2cb9a 100644
496     --- a/arch/powerpc/kernel/head_8xx.S
497     +++ b/arch/powerpc/kernel/head_8xx.S
498     @@ -958,7 +958,7 @@ start_here:
499     tovirt(r6,r6)
500     lis r5, abatron_pteptrs@h
501     ori r5, r5, abatron_pteptrs@l
502     - stw r5, 0xf0(r0) /* Must match your Abatron config file */
503     + stw r5, 0xf0(0) /* Must match your Abatron config file */
504     tophys(r5,r5)
505     stw r6, 0(r5)
506    
507     diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
508     index 1d817f4d97d9..2094f2b249fd 100644
509     --- a/arch/powerpc/kernel/pci_32.c
510     +++ b/arch/powerpc/kernel/pci_32.c
511     @@ -11,6 +11,7 @@
512     #include <linux/sched.h>
513     #include <linux/errno.h>
514     #include <linux/bootmem.h>
515     +#include <linux/syscalls.h>
516     #include <linux/irq.h>
517     #include <linux/list.h>
518     #include <linux/of.h>
519     diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
520     index 02190e90c7ae..f8782c7ef50f 100644
521     --- a/arch/powerpc/kernel/prom_init.c
522     +++ b/arch/powerpc/kernel/prom_init.c
523     @@ -334,6 +334,7 @@ static void __init prom_print_dec(unsigned long val)
524     call_prom("write", 3, 1, prom.stdout, buf+i, size);
525     }
526    
527     +__printf(1, 2)
528     static void __init prom_printf(const char *format, ...)
529     {
530     const char *p, *q, *s;
531     @@ -1148,7 +1149,7 @@ static void __init prom_send_capabilities(void)
532     */
533    
534     cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
535     - prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
536     + prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
537     cores, NR_CPUS);
538    
539     ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
540     @@ -1230,7 +1231,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
541    
542     if (align)
543     base = _ALIGN_UP(base, align);
544     - prom_debug("alloc_up(%x, %x)\n", size, align);
545     + prom_debug("%s(%lx, %lx)\n", __func__, size, align);
546     if (ram_top == 0)
547     prom_panic("alloc_up() called with mem not initialized\n");
548    
549     @@ -1241,7 +1242,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
550    
551     for(; (base + size) <= alloc_top;
552     base = _ALIGN_UP(base + 0x100000, align)) {
553     - prom_debug(" trying: 0x%x\n\r", base);
554     + prom_debug(" trying: 0x%lx\n\r", base);
555     addr = (unsigned long)prom_claim(base, size, 0);
556     if (addr != PROM_ERROR && addr != 0)
557     break;
558     @@ -1253,12 +1254,12 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
559     return 0;
560     alloc_bottom = addr + size;
561    
562     - prom_debug(" -> %x\n", addr);
563     - prom_debug(" alloc_bottom : %x\n", alloc_bottom);
564     - prom_debug(" alloc_top : %x\n", alloc_top);
565     - prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
566     - prom_debug(" rmo_top : %x\n", rmo_top);
567     - prom_debug(" ram_top : %x\n", ram_top);
568     + prom_debug(" -> %lx\n", addr);
569     + prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
570     + prom_debug(" alloc_top : %lx\n", alloc_top);
571     + prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
572     + prom_debug(" rmo_top : %lx\n", rmo_top);
573     + prom_debug(" ram_top : %lx\n", ram_top);
574    
575     return addr;
576     }
577     @@ -1273,7 +1274,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
578     {
579     unsigned long base, addr = 0;
580    
581     - prom_debug("alloc_down(%x, %x, %s)\n", size, align,
582     + prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
583     highmem ? "(high)" : "(low)");
584     if (ram_top == 0)
585     prom_panic("alloc_down() called with mem not initialized\n");
586     @@ -1301,7 +1302,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
587     base = _ALIGN_DOWN(alloc_top - size, align);
588     for (; base > alloc_bottom;
589     base = _ALIGN_DOWN(base - 0x100000, align)) {
590     - prom_debug(" trying: 0x%x\n\r", base);
591     + prom_debug(" trying: 0x%lx\n\r", base);
592     addr = (unsigned long)prom_claim(base, size, 0);
593     if (addr != PROM_ERROR && addr != 0)
594     break;
595     @@ -1312,12 +1313,12 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
596     alloc_top = addr;
597    
598     bail:
599     - prom_debug(" -> %x\n", addr);
600     - prom_debug(" alloc_bottom : %x\n", alloc_bottom);
601     - prom_debug(" alloc_top : %x\n", alloc_top);
602     - prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
603     - prom_debug(" rmo_top : %x\n", rmo_top);
604     - prom_debug(" ram_top : %x\n", ram_top);
605     + prom_debug(" -> %lx\n", addr);
606     + prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
607     + prom_debug(" alloc_top : %lx\n", alloc_top);
608     + prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
609     + prom_debug(" rmo_top : %lx\n", rmo_top);
610     + prom_debug(" ram_top : %lx\n", ram_top);
611    
612     return addr;
613     }
614     @@ -1443,7 +1444,7 @@ static void __init prom_init_mem(void)
615    
616     if (size == 0)
617     continue;
618     - prom_debug(" %x %x\n", base, size);
619     + prom_debug(" %lx %lx\n", base, size);
620     if (base == 0 && (of_platform & PLATFORM_LPAR))
621     rmo_top = size;
622     if ((base + size) > ram_top)
623     @@ -1463,12 +1464,12 @@ static void __init prom_init_mem(void)
624    
625     if (prom_memory_limit) {
626     if (prom_memory_limit <= alloc_bottom) {
627     - prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
628     - prom_memory_limit);
629     + prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
630     + prom_memory_limit);
631     prom_memory_limit = 0;
632     } else if (prom_memory_limit >= ram_top) {
633     - prom_printf("Ignoring mem=%x >= ram_top.\n",
634     - prom_memory_limit);
635     + prom_printf("Ignoring mem=%lx >= ram_top.\n",
636     + prom_memory_limit);
637     prom_memory_limit = 0;
638     } else {
639     ram_top = prom_memory_limit;
640     @@ -1500,12 +1501,13 @@ static void __init prom_init_mem(void)
641     alloc_bottom = PAGE_ALIGN(prom_initrd_end);
642    
643     prom_printf("memory layout at init:\n");
644     - prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
645     - prom_printf(" alloc_bottom : %x\n", alloc_bottom);
646     - prom_printf(" alloc_top : %x\n", alloc_top);
647     - prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
648     - prom_printf(" rmo_top : %x\n", rmo_top);
649     - prom_printf(" ram_top : %x\n", ram_top);
650     + prom_printf(" memory_limit : %lx (16 MB aligned)\n",
651     + prom_memory_limit);
652     + prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
653     + prom_printf(" alloc_top : %lx\n", alloc_top);
654     + prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
655     + prom_printf(" rmo_top : %lx\n", rmo_top);
656     + prom_printf(" ram_top : %lx\n", ram_top);
657     }
658    
659     static void __init prom_close_stdin(void)
660     @@ -1566,7 +1568,7 @@ static void __init prom_instantiate_opal(void)
661     return;
662     }
663    
664     - prom_printf("instantiating opal at 0x%x...", base);
665     + prom_printf("instantiating opal at 0x%llx...", base);
666    
667     if (call_prom_ret("call-method", 4, 3, rets,
668     ADDR("load-opal-runtime"),
669     @@ -1582,10 +1584,10 @@ static void __init prom_instantiate_opal(void)
670    
671     reserve_mem(base, size);
672    
673     - prom_debug("opal base = 0x%x\n", base);
674     - prom_debug("opal align = 0x%x\n", align);
675     - prom_debug("opal entry = 0x%x\n", entry);
676     - prom_debug("opal size = 0x%x\n", (long)size);
677     + prom_debug("opal base = 0x%llx\n", base);
678     + prom_debug("opal align = 0x%llx\n", align);
679     + prom_debug("opal entry = 0x%llx\n", entry);
680     + prom_debug("opal size = 0x%llx\n", size);
681    
682     prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
683     &base, sizeof(base));
684     @@ -1662,7 +1664,7 @@ static void __init prom_instantiate_rtas(void)
685    
686     prom_debug("rtas base = 0x%x\n", base);
687     prom_debug("rtas entry = 0x%x\n", entry);
688     - prom_debug("rtas size = 0x%x\n", (long)size);
689     + prom_debug("rtas size = 0x%x\n", size);
690    
691     prom_debug("prom_instantiate_rtas: end...\n");
692     }
693     @@ -1720,7 +1722,7 @@ static void __init prom_instantiate_sml(void)
694     if (base == 0)
695     prom_panic("Could not allocate memory for sml\n");
696    
697     - prom_printf("instantiating sml at 0x%x...", base);
698     + prom_printf("instantiating sml at 0x%llx...", base);
699    
700     memset((void *)base, 0, size);
701    
702     @@ -1739,8 +1741,8 @@ static void __init prom_instantiate_sml(void)
703     prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
704     &size, sizeof(size));
705    
706     - prom_debug("sml base = 0x%x\n", base);
707     - prom_debug("sml size = 0x%x\n", (long)size);
708     + prom_debug("sml base = 0x%llx\n", base);
709     + prom_debug("sml size = 0x%x\n", size);
710    
711     prom_debug("prom_instantiate_sml: end...\n");
712     }
713     @@ -1841,7 +1843,7 @@ static void __init prom_initialize_tce_table(void)
714    
715     prom_debug("TCE table: %s\n", path);
716     prom_debug("\tnode = 0x%x\n", node);
717     - prom_debug("\tbase = 0x%x\n", base);
718     + prom_debug("\tbase = 0x%llx\n", base);
719     prom_debug("\tsize = 0x%x\n", minsize);
720    
721     /* Initialize the table to have a one-to-one mapping
722     @@ -1928,12 +1930,12 @@ static void __init prom_hold_cpus(void)
723     }
724    
725     prom_debug("prom_hold_cpus: start...\n");
726     - prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
727     - prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
728     - prom_debug(" 1) acknowledge = 0x%x\n",
729     + prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
730     + prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
731     + prom_debug(" 1) acknowledge = 0x%lx\n",
732     (unsigned long)acknowledge);
733     - prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
734     - prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
735     + prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
736     + prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
737    
738     /* Set the common spinloop variable, so all of the secondary cpus
739     * will block when they are awakened from their OF spinloop.
740     @@ -1961,7 +1963,7 @@ static void __init prom_hold_cpus(void)
741     prom_getprop(node, "reg", &reg, sizeof(reg));
742     cpu_no = be32_to_cpu(reg);
743    
744     - prom_debug("cpu hw idx = %lu\n", cpu_no);
745     + prom_debug("cpu hw idx = %u\n", cpu_no);
746    
747     /* Init the acknowledge var which will be reset by
748     * the secondary cpu when it awakens from its OF
749     @@ -1971,7 +1973,7 @@ static void __init prom_hold_cpus(void)
750    
751     if (cpu_no != prom.cpu) {
752     /* Primary Thread of non-boot cpu or any thread */
753     - prom_printf("starting cpu hw idx %lu... ", cpu_no);
754     + prom_printf("starting cpu hw idx %u... ", cpu_no);
755     call_prom("start-cpu", 3, 0, node,
756     secondary_hold, cpu_no);
757    
758     @@ -1982,11 +1984,11 @@ static void __init prom_hold_cpus(void)
759     if (*acknowledge == cpu_no)
760     prom_printf("done\n");
761     else
762     - prom_printf("failed: %x\n", *acknowledge);
763     + prom_printf("failed: %lx\n", *acknowledge);
764     }
765     #ifdef CONFIG_SMP
766     else
767     - prom_printf("boot cpu hw idx %lu\n", cpu_no);
768     + prom_printf("boot cpu hw idx %u\n", cpu_no);
769     #endif /* CONFIG_SMP */
770     }
771    
772     @@ -2264,7 +2266,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
773     while ((*mem_start + needed) > *mem_end) {
774     unsigned long room, chunk;
775    
776     - prom_debug("Chunk exhausted, claiming more at %x...\n",
777     + prom_debug("Chunk exhausted, claiming more at %lx...\n",
778     alloc_bottom);
779     room = alloc_top - alloc_bottom;
780     if (room > DEVTREE_CHUNK_SIZE)
781     @@ -2490,7 +2492,7 @@ static void __init flatten_device_tree(void)
782     room = alloc_top - alloc_bottom - 0x4000;
783     if (room > DEVTREE_CHUNK_SIZE)
784     room = DEVTREE_CHUNK_SIZE;
785     - prom_debug("starting device tree allocs at %x\n", alloc_bottom);
786     + prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
787    
788     /* Now try to claim that */
789     mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
790     @@ -2553,7 +2555,7 @@ static void __init flatten_device_tree(void)
791     int i;
792     prom_printf("reserved memory map:\n");
793     for (i = 0; i < mem_reserve_cnt; i++)
794     - prom_printf(" %x - %x\n",
795     + prom_printf(" %llx - %llx\n",
796     be64_to_cpu(mem_reserve_map[i].base),
797     be64_to_cpu(mem_reserve_map[i].size));
798     }
799     @@ -2563,9 +2565,9 @@ static void __init flatten_device_tree(void)
800     */
801     mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
802    
803     - prom_printf("Device tree strings 0x%x -> 0x%x\n",
804     + prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
805     dt_string_start, dt_string_end);
806     - prom_printf("Device tree struct 0x%x -> 0x%x\n",
807     + prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
808     dt_struct_start, dt_struct_end);
809     }
810    
811     @@ -2997,7 +2999,7 @@ static void __init prom_find_boot_cpu(void)
812     prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
813     prom.cpu = be32_to_cpu(rval);
814    
815     - prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
816     + prom_debug("Booting CPU hw index = %d\n", prom.cpu);
817     }
818    
819     static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
820     @@ -3019,8 +3021,8 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
821     reserve_mem(prom_initrd_start,
822     prom_initrd_end - prom_initrd_start);
823    
824     - prom_debug("initrd_start=0x%x\n", prom_initrd_start);
825     - prom_debug("initrd_end=0x%x\n", prom_initrd_end);
826     + prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
827     + prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
828     }
829     #endif /* CONFIG_BLK_DEV_INITRD */
830     }
831     @@ -3273,7 +3275,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
832     /* Don't print anything after quiesce under OPAL, it crashes OFW */
833     if (of_platform != PLATFORM_OPAL) {
834     prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
835     - prom_debug("->dt_header_start=0x%x\n", hdr);
836     + prom_debug("->dt_header_start=0x%lx\n", hdr);
837     }
838    
839     #ifdef CONFIG_PPC32
840     diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
841     index a787776822d8..0378def28d41 100644
842     --- a/arch/powerpc/lib/string.S
843     +++ b/arch/powerpc/lib/string.S
844     @@ -12,6 +12,7 @@
845     #include <asm/errno.h>
846     #include <asm/ppc_asm.h>
847     #include <asm/export.h>
848     +#include <asm/cache.h>
849    
850     .text
851    
852     @@ -23,7 +24,7 @@ _GLOBAL(strncpy)
853     mtctr r5
854     addi r6,r3,-1
855     addi r4,r4,-1
856     - .balign 16
857     + .balign IFETCH_ALIGN_BYTES
858     1: lbzu r0,1(r4)
859     cmpwi 0,r0,0
860     stbu r0,1(r6)
861     @@ -43,7 +44,7 @@ _GLOBAL(strncmp)
862     mtctr r5
863     addi r5,r3,-1
864     addi r4,r4,-1
865     - .balign 16
866     + .balign IFETCH_ALIGN_BYTES
867     1: lbzu r3,1(r5)
868     cmpwi 1,r3,0
869     lbzu r0,1(r4)
870     @@ -77,7 +78,7 @@ _GLOBAL(memchr)
871     beq- 2f
872     mtctr r5
873     addi r3,r3,-1
874     - .balign 16
875     + .balign IFETCH_ALIGN_BYTES
876     1: lbzu r0,1(r3)
877     cmpw 0,r0,r4
878     bdnzf 2,1b
879     diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
880     index 13cfe413b40d..6d9bf014b3e7 100644
881     --- a/arch/powerpc/mm/slb.c
882     +++ b/arch/powerpc/mm/slb.c
883     @@ -62,14 +62,14 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
884     * updating it. No write barriers are needed here, provided
885     * we only update the current CPU's SLB shadow buffer.
886     */
887     - p->save_area[index].esid = 0;
888     - p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags));
889     - p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index));
890     + WRITE_ONCE(p->save_area[index].esid, 0);
891     + WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
892     + WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
893     }
894    
895     static inline void slb_shadow_clear(enum slb_index index)
896     {
897     - get_slb_shadow()->save_area[index].esid = 0;
898     + WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0);
899     }
900    
901     static inline void create_shadowed_slbe(unsigned long ea, int ssize,
902     diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
903     index bd0786c23109..254634fb3fc7 100644
904     --- a/arch/powerpc/net/bpf_jit_comp64.c
905     +++ b/arch/powerpc/net/bpf_jit_comp64.c
906     @@ -203,25 +203,37 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
907    
908     static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
909     {
910     + unsigned int i, ctx_idx = ctx->idx;
911     +
912     + /* Load function address into r12 */
913     + PPC_LI64(12, func);
914     +
915     + /* For bpf-to-bpf function calls, the callee's address is unknown
916     + * until the last extra pass. As seen above, we use PPC_LI64() to
917     + * load the callee's address, but this may optimize the number of
918     + * instructions required based on the nature of the address.
919     + *
920     + * Since we don't want the number of instructions emitted to change,
921     + * we pad the optimized PPC_LI64() call with NOPs to guarantee that
922     + * we always have a five-instruction sequence, which is the maximum
923     + * that PPC_LI64() can emit.
924     + */
925     + for (i = ctx->idx - ctx_idx; i < 5; i++)
926     + PPC_NOP();
927     +
928     #ifdef PPC64_ELF_ABI_v1
929     - /* func points to the function descriptor */
930     - PPC_LI64(b2p[TMP_REG_2], func);
931     - /* Load actual entry point from function descriptor */
932     - PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
933     - /* ... and move it to LR */
934     - PPC_MTLR(b2p[TMP_REG_1]);
935     /*
936     * Load TOC from function descriptor at offset 8.
937     * We can clobber r2 since we get called through a
938     * function pointer (so caller will save/restore r2)
939     * and since we don't use a TOC ourself.
940     */
941     - PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
942     -#else
943     - /* We can clobber r12 */
944     - PPC_FUNC_ADDR(12, func);
945     - PPC_MTLR(12);
946     + PPC_BPF_LL(2, 12, 8);
947     + /* Load actual entry point from function descriptor */
948     + PPC_BPF_LL(12, 12, 0);
949     #endif
950     +
951     + PPC_MTLR(12);
952     PPC_BLRL();
953     }
954    
955     diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
956     index 03d115aaa191..acde7bbe0716 100644
957     --- a/arch/powerpc/platforms/chrp/time.c
958     +++ b/arch/powerpc/platforms/chrp/time.c
959     @@ -28,6 +28,8 @@
960     #include <asm/sections.h>
961     #include <asm/time.h>
962    
963     +#include <platforms/chrp/chrp.h>
964     +
965     extern spinlock_t rtc_lock;
966    
967     #define NVRAM_AS0 0x74
968     @@ -63,7 +65,7 @@ long __init chrp_time_init(void)
969     return 0;
970     }
971    
972     -int chrp_cmos_clock_read(int addr)
973     +static int chrp_cmos_clock_read(int addr)
974     {
975     if (nvram_as1 != 0)
976     outb(addr>>8, nvram_as1);
977     @@ -71,7 +73,7 @@ int chrp_cmos_clock_read(int addr)
978     return (inb(nvram_data));
979     }
980    
981     -void chrp_cmos_clock_write(unsigned long val, int addr)
982     +static void chrp_cmos_clock_write(unsigned long val, int addr)
983     {
984     if (nvram_as1 != 0)
985     outb(addr>>8, nvram_as1);
986     diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
987     index 89c54de88b7a..bf4a125faec6 100644
988     --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
989     +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
990     @@ -35,6 +35,8 @@
991     */
992     #define HW_BROADWAY_ICR 0x00
993     #define HW_BROADWAY_IMR 0x04
994     +#define HW_STARLET_ICR 0x08
995     +#define HW_STARLET_IMR 0x0c
996    
997    
998     /*
999     @@ -74,6 +76,9 @@ static void hlwd_pic_unmask(struct irq_data *d)
1000     void __iomem *io_base = irq_data_get_irq_chip_data(d);
1001    
1002     setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
1003     +
1004     + /* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */
1005     + clrbits32(io_base + HW_STARLET_IMR, 1 << irq);
1006     }
1007    
1008    
1009     diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
1010     index c3c9bbb3573a..ba0964c17620 100644
1011     --- a/arch/powerpc/platforms/powermac/bootx_init.c
1012     +++ b/arch/powerpc/platforms/powermac/bootx_init.c
1013     @@ -468,7 +468,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
1014     boot_infos_t *bi = (boot_infos_t *) r4;
1015     unsigned long hdr;
1016     unsigned long space;
1017     - unsigned long ptr, x;
1018     + unsigned long ptr;
1019     char *model;
1020     unsigned long offset = reloc_offset();
1021    
1022     @@ -562,6 +562,8 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
1023     * MMU switched OFF, so this should not be useful anymore.
1024     */
1025     if (bi->version < 4) {
1026     + unsigned long x __maybe_unused;
1027     +
1028     bootx_printf("Touching pages...\n");
1029    
1030     /*
1031     diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
1032     index ab668cb72263..8b2eab1340f4 100644
1033     --- a/arch/powerpc/platforms/powermac/setup.c
1034     +++ b/arch/powerpc/platforms/powermac/setup.c
1035     @@ -352,6 +352,7 @@ static int pmac_late_init(void)
1036     }
1037     machine_late_initcall(powermac, pmac_late_init);
1038    
1039     +void note_bootable_part(dev_t dev, int part, int goodness);
1040     /*
1041     * This is __ref because we check for "initializing" before
1042     * touching any of the __init sensitive things and "initializing"
1043     diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
1044     index 05480e4cc5ca..bc764a674594 100644
1045     --- a/arch/s390/include/asm/cpu_mf.h
1046     +++ b/arch/s390/include/asm/cpu_mf.h
1047     @@ -116,7 +116,7 @@ struct hws_basic_entry {
1048    
1049     struct hws_diag_entry {
1050     unsigned int def:16; /* 0-15 Data Entry Format */
1051     - unsigned int R:14; /* 16-19 and 20-30 reserved */
1052     + unsigned int R:15; /* 16-19 and 20-30 reserved */
1053     unsigned int I:1; /* 31 entry valid or invalid */
1054     u8 data[]; /* Machine-dependent sample data */
1055     } __packed;
1056     @@ -132,7 +132,9 @@ struct hws_trailer_entry {
1057     unsigned int f:1; /* 0 - Block Full Indicator */
1058     unsigned int a:1; /* 1 - Alert request control */
1059     unsigned int t:1; /* 2 - Timestamp format */
1060     - unsigned long long:61; /* 3 - 63: Reserved */
1061     + unsigned int :29; /* 3 - 31: Reserved */
1062     + unsigned int bsdes:16; /* 32-47: size of basic SDE */
1063     + unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */
1064     };
1065     unsigned long long flags; /* 0 - 63: All indicators */
1066     };
1067     diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
1068     index d45e06346f14..c56cb37b88e3 100644
1069     --- a/arch/x86/events/intel/uncore.c
1070     +++ b/arch/x86/events/intel/uncore.c
1071     @@ -218,7 +218,7 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e
1072     u64 prev_count, new_count, delta;
1073     int shift;
1074    
1075     - if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
1076     + if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
1077     shift = 64 - uncore_fixed_ctr_bits(box);
1078     else
1079     shift = 64 - uncore_perf_ctr_bits(box);
1080     diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c
1081     index 93e7a8397cde..173e2674be6e 100644
1082     --- a/arch/x86/events/intel/uncore_nhmex.c
1083     +++ b/arch/x86/events/intel/uncore_nhmex.c
1084     @@ -246,7 +246,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
1085     {
1086     struct hw_perf_event *hwc = &event->hw;
1087    
1088     - if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
1089     + if (hwc->idx == UNCORE_PMC_IDX_FIXED)
1090     wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
1091     else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
1092     wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1093     diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
1094     index c8e0cda0f272..4fc0e08a30b9 100644
1095     --- a/arch/x86/kernel/cpu/microcode/core.c
1096     +++ b/arch/x86/kernel/cpu/microcode/core.c
1097     @@ -70,7 +70,7 @@ static DEFINE_MUTEX(microcode_mutex);
1098     /*
1099     * Serialize late loading so that CPUs get updated one-by-one.
1100     */
1101     -static DEFINE_SPINLOCK(update_lock);
1102     +static DEFINE_RAW_SPINLOCK(update_lock);
1103    
1104     struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
1105    
1106     @@ -560,9 +560,9 @@ static int __reload_late(void *info)
1107     if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
1108     return -1;
1109    
1110     - spin_lock(&update_lock);
1111     + raw_spin_lock(&update_lock);
1112     apply_microcode_local(&err);
1113     - spin_unlock(&update_lock);
1114     + raw_spin_unlock(&update_lock);
1115    
1116     /* siblings return UCODE_OK because their engine got updated already */
1117     if (err > UCODE_NFOUND) {
1118     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1119     index 43bbece92632..2ef2f1fe875b 100644
1120     --- a/arch/x86/kvm/mmu.c
1121     +++ b/arch/x86/kvm/mmu.c
1122     @@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
1123     if (cache->nobjs >= min)
1124     return 0;
1125     while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
1126     - page = (void *)__get_free_page(GFP_KERNEL);
1127     + page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
1128     if (!page)
1129     return -ENOMEM;
1130     cache->objects[cache->nobjs++] = page;
1131     diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
1132     index 56c9cd01fd1d..4a4b7d3c909a 100644
1133     --- a/block/bfq-iosched.c
1134     +++ b/block/bfq-iosched.c
1135     @@ -1678,7 +1678,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
1136    
1137     if (!RB_EMPTY_NODE(&rq->rb_node))
1138     goto end;
1139     - spin_lock_irq(&bfqq->bfqd->lock);
1140    
1141     /*
1142     * If next and rq belong to the same bfq_queue and next is older
1143     @@ -1702,7 +1701,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
1144    
1145     bfq_remove_request(q, next);
1146    
1147     - spin_unlock_irq(&bfqq->bfqd->lock);
1148     end:
1149     bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
1150     }
1151     diff --git a/block/bio.c b/block/bio.c
1152     index 90f19d7df66c..194d28cdc642 100644
1153     --- a/block/bio.c
1154     +++ b/block/bio.c
1155     @@ -881,16 +881,16 @@ EXPORT_SYMBOL(bio_add_page);
1156     */
1157     int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1158     {
1159     - unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1160     + unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
1161     struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1162     struct page **pages = (struct page **)bv;
1163     - size_t offset, diff;
1164     + size_t offset;
1165     ssize_t size;
1166    
1167     size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
1168     if (unlikely(size <= 0))
1169     return size ? size : -EFAULT;
1170     - nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
1171     + idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
1172    
1173     /*
1174     * Deep magic below: We need to walk the pinned pages backwards
1175     @@ -903,17 +903,15 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1176     bio->bi_iter.bi_size += size;
1177     bio->bi_vcnt += nr_pages;
1178    
1179     - diff = (nr_pages * PAGE_SIZE - offset) - size;
1180     - while (nr_pages--) {
1181     - bv[nr_pages].bv_page = pages[nr_pages];
1182     - bv[nr_pages].bv_len = PAGE_SIZE;
1183     - bv[nr_pages].bv_offset = 0;
1184     + while (idx--) {
1185     + bv[idx].bv_page = pages[idx];
1186     + bv[idx].bv_len = PAGE_SIZE;
1187     + bv[idx].bv_offset = 0;
1188     }
1189    
1190     bv[0].bv_offset += offset;
1191     bv[0].bv_len -= offset;
1192     - if (diff)
1193     - bv[bio->bi_vcnt - 1].bv_len -= diff;
1194     + bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
1195    
1196     iov_iter_advance(iter, size);
1197     return 0;
1198     @@ -1891,6 +1889,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
1199     bio_integrity_trim(split);
1200    
1201     bio_advance(bio, split->bi_iter.bi_size);
1202     + bio->bi_iter.bi_done = 0;
1203    
1204     if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1205     bio_set_flag(split, BIO_TRACE_COMPLETION);
1206     diff --git a/crypto/authenc.c b/crypto/authenc.c
1207     index 875470b0e026..0db344d5a01a 100644
1208     --- a/crypto/authenc.c
1209     +++ b/crypto/authenc.c
1210     @@ -108,6 +108,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
1211     CRYPTO_TFM_RES_MASK);
1212    
1213     out:
1214     + memzero_explicit(&keys, sizeof(keys));
1215     return err;
1216    
1217     badkey:
1218     diff --git a/crypto/authencesn.c b/crypto/authencesn.c
1219     index 0cf5fefdb859..6de852ce4cf8 100644
1220     --- a/crypto/authencesn.c
1221     +++ b/crypto/authencesn.c
1222     @@ -90,6 +90,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
1223     CRYPTO_TFM_RES_MASK);
1224    
1225     out:
1226     + memzero_explicit(&keys, sizeof(keys));
1227     return err;
1228    
1229     badkey:
1230     diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
1231     index 602ae58ee2d8..75c3cb377b98 100644
1232     --- a/drivers/acpi/acpi_lpss.c
1233     +++ b/drivers/acpi/acpi_lpss.c
1234     @@ -69,6 +69,10 @@ ACPI_MODULE_NAME("acpi_lpss");
1235     #define LPSS_SAVE_CTX BIT(4)
1236     #define LPSS_NO_D3_DELAY BIT(5)
1237    
1238     +/* Crystal Cove PMIC shares same ACPI ID between different platforms */
1239     +#define BYT_CRC_HRV 2
1240     +#define CHT_CRC_HRV 3
1241     +
1242     struct lpss_private_data;
1243    
1244     struct lpss_device_desc {
1245     @@ -162,7 +166,7 @@ static void byt_pwm_setup(struct lpss_private_data *pdata)
1246     if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
1247     return;
1248    
1249     - if (!acpi_dev_present("INT33FD", NULL, -1))
1250     + if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV))
1251     pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
1252     }
1253    
1254     diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
1255     index 6fc204a52493..eb857d6ea1fe 100644
1256     --- a/drivers/acpi/pci_root.c
1257     +++ b/drivers/acpi/pci_root.c
1258     @@ -472,9 +472,11 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
1259     }
1260    
1261     control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
1262     - | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
1263     | OSC_PCI_EXPRESS_PME_CONTROL;
1264    
1265     + if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
1266     + control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
1267     +
1268     if (pci_aer_available()) {
1269     if (aer_acpi_firmware_first())
1270     dev_info(&device->dev,
1271     diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
1272     index 711dd91b5e2c..2651c81d1edf 100644
1273     --- a/drivers/ata/libata-eh.c
1274     +++ b/drivers/ata/libata-eh.c
1275     @@ -2217,12 +2217,16 @@ static void ata_eh_link_autopsy(struct ata_link *link)
1276     if (qc->err_mask & ~AC_ERR_OTHER)
1277     qc->err_mask &= ~AC_ERR_OTHER;
1278    
1279     - /* SENSE_VALID trumps dev/unknown error and revalidation */
1280     + /*
1281     + * SENSE_VALID trumps dev/unknown error and revalidation. Upper
1282     + * layers will determine whether the command is worth retrying
1283     + * based on the sense data and device class/type. Otherwise,
1284     + * determine directly if the command is worth retrying using its
1285     + * error mask and flags.
1286     + */
1287     if (qc->flags & ATA_QCFLAG_SENSE_VALID)
1288     qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1289     -
1290     - /* determine whether the command is worth retrying */
1291     - if (ata_eh_worth_retry(qc))
1292     + else if (ata_eh_worth_retry(qc))
1293     qc->flags |= ATA_QCFLAG_RETRY;
1294    
1295     /* accumulate error info */
1296     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1297     index 86d7975afaeb..819521d5895e 100644
1298     --- a/drivers/bluetooth/btusb.c
1299     +++ b/drivers/bluetooth/btusb.c
1300     @@ -279,6 +279,7 @@ static const struct usb_device_id blacklist_table[] = {
1301     { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
1302     { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
1303     { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
1304     + { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
1305    
1306     /* Broadcom BCM2035 */
1307     { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
1308     @@ -373,6 +374,9 @@ static const struct usb_device_id blacklist_table[] = {
1309     /* Additional Realtek 8723BU Bluetooth devices */
1310     { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
1311    
1312     + /* Additional Realtek 8723DE Bluetooth devices */
1313     + { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
1314     +
1315     /* Additional Realtek 8821AE Bluetooth devices */
1316     { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
1317     { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
1318     diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
1319     index 6f4ebd5e54c8..a6173ddfb5a7 100644
1320     --- a/drivers/bluetooth/hci_qca.c
1321     +++ b/drivers/bluetooth/hci_qca.c
1322     @@ -881,7 +881,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
1323     */
1324     set_current_state(TASK_UNINTERRUPTIBLE);
1325     schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
1326     - set_current_state(TASK_INTERRUPTIBLE);
1327     + set_current_state(TASK_RUNNING);
1328    
1329     return 0;
1330     }
1331     diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
1332     index 72fd1750134d..942d076cbb0a 100644
1333     --- a/drivers/bus/arm-ccn.c
1334     +++ b/drivers/bus/arm-ccn.c
1335     @@ -736,7 +736,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
1336     ccn = pmu_to_arm_ccn(event->pmu);
1337    
1338     if (hw->sample_period) {
1339     - dev_warn(ccn->dev, "Sampling not supported!\n");
1340     + dev_dbg(ccn->dev, "Sampling not supported!\n");
1341     return -EOPNOTSUPP;
1342     }
1343    
1344     @@ -744,12 +744,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
1345     event->attr.exclude_kernel || event->attr.exclude_hv ||
1346     event->attr.exclude_idle || event->attr.exclude_host ||
1347     event->attr.exclude_guest) {
1348     - dev_warn(ccn->dev, "Can't exclude execution levels!\n");
1349     + dev_dbg(ccn->dev, "Can't exclude execution levels!\n");
1350     return -EINVAL;
1351     }
1352    
1353     if (event->cpu < 0) {
1354     - dev_warn(ccn->dev, "Can't provide per-task data!\n");
1355     + dev_dbg(ccn->dev, "Can't provide per-task data!\n");
1356     return -EOPNOTSUPP;
1357     }
1358     /*
1359     @@ -771,13 +771,13 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
1360     switch (type) {
1361     case CCN_TYPE_MN:
1362     if (node_xp != ccn->mn_id) {
1363     - dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
1364     + dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp);
1365     return -EINVAL;
1366     }
1367     break;
1368     case CCN_TYPE_XP:
1369     if (node_xp >= ccn->num_xps) {
1370     - dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
1371     + dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp);
1372     return -EINVAL;
1373     }
1374     break;
1375     @@ -785,11 +785,11 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
1376     break;
1377     default:
1378     if (node_xp >= ccn->num_nodes) {
1379     - dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp);
1380     + dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp);
1381     return -EINVAL;
1382     }
1383     if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
1384     - dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n",
1385     + dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n",
1386     type, node_xp);
1387     return -EINVAL;
1388     }
1389     @@ -808,19 +808,19 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
1390     if (event_id != e->event)
1391     continue;
1392     if (e->num_ports && port >= e->num_ports) {
1393     - dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n",
1394     + dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n",
1395     port, node_xp);
1396     return -EINVAL;
1397     }
1398     if (e->num_vcs && vc >= e->num_vcs) {
1399     - dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
1400     + dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n",
1401     vc, node_xp);
1402     return -EINVAL;
1403     }
1404     valid = 1;
1405     }
1406     if (!valid) {
1407     - dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
1408     + dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
1409     event_id, node_xp);
1410     return -EINVAL;
1411     }
1412     diff --git a/drivers/char/random.c b/drivers/char/random.c
1413     index ddc493d976fd..ea4dbfa30657 100644
1414     --- a/drivers/char/random.c
1415     +++ b/drivers/char/random.c
1416     @@ -1897,14 +1897,22 @@ static int
1417     write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1418     {
1419     size_t bytes;
1420     - __u32 buf[16];
1421     + __u32 t, buf[16];
1422     const char __user *p = buffer;
1423    
1424     while (count > 0) {
1425     + int b, i = 0;
1426     +
1427     bytes = min(count, sizeof(buf));
1428     if (copy_from_user(&buf, p, bytes))
1429     return -EFAULT;
1430    
1431     + for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
1432     + if (!arch_get_random_int(&t))
1433     + break;
1434     + buf[i] ^= t;
1435     + }
1436     +
1437     count -= bytes;
1438     p += bytes;
1439    
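
The random.c hunk above XORs output from the CPU's random instruction into every 32-bit word written to the pool and simply stops mixing once the instruction stops delivering, so the caller's data is only ever augmented, never replaced. A userspace-flavoured sketch of that mixing step; hw_rng_u32() is a hypothetical stand-in for arch_get_random_int():

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical hardware RNG helper; returns false when no randomness is available. */
    static bool hw_rng_u32(uint32_t *out)
    {
    	(void)out;
    	return false;
    }

    /* XOR hardware randomness into each caller-supplied word.  If the source
     * runs dry, the remaining words are left untouched. */
    static void mix_in_hw_random(uint32_t *buf, size_t words)
    {
    	uint32_t t;
    	size_t i;

    	for (i = 0; i < words; i++) {
    		if (!hw_rng_u32(&t))
    			break;
    		buf[i] ^= t;
    	}
    }
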
1440     diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
1441     index 346c4987b284..38983f56ad0d 100644
1442     --- a/drivers/edac/altera_edac.c
1443     +++ b/drivers/edac/altera_edac.c
1444     @@ -1106,7 +1106,7 @@ static void *ocram_alloc_mem(size_t size, void **other)
1445    
1446     static void ocram_free_mem(void *p, size_t size, void *other)
1447     {
1448     - gen_pool_free((struct gen_pool *)other, (u32)p, size);
1449     + gen_pool_free((struct gen_pool *)other, (unsigned long)p, size);
1450     }
1451    
1452     static const struct edac_device_prv_data ocramecc_data = {
1453     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1454     index 1360a24d2ede..f08624f2f209 100644
1455     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1456     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1457     @@ -683,8 +683,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
1458     return -EINVAL;
1459    
1460     /* A shared bo cannot be migrated to VRAM */
1461     - if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
1462     - return -EINVAL;
1463     + if (bo->prime_shared_count) {
1464     + if (domain & AMDGPU_GEM_DOMAIN_GTT)
1465     + domain = AMDGPU_GEM_DOMAIN_GTT;
1466     + else
1467     + return -EINVAL;
1468     + }
1469    
1470     if (bo->pin_count) {
1471     uint32_t mem_type = bo->tbo.mem.mem_type;
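
The amdgpu change above treats the requested placement as a bitmask: a prime-shared buffer may not be pinned in VRAM, so the mask is narrowed to GTT when GTT is among the allowed placements and the pin is rejected otherwise. A compact sketch of that narrowing, with hypothetical flag names:

    #include <errno.h>

    #define PLACE_VRAM	0x1	/* hypothetical placement flags */
    #define PLACE_GTT	0x2

    /* Narrow a placement mask for a shared buffer: VRAM is not allowed, so
     * keep GTT if the caller permitted it, otherwise refuse the request. */
    static int narrow_domain_for_shared(unsigned int *domain)
    {
    	if (*domain & PLACE_GTT) {
    		*domain = PLACE_GTT;
    		return 0;
    	}
    	return -EINVAL;
    }
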
1472     diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
1473     index 0d8a417e2cd6..bb5cc15fa0b9 100644
1474     --- a/drivers/gpu/drm/drm_atomic.c
1475     +++ b/drivers/gpu/drm/drm_atomic.c
1476     @@ -1355,7 +1355,9 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
1477     {
1478     struct drm_plane *plane = plane_state->plane;
1479     struct drm_crtc_state *crtc_state;
1480     -
1481     + /* Nothing to do for same crtc*/
1482     + if (plane_state->crtc == crtc)
1483     + return 0;
1484     if (plane_state->crtc) {
1485     crtc_state = drm_atomic_get_crtc_state(plane_state->state,
1486     plane_state->crtc);
1487     diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
1488     index 0028591f3f95..1f08d597b87a 100644
1489     --- a/drivers/gpu/drm/drm_atomic_helper.c
1490     +++ b/drivers/gpu/drm/drm_atomic_helper.c
1491     @@ -2683,31 +2683,9 @@ commit:
1492     return 0;
1493     }
1494    
1495     -/**
1496     - * drm_atomic_helper_disable_all - disable all currently active outputs
1497     - * @dev: DRM device
1498     - * @ctx: lock acquisition context
1499     - *
1500     - * Loops through all connectors, finding those that aren't turned off and then
1501     - * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
1502     - * that they are connected to.
1503     - *
1504     - * This is used for example in suspend/resume to disable all currently active
1505     - * functions when suspending. If you just want to shut down everything at e.g.
1506     - * driver unload, look at drm_atomic_helper_shutdown().
1507     - *
1508     - * Note that if callers haven't already acquired all modeset locks this might
1509     - * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
1510     - *
1511     - * Returns:
1512     - * 0 on success or a negative error code on failure.
1513     - *
1514     - * See also:
1515     - * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
1516     - * drm_atomic_helper_shutdown().
1517     - */
1518     -int drm_atomic_helper_disable_all(struct drm_device *dev,
1519     - struct drm_modeset_acquire_ctx *ctx)
1520     +static int __drm_atomic_helper_disable_all(struct drm_device *dev,
1521     + struct drm_modeset_acquire_ctx *ctx,
1522     + bool clean_old_fbs)
1523     {
1524     struct drm_atomic_state *state;
1525     struct drm_connector_state *conn_state;
1526     @@ -2759,8 +2737,11 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
1527     goto free;
1528    
1529     drm_atomic_set_fb_for_plane(plane_state, NULL);
1530     - plane_mask |= BIT(drm_plane_index(plane));
1531     - plane->old_fb = plane->fb;
1532     +
1533     + if (clean_old_fbs) {
1534     + plane->old_fb = plane->fb;
1535     + plane_mask |= BIT(drm_plane_index(plane));
1536     + }
1537     }
1538    
1539     ret = drm_atomic_commit(state);
1540     @@ -2771,6 +2752,34 @@ free:
1541     return ret;
1542     }
1543    
1544     +/**
1545     + * drm_atomic_helper_disable_all - disable all currently active outputs
1546     + * @dev: DRM device
1547     + * @ctx: lock acquisition context
1548     + *
1549     + * Loops through all connectors, finding those that aren't turned off and then
1550     + * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
1551     + * that they are connected to.
1552     + *
1553     + * This is used for example in suspend/resume to disable all currently active
1554     + * functions when suspending. If you just want to shut down everything at e.g.
1555     + * driver unload, look at drm_atomic_helper_shutdown().
1556     + *
1557     + * Note that if callers haven't already acquired all modeset locks this might
1558     + * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
1559     + *
1560     + * Returns:
1561     + * 0 on success or a negative error code on failure.
1562     + *
1563     + * See also:
1564     + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
1565     + * drm_atomic_helper_shutdown().
1566     + */
1567     +int drm_atomic_helper_disable_all(struct drm_device *dev,
1568     + struct drm_modeset_acquire_ctx *ctx)
1569     +{
1570     + return __drm_atomic_helper_disable_all(dev, ctx, false);
1571     +}
1572     EXPORT_SYMBOL(drm_atomic_helper_disable_all);
1573    
1574     /**
1575     @@ -2793,7 +2802,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
1576     while (1) {
1577     ret = drm_modeset_lock_all_ctx(dev, &ctx);
1578     if (!ret)
1579     - ret = drm_atomic_helper_disable_all(dev, &ctx);
1580     + ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
1581    
1582     if (ret != -EDEADLK)
1583     break;
1584     @@ -2897,16 +2906,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
1585     struct drm_connector_state *new_conn_state;
1586     struct drm_crtc *crtc;
1587     struct drm_crtc_state *new_crtc_state;
1588     - unsigned plane_mask = 0;
1589     - struct drm_device *dev = state->dev;
1590     - int ret;
1591    
1592     state->acquire_ctx = ctx;
1593    
1594     - for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1595     - plane_mask |= BIT(drm_plane_index(plane));
1596     + for_each_new_plane_in_state(state, plane, new_plane_state, i)
1597     state->planes[i].old_state = plane->state;
1598     - }
1599    
1600     for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
1601     state->crtcs[i].old_state = crtc->state;
1602     @@ -2914,11 +2918,7 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
1603     for_each_new_connector_in_state(state, connector, new_conn_state, i)
1604     state->connectors[i].old_state = connector->state;
1605    
1606     - ret = drm_atomic_commit(state);
1607     - if (plane_mask)
1608     - drm_atomic_clean_old_fb(dev, plane_mask, ret);
1609     -
1610     - return ret;
1611     + return drm_atomic_commit(state);
1612     }
1613     EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
1614    
1615     diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1616     index 41b492f99955..c022ab6e84bd 100644
1617     --- a/drivers/gpu/drm/drm_dp_mst_topology.c
1618     +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1619     @@ -2862,12 +2862,14 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m,
1620     }
1621     }
1622    
1623     +#define DP_PAYLOAD_TABLE_SIZE 64
1624     +
1625     static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
1626     char *buf)
1627     {
1628     int i;
1629    
1630     - for (i = 0; i < 64; i += 16) {
1631     + for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
1632     if (drm_dp_dpcd_read(mgr->aux,
1633     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
1634     &buf[i], 16) != 16)
1635     @@ -2936,7 +2938,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
1636    
1637     mutex_lock(&mgr->lock);
1638     if (mgr->mst_primary) {
1639     - u8 buf[64];
1640     + u8 buf[DP_PAYLOAD_TABLE_SIZE];
1641     int ret;
1642    
1643     ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
1644     @@ -2954,8 +2956,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
1645     seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
1646     buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
1647     if (dump_dp_payload_table(mgr, buf))
1648     - seq_printf(m, "payload table: %*ph\n", 63, buf);
1649     -
1650     + seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
1651     }
1652    
1653     mutex_unlock(&mgr->lock);
1654     diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
1655     index e8e4ea14b12b..e05e5399af2d 100644
1656     --- a/drivers/gpu/drm/gma500/psb_intel_drv.h
1657     +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
1658     @@ -255,7 +255,7 @@ extern int intelfb_remove(struct drm_device *dev,
1659     extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
1660     const struct drm_display_mode *mode,
1661     struct drm_display_mode *adjusted_mode);
1662     -extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
1663     +extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
1664     struct drm_display_mode *mode);
1665     extern int psb_intel_lvds_set_property(struct drm_connector *connector,
1666     struct drm_property *property,
1667     diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
1668     index be3eefec5152..8baf6325c6e4 100644
1669     --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
1670     +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
1671     @@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
1672     }
1673     }
1674    
1675     -int psb_intel_lvds_mode_valid(struct drm_connector *connector,
1676     +enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
1677     struct drm_display_mode *mode)
1678     {
1679     struct drm_psb_private *dev_priv = connector->dev->dev_private;
1680     diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
1681     index a7e55c422501..0b632dc0cf7d 100644
1682     --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
1683     +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
1684     @@ -155,10 +155,10 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
1685     (target << 28));
1686     nvkm_wr32(device, 0x002274, (runl << 20) | nr);
1687    
1688     - if (wait_event_timeout(fifo->runlist[runl].wait,
1689     - !(nvkm_rd32(device, 0x002284 + (runl * 0x08))
1690     - & 0x00100000),
1691     - msecs_to_jiffies(2000)) == 0)
1692     + if (nvkm_msec(device, 2000,
1693     + if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
1694     + break;
1695     + ) < 0)
1696     nvkm_error(subdev, "runlist %d update timeout\n", runl);
1697     unlock:
1698     mutex_unlock(&subdev->mutex);
1699     diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
1700     index 424cd1b66575..337d3a1c2a40 100644
1701     --- a/drivers/gpu/drm/radeon/radeon_connectors.c
1702     +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
1703     @@ -853,7 +853,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
1704     return ret;
1705     }
1706    
1707     -static int radeon_lvds_mode_valid(struct drm_connector *connector,
1708     +static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector,
1709     struct drm_display_mode *mode)
1710     {
1711     struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1712     @@ -1013,7 +1013,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
1713     return ret;
1714     }
1715    
1716     -static int radeon_vga_mode_valid(struct drm_connector *connector,
1717     +static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector,
1718     struct drm_display_mode *mode)
1719     {
1720     struct drm_device *dev = connector->dev;
1721     @@ -1157,7 +1157,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
1722     return 1;
1723     }
1724    
1725     -static int radeon_tv_mode_valid(struct drm_connector *connector,
1726     +static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector,
1727     struct drm_display_mode *mode)
1728     {
1729     if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
1730     @@ -1499,7 +1499,7 @@ static void radeon_dvi_force(struct drm_connector *connector)
1731     radeon_connector->use_digital = true;
1732     }
1733    
1734     -static int radeon_dvi_mode_valid(struct drm_connector *connector,
1735     +static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector,
1736     struct drm_display_mode *mode)
1737     {
1738     struct drm_device *dev = connector->dev;
1739     @@ -1801,7 +1801,7 @@ out:
1740     return ret;
1741     }
1742    
1743     -static int radeon_dp_mode_valid(struct drm_connector *connector,
1744     +static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector,
1745     struct drm_display_mode *mode)
1746     {
1747     struct drm_device *dev = connector->dev;
1748     diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
1749     index febb21ee190e..584b10d3fc3d 100644
1750     --- a/drivers/hid/hid-plantronics.c
1751     +++ b/drivers/hid/hid-plantronics.c
1752     @@ -2,7 +2,7 @@
1753     * Plantronics USB HID Driver
1754     *
1755     * Copyright (c) 2014 JD Cole <jd.cole@plantronics.com>
1756     - * Copyright (c) 2015 Terry Junge <terry.junge@plantronics.com>
1757     + * Copyright (c) 2015-2018 Terry Junge <terry.junge@plantronics.com>
1758     */
1759    
1760     /*
1761     @@ -48,6 +48,10 @@ static int plantronics_input_mapping(struct hid_device *hdev,
1762     unsigned short mapped_key;
1763     unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
1764    
1765     + /* special case for PTT products */
1766     + if (field->application == HID_GD_JOYSTICK)
1767     + goto defaulted;
1768     +
1769     /* handle volume up/down mapping */
1770     /* non-standard types or multi-HID interfaces - plt_type is PID */
1771     if (!(plt_type & HID_USAGE_PAGE)) {
1772     diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
1773     index d92827556389..136a34dc31b8 100644
1774     --- a/drivers/hid/i2c-hid/i2c-hid.c
1775     +++ b/drivers/hid/i2c-hid/i2c-hid.c
1776     @@ -1036,6 +1036,14 @@ static int i2c_hid_probe(struct i2c_client *client,
1777     pm_runtime_enable(&client->dev);
1778     device_enable_async_suspend(&client->dev);
1779    
1780     + /* Make sure there is something at this address */
1781     + ret = i2c_smbus_read_byte(client);
1782     + if (ret < 0) {
1783     + dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
1784     + ret = -ENXIO;
1785     + goto err_pm;
1786     + }
1787     +
1788     ret = i2c_hid_fetch_hid_descriptor(ihid);
1789     if (ret < 0)
1790     goto err_pm;
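
The i2c-hid hunk above adds a cheap presence test before the expensive descriptor fetch: a single SMBus byte read either gets an ACK from the address or fails, in which case probing stops with -ENXIO. The shape of that check, as a small helper under the same assumptions:

    #include <linux/errno.h>
    #include <linux/i2c.h>

    /* Return 0 if something ACKs the client's address, -ENXIO otherwise. */
    static int i2c_device_present(struct i2c_client *client)
    {
    	int ret = i2c_smbus_read_byte(client);	/* cheapest possible transaction */

    	return ret < 0 ? -ENXIO : 0;
    }
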
1791     diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
1792     index 56e46581b84b..6f2fe63e8f5a 100644
1793     --- a/drivers/i2c/i2c-core-base.c
1794     +++ b/drivers/i2c/i2c-core-base.c
1795     @@ -808,8 +808,11 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
1796     */
1797     void i2c_unregister_device(struct i2c_client *client)
1798     {
1799     - if (client->dev.of_node)
1800     + if (client->dev.of_node) {
1801     of_node_clear_flag(client->dev.of_node, OF_POPULATED);
1802     + of_node_put(client->dev.of_node);
1803     + }
1804     +
1805     if (ACPI_COMPANION(&client->dev))
1806     acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
1807     device_unregister(&client->dev);
1808     diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
1809     index d8efdc191c27..55252079faf6 100644
1810     --- a/drivers/infiniband/core/mad.c
1811     +++ b/drivers/infiniband/core/mad.c
1812     @@ -1558,7 +1558,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1813     mad_reg_req->oui, 3)) {
1814     method = &(*vendor_table)->vendor_class[
1815     vclass]->method_table[i];
1816     - BUG_ON(!*method);
1817     + if (!*method)
1818     + goto error3;
1819     goto check_in_use;
1820     }
1821     }
1822     @@ -1568,10 +1569,12 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1823     vclass]->oui[i])) {
1824     method = &(*vendor_table)->vendor_class[
1825     vclass]->method_table[i];
1826     - BUG_ON(*method);
1827     /* Allocate method table for this OUI */
1828     - if ((ret = allocate_method_table(method)))
1829     - goto error3;
1830     + if (!*method) {
1831     + ret = allocate_method_table(method);
1832     + if (ret)
1833     + goto error3;
1834     + }
1835     memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1836     mad_reg_req->oui, 3);
1837     goto check_in_use;
1838     diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
1839     index e47baf0950e3..a22b992cde38 100644
1840     --- a/drivers/infiniband/core/ucma.c
1841     +++ b/drivers/infiniband/core/ucma.c
1842     @@ -218,7 +218,7 @@ static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
1843     return NULL;
1844    
1845     mutex_lock(&mut);
1846     - mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
1847     + mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
1848     mutex_unlock(&mut);
1849     if (mc->id < 0)
1850     goto error;
1851     @@ -1404,6 +1404,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
1852     goto err3;
1853     }
1854    
1855     + mutex_lock(&mut);
1856     + idr_replace(&multicast_idr, mc, mc->id);
1857     + mutex_unlock(&mut);
1858     +
1859     mutex_unlock(&file->mut);
1860     ucma_put_ctx(ctx);
1861     return 0;
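
The ucma change above splits ID allocation from publication: the IDR slot is reserved with a NULL pointer while the multicast entry is still being set up, and the real pointer is installed with idr_replace() only once initialisation is complete, so a concurrent lookup cannot grab a half-initialised entry. A generic sketch of that reserve-then-publish pattern, with simplified names:

    #include <linux/idr.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    static DEFINE_IDR(my_idr);
    static DEFINE_MUTEX(my_lock);

    struct my_obj { int id; /* ... */ };

    static struct my_obj *my_obj_create(void)
    {
    	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

    	if (!obj)
    		return NULL;

    	mutex_lock(&my_lock);
    	obj->id = idr_alloc(&my_idr, NULL, 0, 0, GFP_KERNEL);	/* reserve the ID */
    	mutex_unlock(&my_lock);
    	if (obj->id < 0) {
    		kfree(obj);
    		return NULL;
    	}

    	/* ... finish initialisation; lookups still see NULL for this ID ... */

    	mutex_lock(&my_lock);
    	idr_replace(&my_idr, obj, obj->id);			/* publish it */
    	mutex_unlock(&my_lock);
    	return obj;
    }
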
1862     diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
1863     index 186dce6bba8f..b8229d7b0ff5 100644
1864     --- a/drivers/infiniband/core/uverbs_cmd.c
1865     +++ b/drivers/infiniband/core/uverbs_cmd.c
1866     @@ -3376,6 +3376,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
1867     goto err_uobj;
1868     }
1869    
1870     + if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
1871     + err = -EINVAL;
1872     + goto err_put;
1873     + }
1874     +
1875     flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
1876     sizeof(union ib_flow_spec), GFP_KERNEL);
1877     if (!flow_attr) {
1878     diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
1879     index 9032f77cc38d..feb80dbb5948 100644
1880     --- a/drivers/infiniband/core/verbs.c
1881     +++ b/drivers/infiniband/core/verbs.c
1882     @@ -2115,10 +2115,16 @@ static void __ib_drain_sq(struct ib_qp *qp)
1883     struct ib_cq *cq = qp->send_cq;
1884     struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
1885     struct ib_drain_cqe sdrain;
1886     - struct ib_send_wr swr = {}, *bad_swr;
1887     + struct ib_send_wr *bad_swr;
1888     + struct ib_rdma_wr swr = {
1889     + .wr = {
1890     + .next = NULL,
1891     + { .wr_cqe = &sdrain.cqe, },
1892     + .opcode = IB_WR_RDMA_WRITE,
1893     + },
1894     + };
1895     int ret;
1896    
1897     - swr.wr_cqe = &sdrain.cqe;
1898     sdrain.cqe.done = ib_drain_qp_done;
1899     init_completion(&sdrain.done);
1900    
1901     @@ -2128,7 +2134,7 @@ static void __ib_drain_sq(struct ib_qp *qp)
1902     return;
1903     }
1904    
1905     - ret = ib_post_send(qp, &swr, &bad_swr);
1906     + ret = ib_post_send(qp, &swr.wr, &bad_swr);
1907     if (ret) {
1908     WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
1909     return;
1910     diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
1911     index ee578fa713c2..97c2225829ea 100644
1912     --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
1913     +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
1914     @@ -787,13 +787,17 @@ static int srpt_post_recv(struct srpt_device *sdev,
1915     */
1916     static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
1917     {
1918     - struct ib_send_wr wr, *bad_wr;
1919     + struct ib_send_wr *bad_wr;
1920     + struct ib_rdma_wr wr = {
1921     + .wr = {
1922     + .next = NULL,
1923     + { .wr_cqe = &ch->zw_cqe, },
1924     + .opcode = IB_WR_RDMA_WRITE,
1925     + .send_flags = IB_SEND_SIGNALED,
1926     + }
1927     + };
1928    
1929     - memset(&wr, 0, sizeof(wr));
1930     - wr.opcode = IB_WR_RDMA_WRITE;
1931     - wr.wr_cqe = &ch->zw_cqe;
1932     - wr.send_flags = IB_SEND_SIGNALED;
1933     - return ib_post_send(ch->qp, &wr, &bad_wr);
1934     + return ib_post_send(ch->qp, &wr.wr, &bad_wr);
1935     }
1936    
1937     static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
1938     diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1939     index 7b5fa501bbcf..696e540304fd 100644
1940     --- a/drivers/input/mouse/elan_i2c_core.c
1941     +++ b/drivers/input/mouse/elan_i2c_core.c
1942     @@ -1262,6 +1262,8 @@ static const struct acpi_device_id elan_acpi_id[] = {
1943     { "ELAN0611", 0 },
1944     { "ELAN0612", 0 },
1945     { "ELAN0618", 0 },
1946     + { "ELAN061D", 0 },
1947     + { "ELAN0622", 0 },
1948     { "ELAN1000", 0 },
1949     { }
1950     };
1951     diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1952     index b353d494ad40..136f6e7bf797 100644
1953     --- a/drivers/input/serio/i8042-x86ia64io.h
1954     +++ b/drivers/input/serio/i8042-x86ia64io.h
1955     @@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
1956     DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
1957     },
1958     },
1959     + {
1960     + /* Lenovo LaVie Z */
1961     + .matches = {
1962     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1963     + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
1964     + },
1965     + },
1966     { }
1967     };
1968    
1969     diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
1970     index 119f4ef0d421..b7f943f96068 100644
1971     --- a/drivers/irqchip/irq-ls-scfg-msi.c
1972     +++ b/drivers/irqchip/irq-ls-scfg-msi.c
1973     @@ -21,6 +21,7 @@
1974     #include <linux/of_pci.h>
1975     #include <linux/of_platform.h>
1976     #include <linux/spinlock.h>
1977     +#include <linux/dma-iommu.h>
1978    
1979     #define MSI_IRQS_PER_MSIR 32
1980     #define MSI_MSIR_OFFSET 4
1981     @@ -94,6 +95,8 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
1982    
1983     if (msi_affinity_flag)
1984     msg->data |= cpumask_first(data->common->affinity);
1985     +
1986     + iommu_dma_map_msi_msg(data->irq, msg);
1987     }
1988    
1989     static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
1990     diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
1991     index 9bc32578a766..c0dd17a82170 100644
1992     --- a/drivers/lightnvm/pblk-rb.c
1993     +++ b/drivers/lightnvm/pblk-rb.c
1994     @@ -142,10 +142,9 @@ static void clean_wctx(struct pblk_w_ctx *w_ctx)
1995     {
1996     int flags;
1997    
1998     -try:
1999     flags = READ_ONCE(w_ctx->flags);
2000     - if (!(flags & PBLK_SUBMITTED_ENTRY))
2001     - goto try;
2002     + WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY),
2003     + "pblk: overwriting unsubmitted data\n");
2004    
2005     /* Release flags on context. Protect from writes and reads */
2006     smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
2007     diff --git a/drivers/md/md.c b/drivers/md/md.c
2008     index 11a67eac55b1..5599712d478e 100644
2009     --- a/drivers/md/md.c
2010     +++ b/drivers/md/md.c
2011     @@ -6498,6 +6498,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
2012     char b[BDEVNAME_SIZE];
2013     struct md_rdev *rdev;
2014    
2015     + if (!mddev->pers)
2016     + return -ENODEV;
2017     +
2018     rdev = find_rdev(mddev, dev);
2019     if (!rdev)
2020     return -ENXIO;
2021     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2022     index 029ecba60727..78d830763704 100644
2023     --- a/drivers/md/raid1.c
2024     +++ b/drivers/md/raid1.c
2025     @@ -2462,6 +2462,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2026     fix_read_error(conf, r1_bio->read_disk,
2027     r1_bio->sector, r1_bio->sectors);
2028     unfreeze_array(conf);
2029     + } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2030     + md_error(mddev, rdev);
2031     } else {
2032     r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2033     }
2034     diff --git a/drivers/media/common/siano/smsendian.c b/drivers/media/common/siano/smsendian.c
2035     index bfe831c10b1c..b95a631f23f9 100644
2036     --- a/drivers/media/common/siano/smsendian.c
2037     +++ b/drivers/media/common/siano/smsendian.c
2038     @@ -35,7 +35,7 @@ void smsendian_handle_tx_message(void *buffer)
2039     switch (msg->x_msg_header.msg_type) {
2040     case MSG_SMS_DATA_DOWNLOAD_REQ:
2041     {
2042     - msg->msg_data[0] = le32_to_cpu(msg->msg_data[0]);
2043     + msg->msg_data[0] = le32_to_cpu((__force __le32)(msg->msg_data[0]));
2044     break;
2045     }
2046    
2047     @@ -44,7 +44,7 @@ void smsendian_handle_tx_message(void *buffer)
2048     sizeof(struct sms_msg_hdr))/4;
2049    
2050     for (i = 0; i < msg_words; i++)
2051     - msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]);
2052     + msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]);
2053    
2054     break;
2055     }
2056     @@ -64,7 +64,7 @@ void smsendian_handle_rx_message(void *buffer)
2057     {
2058     struct sms_version_res *ver =
2059     (struct sms_version_res *) msg;
2060     - ver->chip_model = le16_to_cpu(ver->chip_model);
2061     + ver->chip_model = le16_to_cpu((__force __le16)ver->chip_model);
2062     break;
2063     }
2064    
2065     @@ -81,7 +81,7 @@ void smsendian_handle_rx_message(void *buffer)
2066     sizeof(struct sms_msg_hdr))/4;
2067    
2068     for (i = 0; i < msg_words; i++)
2069     - msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]);
2070     + msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]);
2071    
2072     break;
2073     }
2074     @@ -95,9 +95,9 @@ void smsendian_handle_message_header(void *msg)
2075     #ifdef __BIG_ENDIAN
2076     struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)msg;
2077    
2078     - phdr->msg_type = le16_to_cpu(phdr->msg_type);
2079     - phdr->msg_length = le16_to_cpu(phdr->msg_length);
2080     - phdr->msg_flags = le16_to_cpu(phdr->msg_flags);
2081     + phdr->msg_type = le16_to_cpu((__force __le16)phdr->msg_type);
2082     + phdr->msg_length = le16_to_cpu((__force __le16)phdr->msg_length);
2083     + phdr->msg_flags = le16_to_cpu((__force __le16)phdr->msg_flags);
2084     #endif /* __BIG_ENDIAN */
2085     }
2086     EXPORT_SYMBOL_GPL(smsendian_handle_message_header);
2087     diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
2088     index 700f433261d0..e4d7f2febf00 100644
2089     --- a/drivers/media/i2c/smiapp/smiapp-core.c
2090     +++ b/drivers/media/i2c/smiapp/smiapp-core.c
2091     @@ -1001,7 +1001,7 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor,
2092     if (rval)
2093     goto out;
2094    
2095     - for (i = 0; i < 1000; i++) {
2096     + for (i = 1000; i > 0; i--) {
2097     rval = smiapp_read(
2098     sensor,
2099     SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s);
2100     @@ -1012,11 +1012,10 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor,
2101     if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY)
2102     break;
2103    
2104     - if (--i == 0) {
2105     - rval = -ETIMEDOUT;
2106     - goto out;
2107     - }
2108     -
2109     + }
2110     + if (!i) {
2111     + rval = -ETIMEDOUT;
2112     + goto out;
2113     }
2114    
2115     for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
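
The smiapp change above folds the timeout check into the loop condition: counting the retry variable down means "loop exhausted" and "timed out" become the same test after the loop, so the extra "--i == 0" branch inside the body goes away. A standalone sketch of that count-down poll; read_status() and READY are hypothetical placeholders:

    #include <errno.h>

    #define READY 0x01

    /* Placeholder: a real driver would read the device's status register. */
    static unsigned int read_status(void)
    {
    	return READY;
    }

    static int wait_until_ready(void)
    {
    	int i;

    	for (i = 1000; i > 0; i--) {
    		if (read_status() & READY)
    			break;		/* device signalled readiness */
    	}
    	if (!i)
    		return -ETIMEDOUT;	/* all retries used up */
    	return 0;
    }
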
2116     diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
2117     index e79f72b8b858..62b2c5d9bdfb 100644
2118     --- a/drivers/media/media-device.c
2119     +++ b/drivers/media/media-device.c
2120     @@ -54,9 +54,10 @@ static int media_device_close(struct file *filp)
2121     return 0;
2122     }
2123    
2124     -static int media_device_get_info(struct media_device *dev,
2125     - struct media_device_info *info)
2126     +static long media_device_get_info(struct media_device *dev, void *arg)
2127     {
2128     + struct media_device_info *info = arg;
2129     +
2130     memset(info, 0, sizeof(*info));
2131    
2132     if (dev->driver_name[0])
2133     @@ -93,9 +94,9 @@ static struct media_entity *find_entity(struct media_device *mdev, u32 id)
2134     return NULL;
2135     }
2136    
2137     -static long media_device_enum_entities(struct media_device *mdev,
2138     - struct media_entity_desc *entd)
2139     +static long media_device_enum_entities(struct media_device *mdev, void *arg)
2140     {
2141     + struct media_entity_desc *entd = arg;
2142     struct media_entity *ent;
2143    
2144     ent = find_entity(mdev, entd->id);
2145     @@ -146,9 +147,9 @@ static void media_device_kpad_to_upad(const struct media_pad *kpad,
2146     upad->flags = kpad->flags;
2147     }
2148    
2149     -static long media_device_enum_links(struct media_device *mdev,
2150     - struct media_links_enum *links)
2151     +static long media_device_enum_links(struct media_device *mdev, void *arg)
2152     {
2153     + struct media_links_enum *links = arg;
2154     struct media_entity *entity;
2155    
2156     entity = find_entity(mdev, links->entity);
2157     @@ -194,9 +195,9 @@ static long media_device_enum_links(struct media_device *mdev,
2158     return 0;
2159     }
2160    
2161     -static long media_device_setup_link(struct media_device *mdev,
2162     - struct media_link_desc *linkd)
2163     +static long media_device_setup_link(struct media_device *mdev, void *arg)
2164     {
2165     + struct media_link_desc *linkd = arg;
2166     struct media_link *link = NULL;
2167     struct media_entity *source;
2168     struct media_entity *sink;
2169     @@ -222,9 +223,9 @@ static long media_device_setup_link(struct media_device *mdev,
2170     return __media_entity_setup_link(link, linkd->flags);
2171     }
2172    
2173     -static long media_device_get_topology(struct media_device *mdev,
2174     - struct media_v2_topology *topo)
2175     +static long media_device_get_topology(struct media_device *mdev, void *arg)
2176     {
2177     + struct media_v2_topology *topo = arg;
2178     struct media_entity *entity;
2179     struct media_interface *intf;
2180     struct media_pad *pad;
2181     diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
2182     index ef4906406ebf..a50461861133 100644
2183     --- a/drivers/media/pci/saa7164/saa7164-fw.c
2184     +++ b/drivers/media/pci/saa7164/saa7164-fw.c
2185     @@ -426,7 +426,8 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
2186     __func__, fw->size);
2187    
2188     if (fw->size != fwlength) {
2189     - printk(KERN_ERR "xc5000: firmware incorrect size\n");
2190     + printk(KERN_ERR "saa7164: firmware incorrect size %zu != %u\n",
2191     + fw->size, fwlength);
2192     ret = -ENOMEM;
2193     goto out;
2194     }
2195     diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
2196     index c3fafa97b2d0..0ea8dd44026c 100644
2197     --- a/drivers/media/pci/tw686x/tw686x-video.c
2198     +++ b/drivers/media/pci/tw686x/tw686x-video.c
2199     @@ -1228,7 +1228,8 @@ int tw686x_video_init(struct tw686x_dev *dev)
2200     vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
2201     vc->vidq.min_buffers_needed = 2;
2202     vc->vidq.lock = &vc->vb_mutex;
2203     - vc->vidq.gfp_flags = GFP_DMA32;
2204     + vc->vidq.gfp_flags = dev->dma_mode != TW686X_DMA_MODE_MEMCPY ?
2205     + GFP_DMA32 : 0;
2206     vc->vidq.dev = &dev->pci_dev->dev;
2207    
2208     err = vb2_queue_init(&vc->vidq);
2209     diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
2210     index 1a428fe9f070..9f023bc6e1b7 100644
2211     --- a/drivers/media/platform/omap3isp/isp.c
2212     +++ b/drivers/media/platform/omap3isp/isp.c
2213     @@ -1945,6 +1945,7 @@ error_csi2:
2214    
2215     static void isp_detach_iommu(struct isp_device *isp)
2216     {
2217     + arm_iommu_detach_device(isp->dev);
2218     arm_iommu_release_mapping(isp->mapping);
2219     isp->mapping = NULL;
2220     }
2221     @@ -1961,8 +1962,7 @@ static int isp_attach_iommu(struct isp_device *isp)
2222     mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
2223     if (IS_ERR(mapping)) {
2224     dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
2225     - ret = PTR_ERR(mapping);
2226     - goto error;
2227     + return PTR_ERR(mapping);
2228     }
2229    
2230     isp->mapping = mapping;
2231     @@ -1977,7 +1977,8 @@ static int isp_attach_iommu(struct isp_device *isp)
2232     return 0;
2233    
2234     error:
2235     - isp_detach_iommu(isp);
2236     + arm_iommu_release_mapping(isp->mapping);
2237     + isp->mapping = NULL;
2238     return ret;
2239     }
2240    
2241     diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
2242     index 070bac36d766..2e2b8c409150 100644
2243     --- a/drivers/media/platform/rcar_jpu.c
2244     +++ b/drivers/media/platform/rcar_jpu.c
2245     @@ -1280,7 +1280,7 @@ static int jpu_open(struct file *file)
2246     /* ...issue software reset */
2247     ret = jpu_reset(jpu);
2248     if (ret)
2249     - goto device_prepare_rollback;
2250     + goto jpu_reset_rollback;
2251     }
2252    
2253     jpu->ref_count++;
2254     @@ -1288,6 +1288,8 @@ static int jpu_open(struct file *file)
2255     mutex_unlock(&jpu->mutex);
2256     return 0;
2257    
2258     +jpu_reset_rollback:
2259     + clk_disable_unprepare(jpu->clk);
2260     device_prepare_rollback:
2261     mutex_unlock(&jpu->mutex);
2262     v4l_prepare_rollback:
2263     diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
2264     index b3034f80163f..8ce6f9cff746 100644
2265     --- a/drivers/media/radio/si470x/radio-si470x-i2c.c
2266     +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
2267     @@ -92,7 +92,7 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
2268     */
2269     int si470x_get_register(struct si470x_device *radio, int regnr)
2270     {
2271     - u16 buf[READ_REG_NUM];
2272     + __be16 buf[READ_REG_NUM];
2273     struct i2c_msg msgs[1] = {
2274     {
2275     .addr = radio->client->addr,
2276     @@ -117,7 +117,7 @@ int si470x_get_register(struct si470x_device *radio, int regnr)
2277     int si470x_set_register(struct si470x_device *radio, int regnr)
2278     {
2279     int i;
2280     - u16 buf[WRITE_REG_NUM];
2281     + __be16 buf[WRITE_REG_NUM];
2282     struct i2c_msg msgs[1] = {
2283     {
2284     .addr = radio->client->addr,
2285     @@ -147,7 +147,7 @@ int si470x_set_register(struct si470x_device *radio, int regnr)
2286     static int si470x_get_all_registers(struct si470x_device *radio)
2287     {
2288     int i;
2289     - u16 buf[READ_REG_NUM];
2290     + __be16 buf[READ_REG_NUM];
2291     struct i2c_msg msgs[1] = {
2292     {
2293     .addr = radio->client->addr,
2294     diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
2295     index ffbb178c6918..2dbf632c10de 100644
2296     --- a/drivers/media/v4l2-core/videobuf2-core.c
2297     +++ b/drivers/media/v4l2-core/videobuf2-core.c
2298     @@ -912,9 +912,12 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
2299     dprintk(4, "done processing on buffer %d, state: %d\n",
2300     vb->index, state);
2301    
2302     - /* sync buffers */
2303     - for (plane = 0; plane < vb->num_planes; ++plane)
2304     - call_void_memop(vb, finish, vb->planes[plane].mem_priv);
2305     + if (state != VB2_BUF_STATE_QUEUED &&
2306     + state != VB2_BUF_STATE_REQUEUEING) {
2307     + /* sync buffers */
2308     + for (plane = 0; plane < vb->num_planes; ++plane)
2309     + call_void_memop(vb, finish, vb->planes[plane].mem_priv);
2310     + }
2311    
2312     spin_lock_irqsave(&q->done_lock, flags);
2313     if (state == VB2_BUF_STATE_QUEUED ||
2314     diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
2315     index a4803ac192bb..1d49a8dd4a37 100644
2316     --- a/drivers/memory/tegra/mc.c
2317     +++ b/drivers/memory/tegra/mc.c
2318     @@ -20,14 +20,6 @@
2319     #include "mc.h"
2320    
2321     #define MC_INTSTATUS 0x000
2322     -#define MC_INT_DECERR_MTS (1 << 16)
2323     -#define MC_INT_SECERR_SEC (1 << 13)
2324     -#define MC_INT_DECERR_VPR (1 << 12)
2325     -#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
2326     -#define MC_INT_INVALID_SMMU_PAGE (1 << 10)
2327     -#define MC_INT_ARBITRATION_EMEM (1 << 9)
2328     -#define MC_INT_SECURITY_VIOLATION (1 << 8)
2329     -#define MC_INT_DECERR_EMEM (1 << 6)
2330    
2331     #define MC_INTMASK 0x004
2332    
2333     @@ -248,12 +240,13 @@ static const char *const error_names[8] = {
2334     static irqreturn_t tegra_mc_irq(int irq, void *data)
2335     {
2336     struct tegra_mc *mc = data;
2337     - unsigned long status, mask;
2338     + unsigned long status;
2339     unsigned int bit;
2340    
2341     /* mask all interrupts to avoid flooding */
2342     - status = mc_readl(mc, MC_INTSTATUS);
2343     - mask = mc_readl(mc, MC_INTMASK);
2344     + status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
2345     + if (!status)
2346     + return IRQ_NONE;
2347    
2348     for_each_set_bit(bit, &status, 32) {
2349     const char *error = status_names[bit] ?: "unknown";
2350     @@ -346,7 +339,6 @@ static int tegra_mc_probe(struct platform_device *pdev)
2351     const struct of_device_id *match;
2352     struct resource *res;
2353     struct tegra_mc *mc;
2354     - u32 value;
2355     int err;
2356    
2357     match = of_match_node(tegra_mc_of_match, pdev->dev.of_node);
2358     @@ -414,11 +406,7 @@ static int tegra_mc_probe(struct platform_device *pdev)
2359    
2360     WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n");
2361    
2362     - value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
2363     - MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
2364     - MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM;
2365     -
2366     - mc_writel(mc, value, MC_INTMASK);
2367     + mc_writel(mc, mc->soc->intmask, MC_INTMASK);
2368    
2369     return 0;
2370     }
2371     diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
2372     index ddb16676c3af..24e020b4609b 100644
2373     --- a/drivers/memory/tegra/mc.h
2374     +++ b/drivers/memory/tegra/mc.h
2375     @@ -14,6 +14,15 @@
2376    
2377     #include <soc/tegra/mc.h>
2378    
2379     +#define MC_INT_DECERR_MTS (1 << 16)
2380     +#define MC_INT_SECERR_SEC (1 << 13)
2381     +#define MC_INT_DECERR_VPR (1 << 12)
2382     +#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
2383     +#define MC_INT_INVALID_SMMU_PAGE (1 << 10)
2384     +#define MC_INT_ARBITRATION_EMEM (1 << 9)
2385     +#define MC_INT_SECURITY_VIOLATION (1 << 8)
2386     +#define MC_INT_DECERR_EMEM (1 << 6)
2387     +
2388     static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
2389     {
2390     return readl(mc->regs + offset);
2391     diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
2392     index ba8fff3d66a6..6d2a5a849d92 100644
2393     --- a/drivers/memory/tegra/tegra114.c
2394     +++ b/drivers/memory/tegra/tegra114.c
2395     @@ -930,4 +930,6 @@ const struct tegra_mc_soc tegra114_mc_soc = {
2396     .atom_size = 32,
2397     .client_id_mask = 0x7f,
2398     .smmu = &tegra114_smmu_soc,
2399     + .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
2400     + MC_INT_DECERR_EMEM,
2401     };
2402     diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
2403     index 5a58e440f4a7..9f68a56f2727 100644
2404     --- a/drivers/memory/tegra/tegra124.c
2405     +++ b/drivers/memory/tegra/tegra124.c
2406     @@ -1020,6 +1020,9 @@ const struct tegra_mc_soc tegra124_mc_soc = {
2407     .smmu = &tegra124_smmu_soc,
2408     .emem_regs = tegra124_mc_emem_regs,
2409     .num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs),
2410     + .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
2411     + MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
2412     + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
2413     };
2414     #endif /* CONFIG_ARCH_TEGRA_124_SOC */
2415    
2416     @@ -1042,5 +1045,8 @@ const struct tegra_mc_soc tegra132_mc_soc = {
2417     .atom_size = 32,
2418     .client_id_mask = 0x7f,
2419     .smmu = &tegra132_smmu_soc,
2420     + .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
2421     + MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
2422     + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
2423     };
2424     #endif /* CONFIG_ARCH_TEGRA_132_SOC */
2425     diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
2426     index 5e144abe4c18..47c78a6d8f00 100644
2427     --- a/drivers/memory/tegra/tegra210.c
2428     +++ b/drivers/memory/tegra/tegra210.c
2429     @@ -1077,4 +1077,7 @@ const struct tegra_mc_soc tegra210_mc_soc = {
2430     .atom_size = 64,
2431     .client_id_mask = 0xff,
2432     .smmu = &tegra210_smmu_soc,
2433     + .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
2434     + MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
2435     + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
2436     };
2437     diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
2438     index b44737840e70..d0689428ea1a 100644
2439     --- a/drivers/memory/tegra/tegra30.c
2440     +++ b/drivers/memory/tegra/tegra30.c
2441     @@ -952,4 +952,6 @@ const struct tegra_mc_soc tegra30_mc_soc = {
2442     .atom_size = 16,
2443     .client_id_mask = 0x7f,
2444     .smmu = &tegra30_smmu_soc,
2445     + .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
2446     + MC_INT_DECERR_EMEM,
2447     };
2448     diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
2449     index b0ca5a4c841e..c5528ae982f2 100644
2450     --- a/drivers/mfd/cros_ec.c
2451     +++ b/drivers/mfd/cros_ec.c
2452     @@ -112,7 +112,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
2453    
2454     mutex_init(&ec_dev->lock);
2455    
2456     - cros_ec_query_all(ec_dev);
2457     + err = cros_ec_query_all(ec_dev);
2458     + if (err) {
2459     + dev_err(dev, "Cannot identify the EC: error %d\n", err);
2460     + return err;
2461     + }
2462    
2463     if (ec_dev->irq) {
2464     err = request_threaded_irq(ec_dev->irq, NULL, ec_irq_thread,
2465     diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
2466     index 13ef162cf066..a8b9fee4d62a 100644
2467     --- a/drivers/mmc/core/pwrseq_simple.c
2468     +++ b/drivers/mmc/core/pwrseq_simple.c
2469     @@ -40,14 +40,18 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
2470     struct gpio_descs *reset_gpios = pwrseq->reset_gpios;
2471    
2472     if (!IS_ERR(reset_gpios)) {
2473     - int i;
2474     - int values[reset_gpios->ndescs];
2475     + int i, *values;
2476     + int nvalues = reset_gpios->ndescs;
2477    
2478     - for (i = 0; i < reset_gpios->ndescs; i++)
2479     + values = kmalloc_array(nvalues, sizeof(int), GFP_KERNEL);
2480     + if (!values)
2481     + return;
2482     +
2483     + for (i = 0; i < nvalues; i++)
2484     values[i] = value;
2485    
2486     - gpiod_set_array_value_cansleep(
2487     - reset_gpios->ndescs, reset_gpios->desc, values);
2488     + gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc, values);
2489     + kfree(values);
2490     }
2491     }
2492    
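
The pwrseq_simple hunk above removes a variable-length array from the stack; the GPIO values are built in a heap buffer instead and freed after use, and an allocation failure simply skips the operation. The general shape of that conversion with the kernel allocation API, names illustrative:

    #include <linux/gfp.h>
    #include <linux/slab.h>

    /* Fill 'n' integers with 'value' on the heap instead of using a VLA.
     * The caller must kfree() the returned buffer when done. */
    static int *build_value_array(unsigned int n, int value)
    {
    	int *values = kmalloc_array(n, sizeof(*values), GFP_KERNEL);
    	unsigned int i;

    	if (!values)
    		return NULL;
    	for (i = 0; i < n; i++)
    		values[i] = value;
    	return values;
    }
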
2493     diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
2494     index 6a2cbbba29aa..5252885e5cda 100644
2495     --- a/drivers/mmc/host/dw_mmc.c
2496     +++ b/drivers/mmc/host/dw_mmc.c
2497     @@ -1255,6 +1255,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
2498     if (host->state == STATE_WAITING_CMD11_DONE)
2499     sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
2500    
2501     + slot->mmc->actual_clock = 0;
2502     +
2503     if (!clock) {
2504     mci_writel(host, CLKENA, 0);
2505     mci_send_cmd(slot, sdmmc_cmd_bits, 0);
2506     @@ -1313,6 +1315,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
2507    
2508     /* keep the last clock value that was requested from core */
2509     slot->__clk_old = clock;
2510     + slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
2511     + host->bus_hz;
2512     }
2513    
2514     host->current_speed = clock;
2515     diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
2516     index 4005b427023c..16deba1a2385 100644
2517     --- a/drivers/mtd/nand/fsl_ifc_nand.c
2518     +++ b/drivers/mtd/nand/fsl_ifc_nand.c
2519     @@ -342,9 +342,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
2520    
2521     case NAND_CMD_READID:
2522     case NAND_CMD_PARAM: {
2523     + /*
2524     + * For READID, read 8 bytes that are currently used.
2525     + * For PARAM, read all 3 copies of 256-bytes pages.
2526     + */
2527     + int len = 8;
2528     int timing = IFC_FIR_OP_RB;
2529     - if (command == NAND_CMD_PARAM)
2530     + if (command == NAND_CMD_PARAM) {
2531     timing = IFC_FIR_OP_RBCD;
2532     + len = 256 * 3;
2533     + }
2534    
2535     ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
2536     (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
2537     @@ -354,12 +361,8 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
2538     &ifc->ifc_nand.nand_fcr0);
2539     ifc_out32(column, &ifc->ifc_nand.row3);
2540    
2541     - /*
2542     - * although currently it's 8 bytes for READID, we always read
2543     - * the maximum 256 bytes(for PARAM)
2544     - */
2545     - ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
2546     - ifc_nand_ctrl->read_bytes = 256;
2547     + ifc_out32(len, &ifc->ifc_nand.nand_fbcr);
2548     + ifc_nand_ctrl->read_bytes = len;
2549    
2550     set_addr(mtd, 0, 0, 0);
2551     fsl_ifc_run_command(mtd);
2552     diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
2553     index 5ada7a41449c..9645c8f05c7f 100644
2554     --- a/drivers/net/dsa/qca8k.c
2555     +++ b/drivers/net/dsa/qca8k.c
2556     @@ -473,7 +473,7 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
2557     static void
2558     qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
2559     {
2560     - u32 mask = QCA8K_PORT_STATUS_TXMAC;
2561     + u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
2562    
2563     /* Port 0 and 6 have no internal PHY */
2564     if ((port > 0) && (port < 6))
2565     @@ -490,6 +490,7 @@ qca8k_setup(struct dsa_switch *ds)
2566     {
2567     struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2568     int ret, i, phy_mode = -1;
2569     + u32 mask;
2570    
2571     /* Make sure that port 0 is the cpu port */
2572     if (!dsa_is_cpu_port(ds, 0)) {
2573     @@ -515,7 +516,10 @@ qca8k_setup(struct dsa_switch *ds)
2574     if (ret < 0)
2575     return ret;
2576    
2577     - /* Enable CPU Port */
2578     + /* Enable CPU Port, force it to maximum bandwidth and full-duplex */
2579     + mask = QCA8K_PORT_STATUS_SPEED_1000 | QCA8K_PORT_STATUS_TXFLOW |
2580     + QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_DUPLEX;
2581     + qca8k_write(priv, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT), mask);
2582     qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
2583     QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
2584     qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1);
2585     @@ -584,6 +588,47 @@ qca8k_setup(struct dsa_switch *ds)
2586     return 0;
2587     }
2588    
2589     +static void
2590     +qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
2591     +{
2592     + struct qca8k_priv *priv = ds->priv;
2593     + u32 reg;
2594     +
2595     + /* Force fixed-link setting for CPU port, skip others. */
2596     + if (!phy_is_pseudo_fixed_link(phy))
2597     + return;
2598     +
2599     + /* Set port speed */
2600     + switch (phy->speed) {
2601     + case 10:
2602     + reg = QCA8K_PORT_STATUS_SPEED_10;
2603     + break;
2604     + case 100:
2605     + reg = QCA8K_PORT_STATUS_SPEED_100;
2606     + break;
2607     + case 1000:
2608     + reg = QCA8K_PORT_STATUS_SPEED_1000;
2609     + break;
2610     + default:
2611     + dev_dbg(priv->dev, "port%d link speed %dMbps not supported.\n",
2612     + port, phy->speed);
2613     + return;
2614     + }
2615     +
2616     + /* Set duplex mode */
2617     + if (phy->duplex == DUPLEX_FULL)
2618     + reg |= QCA8K_PORT_STATUS_DUPLEX;
2619     +
2620     + /* Force flow control */
2621     + if (dsa_is_cpu_port(ds, port))
2622     + reg |= QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_TXFLOW;
2623     +
2624     + /* Force link down before changing MAC options */
2625     + qca8k_port_set_status(priv, port, 0);
2626     + qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
2627     + qca8k_port_set_status(priv, port, 1);
2628     +}
2629     +
2630     static int
2631     qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
2632     {
2633     @@ -832,6 +877,7 @@ qca8k_get_tag_protocol(struct dsa_switch *ds)
2634     static const struct dsa_switch_ops qca8k_switch_ops = {
2635     .get_tag_protocol = qca8k_get_tag_protocol,
2636     .setup = qca8k_setup,
2637     + .adjust_link = qca8k_adjust_link,
2638     .get_strings = qca8k_get_strings,
2639     .phy_read = qca8k_phy_read,
2640     .phy_write = qca8k_phy_write,
2641     @@ -863,6 +909,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
2642     return -ENOMEM;
2643    
2644     priv->bus = mdiodev->bus;
2645     + priv->dev = &mdiodev->dev;
2646    
2647     /* read the switches ID register */
2648     id = qca8k_read(priv, QCA8K_REG_MASK_CTRL);
2649     @@ -934,6 +981,7 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
2650     qca8k_suspend, qca8k_resume);
2651    
2652     static const struct of_device_id qca8k_of_match[] = {
2653     + { .compatible = "qca,qca8334" },
2654     { .compatible = "qca,qca8337" },
2655     { /* sentinel */ },
2656     };
2657     diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
2658     index 1cf8a920d4ff..613fe5c50236 100644
2659     --- a/drivers/net/dsa/qca8k.h
2660     +++ b/drivers/net/dsa/qca8k.h
2661     @@ -51,8 +51,10 @@
2662     #define QCA8K_GOL_MAC_ADDR0 0x60
2663     #define QCA8K_GOL_MAC_ADDR1 0x64
2664     #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
2665     -#define QCA8K_PORT_STATUS_SPEED GENMASK(2, 0)
2666     -#define QCA8K_PORT_STATUS_SPEED_S 0
2667     +#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0)
2668     +#define QCA8K_PORT_STATUS_SPEED_10 0
2669     +#define QCA8K_PORT_STATUS_SPEED_100 0x1
2670     +#define QCA8K_PORT_STATUS_SPEED_1000 0x2
2671     #define QCA8K_PORT_STATUS_TXMAC BIT(2)
2672     #define QCA8K_PORT_STATUS_RXMAC BIT(3)
2673     #define QCA8K_PORT_STATUS_TXFLOW BIT(4)
2674     @@ -165,6 +167,7 @@ struct qca8k_priv {
2675     struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
2676     struct dsa_switch *ds;
2677     struct mutex reg_mutex;
2678     + struct device *dev;
2679     };
2680    
2681     struct qca8k_mib_desc {
2682     diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
2683     index 52beba8c7a39..e3b7a71fcad9 100644
2684     --- a/drivers/net/ethernet/amazon/ena/ena_com.c
2685     +++ b/drivers/net/ethernet/amazon/ena/ena_com.c
2686     @@ -331,6 +331,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
2687    
2688     memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
2689    
2690     + io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
2691     io_sq->desc_entry_size =
2692     (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
2693     sizeof(struct ena_eth_io_tx_desc) :
2694     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
2695     index 1b45cd73a258..119777986ea4 100644
2696     --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
2697     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
2698     @@ -1128,14 +1128,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
2699    
2700     if (pdata->tx_pause != pdata->phy.tx_pause) {
2701     new_state = 1;
2702     - pdata->hw_if.config_tx_flow_control(pdata);
2703     pdata->tx_pause = pdata->phy.tx_pause;
2704     + pdata->hw_if.config_tx_flow_control(pdata);
2705     }
2706    
2707     if (pdata->rx_pause != pdata->phy.rx_pause) {
2708     new_state = 1;
2709     - pdata->hw_if.config_rx_flow_control(pdata);
2710     pdata->rx_pause = pdata->phy.rx_pause;
2711     + pdata->hw_if.config_rx_flow_control(pdata);
2712     }
2713    
2714     /* Speed support */
2715     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2716     index bfd2d0382f4c..94931318587c 100644
2717     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2718     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2719     @@ -5927,6 +5927,9 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
2720     }
2721     mutex_unlock(&bp->hwrm_cmd_lock);
2722    
2723     + if (!BNXT_SINGLE_PF(bp))
2724     + return 0;
2725     +
2726     diff = link_info->support_auto_speeds ^ link_info->advertising;
2727     if ((link_info->support_auto_speeds | diff) !=
2728     link_info->support_auto_speeds) {
2729     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2730     index ff7a70ffafc6..c133491ad9fa 100644
2731     --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2732     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2733     @@ -1272,8 +1272,11 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
2734     /* We need to alloc a vport for main NIC of PF */
2735     num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
2736    
2737     - if (hdev->num_tqps < num_vport)
2738     - num_vport = hdev->num_tqps;
2739     + if (hdev->num_tqps < num_vport) {
2740     + dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
2741     + hdev->num_tqps, num_vport);
2742     + return -EINVAL;
2743     + }
2744    
2745     /* Alloc the same number of TQPs for every vport */
2746     tqp_per_vport = hdev->num_tqps / num_vport;
2747     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
2748     index d1e4dcec5db2..69726908e72c 100644
2749     --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
2750     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
2751     @@ -1598,6 +1598,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2752     hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2753     ring->desc_cb[i] = *res_cb;
2754     ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2755     + ring->desc[i].rx.bd_base_info = 0;
2756     }
2757    
2758     static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2759     @@ -1605,6 +1606,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2760     ring->desc_cb[i].reuse_flag = 0;
2761     ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
2762     + ring->desc_cb[i].page_offset);
2763     + ring->desc[i].rx.bd_base_info = 0;
2764     }
2765    
2766     static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
2767     @@ -2881,6 +2883,8 @@ static int __init hns3_init_module(void)
2768    
2769     client.ops = &client_ops;
2770    
2771     + INIT_LIST_HEAD(&client.node);
2772     +
2773     ret = hnae3_register_client(&client);
2774     if (ret)
2775     return ret;
2776     diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
2777     index 7a226537877b..6265ce8915b6 100644
2778     --- a/drivers/net/ethernet/intel/e1000e/netdev.c
2779     +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
2780     @@ -3558,15 +3558,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
2781     }
2782     break;
2783     case e1000_pch_spt:
2784     - if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
2785     - /* Stable 24MHz frequency */
2786     - incperiod = INCPERIOD_24MHZ;
2787     - incvalue = INCVALUE_24MHZ;
2788     - shift = INCVALUE_SHIFT_24MHZ;
2789     - adapter->cc.shift = shift;
2790     - break;
2791     - }
2792     - return -EINVAL;
2793     + /* Stable 24MHz frequency */
2794     + incperiod = INCPERIOD_24MHZ;
2795     + incvalue = INCVALUE_24MHZ;
2796     + shift = INCVALUE_SHIFT_24MHZ;
2797     + adapter->cc.shift = shift;
2798     + break;
2799     case e1000_pch_cnp:
2800     if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
2801     /* Stable 24MHz frequency */
2802     diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
2803     index d8456c381c99..ef242dbae116 100644
2804     --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
2805     +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
2806     @@ -337,6 +337,8 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf)
2807     **/
2808     void i40e_ptp_tx_hang(struct i40e_pf *pf)
2809     {
2810     + struct sk_buff *skb;
2811     +
2812     if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
2813     return;
2814    
2815     @@ -349,9 +351,12 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf)
2816     * within a second it is reasonable to assume that we never will.
2817     */
2818     if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) {
2819     - dev_kfree_skb_any(pf->ptp_tx_skb);
2820     + skb = pf->ptp_tx_skb;
2821     pf->ptp_tx_skb = NULL;
2822     clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
2823     +
2824     + /* Free the skb after we clear the bitlock */
2825     + dev_kfree_skb_any(skb);
2826     pf->tx_hwtstamp_timeouts++;
2827     }
2828     }
2829     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
2830     index 6ca580cdfd84..1c027f9d9af5 100644
2831     --- a/drivers/net/ethernet/intel/igb/igb_main.c
2832     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
2833     @@ -8376,12 +8376,17 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
2834     if (is_valid_ether_addr(addr))
2835     rar_high |= E1000_RAH_AV;
2836    
2837     - if (hw->mac.type == e1000_82575)
2838     + switch (hw->mac.type) {
2839     + case e1000_82575:
2840     + case e1000_i210:
2841     rar_high |= E1000_RAH_POOL_1 *
2842     adapter->mac_table[index].queue;
2843     - else
2844     + break;
2845     + default:
2846     rar_high |= E1000_RAH_POOL_1 <<
2847     adapter->mac_table[index].queue;
2848     + break;
2849     + }
2850     }
2851    
2852     wr32(E1000_RAL(index), rar_low);
2853     diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2854     index 90ecc4b06462..90be4385bf36 100644
2855     --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2856     +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2857     @@ -3737,6 +3737,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
2858     return -EPERM;
2859    
2860     ether_addr_copy(hw->mac.addr, addr->sa_data);
2861     + ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
2862     ether_addr_copy(netdev->dev_addr, addr->sa_data);
2863    
2864     return 0;
2865     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2866     index 42a6afcaae03..7924f241e3ad 100644
2867     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2868     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2869     @@ -912,8 +912,10 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
2870     int err;
2871    
2872     /* No need to continue if only VLAN flags were changed */
2873     - if (mlxsw_sp_port_vlan->bridge_port)
2874     + if (mlxsw_sp_port_vlan->bridge_port) {
2875     + mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
2876     return 0;
2877     + }
2878    
2879     err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
2880     if (err)
2881     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2882     index 27f2e650e27b..1a9a382bf1c4 100644
2883     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2884     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2885     @@ -51,7 +51,7 @@
2886     #include <linux/of_mdio.h>
2887     #include "dwmac1000.h"
2888    
2889     -#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
2890     +#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
2891     #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
2892    
2893     /* Module parameters */
2894     diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
2895     index 18013645e76c..0c1adad7415d 100644
2896     --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
2897     +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
2898     @@ -177,12 +177,18 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
2899     }
2900    
2901     dev = bus_find_device(&platform_bus_type, NULL, node, match);
2902     - of_node_put(node);
2903     + if (!dev) {
2904     + dev_err(dev, "unable to find platform device for %pOF\n", node);
2905     + goto out;
2906     + }
2907     +
2908     priv = dev_get_drvdata(dev);
2909    
2910     priv->cpsw_phy_sel(priv, phy_mode, slave);
2911    
2912     put_device(dev);
2913     +out:
2914     + of_node_put(node);
2915     }
2916     EXPORT_SYMBOL_GPL(cpsw_phy_sel);
2917    
2918     diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
2919     index cb250cacf721..e33a6c672a0a 100644
2920     --- a/drivers/net/hyperv/hyperv_net.h
2921     +++ b/drivers/net/hyperv/hyperv_net.h
2922     @@ -724,6 +724,8 @@ struct net_device_context {
2923     struct hv_device *device_ctx;
2924     /* netvsc_device */
2925     struct netvsc_device __rcu *nvdev;
2926     + /* list of netvsc net_devices */
2927     + struct list_head list;
2928     /* reconfigure work */
2929     struct delayed_work dwork;
2930     /* last reconfig time */
2931     diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
2932     index aeabeb107fed..6a77ef38c549 100644
2933     --- a/drivers/net/hyperv/netvsc_drv.c
2934     +++ b/drivers/net/hyperv/netvsc_drv.c
2935     @@ -66,6 +66,8 @@ static int debug = -1;
2936     module_param(debug, int, S_IRUGO);
2937     MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
2938    
2939     +static LIST_HEAD(netvsc_dev_list);
2940     +
2941     static void netvsc_change_rx_flags(struct net_device *net, int change)
2942     {
2943     struct net_device_context *ndev_ctx = netdev_priv(net);
2944     @@ -1749,13 +1751,10 @@ out_unlock:
2945    
2946     static struct net_device *get_netvsc_bymac(const u8 *mac)
2947     {
2948     - struct net_device *dev;
2949     -
2950     - ASSERT_RTNL();
2951     + struct net_device_context *ndev_ctx;
2952    
2953     - for_each_netdev(&init_net, dev) {
2954     - if (dev->netdev_ops != &device_ops)
2955     - continue; /* not a netvsc device */
2956     + list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2957     + struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
2958    
2959     if (ether_addr_equal(mac, dev->perm_addr))
2960     return dev;
2961     @@ -1766,25 +1765,18 @@ static struct net_device *get_netvsc_bymac(const u8 *mac)
2962    
2963     static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
2964     {
2965     + struct net_device_context *net_device_ctx;
2966     struct net_device *dev;
2967    
2968     - ASSERT_RTNL();
2969     -
2970     - for_each_netdev(&init_net, dev) {
2971     - struct net_device_context *net_device_ctx;
2972     + dev = netdev_master_upper_dev_get(vf_netdev);
2973     + if (!dev || dev->netdev_ops != &device_ops)
2974     + return NULL; /* not a netvsc device */
2975    
2976     - if (dev->netdev_ops != &device_ops)
2977     - continue; /* not a netvsc device */
2978     + net_device_ctx = netdev_priv(dev);
2979     + if (!rtnl_dereference(net_device_ctx->nvdev))
2980     + return NULL; /* device is removed */
2981    
2982     - net_device_ctx = netdev_priv(dev);
2983     - if (!rtnl_dereference(net_device_ctx->nvdev))
2984     - continue; /* device is removed */
2985     -
2986     - if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
2987     - return dev; /* a match */
2988     - }
2989     -
2990     - return NULL;
2991     + return dev;
2992     }
2993    
2994     /* Called when VF is injecting data into network stack.
2995     @@ -2065,15 +2057,19 @@ static int netvsc_probe(struct hv_device *dev,
2996     else
2997     net->max_mtu = ETH_DATA_LEN;
2998    
2999     - ret = register_netdev(net);
3000     + rtnl_lock();
3001     + ret = register_netdevice(net);
3002     if (ret != 0) {
3003     pr_err("Unable to register netdev.\n");
3004     goto register_failed;
3005     }
3006    
3007     - return ret;
3008     + list_add(&net_device_ctx->list, &netvsc_dev_list);
3009     + rtnl_unlock();
3010     + return 0;
3011    
3012     register_failed:
3013     + rtnl_unlock();
3014     rndis_filter_device_remove(dev, nvdev);
3015     rndis_failed:
3016     free_percpu(net_device_ctx->vf_stats);
3017     @@ -2119,6 +2115,7 @@ static int netvsc_remove(struct hv_device *dev)
3018     rndis_filter_device_remove(dev, nvdev);
3019    
3020     unregister_netdevice(net);
3021     + list_del(&ndev_ctx->list);
3022    
3023     rtnl_unlock();
3024     rcu_read_unlock();
3025     diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
3026     index 0831b7142df7..0c5b68e7da51 100644
3027     --- a/drivers/net/phy/mdio-mux-bcm-iproc.c
3028     +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
3029     @@ -218,7 +218,7 @@ out:
3030    
3031     static int mdio_mux_iproc_remove(struct platform_device *pdev)
3032     {
3033     - struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev);
3034     + struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
3035    
3036     mdio_mux_uninit(md->mux_handle);
3037     mdiobus_unregister(md->mii_bus);
3038     diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
3039     index 1b2fe74a44ea..e4a6ed88b9cf 100644
3040     --- a/drivers/net/phy/phylink.c
3041     +++ b/drivers/net/phy/phylink.c
3042     @@ -561,6 +561,8 @@ void phylink_destroy(struct phylink *pl)
3043     {
3044     if (pl->sfp_bus)
3045     sfp_unregister_upstream(pl->sfp_bus);
3046     + if (!IS_ERR(pl->link_gpio))
3047     + gpiod_put(pl->link_gpio);
3048    
3049     cancel_work_sync(&pl->resolve);
3050     kfree(pl);
3051     diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
3052     index 0aa91ab9a0fb..9e3f632e22f1 100644
3053     --- a/drivers/net/usb/lan78xx.c
3054     +++ b/drivers/net/usb/lan78xx.c
3055     @@ -1216,6 +1216,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
3056     mod_timer(&dev->stat_monitor,
3057     jiffies + STAT_UPDATE_TIMER);
3058     }
3059     +
3060     + tasklet_schedule(&dev->bh);
3061     }
3062    
3063     return ret;
3064     diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
3065     index 5d80be213fac..869f276cc1d8 100644
3066     --- a/drivers/net/wireless/ath/regd.h
3067     +++ b/drivers/net/wireless/ath/regd.h
3068     @@ -68,12 +68,14 @@ enum CountryCode {
3069     CTRY_AUSTRALIA = 36,
3070     CTRY_AUSTRIA = 40,
3071     CTRY_AZERBAIJAN = 31,
3072     + CTRY_BAHAMAS = 44,
3073     CTRY_BAHRAIN = 48,
3074     CTRY_BANGLADESH = 50,
3075     CTRY_BARBADOS = 52,
3076     CTRY_BELARUS = 112,
3077     CTRY_BELGIUM = 56,
3078     CTRY_BELIZE = 84,
3079     + CTRY_BERMUDA = 60,
3080     CTRY_BOLIVIA = 68,
3081     CTRY_BOSNIA_HERZ = 70,
3082     CTRY_BRAZIL = 76,
3083     @@ -159,6 +161,7 @@ enum CountryCode {
3084     CTRY_ROMANIA = 642,
3085     CTRY_RUSSIA = 643,
3086     CTRY_SAUDI_ARABIA = 682,
3087     + CTRY_SERBIA = 688,
3088     CTRY_SERBIA_MONTENEGRO = 891,
3089     CTRY_SINGAPORE = 702,
3090     CTRY_SLOVAKIA = 703,
3091     @@ -170,11 +173,13 @@ enum CountryCode {
3092     CTRY_SWITZERLAND = 756,
3093     CTRY_SYRIA = 760,
3094     CTRY_TAIWAN = 158,
3095     + CTRY_TANZANIA = 834,
3096     CTRY_THAILAND = 764,
3097     CTRY_TRINIDAD_Y_TOBAGO = 780,
3098     CTRY_TUNISIA = 788,
3099     CTRY_TURKEY = 792,
3100     CTRY_UAE = 784,
3101     + CTRY_UGANDA = 800,
3102     CTRY_UKRAINE = 804,
3103     CTRY_UNITED_KINGDOM = 826,
3104     CTRY_UNITED_STATES = 840,
3105     diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
3106     index bdd2b4d61f2f..15bbd1e0d912 100644
3107     --- a/drivers/net/wireless/ath/regd_common.h
3108     +++ b/drivers/net/wireless/ath/regd_common.h
3109     @@ -35,6 +35,7 @@ enum EnumRd {
3110     FRANCE_RES = 0x31,
3111     FCC3_FCCA = 0x3A,
3112     FCC3_WORLD = 0x3B,
3113     + FCC3_ETSIC = 0x3F,
3114    
3115     ETSI1_WORLD = 0x37,
3116     ETSI3_ETSIA = 0x32,
3117     @@ -44,6 +45,7 @@ enum EnumRd {
3118     ETSI4_ETSIC = 0x38,
3119     ETSI5_WORLD = 0x39,
3120     ETSI6_WORLD = 0x34,
3121     + ETSI8_WORLD = 0x3D,
3122     ETSI_RESERVED = 0x33,
3123    
3124     MKK1_MKKA = 0x40,
3125     @@ -59,6 +61,7 @@ enum EnumRd {
3126     MKK1_MKKA1 = 0x4A,
3127     MKK1_MKKA2 = 0x4B,
3128     MKK1_MKKC = 0x4C,
3129     + APL2_FCCA = 0x4D,
3130    
3131     APL3_FCCA = 0x50,
3132     APL1_WORLD = 0x52,
3133     @@ -67,6 +70,7 @@ enum EnumRd {
3134     APL1_ETSIC = 0x55,
3135     APL2_ETSIC = 0x56,
3136     APL5_WORLD = 0x58,
3137     + APL13_WORLD = 0x5A,
3138     APL6_WORLD = 0x5B,
3139     APL7_FCCA = 0x5C,
3140     APL8_WORLD = 0x5D,
3141     @@ -168,6 +172,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
3142     {FCC2_ETSIC, CTL_FCC, CTL_ETSI},
3143     {FCC3_FCCA, CTL_FCC, CTL_FCC},
3144     {FCC3_WORLD, CTL_FCC, CTL_ETSI},
3145     + {FCC3_ETSIC, CTL_FCC, CTL_ETSI},
3146     {FCC4_FCCA, CTL_FCC, CTL_FCC},
3147     {FCC5_FCCA, CTL_FCC, CTL_FCC},
3148     {FCC6_FCCA, CTL_FCC, CTL_FCC},
3149     @@ -179,6 +184,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
3150     {ETSI4_WORLD, CTL_ETSI, CTL_ETSI},
3151     {ETSI5_WORLD, CTL_ETSI, CTL_ETSI},
3152     {ETSI6_WORLD, CTL_ETSI, CTL_ETSI},
3153     + {ETSI8_WORLD, CTL_ETSI, CTL_ETSI},
3154    
3155     /* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */
3156     {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI},
3157     @@ -188,9 +194,11 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
3158     {FCC1_FCCA, CTL_FCC, CTL_FCC},
3159     {APL1_WORLD, CTL_FCC, CTL_ETSI},
3160     {APL2_WORLD, CTL_FCC, CTL_ETSI},
3161     + {APL2_FCCA, CTL_FCC, CTL_FCC},
3162     {APL3_WORLD, CTL_FCC, CTL_ETSI},
3163     {APL4_WORLD, CTL_FCC, CTL_ETSI},
3164     {APL5_WORLD, CTL_FCC, CTL_ETSI},
3165     + {APL13_WORLD, CTL_ETSI, CTL_ETSI},
3166     {APL6_WORLD, CTL_ETSI, CTL_ETSI},
3167     {APL8_WORLD, CTL_ETSI, CTL_ETSI},
3168     {APL9_WORLD, CTL_ETSI, CTL_ETSI},
3169     @@ -298,6 +306,7 @@ static struct country_code_to_enum_rd allCountries[] = {
3170     {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"},
3171     {CTRY_AUSTRIA, ETSI1_WORLD, "AT"},
3172     {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"},
3173     + {CTRY_BAHAMAS, FCC3_WORLD, "BS"},
3174     {CTRY_BAHRAIN, APL6_WORLD, "BH"},
3175     {CTRY_BANGLADESH, NULL1_WORLD, "BD"},
3176     {CTRY_BARBADOS, FCC2_WORLD, "BB"},
3177     @@ -305,6 +314,7 @@ static struct country_code_to_enum_rd allCountries[] = {
3178     {CTRY_BELGIUM, ETSI1_WORLD, "BE"},
3179     {CTRY_BELGIUM2, ETSI4_WORLD, "BL"},
3180     {CTRY_BELIZE, APL1_ETSIC, "BZ"},
3181     + {CTRY_BERMUDA, FCC3_FCCA, "BM"},
3182     {CTRY_BOLIVIA, APL1_ETSIC, "BO"},
3183     {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"},
3184     {CTRY_BRAZIL, FCC3_WORLD, "BR"},
3185     @@ -444,6 +454,7 @@ static struct country_code_to_enum_rd allCountries[] = {
3186     {CTRY_ROMANIA, NULL1_WORLD, "RO"},
3187     {CTRY_RUSSIA, NULL1_WORLD, "RU"},
3188     {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"},
3189     + {CTRY_SERBIA, ETSI1_WORLD, "RS"},
3190     {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"},
3191     {CTRY_SINGAPORE, APL6_WORLD, "SG"},
3192     {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"},
3193     @@ -455,10 +466,12 @@ static struct country_code_to_enum_rd allCountries[] = {
3194     {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"},
3195     {CTRY_SYRIA, NULL1_WORLD, "SY"},
3196     {CTRY_TAIWAN, APL3_FCCA, "TW"},
3197     + {CTRY_TANZANIA, APL1_WORLD, "TZ"},
3198     {CTRY_THAILAND, FCC3_WORLD, "TH"},
3199     {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"},
3200     {CTRY_TUNISIA, ETSI3_WORLD, "TN"},
3201     {CTRY_TURKEY, ETSI3_WORLD, "TR"},
3202     + {CTRY_UGANDA, FCC3_WORLD, "UG"},
3203     {CTRY_UKRAINE, NULL1_WORLD, "UA"},
3204     {CTRY_UAE, NULL1_WORLD, "AE"},
3205     {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"},
3206     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
3207     index cd587325e286..dd6e27513cc1 100644
3208     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
3209     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
3210     @@ -1098,6 +1098,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
3211     BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
3212     BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
3213     BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
3214     + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
3215     BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
3216     BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
3217     BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
3218     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3219     index a06b6612b658..ca99c3cf41c2 100644
3220     --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3221     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3222     @@ -901,6 +901,8 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
3223     }
3224     def_rxq = trans_pcie->rxq;
3225    
3226     + cancel_work_sync(&rba->rx_alloc);
3227     +
3228     spin_lock(&rba->lock);
3229     atomic_set(&rba->req_pending, 0);
3230     atomic_set(&rba->req_ready, 0);
3231     diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
3232     index f4f2b9b27e32..50890cab8807 100644
3233     --- a/drivers/net/wireless/marvell/mwifiex/usb.c
3234     +++ b/drivers/net/wireless/marvell/mwifiex/usb.c
3235     @@ -644,6 +644,9 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
3236     MWIFIEX_FUNC_SHUTDOWN);
3237     }
3238    
3239     + if (adapter->workqueue)
3240     + flush_workqueue(adapter->workqueue);
3241     +
3242     mwifiex_usb_free(card);
3243    
3244     mwifiex_dbg(adapter, FATAL,
3245     diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
3246     index 0cd68ffc2c74..51ccf10f4413 100644
3247     --- a/drivers/net/wireless/marvell/mwifiex/util.c
3248     +++ b/drivers/net/wireless/marvell/mwifiex/util.c
3249     @@ -708,12 +708,14 @@ void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr,
3250     s8 nflr)
3251     {
3252     struct mwifiex_histogram_data *phist_data = priv->hist_data;
3253     + s8 nf = -nflr;
3254     + s8 rssi = snr - nflr;
3255    
3256     atomic_inc(&phist_data->num_samples);
3257     atomic_inc(&phist_data->rx_rate[rx_rate]);
3258     - atomic_inc(&phist_data->snr[snr]);
3259     - atomic_inc(&phist_data->noise_flr[128 + nflr]);
3260     - atomic_inc(&phist_data->sig_str[nflr - snr]);
3261     + atomic_inc(&phist_data->snr[snr + 128]);
3262     + atomic_inc(&phist_data->noise_flr[nf + 128]);
3263     + atomic_inc(&phist_data->sig_str[rssi + 128]);
3264     }
3265    
3266     /* function to reset histogram data during init/reset */
3267     diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
3268     index 070dfd68bb83..120b0ff545c1 100644
3269     --- a/drivers/net/wireless/rsi/rsi_91x_hal.c
3270     +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
3271     @@ -557,28 +557,32 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content,
3272     u32 content_size)
3273     {
3274     struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops;
3275     - struct bl_header bl_hdr;
3276     + struct bl_header *bl_hdr;
3277     u32 write_addr, write_len;
3278     int status;
3279    
3280     - bl_hdr.flags = 0;
3281     - bl_hdr.image_no = cpu_to_le32(adapter->priv->coex_mode);
3282     - bl_hdr.check_sum = cpu_to_le32(
3283     - *(u32 *)&flash_content[CHECK_SUM_OFFSET]);
3284     - bl_hdr.flash_start_address = cpu_to_le32(
3285     - *(u32 *)&flash_content[ADDR_OFFSET]);
3286     - bl_hdr.flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]);
3287     + bl_hdr = kzalloc(sizeof(*bl_hdr), GFP_KERNEL);
3288     + if (!bl_hdr)
3289     + return -ENOMEM;
3290     +
3291     + bl_hdr->flags = 0;
3292     + bl_hdr->image_no = cpu_to_le32(adapter->priv->coex_mode);
3293     + bl_hdr->check_sum =
3294     + cpu_to_le32(*(u32 *)&flash_content[CHECK_SUM_OFFSET]);
3295     + bl_hdr->flash_start_address =
3296     + cpu_to_le32(*(u32 *)&flash_content[ADDR_OFFSET]);
3297     + bl_hdr->flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]);
3298     write_len = sizeof(struct bl_header);
3299    
3300     if (adapter->rsi_host_intf == RSI_HOST_INTF_USB) {
3301     write_addr = PING_BUFFER_ADDRESS;
3302     status = hif_ops->write_reg_multiple(adapter, write_addr,
3303     - (u8 *)&bl_hdr, write_len);
3304     + (u8 *)bl_hdr, write_len);
3305     if (status < 0) {
3306     rsi_dbg(ERR_ZONE,
3307     "%s: Failed to load Version/CRC structure\n",
3308     __func__);
3309     - return status;
3310     + goto fail;
3311     }
3312     } else {
3313     write_addr = PING_BUFFER_ADDRESS >> 16;
3314     @@ -587,20 +591,23 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content,
3315     rsi_dbg(ERR_ZONE,
3316     "%s: Unable to set ms word to common reg\n",
3317     __func__);
3318     - return status;
3319     + goto fail;
3320     }
3321     write_addr = RSI_SD_REQUEST_MASTER |
3322     (PING_BUFFER_ADDRESS & 0xFFFF);
3323     status = hif_ops->write_reg_multiple(adapter, write_addr,
3324     - (u8 *)&bl_hdr, write_len);
3325     + (u8 *)bl_hdr, write_len);
3326     if (status < 0) {
3327     rsi_dbg(ERR_ZONE,
3328     "%s: Failed to load Version/CRC structure\n",
3329     __func__);
3330     - return status;
3331     + goto fail;
3332     }
3333     }
3334     - return 0;
3335     + status = 0;
3336     +fail:
3337     + kfree(bl_hdr);
3338     + return status;
3339     }
3340    
3341     static u32 read_flash_capacity(struct rsi_hw *adapter)
3342     diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
3343     index 370161ca2a1c..0362967874aa 100644
3344     --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
3345     +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
3346     @@ -161,7 +161,6 @@ static void rsi_reset_card(struct sdio_func *pfunction)
3347     int err;
3348     struct mmc_card *card = pfunction->card;
3349     struct mmc_host *host = card->host;
3350     - s32 bit = (fls(host->ocr_avail) - 1);
3351     u8 cmd52_resp;
3352     u32 clock, resp, i;
3353     u16 rca;
3354     @@ -181,7 +180,6 @@ static void rsi_reset_card(struct sdio_func *pfunction)
3355     msleep(20);
3356    
3357     /* Initialize the SDIO card */
3358     - host->ios.vdd = bit;
3359     host->ios.chip_select = MMC_CS_DONTCARE;
3360     host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
3361     host->ios.power_mode = MMC_POWER_UP;
3362     @@ -970,17 +968,21 @@ static void ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data,
3363     /*This function resets and re-initializes the chip.*/
3364     static void rsi_reset_chip(struct rsi_hw *adapter)
3365     {
3366     - __le32 data;
3367     + u8 *data;
3368     u8 sdio_interrupt_status = 0;
3369     u8 request = 1;
3370     int ret;
3371    
3372     + data = kzalloc(sizeof(u32), GFP_KERNEL);
3373     + if (!data)
3374     + return;
3375     +
3376     rsi_dbg(INFO_ZONE, "Writing disable to wakeup register\n");
3377     ret = rsi_sdio_write_register(adapter, 0, SDIO_WAKEUP_REG, &request);
3378     if (ret < 0) {
3379     rsi_dbg(ERR_ZONE,
3380     "%s: Failed to write SDIO wakeup register\n", __func__);
3381     - return;
3382     + goto err;
3383     }
3384     msleep(20);
3385     ret = rsi_sdio_read_register(adapter, RSI_FN1_INT_REGISTER,
3386     @@ -988,7 +990,7 @@ static void rsi_reset_chip(struct rsi_hw *adapter)
3387     if (ret < 0) {
3388     rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n",
3389     __func__);
3390     - return;
3391     + goto err;
3392     }
3393     rsi_dbg(INFO_ZONE, "%s: Intr Status Register value = %d\n",
3394     __func__, sdio_interrupt_status);
3395     @@ -998,17 +1000,17 @@ static void rsi_reset_chip(struct rsi_hw *adapter)
3396     rsi_dbg(ERR_ZONE,
3397     "%s: Unable to set ms word to common reg\n",
3398     __func__);
3399     - return;
3400     + goto err;
3401     }
3402    
3403     - data = TA_HOLD_THREAD_VALUE;
3404     + put_unaligned_le32(TA_HOLD_THREAD_VALUE, data);
3405     if (rsi_sdio_write_register_multiple(adapter, TA_HOLD_THREAD_REG |
3406     RSI_SD_REQUEST_MASTER,
3407     - (u8 *)&data, 4)) {
3408     + data, 4)) {
3409     rsi_dbg(ERR_ZONE,
3410     "%s: Unable to hold Thread-Arch processor threads\n",
3411     __func__);
3412     - return;
3413     + goto err;
3414     }
3415    
3416     /* This msleep will ensure Thread-Arch processor to go to hold
3417     @@ -1029,6 +1031,9 @@ static void rsi_reset_chip(struct rsi_hw *adapter)
3418     * read write operations to complete for chip reset.
3419     */
3420     msleep(500);
3421     +err:
3422     + kfree(data);
3423     + return;
3424     }
3425    
3426     /**
3427     diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
3428     index 903392039200..6788fbbdd166 100644
3429     --- a/drivers/net/wireless/rsi/rsi_sdio.h
3430     +++ b/drivers/net/wireless/rsi/rsi_sdio.h
3431     @@ -85,7 +85,7 @@ enum sdio_interrupt_type {
3432     #define TA_SOFT_RST_CLR 0
3433     #define TA_SOFT_RST_SET BIT(0)
3434     #define TA_PC_ZERO 0
3435     -#define TA_HOLD_THREAD_VALUE cpu_to_le32(0xF)
3436     +#define TA_HOLD_THREAD_VALUE 0xF
3437     #define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF)
3438     #define TA_BASE_ADDR 0x2200
3439     #define MISC_CFG_BASE_ADDR 0x4105
3440     diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
3441     index f8a1fea64e25..219d1a86b92e 100644
3442     --- a/drivers/net/wireless/ti/wlcore/sdio.c
3443     +++ b/drivers/net/wireless/ti/wlcore/sdio.c
3444     @@ -406,6 +406,11 @@ static int wl1271_suspend(struct device *dev)
3445     mmc_pm_flag_t sdio_flags;
3446     int ret = 0;
3447    
3448     + if (!wl) {
3449     + dev_err(dev, "no wilink module was probed\n");
3450     + goto out;
3451     + }
3452     +
3453     dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
3454     wl->wow_enabled);
3455    
3456     diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
3457     index f07b9c9bb5ba..dfc076f9ee4b 100644
3458     --- a/drivers/net/xen-netfront.c
3459     +++ b/drivers/net/xen-netfront.c
3460     @@ -87,6 +87,7 @@ struct netfront_cb {
3461     /* IRQ name is queue name with "-tx" or "-rx" appended */
3462     #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
3463    
3464     +static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
3465     static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
3466    
3467     struct netfront_stats {
3468     @@ -239,7 +240,7 @@ static void rx_refill_timeout(unsigned long data)
3469     static int netfront_tx_slot_available(struct netfront_queue *queue)
3470     {
3471     return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
3472     - (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
3473     + (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
3474     }
3475    
3476     static void xennet_maybe_wake_tx(struct netfront_queue *queue)
3477     @@ -790,7 +791,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
3478     RING_IDX cons = queue->rx.rsp_cons;
3479     struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
3480     grant_ref_t ref = xennet_get_rx_ref(queue, cons);
3481     - int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
3482     + int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
3483     int slots = 1;
3484     int err = 0;
3485     unsigned long ret;
3486     @@ -1330,6 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
3487     netif_carrier_off(netdev);
3488    
3489     xenbus_switch_state(dev, XenbusStateInitialising);
3490     + wait_event(module_load_q,
3491     + xenbus_read_driver_state(dev->otherend) !=
3492     + XenbusStateClosed &&
3493     + xenbus_read_driver_state(dev->otherend) !=
3494     + XenbusStateUnknown);
3495     return netdev;
3496    
3497     exit:
3498     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
3499     index 4cac4755abef..f5643d107cc6 100644
3500     --- a/drivers/nvme/host/pci.c
3501     +++ b/drivers/nvme/host/pci.c
3502     @@ -2519,6 +2519,9 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
3503    
3504     static void nvme_error_resume(struct pci_dev *pdev)
3505     {
3506     + struct nvme_dev *dev = pci_get_drvdata(pdev);
3507     +
3508     + flush_work(&dev->ctrl.reset_work);
3509     pci_cleanup_aer_uncorrect_error_status(pdev);
3510     }
3511    
3512     @@ -2562,6 +2565,8 @@ static const struct pci_device_id nvme_id_table[] = {
3513     .driver_data = NVME_QUIRK_LIGHTNVM, },
3514     { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
3515     .driver_data = NVME_QUIRK_LIGHTNVM, },
3516     + { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
3517     + .driver_data = NVME_QUIRK_LIGHTNVM, },
3518     { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
3519     { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
3520     { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
3521     diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
3522     index 93a082e0bdd4..48a831d58e7a 100644
3523     --- a/drivers/nvme/host/rdma.c
3524     +++ b/drivers/nvme/host/rdma.c
3525     @@ -796,7 +796,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
3526     if (error) {
3527     dev_err(ctrl->ctrl.device,
3528     "prop_get NVME_REG_CAP failed\n");
3529     - goto out_cleanup_queue;
3530     + goto out_stop_queue;
3531     }
3532    
3533     ctrl->ctrl.sqsize =
3534     @@ -804,23 +804,25 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
3535    
3536     error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
3537     if (error)
3538     - goto out_cleanup_queue;
3539     + goto out_stop_queue;
3540    
3541     ctrl->ctrl.max_hw_sectors =
3542     (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
3543    
3544     error = nvme_init_identify(&ctrl->ctrl);
3545     if (error)
3546     - goto out_cleanup_queue;
3547     + goto out_stop_queue;
3548    
3549     error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
3550     &ctrl->async_event_sqe, sizeof(struct nvme_command),
3551     DMA_TO_DEVICE);
3552     if (error)
3553     - goto out_cleanup_queue;
3554     + goto out_stop_queue;
3555    
3556     return 0;
3557    
3558     +out_stop_queue:
3559     + nvme_rdma_stop_queue(&ctrl->queues[0]);
3560     out_cleanup_queue:
3561     if (new)
3562     blk_cleanup_queue(ctrl->ctrl.admin_q);
3563     diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
3564     index d12e5de78e70..2afafd5d8915 100644
3565     --- a/drivers/nvmem/core.c
3566     +++ b/drivers/nvmem/core.c
3567     @@ -1049,6 +1049,8 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
3568    
3569     /* setup the first byte with lsb bits from nvmem */
3570     rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
3571     + if (rc)
3572     + goto err;
3573     *b++ |= GENMASK(bit_offset - 1, 0) & v;
3574    
3575     /* setup rest of the byte if any */
3576     @@ -1067,11 +1069,16 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
3577     /* setup the last byte with msb bits from nvmem */
3578     rc = nvmem_reg_read(nvmem,
3579     cell->offset + cell->bytes - 1, &v, 1);
3580     + if (rc)
3581     + goto err;
3582     *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
3583    
3584     }
3585    
3586     return buf;
3587     +err:
3588     + kfree(buf);
3589     + return ERR_PTR(rc);
3590     }
3591    
3592     /**
3593     diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
3594     index 087645116ecb..c78fd9c2cf8c 100644
3595     --- a/drivers/pci/host/pci-xgene.c
3596     +++ b/drivers/pci/host/pci-xgene.c
3597     @@ -686,7 +686,6 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
3598    
3599     bus = bridge->bus;
3600    
3601     - pci_scan_child_bus(bus);
3602     pci_assign_unassigned_bus_resources(bus);
3603     list_for_each_entry(child, &bus->children, node)
3604     pcie_bus_configure_settings(child);
3605     diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
3606     index 05832b597e53..46c2ee2caf28 100644
3607     --- a/drivers/pci/hotplug/pciehp_hpc.c
3608     +++ b/drivers/pci/hotplug/pciehp_hpc.c
3609     @@ -863,6 +863,13 @@ struct controller *pcie_init(struct pcie_device *dev)
3610     if (pdev->hotplug_user_indicators)
3611     slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);
3612    
3613     + /*
3614     + * We assume no Thunderbolt controllers support Command Complete events,
3615     + * but some controllers falsely claim they do.
3616     + */
3617     + if (pdev->is_thunderbolt)
3618     + slot_cap |= PCI_EXP_SLTCAP_NCCS;
3619     +
3620     ctrl->slot_cap = slot_cap;
3621     mutex_init(&ctrl->ctrl_lock);
3622     init_waitqueue_head(&ctrl->queue);
3623     diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
3624     index 00fa4278c1f4..c3f0473d1afa 100644
3625     --- a/drivers/pci/pci-sysfs.c
3626     +++ b/drivers/pci/pci-sysfs.c
3627     @@ -305,13 +305,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
3628     if (!capable(CAP_SYS_ADMIN))
3629     return -EPERM;
3630    
3631     - if (!val) {
3632     - if (pci_is_enabled(pdev))
3633     - pci_disable_device(pdev);
3634     - else
3635     - result = -EIO;
3636     - } else
3637     + device_lock(dev);
3638     + if (dev->driver)
3639     + result = -EBUSY;
3640     + else if (val)
3641     result = pci_enable_device(pdev);
3642     + else if (pci_is_enabled(pdev))
3643     + pci_disable_device(pdev);
3644     + else
3645     + result = -EIO;
3646     + device_unlock(dev);
3647    
3648     return result < 0 ? result : count;
3649     }
3650     diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
3651     index f285cd74088e..4bccaf688aad 100644
3652     --- a/drivers/pci/probe.c
3653     +++ b/drivers/pci/probe.c
3654     @@ -516,12 +516,14 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
3655    
3656     if (bridge->release_fn)
3657     bridge->release_fn(bridge);
3658     +
3659     + pci_free_resource_list(&bridge->windows);
3660     }
3661    
3662     static void pci_release_host_bridge_dev(struct device *dev)
3663     {
3664     devm_pci_release_host_bridge_dev(dev);
3665     - pci_free_host_bridge(to_pci_host_bridge(dev));
3666     + kfree(to_pci_host_bridge(dev));
3667     }
3668    
3669     struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
3670     diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
3671     index b1ca838dd80a..e61e2f8c91ce 100644
3672     --- a/drivers/pinctrl/pinctrl-at91-pio4.c
3673     +++ b/drivers/pinctrl/pinctrl-at91-pio4.c
3674     @@ -576,8 +576,10 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
3675     for_each_child_of_node(np_config, np) {
3676     ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map,
3677     &reserved_maps, num_maps);
3678     - if (ret < 0)
3679     + if (ret < 0) {
3680     + of_node_put(np);
3681     break;
3682     + }
3683     }
3684     }
3685    
3686     diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
3687     index f541b80f1b54..bd910fe123d9 100644
3688     --- a/drivers/regulator/cpcap-regulator.c
3689     +++ b/drivers/regulator/cpcap-regulator.c
3690     @@ -222,7 +222,7 @@ static unsigned int cpcap_map_mode(unsigned int mode)
3691     case CPCAP_BIT_AUDIO_LOW_PWR:
3692     return REGULATOR_MODE_STANDBY;
3693     default:
3694     - return -EINVAL;
3695     + return REGULATOR_MODE_INVALID;
3696     }
3697     }
3698    
3699     diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
3700     index c9875355905d..a3bf7c993723 100644
3701     --- a/drivers/regulator/of_regulator.c
3702     +++ b/drivers/regulator/of_regulator.c
3703     @@ -31,6 +31,7 @@ static void of_get_regulation_constraints(struct device_node *np,
3704     struct regulation_constraints *constraints = &(*init_data)->constraints;
3705     struct regulator_state *suspend_state;
3706     struct device_node *suspend_np;
3707     + unsigned int mode;
3708     int ret, i;
3709     u32 pval;
3710    
3711     @@ -124,11 +125,11 @@ static void of_get_regulation_constraints(struct device_node *np,
3712    
3713     if (!of_property_read_u32(np, "regulator-initial-mode", &pval)) {
3714     if (desc && desc->of_map_mode) {
3715     - ret = desc->of_map_mode(pval);
3716     - if (ret == -EINVAL)
3717     + mode = desc->of_map_mode(pval);
3718     + if (mode == REGULATOR_MODE_INVALID)
3719     pr_err("%s: invalid mode %u\n", np->name, pval);
3720     else
3721     - constraints->initial_mode = ret;
3722     + constraints->initial_mode = mode;
3723     } else {
3724     pr_warn("%s: mapping for mode %d not defined\n",
3725     np->name, pval);
3726     @@ -163,12 +164,12 @@ static void of_get_regulation_constraints(struct device_node *np,
3727     if (!of_property_read_u32(suspend_np, "regulator-mode",
3728     &pval)) {
3729     if (desc && desc->of_map_mode) {
3730     - ret = desc->of_map_mode(pval);
3731     - if (ret == -EINVAL)
3732     + mode = desc->of_map_mode(pval);
3733     + if (mode == REGULATOR_MODE_INVALID)
3734     pr_err("%s: invalid mode %u\n",
3735     np->name, pval);
3736     else
3737     - suspend_state->mode = ret;
3738     + suspend_state->mode = mode;
3739     } else {
3740     pr_warn("%s: mapping for mode %d not defined\n",
3741     np->name, pval);
3742     diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
3743     index 63922a2167e5..659e516455be 100644
3744     --- a/drivers/regulator/pfuze100-regulator.c
3745     +++ b/drivers/regulator/pfuze100-regulator.c
3746     @@ -158,6 +158,7 @@ static const struct regulator_ops pfuze100_sw_regulator_ops = {
3747     static const struct regulator_ops pfuze100_swb_regulator_ops = {
3748     .enable = regulator_enable_regmap,
3749     .disable = regulator_disable_regmap,
3750     + .is_enabled = regulator_is_enabled_regmap,
3751     .list_voltage = regulator_list_voltage_table,
3752     .map_voltage = regulator_map_voltage_ascend,
3753     .set_voltage_sel = regulator_set_voltage_sel_regmap,
3754     diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
3755     index a4456db5849d..884c7505ed91 100644
3756     --- a/drivers/regulator/twl-regulator.c
3757     +++ b/drivers/regulator/twl-regulator.c
3758     @@ -274,7 +274,7 @@ static inline unsigned int twl4030reg_map_mode(unsigned int mode)
3759     case RES_STATE_SLEEP:
3760     return REGULATOR_MODE_STANDBY;
3761     default:
3762     - return -EINVAL;
3763     + return REGULATOR_MODE_INVALID;
3764     }
3765     }
3766    
3767     diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
3768     index 9eb32ead63db..e4f951e968a4 100644
3769     --- a/drivers/rtc/interface.c
3770     +++ b/drivers/rtc/interface.c
3771     @@ -359,6 +359,11 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
3772     {
3773     int err;
3774    
3775     + if (!rtc->ops)
3776     + return -ENODEV;
3777     + else if (!rtc->ops->set_alarm)
3778     + return -EINVAL;
3779     +
3780     err = rtc_valid_tm(&alarm->time);
3781     if (err != 0)
3782     return err;
3783     diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
3784     index a3418a8a3796..97fdc99bfeef 100644
3785     --- a/drivers/rtc/rtc-tps6586x.c
3786     +++ b/drivers/rtc/rtc-tps6586x.c
3787     @@ -276,14 +276,15 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
3788     device_init_wakeup(&pdev->dev, 1);
3789    
3790     platform_set_drvdata(pdev, rtc);
3791     - rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev),
3792     - &tps6586x_rtc_ops, THIS_MODULE);
3793     + rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
3794     if (IS_ERR(rtc->rtc)) {
3795     ret = PTR_ERR(rtc->rtc);
3796     - dev_err(&pdev->dev, "RTC device register: ret %d\n", ret);
3797     + dev_err(&pdev->dev, "RTC allocate device: ret %d\n", ret);
3798     goto fail_rtc_register;
3799     }
3800    
3801     + rtc->rtc->ops = &tps6586x_rtc_ops;
3802     +
3803     ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
3804     tps6586x_rtc_irq,
3805     IRQF_ONESHOT,
3806     @@ -294,6 +295,13 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
3807     goto fail_rtc_register;
3808     }
3809     disable_irq(rtc->irq);
3810     +
3811     + ret = rtc_register_device(rtc->rtc);
3812     + if (ret) {
3813     + dev_err(&pdev->dev, "RTC device register: ret %d\n", ret);
3814     + goto fail_rtc_register;
3815     + }
3816     +
3817     return 0;
3818    
3819     fail_rtc_register:
3820     diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
3821     index d0244d7979fc..a56b526db89a 100644
3822     --- a/drivers/rtc/rtc-tps65910.c
3823     +++ b/drivers/rtc/rtc-tps65910.c
3824     @@ -380,6 +380,10 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
3825     if (!tps_rtc)
3826     return -ENOMEM;
3827    
3828     + tps_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
3829     + if (IS_ERR(tps_rtc->rtc))
3830     + return PTR_ERR(tps_rtc->rtc);
3831     +
3832     /* Clear pending interrupts */
3833     ret = regmap_read(tps65910->regmap, TPS65910_RTC_STATUS, &rtc_reg);
3834     if (ret < 0)
3835     @@ -421,10 +425,10 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
3836     tps_rtc->irq = irq;
3837     device_set_wakeup_capable(&pdev->dev, 1);
3838    
3839     - tps_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
3840     - &tps65910_rtc_ops, THIS_MODULE);
3841     - if (IS_ERR(tps_rtc->rtc)) {
3842     - ret = PTR_ERR(tps_rtc->rtc);
3843     + tps_rtc->rtc->ops = &tps65910_rtc_ops;
3844     +
3845     + ret = rtc_register_device(tps_rtc->rtc);
3846     + if (ret) {
3847     dev_err(&pdev->dev, "RTC device register: err %d\n", ret);
3848     return ret;
3849     }
3850     diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
3851     index 7ce22967fd16..7ed010714f29 100644
3852     --- a/drivers/rtc/rtc-vr41xx.c
3853     +++ b/drivers/rtc/rtc-vr41xx.c
3854     @@ -292,13 +292,14 @@ static int rtc_probe(struct platform_device *pdev)
3855     goto err_rtc1_iounmap;
3856     }
3857    
3858     - rtc = devm_rtc_device_register(&pdev->dev, rtc_name, &vr41xx_rtc_ops,
3859     - THIS_MODULE);
3860     + rtc = devm_rtc_allocate_device(&pdev->dev);
3861     if (IS_ERR(rtc)) {
3862     retval = PTR_ERR(rtc);
3863     goto err_iounmap_all;
3864     }
3865    
3866     + rtc->ops = &vr41xx_rtc_ops;
3867     +
3868     rtc->max_user_freq = MAX_PERIODIC_RATE;
3869    
3870     spin_lock_irq(&rtc_lock);
3871     @@ -340,6 +341,10 @@ static int rtc_probe(struct platform_device *pdev)
3872    
3873     dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n");
3874    
3875     + retval = rtc_register_device(rtc);
3876     + if (retval)
3877     + goto err_iounmap_all;
3878     +
3879     return 0;
3880    
3881     err_iounmap_all:
3882     diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
3883     index b415ba42ca73..599447032e50 100644
3884     --- a/drivers/s390/scsi/zfcp_dbf.c
3885     +++ b/drivers/s390/scsi/zfcp_dbf.c
3886     @@ -285,6 +285,8 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
3887     struct list_head *entry;
3888     unsigned long flags;
3889    
3890     + lockdep_assert_held(&adapter->erp_lock);
3891     +
3892     if (unlikely(!debug_level_enabled(dbf->rec, level)))
3893     return;
3894    
3895     diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
3896     index 00e7968a1d70..a1388842e17e 100644
3897     --- a/drivers/scsi/3w-9xxx.c
3898     +++ b/drivers/scsi/3w-9xxx.c
3899     @@ -886,6 +886,11 @@ static int twa_chrdev_open(struct inode *inode, struct file *file)
3900     unsigned int minor_number;
3901     int retval = TW_IOCTL_ERROR_OS_ENODEV;
3902    
3903     + if (!capable(CAP_SYS_ADMIN)) {
3904     + retval = -EACCES;
3905     + goto out;
3906     + }
3907     +
3908     minor_number = iminor(inode);
3909     if (minor_number >= twa_device_extension_count)
3910     goto out;
3911     diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
3912     index 33261b690774..f6179e3d6953 100644
3913     --- a/drivers/scsi/3w-xxxx.c
3914     +++ b/drivers/scsi/3w-xxxx.c
3915     @@ -1033,6 +1033,9 @@ static int tw_chrdev_open(struct inode *inode, struct file *file)
3916    
3917     dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n");
3918    
3919     + if (!capable(CAP_SYS_ADMIN))
3920     + return -EACCES;
3921     +
3922     minor_number = iminor(inode);
3923     if (minor_number >= tw_device_extension_count)
3924     return -ENODEV;
3925     diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
3926     index 0b6467206f8e..737314cac8d8 100644
3927     --- a/drivers/scsi/cxlflash/main.c
3928     +++ b/drivers/scsi/cxlflash/main.c
3929     @@ -946,9 +946,9 @@ static void cxlflash_remove(struct pci_dev *pdev)
3930     return;
3931     }
3932    
3933     - /* If a Task Management Function is active, wait for it to complete
3934     - * before continuing with remove.
3935     - */
3936     + /* Yield to running recovery threads before continuing with remove */
3937     + wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3938     + cfg->state != STATE_PROBING);
3939     spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
3940     if (cfg->tmf_active)
3941     wait_event_interruptible_lock_irq(cfg->tmf_waitq,
3942     @@ -1303,7 +1303,10 @@ static void afu_err_intr_init(struct afu *afu)
3943     for (i = 0; i < afu->num_hwqs; i++) {
3944     hwq = get_hwq(afu, i);
3945    
3946     - writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
3947     + reg = readq_be(&hwq->host_map->ctx_ctrl);
3948     + WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
3949     + reg |= SISL_MSI_SYNC_ERROR;
3950     + writeq_be(reg, &hwq->host_map->ctx_ctrl);
3951     writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
3952     }
3953     }
3954     diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
3955     index 09daa86670fc..0892fb1f0a1e 100644
3956     --- a/drivers/scsi/cxlflash/sislite.h
3957     +++ b/drivers/scsi/cxlflash/sislite.h
3958     @@ -284,6 +284,7 @@ struct sisl_host_map {
3959     __be64 cmd_room;
3960     __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
3961     #define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */
3962     +#define SISL_CTX_CTRL_LISN_MASK (0xFFULL)
3963     __be64 mbox_w; /* restricted use */
3964     __be64 sq_start; /* Submission Queue (R/W): write sequence and */
3965     __be64 sq_end; /* inclusion semantics are the same as RRQ */
3966     diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
3967     index 2e5fa9717be8..871962b2e2f6 100644
3968     --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
3969     +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
3970     @@ -328,10 +328,11 @@ enum {
3971     #define DIR_TO_DEVICE 2
3972     #define DIR_RESERVED 3
3973    
3974     -#define CMD_IS_UNCONSTRAINT(cmd) \
3975     - ((cmd == ATA_CMD_READ_LOG_EXT) || \
3976     - (cmd == ATA_CMD_READ_LOG_DMA_EXT) || \
3977     - (cmd == ATA_CMD_DEV_RESET))
3978     +#define FIS_CMD_IS_UNCONSTRAINED(fis) \
3979     + ((fis.command == ATA_CMD_READ_LOG_EXT) || \
3980     + (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \
3981     + ((fis.command == ATA_CMD_DEV_RESET) && \
3982     + ((fis.control & ATA_SRST) != 0)))
3983    
3984     static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
3985     {
3986     @@ -1044,7 +1045,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
3987     << CMD_HDR_FRAME_TYPE_OFF;
3988     dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
3989    
3990     - if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command))
3991     + if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
3992     dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;
3993    
3994     hdr->dw1 = cpu_to_le32(dw1);
3995     diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
3996     index 7195cff51d4c..9b6f5d024dba 100644
3997     --- a/drivers/scsi/megaraid.c
3998     +++ b/drivers/scsi/megaraid.c
3999     @@ -4199,6 +4199,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4000     int irq, i, j;
4001     int error = -ENODEV;
4002    
4003     + if (hba_count >= MAX_CONTROLLERS)
4004     + goto out;
4005     +
4006     if (pci_enable_device(pdev))
4007     goto out;
4008     pci_set_master(pdev);
4009     diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
4010     index d8f626567f59..06a2e3d9fc5b 100644
4011     --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
4012     +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
4013     @@ -2677,6 +2677,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
4014     pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
4015     pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
4016     } else {
4017     + if (os_timeout_value)
4018     + os_timeout_value++;
4019     +
4020     /* system pd Fast Path */
4021     io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4022     timeout_limit = (scmd->device->type == TYPE_DISK) ?
4023     diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
4024     index 7c0064500cc5..382edb79a0de 100644
4025     --- a/drivers/scsi/qedf/qedf_main.c
4026     +++ b/drivers/scsi/qedf/qedf_main.c
4027     @@ -1649,6 +1649,15 @@ static int qedf_vport_destroy(struct fc_vport *vport)
4028     struct Scsi_Host *shost = vport_to_shost(vport);
4029     struct fc_lport *n_port = shost_priv(shost);
4030     struct fc_lport *vn_port = vport->dd_data;
4031     + struct qedf_ctx *qedf = lport_priv(vn_port);
4032     +
4033     + if (!qedf) {
4034     + QEDF_ERR(NULL, "qedf is NULL.\n");
4035     + goto out;
4036     + }
4037     +
4038     + /* Set unloading bit on vport qedf_ctx to prevent more I/O */
4039     + set_bit(QEDF_UNLOADING, &qedf->flags);
4040    
4041     mutex_lock(&n_port->lp_mutex);
4042     list_del(&vn_port->list);
4043     @@ -1675,6 +1684,7 @@ static int qedf_vport_destroy(struct fc_vport *vport)
4044     if (vn_port->host)
4045     scsi_host_put(vn_port->host);
4046    
4047     +out:
4048     return 0;
4049     }
4050    
4051     diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
4052     index a5e30e9449ef..375cede0c534 100644
4053     --- a/drivers/scsi/scsi_dh.c
4054     +++ b/drivers/scsi/scsi_dh.c
4055     @@ -58,7 +58,10 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
4056     {"IBM", "3526", "rdac", },
4057     {"IBM", "3542", "rdac", },
4058     {"IBM", "3552", "rdac", },
4059     - {"SGI", "TP9", "rdac", },
4060     + {"SGI", "TP9300", "rdac", },
4061     + {"SGI", "TP9400", "rdac", },
4062     + {"SGI", "TP9500", "rdac", },
4063     + {"SGI", "TP9700", "rdac", },
4064     {"SGI", "IS", "rdac", },
4065     {"STK", "OPENstorage", "rdac", },
4066     {"STK", "FLEXLINE 380", "rdac", },
4067     diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
4068     index 3bb1f6cc297a..21c81c1feac5 100644
4069     --- a/drivers/scsi/ufs/ufshcd.c
4070     +++ b/drivers/scsi/ufs/ufshcd.c
4071     @@ -4947,6 +4947,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
4072     hba = container_of(work, struct ufs_hba, eeh_work);
4073    
4074     pm_runtime_get_sync(hba->dev);
4075     + scsi_block_requests(hba->host);
4076     err = ufshcd_get_ee_status(hba, &status);
4077     if (err) {
4078     dev_err(hba->dev, "%s: failed to get exception status %d\n",
4079     @@ -4960,6 +4961,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
4080     ufshcd_bkops_exception_event_handler(hba);
4081    
4082     out:
4083     + scsi_unblock_requests(hba->host);
4084     pm_runtime_put_sync(hba->dev);
4085     return;
4086     }
4087     @@ -6761,9 +6763,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
4088     if (list_empty(head))
4089     goto out;
4090    
4091     - ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
4092     - if (ret)
4093     - return ret;
4094     + /*
4095     + * vendor specific setup_clocks ops may depend on clocks managed by
4096     + * this standard driver hence call the vendor specific setup_clocks
4097     + * before disabling the clocks managed here.
4098     + */
4099     + if (!on) {
4100     + ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
4101     + if (ret)
4102     + return ret;
4103     + }
4104    
4105     list_for_each_entry(clki, head, list) {
4106     if (!IS_ERR_OR_NULL(clki->clk)) {
4107     @@ -6787,9 +6796,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
4108     }
4109     }
4110    
4111     - ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
4112     - if (ret)
4113     - return ret;
4114     + /*
4115     + * vendor specific setup_clocks ops may depend on clocks managed by
4116     + * this standard driver hence call the vendor specific setup_clocks
4117     + * after enabling the clocks managed here.
4118     + */
4119     + if (on) {
4120     + ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
4121     + if (ret)
4122     + return ret;
4123     + }
4124    
4125     out:
4126     if (ret) {
4127     diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
4128     index afc7ecc3c187..f4e3bd40c72e 100644
4129     --- a/drivers/soc/imx/gpcv2.c
4130     +++ b/drivers/soc/imx/gpcv2.c
4131     @@ -155,7 +155,7 @@ static int imx7_gpc_pu_pgc_sw_pdn_req(struct generic_pm_domain *genpd)
4132     return imx7_gpc_pu_pgc_sw_pxx_req(genpd, false);
4133     }
4134    
4135     -static struct imx7_pgc_domain imx7_pgc_domains[] = {
4136     +static const struct imx7_pgc_domain imx7_pgc_domains[] = {
4137     [IMX7_POWER_DOMAIN_MIPI_PHY] = {
4138     .genpd = {
4139     .name = "mipi-phy",
4140     @@ -321,11 +321,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
4141     continue;
4142     }
4143    
4144     - domain = &imx7_pgc_domains[domain_index];
4145     - domain->regmap = regmap;
4146     - domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req;
4147     - domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req;
4148     -
4149     pd_pdev = platform_device_alloc("imx7-pgc-domain",
4150     domain_index);
4151     if (!pd_pdev) {
4152     @@ -334,7 +329,20 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
4153     return -ENOMEM;
4154     }
4155    
4156     - pd_pdev->dev.platform_data = domain;
4157     + ret = platform_device_add_data(pd_pdev,
4158     + &imx7_pgc_domains[domain_index],
4159     + sizeof(imx7_pgc_domains[domain_index]));
4160     + if (ret) {
4161     + platform_device_put(pd_pdev);
4162     + of_node_put(np);
4163     + return ret;
4164     + }
4165     +
4166     + domain = pd_pdev->dev.platform_data;
4167     + domain->regmap = regmap;
4168     + domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req;
4169     + domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req;
4170     +
4171     pd_pdev->dev.parent = dev;
4172     pd_pdev->dev.of_node = np;
4173    
4174     diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
4175     index 7f8429635502..a5b0df7e6131 100644
4176     --- a/drivers/spi/spi-meson-spicc.c
4177     +++ b/drivers/spi/spi-meson-spicc.c
4178     @@ -574,10 +574,15 @@ static int meson_spicc_probe(struct platform_device *pdev)
4179     master->max_speed_hz = rate >> 2;
4180    
4181     ret = devm_spi_register_master(&pdev->dev, master);
4182     - if (!ret)
4183     - return 0;
4184     + if (ret) {
4185     + dev_err(&pdev->dev, "spi master registration failed\n");
4186     + goto out_clk;
4187     + }
4188    
4189     - dev_err(&pdev->dev, "spi master registration failed\n");
4190     + return 0;
4191     +
4192     +out_clk:
4193     + clk_disable_unprepare(spicc->core);
4194    
4195     out_master:
4196     spi_master_put(master);
4197     diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
4198     index b392cca8fa4f..1a6ec226d6e4 100644
4199     --- a/drivers/spi/spi-s3c64xx.c
4200     +++ b/drivers/spi/spi-s3c64xx.c
4201     @@ -1273,8 +1273,6 @@ static int s3c64xx_spi_resume(struct device *dev)
4202     if (ret < 0)
4203     return ret;
4204    
4205     - s3c64xx_spi_hwinit(sdd, sdd->port_id);
4206     -
4207     return spi_master_resume(master);
4208     }
4209     #endif /* CONFIG_PM_SLEEP */
4210     @@ -1312,6 +1310,8 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
4211     if (ret != 0)
4212     goto err_disable_src_clk;
4213    
4214     + s3c64xx_spi_hwinit(sdd, sdd->port_id);
4215     +
4216     return 0;
4217    
4218     err_disable_src_clk:
4219     diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
4220     index 52056535f54e..0fea18ab970e 100644
4221     --- a/drivers/spi/spi-sh-msiof.c
4222     +++ b/drivers/spi/spi-sh-msiof.c
4223     @@ -555,14 +555,16 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
4224    
4225     /* Configure native chip select mode/polarity early */
4226     clr = MDR1_SYNCMD_MASK;
4227     - set = MDR1_TRMD | TMDR1_PCON | MDR1_SYNCMD_SPI;
4228     + set = MDR1_SYNCMD_SPI;
4229     if (spi->mode & SPI_CS_HIGH)
4230     clr |= BIT(MDR1_SYNCAC_SHIFT);
4231     else
4232     set |= BIT(MDR1_SYNCAC_SHIFT);
4233     pm_runtime_get_sync(&p->pdev->dev);
4234     tmp = sh_msiof_read(p, TMDR1) & ~clr;
4235     - sh_msiof_write(p, TMDR1, tmp | set);
4236     + sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON);
4237     + tmp = sh_msiof_read(p, RMDR1) & ~clr;
4238     + sh_msiof_write(p, RMDR1, tmp | set);
4239     pm_runtime_put(&p->pdev->dev);
4240     p->native_cs_high = spi->mode & SPI_CS_HIGH;
4241     p->native_cs_inited = true;
4242     diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
4243     index 84dfef4bd6ae..f85d30dc9187 100644
4244     --- a/drivers/spi/spi.c
4245     +++ b/drivers/spi/spi.c
4246     @@ -1222,6 +1222,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
4247     if (!was_busy && ctlr->auto_runtime_pm) {
4248     ret = pm_runtime_get_sync(ctlr->dev.parent);
4249     if (ret < 0) {
4250     + pm_runtime_put_noidle(ctlr->dev.parent);
4251     dev_err(&ctlr->dev, "Failed to power device: %d\n",
4252     ret);
4253     mutex_unlock(&ctlr->io_mutex);
4254     diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
4255     index 284cdd44a2ee..8b92cf06d063 100644
4256     --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
4257     +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
4258     @@ -1710,7 +1710,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
4259     return 0;
4260     }
4261     spin_unlock(&fps->fps_lock);
4262     - rc = -EBUSY;
4263     + rc = -EAGAIN;
4264     }
4265    
4266     spin_lock(&fps->fps_lock);
4267     diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
4268     index 29e10021b906..4b4a20149894 100644
4269     --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
4270     +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
4271     @@ -47,7 +47,7 @@ static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
4272     __u64 dstcookie);
4273     static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
4274     static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);
4275     -static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx);
4276     +static void kiblnd_unmap_tx(struct kib_tx *tx);
4277     static void kiblnd_check_sends_locked(struct kib_conn *conn);
4278    
4279     static void
4280     @@ -65,7 +65,7 @@ kiblnd_tx_done(struct lnet_ni *ni, struct kib_tx *tx)
4281     LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */
4282     LASSERT(tx->tx_pool);
4283    
4284     - kiblnd_unmap_tx(ni, tx);
4285     + kiblnd_unmap_tx(tx);
4286    
4287     /* tx may have up to 2 lnet msgs to finalise */
4288     lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
4289     @@ -590,13 +590,9 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, struct kib_rdma_desc *
4290     return 0;
4291     }
4292    
4293     -static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx)
4294     +static void kiblnd_unmap_tx(struct kib_tx *tx)
4295     {
4296     - struct kib_net *net = ni->ni_data;
4297     -
4298     - LASSERT(net);
4299     -
4300     - if (net->ibn_fmr_ps)
4301     + if (tx->fmr.fmr_pfmr || tx->fmr.fmr_frd)
4302     kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
4303    
4304     if (tx->tx_nfrags) {
4305     @@ -1289,11 +1285,6 @@ kiblnd_connect_peer(struct kib_peer *peer)
4306     goto failed2;
4307     }
4308    
4309     - LASSERT(cmid->device);
4310     - CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
4311     - libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
4312     - &dev->ibd_ifip, cmid->device->name);
4313     -
4314     return;
4315    
4316     failed2:
4317     @@ -2995,8 +2986,19 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
4318     } else {
4319     rc = rdma_resolve_route(
4320     cmid, *kiblnd_tunables.kib_timeout * 1000);
4321     - if (!rc)
4322     + if (!rc) {
4323     + struct kib_net *net = peer->ibp_ni->ni_data;
4324     + struct kib_dev *dev = net->ibn_dev;
4325     +
4326     + CDEBUG(D_NET, "%s: connection bound to "\
4327     + "%s:%pI4h:%s\n",
4328     + libcfs_nid2str(peer->ibp_nid),
4329     + dev->ibd_ifname,
4330     + &dev->ibd_ifip, cmid->device->name);
4331     +
4332     return 0;
4333     + }
4334     +
4335     /* Can't initiate route resolution */
4336     CERROR("Can't resolve route for %s: %d\n",
4337     libcfs_nid2str(peer->ibp_nid), rc);
4338     diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
4339     index b5d84f3f6071..11e01c48f51a 100644
4340     --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
4341     +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
4342     @@ -1571,8 +1571,10 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
4343     return ERR_CAST(res);
4344    
4345     lock = ldlm_lock_new(res);
4346     - if (!lock)
4347     + if (!lock) {
4348     + ldlm_resource_putref(res);
4349     return ERR_PTR(-ENOMEM);
4350     + }
4351    
4352     lock->l_req_mode = mode;
4353     lock->l_ast_data = data;
4354     @@ -1615,6 +1617,8 @@ out:
4355     return ERR_PTR(rc);
4356     }
4357    
4358     +
4359     +
4360     /**
4361     * Enqueue (request) a lock.
4362     * On the client this is called from ldlm_cli_enqueue_fini
4363     diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
4364     index 0be55623bac4..364d697b2690 100644
4365     --- a/drivers/staging/lustre/lustre/llite/xattr.c
4366     +++ b/drivers/staging/lustre/lustre/llite/xattr.c
4367     @@ -93,7 +93,11 @@ ll_xattr_set_common(const struct xattr_handler *handler,
4368     __u64 valid;
4369     int rc;
4370    
4371     - if (flags == XATTR_REPLACE) {
4372     + /* When setxattr() is called with a size of 0 the value is
4373     + * unconditionally replaced by "". When removexattr() is
4374     + * called we get a NULL value and XATTR_REPLACE for flags.
4375     + */
4376     + if (!value && flags == XATTR_REPLACE) {
4377     ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
4378     valid = OBD_MD_FLXATTRRM;
4379     } else {
4380     diff --git a/drivers/staging/media/atomisp/i2c/ov2680.c b/drivers/staging/media/atomisp/i2c/ov2680.c
4381     index 51b7d61df0f5..179576224319 100644
4382     --- a/drivers/staging/media/atomisp/i2c/ov2680.c
4383     +++ b/drivers/staging/media/atomisp/i2c/ov2680.c
4384     @@ -396,12 +396,11 @@ static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
4385     {
4386     struct i2c_client *client = v4l2_get_subdevdata(sd);
4387     struct ov2680_device *dev = to_ov2680_sensor(sd);
4388     - u16 vts,hts;
4389     + u16 vts;
4390     int ret,exp_val;
4391    
4392     dev_dbg(&client->dev, "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",coarse_itg, gain, digitgain);
4393    
4394     - hts = ov2680_res[dev->fmt_idx].pixels_per_line;
4395     vts = ov2680_res[dev->fmt_idx].lines_per_frame;
4396    
4397     /* group hold */
4398     @@ -1190,7 +1189,8 @@ static int ov2680_detect(struct i2c_client *client)
4399     OV2680_SC_CMMN_SUB_ID, &high);
4400     revision = (u8) high & 0x0f;
4401    
4402     - dev_info(&client->dev, "sensor_revision id = 0x%x\n", id);
4403     + dev_info(&client->dev, "sensor_revision id = 0x%x, rev= %d\n",
4404     + id, revision);
4405    
4406     return 0;
4407     }
4408     diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
4409     index 0592ac1f2832..cfe6bb610014 100644
4410     --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
4411     +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
4412     @@ -81,7 +81,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp,
4413     get_user(kp->flags, &up->flags))
4414     return -EFAULT;
4415    
4416     - kp->base = compat_ptr(tmp);
4417     + kp->base = (void __force *)compat_ptr(tmp);
4418     get_v4l2_pix_format((struct v4l2_pix_format *)&kp->fmt, &up->fmt);
4419     return 0;
4420     }
4421     @@ -232,10 +232,10 @@ static int get_atomisp_dvs_6axis_config32(struct atomisp_dvs_6axis_config *kp,
4422     get_user(ycoords_uv, &up->ycoords_uv))
4423     return -EFAULT;
4424    
4425     - kp->xcoords_y = compat_ptr(xcoords_y);
4426     - kp->ycoords_y = compat_ptr(ycoords_y);
4427     - kp->xcoords_uv = compat_ptr(xcoords_uv);
4428     - kp->ycoords_uv = compat_ptr(ycoords_uv);
4429     + kp->xcoords_y = (void __force *)compat_ptr(xcoords_y);
4430     + kp->ycoords_y = (void __force *)compat_ptr(ycoords_y);
4431     + kp->xcoords_uv = (void __force *)compat_ptr(xcoords_uv);
4432     + kp->ycoords_uv = (void __force *)compat_ptr(ycoords_uv);
4433     return 0;
4434     }
4435    
4436     @@ -296,7 +296,7 @@ static int get_atomisp_metadata_stat32(struct atomisp_metadata *kp,
4437     return -EFAULT;
4438    
4439     kp->data = compat_ptr(data);
4440     - kp->effective_width = compat_ptr(effective_width);
4441     + kp->effective_width = (void __force *)compat_ptr(effective_width);
4442     return 0;
4443     }
4444    
4445     @@ -360,7 +360,7 @@ static int get_atomisp_metadata_by_type_stat32(
4446     return -EFAULT;
4447    
4448     kp->data = compat_ptr(data);
4449     - kp->effective_width = compat_ptr(effective_width);
4450     + kp->effective_width = (void __force *)compat_ptr(effective_width);
4451     return 0;
4452     }
4453    
4454     @@ -437,7 +437,7 @@ static int get_atomisp_overlay32(struct atomisp_overlay *kp,
4455     get_user(kp->overlay_start_x, &up->overlay_start_y))
4456     return -EFAULT;
4457    
4458     - kp->frame = compat_ptr(frame);
4459     + kp->frame = (void __force *)compat_ptr(frame);
4460     return 0;
4461     }
4462    
4463     @@ -481,7 +481,7 @@ static int get_atomisp_calibration_group32(
4464     get_user(calb_grp_values, &up->calb_grp_values))
4465     return -EFAULT;
4466    
4467     - kp->calb_grp_values = compat_ptr(calb_grp_values);
4468     + kp->calb_grp_values = (void __force *)compat_ptr(calb_grp_values);
4469     return 0;
4470     }
4471    
4472     @@ -703,8 +703,8 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp,
4473     return -EFAULT;
4474    
4475     while (n >= 0) {
4476     - compat_uptr_t *src = (compat_uptr_t *)up + n;
4477     - uintptr_t *dst = (uintptr_t *)kp + n;
4478     + compat_uptr_t __user *src = ((compat_uptr_t __user *)up) + n;
4479     + uintptr_t *dst = ((uintptr_t *)kp) + n;
4480    
4481     if (get_user((*dst), src))
4482     return -EFAULT;
4483     @@ -751,12 +751,12 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp,
4484     #endif
4485     return -EFAULT;
4486    
4487     - kp->shading_table = user_ptr + offset;
4488     + kp->shading_table = (void __force *)user_ptr + offset;
4489     offset = sizeof(struct atomisp_shading_table);
4490     if (!kp->shading_table)
4491     return -EFAULT;
4492    
4493     - if (copy_to_user(kp->shading_table,
4494     + if (copy_to_user((void __user *)kp->shading_table,
4495     &karg.shading_table,
4496     sizeof(struct atomisp_shading_table)))
4497     return -EFAULT;
4498     @@ -777,13 +777,14 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp,
4499     #endif
4500     return -EFAULT;
4501    
4502     - kp->morph_table = user_ptr + offset;
4503     + kp->morph_table = (void __force *)user_ptr + offset;
4504     offset += sizeof(struct atomisp_morph_table);
4505     if (!kp->morph_table)
4506     return -EFAULT;
4507    
4508     - if (copy_to_user(kp->morph_table, &karg.morph_table,
4509     - sizeof(struct atomisp_morph_table)))
4510     + if (copy_to_user((void __user *)kp->morph_table,
4511     + &karg.morph_table,
4512     + sizeof(struct atomisp_morph_table)))
4513     return -EFAULT;
4514     }
4515    
4516     @@ -802,13 +803,14 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp,
4517     #endif
4518     return -EFAULT;
4519    
4520     - kp->dvs2_coefs = user_ptr + offset;
4521     + kp->dvs2_coefs = (void __force *)user_ptr + offset;
4522     offset += sizeof(struct atomisp_dis_coefficients);
4523     if (!kp->dvs2_coefs)
4524     return -EFAULT;
4525    
4526     - if (copy_to_user(kp->dvs2_coefs, &karg.dvs2_coefs,
4527     - sizeof(struct atomisp_dis_coefficients)))
4528     + if (copy_to_user((void __user *)kp->dvs2_coefs,
4529     + &karg.dvs2_coefs,
4530     + sizeof(struct atomisp_dis_coefficients)))
4531     return -EFAULT;
4532     }
4533     /* handle dvs 6axis configuration */
4534     @@ -826,13 +828,14 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp,
4535     #endif
4536     return -EFAULT;
4537    
4538     - kp->dvs_6axis_config = user_ptr + offset;
4539     + kp->dvs_6axis_config = (void __force *)user_ptr + offset;
4540     offset += sizeof(struct atomisp_dvs_6axis_config);
4541     if (!kp->dvs_6axis_config)
4542     return -EFAULT;
4543    
4544     - if (copy_to_user(kp->dvs_6axis_config, &karg.dvs_6axis_config,
4545     - sizeof(struct atomisp_dvs_6axis_config)))
4546     + if (copy_to_user((void __user *)kp->dvs_6axis_config,
4547     + &karg.dvs_6axis_config,
4548     + sizeof(struct atomisp_dvs_6axis_config)))
4549     return -EFAULT;
4550     }
4551     }
4552     @@ -891,7 +894,7 @@ static int get_atomisp_sensor_ae_bracketing_lut(
4553     get_user(lut, &up->lut))
4554     return -EFAULT;
4555    
4556     - kp->lut = compat_ptr(lut);
4557     + kp->lut = (void __force *)compat_ptr(lut);
4558     return 0;
4559     }
4560    
4561     diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
4562     index 486be990d7fc..a457034818c3 100644
4563     --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
4564     +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
4565     @@ -601,6 +601,7 @@ reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking)
4566     }
4567    
4568     if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
4569     + up(&state->slot_available_event);
4570     pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
4571     return NULL;
4572     }
4573     diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
4574     index ac83f721db24..d60069b5dc98 100644
4575     --- a/drivers/thermal/samsung/exynos_tmu.c
4576     +++ b/drivers/thermal/samsung/exynos_tmu.c
4577     @@ -598,6 +598,7 @@ static int exynos5433_tmu_initialize(struct platform_device *pdev)
4578     threshold_code = temp_to_code(data, temp);
4579    
4580     rising_threshold = readl(data->base + rising_reg_offset);
4581     + rising_threshold &= ~(0xff << j * 8);
4582     rising_threshold |= (threshold_code << j * 8);
4583     writel(rising_threshold, data->base + rising_reg_offset);
4584    
4585     diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
4586     index 16331a90c1e8..9da8474fe50a 100644
4587     --- a/drivers/tty/hvc/hvc_opal.c
4588     +++ b/drivers/tty/hvc/hvc_opal.c
4589     @@ -332,7 +332,6 @@ static void udbg_init_opal_common(void)
4590     udbg_putc = udbg_opal_putc;
4591     udbg_getc = udbg_opal_getc;
4592     udbg_getc_poll = udbg_opal_getc_poll;
4593     - tb_ticks_per_usec = 0x200; /* Make udelay not suck */
4594     }
4595    
4596     void __init hvc_opal_init_early(void)
4597     diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
4598     index 64338442050e..899e8fe5e00f 100644
4599     --- a/drivers/tty/pty.c
4600     +++ b/drivers/tty/pty.c
4601     @@ -110,16 +110,19 @@ static void pty_unthrottle(struct tty_struct *tty)
4602     static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
4603     {
4604     struct tty_struct *to = tty->link;
4605     + unsigned long flags;
4606    
4607     if (tty->stopped)
4608     return 0;
4609    
4610     if (c > 0) {
4611     + spin_lock_irqsave(&to->port->lock, flags);
4612     /* Stuff the data into the input queue of the other end */
4613     c = tty_insert_flip_string(to->port, buf, c);
4614     /* And shovel */
4615     if (c)
4616     tty_flip_buffer_push(to->port);
4617     + spin_unlock_irqrestore(&to->port->lock, flags);
4618     }
4619     return c;
4620     }
4621     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
4622     index a8bc48b26c23..a9db0887edca 100644
4623     --- a/drivers/usb/core/hub.c
4624     +++ b/drivers/usb/core/hub.c
4625     @@ -3361,6 +3361,10 @@ static int wait_for_connected(struct usb_device *udev,
4626     while (delay_ms < 2000) {
4627     if (status || *portstatus & USB_PORT_STAT_CONNECTION)
4628     break;
4629     + if (!port_is_power_on(hub, *portstatus)) {
4630     + status = -ENODEV;
4631     + break;
4632     + }
4633     msleep(20);
4634     delay_ms += 20;
4635     status = hub_port_status(hub, *port1, portstatus, portchange);
4636     diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
4637     index 4cfa72cb0a91..c12a1a6554ba 100644
4638     --- a/drivers/usb/gadget/udc/renesas_usb3.c
4639     +++ b/drivers/usb/gadget/udc/renesas_usb3.c
4640     @@ -334,6 +334,7 @@ struct renesas_usb3 {
4641     struct usb_gadget_driver *driver;
4642     struct extcon_dev *extcon;
4643     struct work_struct extcon_work;
4644     + struct dentry *dentry;
4645    
4646     struct renesas_usb3_ep *usb3_ep;
4647     int num_usb3_eps;
4648     @@ -2397,8 +2398,12 @@ static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
4649    
4650     file = debugfs_create_file("b_device", 0644, root, usb3,
4651     &renesas_usb3_b_device_fops);
4652     - if (!file)
4653     + if (!file) {
4654     dev_info(dev, "%s: Can't create debugfs mode\n", __func__);
4655     + debugfs_remove_recursive(root);
4656     + } else {
4657     + usb3->dentry = root;
4658     + }
4659     }
4660    
4661     /*------- platform_driver ------------------------------------------------*/
4662     @@ -2406,6 +2411,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
4663     {
4664     struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
4665    
4666     + debugfs_remove_recursive(usb3->dentry);
4667     device_remove_file(&pdev->dev, &dev_attr_role);
4668    
4669     usb_del_gadget_udc(&usb3->gadget);
4670     diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
4671     index 126991046eb7..0212f0ee8aea 100644
4672     --- a/drivers/vfio/mdev/mdev_core.c
4673     +++ b/drivers/vfio/mdev/mdev_core.c
4674     @@ -66,34 +66,6 @@ uuid_le mdev_uuid(struct mdev_device *mdev)
4675     }
4676     EXPORT_SYMBOL(mdev_uuid);
4677    
4678     -static int _find_mdev_device(struct device *dev, void *data)
4679     -{
4680     - struct mdev_device *mdev;
4681     -
4682     - if (!dev_is_mdev(dev))
4683     - return 0;
4684     -
4685     - mdev = to_mdev_device(dev);
4686     -
4687     - if (uuid_le_cmp(mdev->uuid, *(uuid_le *)data) == 0)
4688     - return 1;
4689     -
4690     - return 0;
4691     -}
4692     -
4693     -static bool mdev_device_exist(struct mdev_parent *parent, uuid_le uuid)
4694     -{
4695     - struct device *dev;
4696     -
4697     - dev = device_find_child(parent->dev, &uuid, _find_mdev_device);
4698     - if (dev) {
4699     - put_device(dev);
4700     - return true;
4701     - }
4702     -
4703     - return false;
4704     -}
4705     -
4706     /* Should be called holding parent_list_lock */
4707     static struct mdev_parent *__find_parent_device(struct device *dev)
4708     {
4709     @@ -221,7 +193,6 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
4710     }
4711    
4712     kref_init(&parent->ref);
4713     - mutex_init(&parent->lock);
4714    
4715     parent->dev = dev;
4716     parent->ops = ops;
4717     @@ -297,6 +268,10 @@ static void mdev_device_release(struct device *dev)
4718     {
4719     struct mdev_device *mdev = to_mdev_device(dev);
4720    
4721     + mutex_lock(&mdev_list_lock);
4722     + list_del(&mdev->next);
4723     + mutex_unlock(&mdev_list_lock);
4724     +
4725     dev_dbg(&mdev->dev, "MDEV: destroying\n");
4726     kfree(mdev);
4727     }
4728     @@ -304,7 +279,7 @@ static void mdev_device_release(struct device *dev)
4729     int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
4730     {
4731     int ret;
4732     - struct mdev_device *mdev;
4733     + struct mdev_device *mdev, *tmp;
4734     struct mdev_parent *parent;
4735     struct mdev_type *type = to_mdev_type(kobj);
4736    
4737     @@ -312,21 +287,28 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
4738     if (!parent)
4739     return -EINVAL;
4740    
4741     - mutex_lock(&parent->lock);
4742     + mutex_lock(&mdev_list_lock);
4743    
4744     /* Check for duplicate */
4745     - if (mdev_device_exist(parent, uuid)) {
4746     - ret = -EEXIST;
4747     - goto create_err;
4748     + list_for_each_entry(tmp, &mdev_list, next) {
4749     + if (!uuid_le_cmp(tmp->uuid, uuid)) {
4750     + mutex_unlock(&mdev_list_lock);
4751     + ret = -EEXIST;
4752     + goto mdev_fail;
4753     + }
4754     }
4755    
4756     mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
4757     if (!mdev) {
4758     + mutex_unlock(&mdev_list_lock);
4759     ret = -ENOMEM;
4760     - goto create_err;
4761     + goto mdev_fail;
4762     }
4763    
4764     memcpy(&mdev->uuid, &uuid, sizeof(uuid_le));
4765     + list_add(&mdev->next, &mdev_list);
4766     + mutex_unlock(&mdev_list_lock);
4767     +
4768     mdev->parent = parent;
4769     kref_init(&mdev->ref);
4770    
4771     @@ -338,35 +320,28 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
4772     ret = device_register(&mdev->dev);
4773     if (ret) {
4774     put_device(&mdev->dev);
4775     - goto create_err;
4776     + goto mdev_fail;
4777     }
4778    
4779     ret = mdev_device_create_ops(kobj, mdev);
4780     if (ret)
4781     - goto create_failed;
4782     + goto create_fail;
4783    
4784     ret = mdev_create_sysfs_files(&mdev->dev, type);
4785     if (ret) {
4786     mdev_device_remove_ops(mdev, true);
4787     - goto create_failed;
4788     + goto create_fail;
4789     }
4790    
4791     mdev->type_kobj = kobj;
4792     + mdev->active = true;
4793     dev_dbg(&mdev->dev, "MDEV: created\n");
4794    
4795     - mutex_unlock(&parent->lock);
4796     -
4797     - mutex_lock(&mdev_list_lock);
4798     - list_add(&mdev->next, &mdev_list);
4799     - mutex_unlock(&mdev_list_lock);
4800     -
4801     - return ret;
4802     + return 0;
4803    
4804     -create_failed:
4805     +create_fail:
4806     device_unregister(&mdev->dev);
4807     -
4808     -create_err:
4809     - mutex_unlock(&parent->lock);
4810     +mdev_fail:
4811     mdev_put_parent(parent);
4812     return ret;
4813     }
4814     @@ -377,44 +352,39 @@ int mdev_device_remove(struct device *dev, bool force_remove)
4815     struct mdev_parent *parent;
4816     struct mdev_type *type;
4817     int ret;
4818     - bool found = false;
4819    
4820     mdev = to_mdev_device(dev);
4821    
4822     mutex_lock(&mdev_list_lock);
4823     list_for_each_entry(tmp, &mdev_list, next) {
4824     - if (tmp == mdev) {
4825     - found = true;
4826     + if (tmp == mdev)
4827     break;
4828     - }
4829     }
4830    
4831     - if (found)
4832     - list_del(&mdev->next);
4833     + if (tmp != mdev) {
4834     + mutex_unlock(&mdev_list_lock);
4835     + return -ENODEV;
4836     + }
4837    
4838     - mutex_unlock(&mdev_list_lock);
4839     + if (!mdev->active) {
4840     + mutex_unlock(&mdev_list_lock);
4841     + return -EAGAIN;
4842     + }
4843    
4844     - if (!found)
4845     - return -ENODEV;
4846     + mdev->active = false;
4847     + mutex_unlock(&mdev_list_lock);
4848    
4849     type = to_mdev_type(mdev->type_kobj);
4850     parent = mdev->parent;
4851     - mutex_lock(&parent->lock);
4852    
4853     ret = mdev_device_remove_ops(mdev, force_remove);
4854     if (ret) {
4855     - mutex_unlock(&parent->lock);
4856     -
4857     - mutex_lock(&mdev_list_lock);
4858     - list_add(&mdev->next, &mdev_list);
4859     - mutex_unlock(&mdev_list_lock);
4860     -
4861     + mdev->active = true;
4862     return ret;
4863     }
4864    
4865     mdev_remove_sysfs_files(dev, type);
4866     device_unregister(dev);
4867     - mutex_unlock(&parent->lock);
4868     mdev_put_parent(parent);
4869    
4870     return 0;
4871     diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
4872     index a9cefd70a705..b5819b7d7ef7 100644
4873     --- a/drivers/vfio/mdev/mdev_private.h
4874     +++ b/drivers/vfio/mdev/mdev_private.h
4875     @@ -20,7 +20,6 @@ struct mdev_parent {
4876     struct device *dev;
4877     const struct mdev_parent_ops *ops;
4878     struct kref ref;
4879     - struct mutex lock;
4880     struct list_head next;
4881     struct kset *mdev_types_kset;
4882     struct list_head type_list;
4883     @@ -34,6 +33,7 @@ struct mdev_device {
4884     struct kref ref;
4885     struct list_head next;
4886     struct kobject *type_kobj;
4887     + bool active;
4888     };
4889    
4890     #define to_mdev_device(dev) container_of(dev, struct mdev_device, dev)
4891     diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
4892     index 4c27f4be3c3d..aa9e792110e3 100644
4893     --- a/drivers/vfio/platform/vfio_platform_common.c
4894     +++ b/drivers/vfio/platform/vfio_platform_common.c
4895     @@ -681,18 +681,23 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev,
4896     group = vfio_iommu_group_get(dev);
4897     if (!group) {
4898     pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
4899     - return -EINVAL;
4900     + ret = -EINVAL;
4901     + goto put_reset;
4902     }
4903    
4904     ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
4905     - if (ret) {
4906     - vfio_iommu_group_put(group, dev);
4907     - return ret;
4908     - }
4909     + if (ret)
4910     + goto put_iommu;
4911    
4912     mutex_init(&vdev->igate);
4913    
4914     return 0;
4915     +
4916     +put_iommu:
4917     + vfio_iommu_group_put(group, dev);
4918     +put_reset:
4919     + vfio_platform_put_reset(vdev);
4920     + return ret;
4921     }
4922     EXPORT_SYMBOL_GPL(vfio_platform_probe_common);
4923    
4924     diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
4925     index d639378e36ac..50eeb74ddc0a 100644
4926     --- a/drivers/vfio/vfio_iommu_type1.c
4927     +++ b/drivers/vfio/vfio_iommu_type1.c
4928     @@ -83,6 +83,7 @@ struct vfio_dma {
4929     size_t size; /* Map size (bytes) */
4930     int prot; /* IOMMU_READ/WRITE */
4931     bool iommu_mapped;
4932     + bool lock_cap; /* capable(CAP_IPC_LOCK) */
4933     struct task_struct *task;
4934     struct rb_root pfn_list; /* Ex-user pinned pfn list */
4935     };
4936     @@ -246,29 +247,25 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
4937     return ret;
4938     }
4939    
4940     -static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap)
4941     +static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
4942     {
4943     struct mm_struct *mm;
4944     - bool is_current;
4945     int ret;
4946    
4947     if (!npage)
4948     return 0;
4949    
4950     - is_current = (task->mm == current->mm);
4951     -
4952     - mm = is_current ? task->mm : get_task_mm(task);
4953     + mm = async ? get_task_mm(dma->task) : dma->task->mm;
4954     if (!mm)
4955     return -ESRCH; /* process exited */
4956    
4957     ret = down_write_killable(&mm->mmap_sem);
4958     if (!ret) {
4959     if (npage > 0) {
4960     - if (lock_cap ? !*lock_cap :
4961     - !has_capability(task, CAP_IPC_LOCK)) {
4962     + if (!dma->lock_cap) {
4963     unsigned long limit;
4964    
4965     - limit = task_rlimit(task,
4966     + limit = task_rlimit(dma->task,
4967     RLIMIT_MEMLOCK) >> PAGE_SHIFT;
4968    
4969     if (mm->locked_vm + npage > limit)
4970     @@ -282,7 +279,7 @@ static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap)
4971     up_write(&mm->mmap_sem);
4972     }
4973    
4974     - if (!is_current)
4975     + if (async)
4976     mmput(mm);
4977    
4978     return ret;
4979     @@ -391,7 +388,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
4980     */
4981     static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
4982     long npage, unsigned long *pfn_base,
4983     - bool lock_cap, unsigned long limit)
4984     + unsigned long limit)
4985     {
4986     unsigned long pfn = 0;
4987     long ret, pinned = 0, lock_acct = 0;
4988     @@ -414,7 +411,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
4989     * pages are already counted against the user.
4990     */
4991     if (!rsvd && !vfio_find_vpfn(dma, iova)) {
4992     - if (!lock_cap && current->mm->locked_vm + 1 > limit) {
4993     + if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
4994     put_pfn(*pfn_base, dma->prot);
4995     pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
4996     limit << PAGE_SHIFT);
4997     @@ -440,7 +437,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
4998     }
4999    
5000     if (!rsvd && !vfio_find_vpfn(dma, iova)) {
5001     - if (!lock_cap &&
5002     + if (!dma->lock_cap &&
5003     current->mm->locked_vm + lock_acct + 1 > limit) {
5004     put_pfn(pfn, dma->prot);
5005     pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
5006     @@ -453,7 +450,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
5007     }
5008    
5009     out:
5010     - ret = vfio_lock_acct(current, lock_acct, &lock_cap);
5011     + ret = vfio_lock_acct(dma, lock_acct, false);
5012    
5013     unpin_out:
5014     if (ret) {
5015     @@ -484,7 +481,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
5016     }
5017    
5018     if (do_accounting)
5019     - vfio_lock_acct(dma->task, locked - unlocked, NULL);
5020     + vfio_lock_acct(dma, locked - unlocked, true);
5021    
5022     return unlocked;
5023     }
5024     @@ -501,7 +498,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
5025    
5026     ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
5027     if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
5028     - ret = vfio_lock_acct(dma->task, 1, NULL);
5029     + ret = vfio_lock_acct(dma, 1, true);
5030     if (ret) {
5031     put_pfn(*pfn_base, dma->prot);
5032     if (ret == -ENOMEM)
5033     @@ -528,7 +525,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
5034     unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
5035    
5036     if (do_accounting)
5037     - vfio_lock_acct(dma->task, -unlocked, NULL);
5038     + vfio_lock_acct(dma, -unlocked, true);
5039    
5040     return unlocked;
5041     }
5042     @@ -723,7 +720,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
5043    
5044     dma->iommu_mapped = false;
5045     if (do_accounting) {
5046     - vfio_lock_acct(dma->task, -unlocked, NULL);
5047     + vfio_lock_acct(dma, -unlocked, true);
5048     return 0;
5049     }
5050     return unlocked;
5051     @@ -935,14 +932,12 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
5052     size_t size = map_size;
5053     long npage;
5054     unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
5055     - bool lock_cap = capable(CAP_IPC_LOCK);
5056     int ret = 0;
5057    
5058     while (size) {
5059     /* Pin a contiguous chunk of memory */
5060     npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
5061     - size >> PAGE_SHIFT, &pfn,
5062     - lock_cap, limit);
5063     + size >> PAGE_SHIFT, &pfn, limit);
5064     if (npage <= 0) {
5065     WARN_ON(!npage);
5066     ret = (int)npage;
5067     @@ -1017,8 +1012,36 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
5068     dma->iova = iova;
5069     dma->vaddr = vaddr;
5070     dma->prot = prot;
5071     - get_task_struct(current);
5072     - dma->task = current;
5073     +
5074     + /*
5075     + * We need to be able to both add to a task's locked memory and test
5076     + * against the locked memory limit and we need to be able to do both
5077     + * outside of this call path as pinning can be asynchronous via the
5078     + * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a
5079     + * task_struct and VM locked pages requires an mm_struct, however
5080     + * holding an indefinite mm reference is not recommended, therefore we
5081     + * only hold a reference to a task. We could hold a reference to
5082     + * current, however QEMU uses this call path through vCPU threads,
5083     + * which can be killed resulting in a NULL mm and failure in the unmap
5084     + * path when called via a different thread. Avoid this problem by
5085     + * using the group_leader as threads within the same group require
5086     + * both CLONE_THREAD and CLONE_VM and will therefore use the same
5087     + * mm_struct.
5088     + *
5089     + * Previously we also used the task for testing CAP_IPC_LOCK at the
5090     + * time of pinning and accounting, however has_capability() makes use
5091     + * of real_cred, a copy-on-write field, so we can't guarantee that it
5092     + * matches group_leader, or in fact that it might not change by the
5093     + * time it's evaluated. If a process were to call MAP_DMA with
5094     + * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
5095     + * possibly see different results for an iommu_mapped vfio_dma vs
5096     + * externally mapped. Therefore track CAP_IPC_LOCK in vfio_dma at the
5097     + * time of calling MAP_DMA.
5098     + */
5099     + get_task_struct(current->group_leader);
5100     + dma->task = current->group_leader;
5101     + dma->lock_cap = capable(CAP_IPC_LOCK);
5102     +
5103     dma->pfn_list = RB_ROOT;
5104    
5105     /* Insert zero-sized and grow as we map chunks of it */
5106     @@ -1053,7 +1076,6 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
5107     struct vfio_domain *d;
5108     struct rb_node *n;
5109     unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
5110     - bool lock_cap = capable(CAP_IPC_LOCK);
5111     int ret;
5112    
5113     /* Arbitrarily pick the first domain in the list for lookups */
5114     @@ -1100,8 +1122,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
5115    
5116     npage = vfio_pin_pages_remote(dma, vaddr,
5117     n >> PAGE_SHIFT,
5118     - &pfn, lock_cap,
5119     - limit);
5120     + &pfn, limit);
5121     if (npage <= 0) {
5122     WARN_ON(!npage);
5123     ret = (int)npage;
5124     @@ -1378,7 +1399,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
5125     if (!is_invalid_reserved_pfn(vpfn->pfn))
5126     locked++;
5127     }
5128     - vfio_lock_acct(dma->task, locked - unlocked, NULL);
5129     + vfio_lock_acct(dma, locked - unlocked, true);
5130     }
5131     }
5132    
5133     diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
5134     index 1c2289ddd555..0fa7d2bd0e48 100644
5135     --- a/drivers/video/backlight/pwm_bl.c
5136     +++ b/drivers/video/backlight/pwm_bl.c
5137     @@ -301,14 +301,14 @@ static int pwm_backlight_probe(struct platform_device *pdev)
5138    
5139     /*
5140     * If the GPIO is not known to be already configured as output, that
5141     - * is, if gpiod_get_direction returns either GPIOF_DIR_IN or -EINVAL,
5142     - * change the direction to output and set the GPIO as active.
5143     + * is, if gpiod_get_direction returns either 1 or -EINVAL, change the
5144     + * direction to output and set the GPIO as active.
5145     * Do not force the GPIO to active when it was already output as it
5146     * could cause backlight flickering or we would enable the backlight too
5147     * early. Leave the decision of the initial backlight state for later.
5148     */
5149     if (pb->enable_gpio &&
5150     - gpiod_get_direction(pb->enable_gpio) != GPIOF_DIR_OUT)
5151     + gpiod_get_direction(pb->enable_gpio) != 0)
5152     gpiod_direction_output(pb->enable_gpio, 1);
5153    
5154     pb->power_supply = devm_regulator_get(&pdev->dev, "power");
5155     diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
5156     index 2a20fc163ed0..4c62ad74aec0 100644
5157     --- a/drivers/watchdog/da9063_wdt.c
5158     +++ b/drivers/watchdog/da9063_wdt.c
5159     @@ -102,10 +102,23 @@ static int da9063_wdt_set_timeout(struct watchdog_device *wdd,
5160     {
5161     struct da9063 *da9063 = watchdog_get_drvdata(wdd);
5162     unsigned int selector;
5163     - int ret;
5164     + int ret = 0;
5165    
5166     selector = da9063_wdt_timeout_to_sel(timeout);
5167     - ret = _da9063_wdt_set_timeout(da9063, selector);
5168     +
5169     + /*
5170     + * There are two cases when a set_timeout() will be called:
5171     + * 1. The watchdog is off and someone wants to set the timeout for the
5172     + * further use.
5173     + * 2. The watchdog is already running and a new timeout value should be
5174     + * set.
5175     + *
5176     + * The watchdog can't store a timeout value not equal zero without
5177     + * enabling the watchdog, so the timeout must be buffered by the driver.
5178     + */
5179     + if (watchdog_active(wdd))
5180     + ret = _da9063_wdt_set_timeout(da9063, selector);
5181     +
5182     if (ret)
5183     dev_err(da9063->dev, "Failed to set watchdog timeout (err = %d)\n",
5184     ret);
5185     diff --git a/fs/block_dev.c b/fs/block_dev.c
5186     index 789f55e851ae..3323eec5c164 100644
5187     --- a/fs/block_dev.c
5188     +++ b/fs/block_dev.c
5189     @@ -231,7 +231,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
5190    
5191     ret = bio_iov_iter_get_pages(&bio, iter);
5192     if (unlikely(ret))
5193     - return ret;
5194     + goto out;
5195     ret = bio.bi_iter.bi_size;
5196    
5197     if (iov_iter_rw(iter) == READ) {
5198     @@ -260,12 +260,13 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
5199     put_page(bvec->bv_page);
5200     }
5201    
5202     - if (vecs != inline_vecs)
5203     - kfree(vecs);
5204     -
5205     if (unlikely(bio.bi_status))
5206     ret = blk_status_to_errno(bio.bi_status);
5207    
5208     +out:
5209     + if (vecs != inline_vecs)
5210     + kfree(vecs);
5211     +
5212     bio_uninit(&bio);
5213    
5214     return ret;
5215     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
5216     index f5b90dc137ec..28a58f40f3a4 100644
5217     --- a/fs/btrfs/inode.c
5218     +++ b/fs/btrfs/inode.c
5219     @@ -3162,6 +3162,9 @@ out:
5220     /* once for the tree */
5221     btrfs_put_ordered_extent(ordered_extent);
5222    
5223     + /* Try to release some metadata so we don't get an OOM but don't wait */
5224     + btrfs_btree_balance_dirty_nodelay(fs_info);
5225     +
5226     return ret;
5227     }
5228    
5229     @@ -4737,7 +4740,10 @@ delete:
5230     extent_num_bytes, 0,
5231     btrfs_header_owner(leaf),
5232     ino, extent_offset);
5233     - BUG_ON(ret);
5234     + if (ret) {
5235     + btrfs_abort_transaction(trans, ret);
5236     + break;
5237     + }
5238     if (btrfs_should_throttle_delayed_refs(trans, fs_info))
5239     btrfs_async_run_delayed_refs(fs_info,
5240     trans->delayed_ref_updates * 2,
5241     @@ -5496,13 +5502,18 @@ void btrfs_evict_inode(struct inode *inode)
5242     trans->block_rsv = rsv;
5243    
5244     ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5245     - if (ret != -ENOSPC && ret != -EAGAIN)
5246     + if (ret) {
5247     + trans->block_rsv = &fs_info->trans_block_rsv;
5248     + btrfs_end_transaction(trans);
5249     + btrfs_btree_balance_dirty(fs_info);
5250     + if (ret != -ENOSPC && ret != -EAGAIN) {
5251     + btrfs_orphan_del(NULL, BTRFS_I(inode));
5252     + btrfs_free_block_rsv(fs_info, rsv);
5253     + goto no_delete;
5254     + }
5255     + } else {
5256     break;
5257     -
5258     - trans->block_rsv = &fs_info->trans_block_rsv;
5259     - btrfs_end_transaction(trans);
5260     - trans = NULL;
5261     - btrfs_btree_balance_dirty(fs_info);
5262     + }
5263     }
5264    
5265     btrfs_free_block_rsv(fs_info, rsv);
5266     @@ -5511,12 +5522,8 @@ void btrfs_evict_inode(struct inode *inode)
5267     * Errors here aren't a big deal, it just means we leave orphan items
5268     * in the tree. They will be cleaned up on the next mount.
5269     */
5270     - if (ret == 0) {
5271     - trans->block_rsv = root->orphan_block_rsv;
5272     - btrfs_orphan_del(trans, BTRFS_I(inode));
5273     - } else {
5274     - btrfs_orphan_del(NULL, BTRFS_I(inode));
5275     - }
5276     + trans->block_rsv = root->orphan_block_rsv;
5277     + btrfs_orphan_del(trans, BTRFS_I(inode));
5278    
5279     trans->block_rsv = &fs_info->trans_block_rsv;
5280     if (!(root == fs_info->tree_root ||
5281     diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
5282     index e172d4843eae..473ad5985aa3 100644
5283     --- a/fs/btrfs/qgroup.c
5284     +++ b/fs/btrfs/qgroup.c
5285     @@ -2499,6 +2499,21 @@ out:
5286     spin_unlock(&fs_info->qgroup_lock);
5287     }
5288    
5289     +/*
5290     + * Check if the leaf is the last leaf, which means all node pointers
5291     + * are at their last position.
5292     + */
5293     +static bool is_last_leaf(struct btrfs_path *path)
5294     +{
5295     + int i;
5296     +
5297     + for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
5298     + if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
5299     + return false;
5300     + }
5301     + return true;
5302     +}
5303     +
5304     /*
5305     * returns < 0 on error, 0 when more leafs are to be scanned.
5306     * returns 1 when done.
5307     @@ -2512,6 +2527,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
5308     struct ulist *roots = NULL;
5309     struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
5310     u64 num_bytes;
5311     + bool done;
5312     int slot;
5313     int ret;
5314    
5315     @@ -2540,6 +2556,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
5316     mutex_unlock(&fs_info->qgroup_rescan_lock);
5317     return ret;
5318     }
5319     + done = is_last_leaf(path);
5320    
5321     btrfs_item_key_to_cpu(path->nodes[0], &found,
5322     btrfs_header_nritems(path->nodes[0]) - 1);
5323     @@ -2586,6 +2603,8 @@ out:
5324     }
5325     btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
5326    
5327     + if (done && !ret)
5328     + ret = 1;
5329     return ret;
5330     }
5331    
5332     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
5333     index bf4e22df7c97..e1b4a59485df 100644
5334     --- a/fs/btrfs/tree-log.c
5335     +++ b/fs/btrfs/tree-log.c
5336     @@ -3041,8 +3041,11 @@ out_wake_log_root:
5337     mutex_unlock(&log_root_tree->log_mutex);
5338    
5339     /*
5340     - * The barrier before waitqueue_active is implied by mutex_unlock
5341     + * The barrier before waitqueue_active is needed so all the updates
5342     + * above are seen by the woken threads. It might not be necessary, but
5343     + * proving that seems to be hard.
5344     */
5345     + smp_mb();
5346     if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
5347     wake_up(&log_root_tree->log_commit_wait[index2]);
5348     out:
5349     @@ -3053,8 +3056,11 @@ out:
5350     mutex_unlock(&root->log_mutex);
5351    
5352     /*
5353     - * The barrier before waitqueue_active is implied by mutex_unlock
5354     + * The barrier before waitqueue_active is needed so all the updates
5355     + * above are seen by the woken threads. It might not be necessary, but
5356     + * proving that seems to be hard.
5357     */
5358     + smp_mb();
5359     if (waitqueue_active(&root->log_commit_wait[index1]))
5360     wake_up(&root->log_commit_wait[index1]);
5361     return ret;
5362     diff --git a/fs/ceph/super.c b/fs/ceph/super.c
5363     index 48ffe720bf09..b79b1211a2b5 100644
5364     --- a/fs/ceph/super.c
5365     +++ b/fs/ceph/super.c
5366     @@ -254,7 +254,7 @@ static int parse_fsopt_token(char *c, void *private)
5367     case Opt_rasize:
5368     if (intval < 0)
5369     return -EINVAL;
5370     - fsopt->rasize = ALIGN(intval + PAGE_SIZE - 1, PAGE_SIZE);
5371     + fsopt->rasize = ALIGN(intval, PAGE_SIZE);
5372     break;
5373     case Opt_caps_wanted_delay_min:
5374     if (intval < 1)
5375     diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
5376     index d262a93d9b31..daf2683f0655 100644
5377     --- a/fs/crypto/crypto.c
5378     +++ b/fs/crypto/crypto.c
5379     @@ -446,8 +446,17 @@ fail:
5380     */
5381     static int __init fscrypt_init(void)
5382     {
5383     + /*
5384     + * Use an unbound workqueue to allow bios to be decrypted in parallel
5385     + * even when they happen to complete on the same CPU. This sacrifices
5386     + * locality, but it's worthwhile since decryption is CPU-intensive.
5387     + *
5388     + * Also use a high-priority workqueue to prioritize decryption work,
5389     + * which blocks reads from completing, over regular application tasks.
5390     + */
5391     fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
5392     - WQ_HIGHPRI, 0);
5393     + WQ_UNBOUND | WQ_HIGHPRI,
5394     + num_online_cpus());
5395     if (!fscrypt_read_workqueue)
5396     goto fail;
5397    
5398     diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
5399     index 9c9eafd6bd76..70266a3355dc 100644
5400     --- a/fs/ext4/balloc.c
5401     +++ b/fs/ext4/balloc.c
5402     @@ -379,6 +379,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
5403     return -EFSCORRUPTED;
5404    
5405     ext4_lock_group(sb, block_group);
5406     + if (buffer_verified(bh))
5407     + goto verified;
5408     if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
5409     desc, bh))) {
5410     ext4_unlock_group(sb, block_group);
5411     @@ -401,6 +403,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
5412     return -EFSCORRUPTED;
5413     }
5414     set_buffer_verified(bh);
5415     +verified:
5416     ext4_unlock_group(sb, block_group);
5417     return 0;
5418     }
5419     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
5420     index 95341bc2b3b7..2f46564d3fca 100644
5421     --- a/fs/ext4/ialloc.c
5422     +++ b/fs/ext4/ialloc.c
5423     @@ -91,6 +91,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
5424     return -EFSCORRUPTED;
5425    
5426     ext4_lock_group(sb, block_group);
5427     + if (buffer_verified(bh))
5428     + goto verified;
5429     blk = ext4_inode_bitmap(sb, desc);
5430     if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
5431     EXT4_INODES_PER_GROUP(sb) / 8)) {
5432     @@ -108,6 +110,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
5433     return -EFSBADCRC;
5434     }
5435     set_buffer_verified(bh);
5436     +verified:
5437     ext4_unlock_group(sb, block_group);
5438     return 0;
5439     }
5440     @@ -1394,7 +1397,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
5441     ext4_itable_unused_count(sb, gdp)),
5442     sbi->s_inodes_per_block);
5443    
5444     - if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
5445     + if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
5446     + ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
5447     + ext4_itable_unused_count(sb, gdp)) <
5448     + EXT4_FIRST_INO(sb)))) {
5449     ext4_error(sb, "Something is wrong with group %u: "
5450     "used itable blocks: %d; "
5451     "itable unused count: %u",
5452     diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
5453     index 7d498f4a3f90..b549cfd2d7d3 100644
5454     --- a/fs/ext4/inline.c
5455     +++ b/fs/ext4/inline.c
5456     @@ -688,6 +688,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
5457     goto convert;
5458     }
5459    
5460     + ret = ext4_journal_get_write_access(handle, iloc.bh);
5461     + if (ret)
5462     + goto out;
5463     +
5464     flags |= AOP_FLAG_NOFS;
5465    
5466     page = grab_cache_page_write_begin(mapping, 0, flags);
5467     @@ -716,7 +720,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
5468     out_up_read:
5469     up_read(&EXT4_I(inode)->xattr_sem);
5470     out:
5471     - if (handle)
5472     + if (handle && (ret != 1))
5473     ext4_journal_stop(handle);
5474     brelse(iloc.bh);
5475     return ret;
5476     @@ -758,6 +762,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
5477    
5478     ext4_write_unlock_xattr(inode, &no_expand);
5479     brelse(iloc.bh);
5480     + mark_inode_dirty(inode);
5481     out:
5482     return copied;
5483     }
5484     @@ -904,7 +909,6 @@ retry_journal:
5485     goto out;
5486     }
5487    
5488     -
5489     page = grab_cache_page_write_begin(mapping, 0, flags);
5490     if (!page) {
5491     ret = -ENOMEM;
5492     @@ -922,6 +926,9 @@ retry_journal:
5493     if (ret < 0)
5494     goto out_release_page;
5495     }
5496     + ret = ext4_journal_get_write_access(handle, iloc.bh);
5497     + if (ret)
5498     + goto out_release_page;
5499    
5500     up_read(&EXT4_I(inode)->xattr_sem);
5501     *pagep = page;
5502     @@ -942,7 +949,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
5503     unsigned len, unsigned copied,
5504     struct page *page)
5505     {
5506     - int i_size_changed = 0;
5507     int ret;
5508    
5509     ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
5510     @@ -960,10 +966,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
5511     * But it's important to update i_size while still holding page lock:
5512     * page writeout could otherwise come in and zero beyond i_size.
5513     */
5514     - if (pos+copied > inode->i_size) {
5515     + if (pos+copied > inode->i_size)
5516     i_size_write(inode, pos+copied);
5517     - i_size_changed = 1;
5518     - }
5519     unlock_page(page);
5520     put_page(page);
5521    
5522     @@ -973,8 +977,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
5523     * ordering of page lock and transaction start for journaling
5524     * filesystems.
5525     */
5526     - if (i_size_changed)
5527     - mark_inode_dirty(inode);
5528     + mark_inode_dirty(inode);
5529    
5530     return copied;
5531     }
5532     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
5533     index c2efe4d2ad87..f9baa59de0e2 100644
5534     --- a/fs/ext4/inode.c
5535     +++ b/fs/ext4/inode.c
5536     @@ -1388,9 +1388,10 @@ static int ext4_write_end(struct file *file,
5537     loff_t old_size = inode->i_size;
5538     int ret = 0, ret2;
5539     int i_size_changed = 0;
5540     + int inline_data = ext4_has_inline_data(inode);
5541    
5542     trace_ext4_write_end(inode, pos, len, copied);
5543     - if (ext4_has_inline_data(inode)) {
5544     + if (inline_data) {
5545     ret = ext4_write_inline_data_end(inode, pos, len,
5546     copied, page);
5547     if (ret < 0) {
5548     @@ -1418,7 +1419,7 @@ static int ext4_write_end(struct file *file,
5549     * ordering of page lock and transaction start for journaling
5550     * filesystems.
5551     */
5552     - if (i_size_changed)
5553     + if (i_size_changed || inline_data)
5554     ext4_mark_inode_dirty(handle, inode);
5555    
5556     if (pos + len > inode->i_size && ext4_can_truncate(inode))
5557     @@ -1492,6 +1493,7 @@ static int ext4_journalled_write_end(struct file *file,
5558     int partial = 0;
5559     unsigned from, to;
5560     int size_changed = 0;
5561     + int inline_data = ext4_has_inline_data(inode);
5562    
5563     trace_ext4_journalled_write_end(inode, pos, len, copied);
5564     from = pos & (PAGE_SIZE - 1);
5565     @@ -1499,7 +1501,7 @@ static int ext4_journalled_write_end(struct file *file,
5566    
5567     BUG_ON(!ext4_handle_valid(handle));
5568    
5569     - if (ext4_has_inline_data(inode)) {
5570     + if (inline_data) {
5571     ret = ext4_write_inline_data_end(inode, pos, len,
5572     copied, page);
5573     if (ret < 0) {
5574     @@ -1530,7 +1532,7 @@ static int ext4_journalled_write_end(struct file *file,
5575     if (old_size < pos)
5576     pagecache_isize_extended(inode, old_size, pos);
5577    
5578     - if (size_changed) {
5579     + if (size_changed || inline_data) {
5580     ret2 = ext4_mark_inode_dirty(handle, inode);
5581     if (!ret)
5582     ret = ret2;
5583     @@ -2027,11 +2029,7 @@ static int __ext4_journalled_writepage(struct page *page,
5584     }
5585    
5586     if (inline_data) {
5587     - BUFFER_TRACE(inode_bh, "get write access");
5588     - ret = ext4_journal_get_write_access(handle, inode_bh);
5589     -
5590     - err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
5591     -
5592     + ret = ext4_mark_inode_dirty(handle, inode);
5593     } else {
5594     ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
5595     do_journal_get_write_access);
5596     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
5597     index fc32a67a7a19..6b0c1ea95196 100644
5598     --- a/fs/ext4/super.c
5599     +++ b/fs/ext4/super.c
5600     @@ -3103,14 +3103,8 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
5601     if (!gdp)
5602     continue;
5603    
5604     - if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
5605     - continue;
5606     - if (group != 0)
5607     + if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
5608     break;
5609     - ext4_error(sb, "Inode table for bg 0 marked as "
5610     - "needing zeroing");
5611     - if (sb_rdonly(sb))
5612     - return ngroups;
5613     }
5614    
5615     return group;
5616     diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
5617     index 36b535207c88..85142e5df88b 100644
5618     --- a/fs/f2fs/data.c
5619     +++ b/fs/f2fs/data.c
5620     @@ -1601,7 +1601,13 @@ out:
5621    
5622     redirty_out:
5623     redirty_page_for_writepage(wbc, page);
5624     - if (!err)
5625     + /*
5626     + * pageout() in MM translates EAGAIN, so calls handle_write_error()
5627     + * -> mapping_set_error() -> set_bit(AS_EIO, ...).
5628     + * file_write_and_wait_range() will see EIO error, which is critical
5629     + * to return value of fsync() followed by atomic_write failure to user.
5630     + */
5631     + if (!err || wbc->for_reclaim)
5632     return AOP_WRITEPAGE_ACTIVATE;
5633     unlock_page(page);
5634     return err;
5635     diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
5636     index 72c6a9e9a9b4..87e654c53c31 100644
5637     --- a/fs/f2fs/file.c
5638     +++ b/fs/f2fs/file.c
5639     @@ -1630,6 +1630,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
5640    
5641     inode_lock(inode);
5642    
5643     + down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
5644     +
5645     if (f2fs_is_atomic_file(inode))
5646     goto out;
5647    
5648     @@ -1659,6 +1661,7 @@ inc_stat:
5649     stat_inc_atomic_write(inode);
5650     stat_update_max_atomic_write(inode);
5651     out:
5652     + up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
5653     inode_unlock(inode);
5654     mnt_drop_write_file(filp);
5655     return ret;
5656     @@ -1808,9 +1811,11 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
5657     if (get_user(in, (__u32 __user *)arg))
5658     return -EFAULT;
5659    
5660     - ret = mnt_want_write_file(filp);
5661     - if (ret)
5662     - return ret;
5663     + if (in != F2FS_GOING_DOWN_FULLSYNC) {
5664     + ret = mnt_want_write_file(filp);
5665     + if (ret)
5666     + return ret;
5667     + }
5668    
5669     switch (in) {
5670     case F2FS_GOING_DOWN_FULLSYNC:
5671     @@ -1838,7 +1843,8 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
5672     }
5673     f2fs_update_time(sbi, REQ_TIME);
5674     out:
5675     - mnt_drop_write_file(filp);
5676     + if (in != F2FS_GOING_DOWN_FULLSYNC)
5677     + mnt_drop_write_file(filp);
5678     return ret;
5679     }
5680    
5681     @@ -2490,7 +2496,9 @@ static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
5682     }
5683     f2fs_put_page(ipage, 1);
5684    
5685     - dquot_initialize(inode);
5686     + err = dquot_initialize(inode);
5687     + if (err)
5688     + goto out_unlock;
5689    
5690     transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
5691     if (!IS_ERR(transfer_to[PRJQUOTA])) {
5692     diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
5693     index e5673a9b2619..f2f897cd23c9 100644
5694     --- a/fs/f2fs/gc.c
5695     +++ b/fs/f2fs/gc.c
5696     @@ -768,9 +768,14 @@ retry:
5697     set_cold_data(page);
5698    
5699     err = do_write_data_page(&fio);
5700     - if (err == -ENOMEM && is_dirty) {
5701     - congestion_wait(BLK_RW_ASYNC, HZ/50);
5702     - goto retry;
5703     + if (err) {
5704     + clear_cold_data(page);
5705     + if (err == -ENOMEM) {
5706     + congestion_wait(BLK_RW_ASYNC, HZ/50);
5707     + goto retry;
5708     + }
5709     + if (is_dirty)
5710     + set_page_dirty(page);
5711     }
5712     }
5713     out:
5714     diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
5715     index 271516db8939..7c05bd4222b2 100644
5716     --- a/fs/f2fs/segment.c
5717     +++ b/fs/f2fs/segment.c
5718     @@ -225,6 +225,8 @@ static int __revoke_inmem_pages(struct inode *inode,
5719    
5720     lock_page(page);
5721    
5722     + f2fs_wait_on_page_writeback(page, DATA, true);
5723     +
5724     if (recover) {
5725     struct dnode_of_data dn;
5726     struct node_info ni;
5727     @@ -435,6 +437,9 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
5728    
5729     void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
5730     {
5731     + if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
5732     + return;
5733     +
5734     /* try to shrink extent cache when there is no enough memory */
5735     if (!available_free_memory(sbi, EXTENT_CACHE))
5736     f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
5737     diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
5738     index 933c3d529e65..400c00058bad 100644
5739     --- a/fs/f2fs/super.c
5740     +++ b/fs/f2fs/super.c
5741     @@ -2663,6 +2663,12 @@ static int __init init_f2fs_fs(void)
5742     {
5743     int err;
5744    
5745     + if (PAGE_SIZE != F2FS_BLKSIZE) {
5746     + printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
5747     + PAGE_SIZE, F2FS_BLKSIZE);
5748     + return -EINVAL;
5749     + }
5750     +
5751     f2fs_build_trace_ios();
5752    
5753     err = init_inodecache();
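The hunk above makes init_f2fs_fs() refuse to load when the running kernel's page size does not match the fixed f2fs block size. A minimal userspace sketch of the same early sanity check follows; F2FS_BLKSIZE is hard-coded to the usual 4096 here and sysconf() stands in for the kernel's PAGE_SIZE, both as illustrative assumptions.

#include <stdio.h>
#include <unistd.h>
#include <errno.h>

#define F2FS_BLKSIZE 4096  /* fixed on-disk block size assumed for the sketch */

/* Mirror of the init-time guard: bail out early when the runtime page
 * size cannot back the filesystem's block size one-to-one. */
static int check_blocksize(void)
{
    long page_size = sysconf(_SC_PAGESIZE);

    if (page_size != F2FS_BLKSIZE) {
        fprintf(stderr, "F2FS not supported on PAGE_SIZE(%ld) != %d\n",
                page_size, F2FS_BLKSIZE);
        return -EINVAL;
    }
    return 0;
}

int main(void)
{
    return check_blocksize() ? 1 : 0;
}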
5754     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
5755     index 928bbc397818..43fbf4495090 100644
5756     --- a/fs/nfs/nfs4proc.c
5757     +++ b/fs/nfs/nfs4proc.c
5758     @@ -745,6 +745,13 @@ static int nfs41_sequence_process(struct rpc_task *task,
5759     slot->slot_nr,
5760     slot->seq_nr);
5761     goto out_retry;
5762     + case -NFS4ERR_RETRY_UNCACHED_REP:
5763     + case -NFS4ERR_SEQ_FALSE_RETRY:
5764     + /*
5765     + * The server thinks we tried to replay a request.
5766     + * Retry the call after bumping the sequence ID.
5767     + */
5768     + goto retry_new_seq;
5769     case -NFS4ERR_BADSLOT:
5770     /*
5771     * The slot id we used was probably retired. Try again
5772     @@ -769,10 +776,6 @@ static int nfs41_sequence_process(struct rpc_task *task,
5773     goto retry_nowait;
5774     }
5775     goto session_recover;
5776     - case -NFS4ERR_SEQ_FALSE_RETRY:
5777     - if (interrupted)
5778     - goto retry_new_seq;
5779     - goto session_recover;
5780     default:
5781     /* Just update the slot sequence no. */
5782     slot->seq_done = 1;
5783     @@ -2692,7 +2695,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
5784     if (ret != 0)
5785     goto out;
5786    
5787     - state = nfs4_opendata_to_nfs4_state(opendata);
5788     + state = _nfs4_opendata_to_nfs4_state(opendata);
5789     ret = PTR_ERR(state);
5790     if (IS_ERR(state))
5791     goto out;
5792     @@ -2728,6 +2731,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
5793     nfs4_schedule_stateid_recovery(server, state);
5794     }
5795     out:
5796     + nfs4_sequence_free_slot(&opendata->o_res.seq_res);
5797     return ret;
5798     }
5799    
5800     diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
5801     index 7b34534210ce..96867fb159bf 100644
5802     --- a/fs/nfs/pnfs.c
5803     +++ b/fs/nfs/pnfs.c
5804     @@ -1126,7 +1126,7 @@ _pnfs_return_layout(struct inode *ino)
5805     LIST_HEAD(tmp_list);
5806     nfs4_stateid stateid;
5807     int status = 0;
5808     - bool send;
5809     + bool send, valid_layout;
5810    
5811     dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
5812    
5813     @@ -1147,6 +1147,7 @@ _pnfs_return_layout(struct inode *ino)
5814     goto out_put_layout_hdr;
5815     spin_lock(&ino->i_lock);
5816     }
5817     + valid_layout = pnfs_layout_is_valid(lo);
5818     pnfs_clear_layoutcommit(ino, &tmp_list);
5819     pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
5820    
5821     @@ -1160,7 +1161,8 @@ _pnfs_return_layout(struct inode *ino)
5822     }
5823    
5824     /* Don't send a LAYOUTRETURN if list was initially empty */
5825     - if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
5826     + if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
5827     + !valid_layout) {
5828     spin_unlock(&ino->i_lock);
5829     dprintk("NFS: %s no layout segments to return\n", __func__);
5830     goto out_put_layout_hdr;
5831     diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
5832     index f6588cc6816c..c1e923334012 100644
5833     --- a/fs/nfsd/nfs4xdr.c
5834     +++ b/fs/nfsd/nfs4xdr.c
5835     @@ -1586,6 +1586,8 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
5836     gdev->gd_maxcount = be32_to_cpup(p++);
5837     num = be32_to_cpup(p++);
5838     if (num) {
5839     + if (num > 1000)
5840     + goto xdr_error;
5841     READ_BUF(4 * num);
5842     gdev->gd_notify_types = be32_to_cpup(p++);
5843     for (i = 1; i < num; i++) {
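The nfsd4_decode_getdeviceinfo() change caps a client-supplied notification count before it is multiplied into a buffer read. Below is a small sketch of that defensive decoding pattern; decode_notify_types() and its buffer argument are hypothetical stand-ins, and the 1000 cap is taken from the hunk.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define GDEV_NOTIFY_MAX 1000 /* cap taken from the hunk above */

/* Validate a wire-supplied element count before using it to size a
 * read: reject absurd values instead of letting 4 * num grow huge. */
static int decode_notify_types(uint32_t num, size_t buf_remaining)
{
    if (num > GDEV_NOTIFY_MAX)
        return -1;                    /* xdr_error in the kernel code */
    if ((size_t)num * 4 > buf_remaining)
        return -1;                    /* not enough data on the wire */
    return 0;
}

int main(void)
{
    printf("%d\n", decode_notify_types(8, 64));       /* plausible -> 0 */
    printf("%d\n", decode_notify_types(100000, 64));  /* rejected -> -1 */
    return 0;
}

Checking the count against the cap before doing any arithmetic with it also keeps the multiplication itself from overflowing.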
5844     diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
5845     index b8f8d666e8d4..ba20393d60ef 100644
5846     --- a/fs/overlayfs/super.c
5847     +++ b/fs/overlayfs/super.c
5848     @@ -232,6 +232,7 @@ static void ovl_put_super(struct super_block *sb)
5849     kfree(ufs);
5850     }
5851    
5852     +/* Sync real dirty inodes in upper filesystem (if it exists) */
5853     static int ovl_sync_fs(struct super_block *sb, int wait)
5854     {
5855     struct ovl_fs *ufs = sb->s_fs_info;
5856     @@ -240,14 +241,24 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
5857    
5858     if (!ufs->upper_mnt)
5859     return 0;
5860     - upper_sb = ufs->upper_mnt->mnt_sb;
5861     - if (!upper_sb->s_op->sync_fs)
5862     +
5863     + /*
5864     + * If this is a sync(2) call or an emergency sync, all the super blocks
5865     + * will be iterated, including upper_sb, so no need to do anything.
5866     + *
5867     + * If this is a syncfs(2) call, then we do need to call
5868     + * sync_filesystem() on upper_sb, but enough if we do it when being
5869     + * called with wait == 1.
5870     + */
5871     + if (!wait)
5872     return 0;
5873    
5874     - /* real inodes have already been synced by sync_filesystem(ovl_sb) */
5875     + upper_sb = ufs->upper_mnt->mnt_sb;
5876     +
5877     down_read(&upper_sb->s_umount);
5878     - ret = upper_sb->s_op->sync_fs(upper_sb, wait);
5879     + ret = sync_filesystem(upper_sb);
5880     up_read(&upper_sb->s_umount);
5881     +
5882     return ret;
5883     }
5884    
5885     diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
5886     index 6f337fff38c4..519522d39bde 100644
5887     --- a/fs/proc/task_mmu.c
5888     +++ b/fs/proc/task_mmu.c
5889     @@ -1275,8 +1275,9 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
5890     if (pte_swp_soft_dirty(pte))
5891     flags |= PM_SOFT_DIRTY;
5892     entry = pte_to_swp_entry(pte);
5893     - frame = swp_type(entry) |
5894     - (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
5895     + if (pm->show_pfn)
5896     + frame = swp_type(entry) |
5897     + (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
5898     flags |= PM_SWAP;
5899     if (is_migration_entry(entry))
5900     page = migration_entry_to_page(entry);
5901     @@ -1327,11 +1328,14 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
5902     #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
5903     else if (is_swap_pmd(pmd)) {
5904     swp_entry_t entry = pmd_to_swp_entry(pmd);
5905     - unsigned long offset = swp_offset(entry);
5906     + unsigned long offset;
5907    
5908     - offset += (addr & ~PMD_MASK) >> PAGE_SHIFT;
5909     - frame = swp_type(entry) |
5910     - (offset << MAX_SWAPFILES_SHIFT);
5911     + if (pm->show_pfn) {
5912     + offset = swp_offset(entry) +
5913     + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
5914     + frame = swp_type(entry) |
5915     + (offset << MAX_SWAPFILES_SHIFT);
5916     + }
5917     flags |= PM_SWAP;
5918     if (pmd_swp_soft_dirty(pmd))
5919     flags |= PM_SOFT_DIRTY;
5920     @@ -1349,10 +1353,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
5921     err = add_to_pagemap(addr, &pme, pm);
5922     if (err)
5923     break;
5924     - if (pm->show_pfn && (flags & PM_PRESENT))
5925     - frame++;
5926     - else if (flags & PM_SWAP)
5927     - frame += (1 << MAX_SWAPFILES_SHIFT);
5928     + if (pm->show_pfn) {
5929     + if (flags & PM_PRESENT)
5930     + frame++;
5931     + else if (flags & PM_SWAP)
5932     + frame += (1 << MAX_SWAPFILES_SHIFT);
5933     + }
5934     }
5935     spin_unlock(ptl);
5936     return err;
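These fs/proc/task_mmu.c hunks stop leaking swap-entry details through /proc/pid/pagemap to readers that are not allowed to see physical frame numbers: the swap type and offset are only encoded into the frame field when pm->show_pfn is set. A standalone sketch of that gating, with MAX_SWAPFILES_SHIFT set to the value current kernels use:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define MAX_SWAPFILES_SHIFT 5   /* swp-entry encoding shift used by the kernel */

/* Sketch of the pagemap swap-entry encoding after the fix: the frame
 * field is only populated when the reader may see physical details
 * (show_pfn); unprivileged readers still get the PM_SWAP flag but a
 * zero frame. */
static uint64_t encode_swap_frame(bool show_pfn, uint64_t type,
                                  uint64_t offset)
{
    uint64_t frame = 0;

    if (show_pfn)
        frame = type | (offset << MAX_SWAPFILES_SHIFT);
    return frame;
}

int main(void)
{
    printf("privileged:   %#llx\n",
           (unsigned long long)encode_swap_frame(true, 1, 0x1234));
    printf("unprivileged: %#llx\n",
           (unsigned long long)encode_swap_frame(false, 1, 0x1234));
    return 0;
}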
5937     diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
5938     index 23813c078cc9..0839efa720b3 100644
5939     --- a/fs/squashfs/cache.c
5940     +++ b/fs/squashfs/cache.c
5941     @@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer,
5942    
5943     TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
5944    
5945     + if (unlikely(length < 0))
5946     + return -EIO;
5947     +
5948     while (length) {
5949     entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
5950     if (entry->error) {
5951     diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
5952     index 13d80947bf9e..fcff2e0487fe 100644
5953     --- a/fs/squashfs/file.c
5954     +++ b/fs/squashfs/file.c
5955     @@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n,
5956     }
5957    
5958     for (i = 0; i < blocks; i++) {
5959     - int size = le32_to_cpu(blist[i]);
5960     + int size = squashfs_block_size(blist[i]);
5961     + if (size < 0) {
5962     + err = size;
5963     + goto failure;
5964     + }
5965     block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
5966     }
5967     n -= blocks;
5968     @@ -367,7 +371,7 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
5969     sizeof(size));
5970     if (res < 0)
5971     return res;
5972     - return le32_to_cpu(size);
5973     + return squashfs_block_size(size);
5974     }
5975    
5976     /* Copy data into page cache */
5977     diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
5978     index 0ed6edbc5c71..86ad9a4b8c36 100644
5979     --- a/fs/squashfs/fragment.c
5980     +++ b/fs/squashfs/fragment.c
5981     @@ -61,9 +61,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
5982     return size;
5983    
5984     *fragment_block = le64_to_cpu(fragment_entry.start_block);
5985     - size = le32_to_cpu(fragment_entry.size);
5986     -
5987     - return size;
5988     + return squashfs_block_size(fragment_entry.size);
5989     }
5990    
5991    
5992     diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
5993     index 24d12fd14177..4e6853f084d0 100644
5994     --- a/fs/squashfs/squashfs_fs.h
5995     +++ b/fs/squashfs/squashfs_fs.h
5996     @@ -129,6 +129,12 @@
5997    
5998     #define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
5999    
6000     +static inline int squashfs_block_size(__le32 raw)
6001     +{
6002     + u32 size = le32_to_cpu(raw);
6003     + return (size >> 25) ? -EIO : size;
6004     +}
6005     +
6006     /*
6007     * Inode number ops. Inodes consist of a compressed block number, and an
6008     * uncompressed offset within that block
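The squashfs changes funnel every on-disk block length through the new squashfs_block_size() helper so that impossible values are rejected with -EIO instead of being passed on to the block reader. A userspace sketch of the same validation, assuming a little-endian host in place of le32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

/* Userspace sketch of squashfs_block_size(): an on-disk block length is
 * a bounded size plus a "compressed" flag bit, so any bit at or above
 * 1 << 25 can only come from a corrupted image. */
static int block_size_from_disk(uint32_t raw_le)
{
    uint32_t size = raw_le;  /* assume host is little-endian here */

    return (size >> 25) ? -EIO : (int)size;
}

int main(void)
{
    printf("%d\n", block_size_from_disk(0x00100200u)); /* plausible size */
    printf("%d\n", block_size_from_disk(0xff000000u)); /* corrupt -> -EIO */
    return 0;
}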
6009     diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
6010     index b17476a6909c..8fd7cb5297ab 100644
6011     --- a/include/drm/drm_dp_helper.h
6012     +++ b/include/drm/drm_dp_helper.h
6013     @@ -453,6 +453,7 @@
6014     # define DP_PSR_FRAME_CAPTURE (1 << 3)
6015     # define DP_PSR_SELECTIVE_UPDATE (1 << 4)
6016     # define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
6017     +# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */
6018    
6019     #define DP_ADAPTER_CTRL 0x1a0
6020     # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
6021     diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
6022     index 5e335b6203f4..31c865d1842e 100644
6023     --- a/include/linux/delayacct.h
6024     +++ b/include/linux/delayacct.h
6025     @@ -29,7 +29,7 @@
6026    
6027     #ifdef CONFIG_TASK_DELAY_ACCT
6028     struct task_delay_info {
6029     - spinlock_t lock;
6030     + raw_spinlock_t lock;
6031     unsigned int flags; /* Private per-task flags */
6032    
6033     /* For each stat XXX, add following, aligned appropriately
6034     @@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void)
6035    
6036     static inline void delayacct_blkio_end(struct task_struct *p)
6037     {
6038     - if (current->delays)
6039     + if (p->delays)
6040     __delayacct_blkio_end(p);
6041     delayacct_clear_flag(DELAYACCT_PF_BLKIO);
6042     }
6043     diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
6044     index 92f20832fd28..e8ca5e654277 100644
6045     --- a/include/linux/dma-iommu.h
6046     +++ b/include/linux/dma-iommu.h
6047     @@ -17,6 +17,7 @@
6048     #define __DMA_IOMMU_H
6049    
6050     #ifdef __KERNEL__
6051     +#include <linux/types.h>
6052     #include <asm/errno.h>
6053    
6054     #ifdef CONFIG_IOMMU_DMA
6055     diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
6056     index cdd66a5fbd5e..0a7abe8a407f 100644
6057     --- a/include/linux/mmc/sdio_ids.h
6058     +++ b/include/linux/mmc/sdio_ids.h
6059     @@ -35,6 +35,7 @@
6060     #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
6061     #define SDIO_DEVICE_ID_BROADCOM_4339 0x4339
6062     #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
6063     +#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
6064     #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
6065     #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
6066     #define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
6067     diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
6068     index bfb3531fd88a..7ad8ddf9ca8a 100644
6069     --- a/include/linux/netfilter/ipset/ip_set_timeout.h
6070     +++ b/include/linux/netfilter/ipset/ip_set_timeout.h
6071     @@ -65,8 +65,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
6072     static inline u32
6073     ip_set_timeout_get(const unsigned long *timeout)
6074     {
6075     - return *timeout == IPSET_ELEM_PERMANENT ? 0 :
6076     - jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
6077     + u32 t;
6078     +
6079     + if (*timeout == IPSET_ELEM_PERMANENT)
6080     + return 0;
6081     +
6082     + t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
6083     + /* Zero value in userspace means no timeout */
6084     + return t == 0 ? 1 : t;
6085     }
6086    
6087     #endif /* __KERNEL__ */
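The ip_set_timeout_get() rewrite keeps 0 reserved for permanent entries: a live timeout that rounds down to zero seconds is now reported as 1 so userspace does not mistake it for "no timeout". A simplified sketch, with the jiffies arithmetic replaced by plain millisecond values and IPSET_ELEM_PERMANENT assumed to be the usual zero sentinel:

#include <stdio.h>
#include <stdint.h>

#define IPSET_ELEM_PERMANENT 0UL   /* sentinel for "no timeout" */

/* Sketch of the fixed logic: 0 is reserved for permanent entries, so a
 * real timeout that truncates to 0 seconds is reported as 1 instead of
 * being misread as "permanent". */
static uint32_t timeout_get(unsigned long stored_ms, unsigned long now_ms)
{
    uint32_t t;

    if (stored_ms == IPSET_ELEM_PERMANENT)
        return 0;

    t = (uint32_t)((stored_ms - now_ms) / 1000);  /* ms -> whole seconds */
    return t == 0 ? 1 : t;
}

int main(void)
{
    printf("%u\n", (unsigned)timeout_get(0, 0));          /* permanent -> 0 */
    printf("%u\n", (unsigned)timeout_get(10500, 10200));  /* 300 ms left -> 1 */
    printf("%u\n", (unsigned)timeout_get(15000, 10000));  /* 5 s left -> 5 */
    return 0;
}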
6088     diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
6089     index df176d7c2b87..25602afd4844 100644
6090     --- a/include/linux/regulator/consumer.h
6091     +++ b/include/linux/regulator/consumer.h
6092     @@ -80,6 +80,7 @@ struct regmap;
6093     * These modes can be OR'ed together to make up a mask of valid register modes.
6094     */
6095    
6096     +#define REGULATOR_MODE_INVALID 0x0
6097     #define REGULATOR_MODE_FAST 0x1
6098     #define REGULATOR_MODE_NORMAL 0x2
6099     #define REGULATOR_MODE_IDLE 0x4
6100     diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
6101     index 74fc82d22310..868b60a79c0b 100644
6102     --- a/include/linux/serial_core.h
6103     +++ b/include/linux/serial_core.h
6104     @@ -348,7 +348,8 @@ struct earlycon_device {
6105     };
6106    
6107     struct earlycon_id {
6108     - char name[16];
6109     + char name[15];
6110     + char name_term; /* In case compiler didn't '\0' term name */
6111     char compatible[128];
6112     int (*setup)(struct earlycon_device *, const char *options);
6113     };
6114     diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
6115     index 34f053a150a9..cf2862bd134a 100644
6116     --- a/include/linux/thread_info.h
6117     +++ b/include/linux/thread_info.h
6118     @@ -43,11 +43,7 @@ enum {
6119     #define THREAD_ALIGN THREAD_SIZE
6120     #endif
6121    
6122     -#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
6123     -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
6124     -#else
6125     -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT)
6126     -#endif
6127     +#define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
6128    
6129     /*
6130     * flag set/clear/test wrappers
6131     diff --git a/include/net/tcp.h b/include/net/tcp.h
6132     index 3173dd12b8cc..686e33ea76e7 100644
6133     --- a/include/net/tcp.h
6134     +++ b/include/net/tcp.h
6135     @@ -372,7 +372,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
6136     struct pipe_inode_info *pipe, size_t len,
6137     unsigned int flags);
6138    
6139     -void tcp_enter_quickack_mode(struct sock *sk);
6140     +void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
6141     static inline void tcp_dec_quickack_mode(struct sock *sk,
6142     const unsigned int pkts)
6143     {
6144     diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
6145     index 44202ff897fd..f759e0918037 100644
6146     --- a/include/soc/tegra/mc.h
6147     +++ b/include/soc/tegra/mc.h
6148     @@ -99,6 +99,8 @@ struct tegra_mc_soc {
6149     u8 client_id_mask;
6150    
6151     const struct tegra_smmu_soc *smmu;
6152     +
6153     + u32 intmask;
6154     };
6155    
6156     struct tegra_mc {
6157     diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
6158     index 69c37ecbff7e..f3c4b46e39d8 100644
6159     --- a/include/uapi/sound/asoc.h
6160     +++ b/include/uapi/sound/asoc.h
6161     @@ -139,6 +139,11 @@
6162     #define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS (1 << 1)
6163     #define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2)
6164    
6165     +/* DAI clock gating */
6166     +#define SND_SOC_TPLG_DAI_CLK_GATE_UNDEFINED 0
6167     +#define SND_SOC_TPLG_DAI_CLK_GATE_GATED 1
6168     +#define SND_SOC_TPLG_DAI_CLK_GATE_CONT 2
6169     +
6170     /* DAI physical PCM data formats.
6171     * Add new formats to the end of the list.
6172     */
6173     @@ -160,6 +165,18 @@
6174     #define SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2)
6175     #define SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP (1 << 3)
6176    
6177     +/* DAI topology BCLK parameter
6178     + * For backwards compatibility, by default the codec is bclk master
6179     + */
6180     +#define SND_SOC_TPLG_BCLK_CM 0 /* codec is bclk master */
6181     +#define SND_SOC_TPLG_BCLK_CS 1 /* codec is bclk slave */
6182     +
6183     +/* DAI topology FSYNC parameter
6184     + * For backwards compatibility, by default the codec is fsync master
6185     + */
6186     +#define SND_SOC_TPLG_FSYNC_CM 0 /* codec is fsync master */
6187     +#define SND_SOC_TPLG_FSYNC_CS 1 /* codec is fsync slave */
6188     +
6189     /*
6190     * Block Header.
6191     * This header precedes all object and object arrays below.
6192     @@ -312,11 +329,11 @@ struct snd_soc_tplg_hw_config {
6193     __le32 size; /* in bytes of this structure */
6194     __le32 id; /* unique ID - - used to match */
6195     __le32 fmt; /* SND_SOC_DAI_FORMAT_ format value */
6196     - __u8 clock_gated; /* 1 if clock can be gated to save power */
6197     + __u8 clock_gated; /* SND_SOC_TPLG_DAI_CLK_GATE_ value */
6198     __u8 invert_bclk; /* 1 for inverted BCLK, 0 for normal */
6199     __u8 invert_fsync; /* 1 for inverted frame clock, 0 for normal */
6200     - __u8 bclk_master; /* 1 for master of BCLK, 0 for slave */
6201     - __u8 fsync_master; /* 1 for master of FSYNC, 0 for slave */
6202     + __u8 bclk_master; /* SND_SOC_TPLG_BCLK_ value */
6203     + __u8 fsync_master; /* SND_SOC_TPLG_FSYNC_ value */
6204     __u8 mclk_direction; /* 0 for input, 1 for output */
6205     __le16 reserved; /* for 32bit alignment */
6206     __le32 mclk_rate; /* MCLK or SYSCLK frequency in Hz */
6207     diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
6208     index 0b0aa5854dac..8dd4063647c2 100644
6209     --- a/kernel/auditfilter.c
6210     +++ b/kernel/auditfilter.c
6211     @@ -407,7 +407,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
6212     return -EINVAL;
6213     break;
6214     case AUDIT_EXE:
6215     - if (f->op != Audit_equal)
6216     + if (f->op != Audit_not_equal && f->op != Audit_equal)
6217     return -EINVAL;
6218     if (entry->rule.listnr != AUDIT_FILTER_EXIT)
6219     return -EINVAL;
6220     diff --git a/kernel/auditsc.c b/kernel/auditsc.c
6221     index ecc23e25c9eb..677053a2fb57 100644
6222     --- a/kernel/auditsc.c
6223     +++ b/kernel/auditsc.c
6224     @@ -471,6 +471,8 @@ static int audit_filter_rules(struct task_struct *tsk,
6225     break;
6226     case AUDIT_EXE:
6227     result = audit_exe_compare(tsk, rule->exe);
6228     + if (f->op == Audit_not_equal)
6229     + result = !result;
6230     break;
6231     case AUDIT_UID:
6232     result = audit_uid_comparator(cred->uid, f->op, f->uid);
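Together, the auditfilter.c and auditsc.c hunks let AUDIT_EXE rules use the not-equal operator; the comparison itself is untouched and only its result is inverted. A sketch of the rule evaluation, modelling audit_exe_compare() as a simple string comparison for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum audit_op { AUDIT_EQUAL, AUDIT_NOT_EQUAL };

/* Sketch of the rule-evaluation change: the exe comparison is unchanged,
 * and "!=" is implemented by inverting its result. */
static bool exe_rule_matches(enum audit_op op, const char *task_exe,
                             const char *rule_exe)
{
    bool result = strcmp(task_exe, rule_exe) == 0;

    if (op == AUDIT_NOT_EQUAL)
        result = !result;
    return result;
}

int main(void)
{
    printf("%d\n", exe_rule_matches(AUDIT_EQUAL, "/usr/bin/ssh", "/usr/bin/ssh"));
    printf("%d\n", exe_rule_matches(AUDIT_NOT_EQUAL, "/usr/bin/ssh", "/usr/bin/scp"));
    return 0;
}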
6233     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
6234     index 3ceb269c0ebd..450e2cd31ed6 100644
6235     --- a/kernel/bpf/verifier.c
6236     +++ b/kernel/bpf/verifier.c
6237     @@ -4110,7 +4110,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
6238     /* hold the map. If the program is rejected by verifier,
6239     * the map will be released by release_maps() or it
6240     * will be used by the valid program until it's unloaded
6241     - * and all maps are released in free_bpf_prog_info()
6242     + * and all maps are released in free_used_maps()
6243     */
6244     map = bpf_map_inc(map, false);
6245     if (IS_ERR(map)) {
6246     @@ -4623,7 +4623,7 @@ free_log_buf:
6247     vfree(log_buf);
6248     if (!env->prog->aux->used_maps)
6249     /* if we didn't copy map pointers into bpf_prog_info, release
6250     - * them now. Otherwise free_bpf_prog_info() will release them.
6251     + * them now. Otherwise free_used_maps() will release them.
6252     */
6253     release_maps(env);
6254     *prog = env->prog;
6255     diff --git a/kernel/delayacct.c b/kernel/delayacct.c
6256     index e2764d767f18..ca8ac2824f0b 100644
6257     --- a/kernel/delayacct.c
6258     +++ b/kernel/delayacct.c
6259     @@ -44,23 +44,24 @@ void __delayacct_tsk_init(struct task_struct *tsk)
6260     {
6261     tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
6262     if (tsk->delays)
6263     - spin_lock_init(&tsk->delays->lock);
6264     + raw_spin_lock_init(&tsk->delays->lock);
6265     }
6266    
6267     /*
6268     * Finish delay accounting for a statistic using its timestamps (@start),
6269     * accumulator (@total) and @count
6270     */
6271     -static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count)
6272     +static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total,
6273     + u32 *count)
6274     {
6275     s64 ns = ktime_get_ns() - *start;
6276     unsigned long flags;
6277    
6278     if (ns > 0) {
6279     - spin_lock_irqsave(lock, flags);
6280     + raw_spin_lock_irqsave(lock, flags);
6281     *total += ns;
6282     (*count)++;
6283     - spin_unlock_irqrestore(lock, flags);
6284     + raw_spin_unlock_irqrestore(lock, flags);
6285     }
6286     }
6287    
6288     @@ -127,7 +128,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
6289    
6290     /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */
6291    
6292     - spin_lock_irqsave(&tsk->delays->lock, flags);
6293     + raw_spin_lock_irqsave(&tsk->delays->lock, flags);
6294     tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
6295     d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
6296     tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
6297     @@ -137,7 +138,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
6298     d->blkio_count += tsk->delays->blkio_count;
6299     d->swapin_count += tsk->delays->swapin_count;
6300     d->freepages_count += tsk->delays->freepages_count;
6301     - spin_unlock_irqrestore(&tsk->delays->lock, flags);
6302     + raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
6303    
6304     return 0;
6305     }
6306     @@ -147,10 +148,10 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
6307     __u64 ret;
6308     unsigned long flags;
6309    
6310     - spin_lock_irqsave(&tsk->delays->lock, flags);
6311     + raw_spin_lock_irqsave(&tsk->delays->lock, flags);
6312     ret = nsec_to_clock_t(tsk->delays->blkio_delay +
6313     tsk->delays->swapin_delay);
6314     - spin_unlock_irqrestore(&tsk->delays->lock, flags);
6315     + raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
6316     return ret;
6317     }
6318    
6319     diff --git a/kernel/fork.c b/kernel/fork.c
6320     index 98c91bd341b4..91907a3701ce 100644
6321     --- a/kernel/fork.c
6322     +++ b/kernel/fork.c
6323     @@ -215,10 +215,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
6324     if (!s)
6325     continue;
6326    
6327     -#ifdef CONFIG_DEBUG_KMEMLEAK
6328     /* Clear stale pointers from reused stack. */
6329     memset(s->addr, 0, THREAD_SIZE);
6330     -#endif
6331     +
6332     tsk->stack_vm_area = s;
6333     return s->addr;
6334     }
6335     diff --git a/kernel/hung_task.c b/kernel/hung_task.c
6336     index 751593ed7c0b..32b479468e4d 100644
6337     --- a/kernel/hung_task.c
6338     +++ b/kernel/hung_task.c
6339     @@ -44,6 +44,7 @@ int __read_mostly sysctl_hung_task_warnings = 10;
6340    
6341     static int __read_mostly did_panic;
6342     static bool hung_task_show_lock;
6343     +static bool hung_task_call_panic;
6344    
6345     static struct task_struct *watchdog_task;
6346    
6347     @@ -127,10 +128,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
6348     touch_nmi_watchdog();
6349    
6350     if (sysctl_hung_task_panic) {
6351     - if (hung_task_show_lock)
6352     - debug_show_all_locks();
6353     - trigger_all_cpu_backtrace();
6354     - panic("hung_task: blocked tasks");
6355     + hung_task_show_lock = true;
6356     + hung_task_call_panic = true;
6357     }
6358     }
6359    
6360     @@ -193,6 +192,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
6361     rcu_read_unlock();
6362     if (hung_task_show_lock)
6363     debug_show_all_locks();
6364     + if (hung_task_call_panic) {
6365     + trigger_all_cpu_backtrace();
6366     + panic("hung_task: blocked tasks");
6367     + }
6368     }
6369    
6370     static long hung_timeout_jiffies(unsigned long last_checked,
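The hung_task.c rework only records that a panic is required while walking tasks under RCU, and performs the backtrace dump and panic after the walk has finished. A toy sketch of moving heavy work outside the locked section, using a pthread rwlock in place of rcu_read_lock():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t tasklist = PTHREAD_RWLOCK_INITIALIZER;

/* Sketch of the restructuring: while iterating under the (read) lock we
 * only note that a panic is warranted; the expensive work runs after
 * the lock is dropped. */
static void check_tasks(bool found_hung, bool panic_on_hung)
{
    bool call_panic = false;

    pthread_rwlock_rdlock(&tasklist);
    if (found_hung && panic_on_hung)
        call_panic = true;          /* record it, do nothing heavy here */
    pthread_rwlock_unlock(&tasklist);

    if (call_panic)
        printf("would dump all backtraces and panic now\n");
}

int main(void)
{
    check_tasks(true, true);
    check_tasks(true, false);
    return 0;
}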
6371     diff --git a/kernel/kcov.c b/kernel/kcov.c
6372     index b11ef6e51f7e..f1e060b04ef6 100644
6373     --- a/kernel/kcov.c
6374     +++ b/kernel/kcov.c
6375     @@ -108,7 +108,8 @@ static void kcov_put(struct kcov *kcov)
6376    
6377     void kcov_task_init(struct task_struct *t)
6378     {
6379     - t->kcov_mode = KCOV_MODE_DISABLED;
6380     + WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
6381     + barrier();
6382     t->kcov_size = 0;
6383     t->kcov_area = NULL;
6384     t->kcov = NULL;
6385     diff --git a/kernel/kthread.c b/kernel/kthread.c
6386     index 1ef8f3a5b072..4e6d85b63201 100644
6387     --- a/kernel/kthread.c
6388     +++ b/kernel/kthread.c
6389     @@ -311,8 +311,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
6390     task = create->result;
6391     if (!IS_ERR(task)) {
6392     static const struct sched_param param = { .sched_priority = 0 };
6393     + char name[TASK_COMM_LEN];
6394    
6395     - vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
6396     + /*
6397     + * task is already visible to other tasks, so updating
6398     + * COMM must be protected.
6399     + */
6400     + vsnprintf(name, sizeof(name), namefmt, args);
6401     + set_task_comm(task, name);
6402     /*
6403     * root may have changed our (kthreadd's) priority or CPU mask.
6404     * The kernel thread should not inherit these properties.
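The kthread change formats the thread name into a private buffer and then publishes it through set_task_comm(), because the new task is already visible and its comm field must not be rewritten in place. A userspace sketch of the same pattern using a mutex-protected setter (the lock and field names here are illustrative):

#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN 16   /* same bound the kernel uses for comm */

static char comm[TASK_COMM_LEN];
static pthread_mutex_t comm_lock = PTHREAD_MUTEX_INITIALIZER;

/* Publish the name through one locked setter, mirroring set_task_comm(). */
static void set_comm(const char *name)
{
    pthread_mutex_lock(&comm_lock);
    strncpy(comm, name, sizeof(comm) - 1);
    comm[sizeof(comm) - 1] = '\0';
    pthread_mutex_unlock(&comm_lock);
}

/* Format into a private buffer first, then publish atomically, instead
 * of formatting straight into the shared field while it can be read. */
static void name_thread(const char *namefmt, ...)
{
    char name[TASK_COMM_LEN];
    va_list args;

    va_start(args, namefmt);
    vsnprintf(name, sizeof(name), namefmt, args);
    va_end(args);

    set_comm(name);
}

int main(void)
{
    name_thread("worker/%d", 3);
    printf("%s\n", comm);
    return 0;
}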
6405     diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
6406     index 0685c4499431..c0bc2c89697a 100644
6407     --- a/kernel/power/suspend.c
6408     +++ b/kernel/power/suspend.c
6409     @@ -60,7 +60,7 @@ static const struct platform_s2idle_ops *s2idle_ops;
6410     static DECLARE_WAIT_QUEUE_HEAD(s2idle_wait_head);
6411    
6412     enum s2idle_states __read_mostly s2idle_state;
6413     -static DEFINE_SPINLOCK(s2idle_lock);
6414     +static DEFINE_RAW_SPINLOCK(s2idle_lock);
6415    
6416     void s2idle_set_ops(const struct platform_s2idle_ops *ops)
6417     {
6418     @@ -78,12 +78,12 @@ static void s2idle_enter(void)
6419     {
6420     trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);
6421    
6422     - spin_lock_irq(&s2idle_lock);
6423     + raw_spin_lock_irq(&s2idle_lock);
6424     if (pm_wakeup_pending())
6425     goto out;
6426    
6427     s2idle_state = S2IDLE_STATE_ENTER;
6428     - spin_unlock_irq(&s2idle_lock);
6429     + raw_spin_unlock_irq(&s2idle_lock);
6430    
6431     get_online_cpus();
6432     cpuidle_resume();
6433     @@ -97,11 +97,11 @@ static void s2idle_enter(void)
6434     cpuidle_pause();
6435     put_online_cpus();
6436    
6437     - spin_lock_irq(&s2idle_lock);
6438     + raw_spin_lock_irq(&s2idle_lock);
6439    
6440     out:
6441     s2idle_state = S2IDLE_STATE_NONE;
6442     - spin_unlock_irq(&s2idle_lock);
6443     + raw_spin_unlock_irq(&s2idle_lock);
6444    
6445     trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, false);
6446     }
6447     @@ -156,12 +156,12 @@ void s2idle_wake(void)
6448     {
6449     unsigned long flags;
6450    
6451     - spin_lock_irqsave(&s2idle_lock, flags);
6452     + raw_spin_lock_irqsave(&s2idle_lock, flags);
6453     if (s2idle_state > S2IDLE_STATE_NONE) {
6454     s2idle_state = S2IDLE_STATE_WAKE;
6455     wake_up(&s2idle_wait_head);
6456     }
6457     - spin_unlock_irqrestore(&s2idle_lock, flags);
6458     + raw_spin_unlock_irqrestore(&s2idle_lock, flags);
6459     }
6460     EXPORT_SYMBOL_GPL(s2idle_wake);
6461    
6462     diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
6463     index d989cc238198..64825b2df3a5 100644
6464     --- a/kernel/printk/printk_safe.c
6465     +++ b/kernel/printk/printk_safe.c
6466     @@ -284,7 +284,7 @@ void printk_safe_flush_on_panic(void)
6467     * Make sure that we could access the main ring buffer.
6468     * Do not risk a double release when more CPUs are up.
6469     */
6470     - if (in_nmi() && raw_spin_is_locked(&logbuf_lock)) {
6471     + if (raw_spin_is_locked(&logbuf_lock)) {
6472     if (num_online_cpus() > 1)
6473     return;
6474    
6475     diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
6476     index 2f6fa95de2d8..1ff523dae6e2 100644
6477     --- a/kernel/stop_machine.c
6478     +++ b/kernel/stop_machine.c
6479     @@ -37,7 +37,7 @@ struct cpu_stop_done {
6480     struct cpu_stopper {
6481     struct task_struct *thread;
6482    
6483     - spinlock_t lock;
6484     + raw_spinlock_t lock;
6485     bool enabled; /* is this stopper enabled? */
6486     struct list_head works; /* list of pending works */
6487    
6488     @@ -81,13 +81,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
6489     unsigned long flags;
6490     bool enabled;
6491    
6492     - spin_lock_irqsave(&stopper->lock, flags);
6493     + raw_spin_lock_irqsave(&stopper->lock, flags);
6494     enabled = stopper->enabled;
6495     if (enabled)
6496     __cpu_stop_queue_work(stopper, work, &wakeq);
6497     else if (work->done)
6498     cpu_stop_signal_done(work->done);
6499     - spin_unlock_irqrestore(&stopper->lock, flags);
6500     + raw_spin_unlock_irqrestore(&stopper->lock, flags);
6501    
6502     wake_up_q(&wakeq);
6503    
6504     @@ -237,8 +237,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
6505     DEFINE_WAKE_Q(wakeq);
6506     int err;
6507     retry:
6508     - spin_lock_irq(&stopper1->lock);
6509     - spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
6510     + raw_spin_lock_irq(&stopper1->lock);
6511     + raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
6512    
6513     err = -ENOENT;
6514     if (!stopper1->enabled || !stopper2->enabled)
6515     @@ -261,8 +261,8 @@ retry:
6516     __cpu_stop_queue_work(stopper1, work1, &wakeq);
6517     __cpu_stop_queue_work(stopper2, work2, &wakeq);
6518     unlock:
6519     - spin_unlock(&stopper2->lock);
6520     - spin_unlock_irq(&stopper1->lock);
6521     + raw_spin_unlock(&stopper2->lock);
6522     + raw_spin_unlock_irq(&stopper1->lock);
6523    
6524     if (unlikely(err == -EDEADLK)) {
6525     while (stop_cpus_in_progress)
6526     @@ -461,9 +461,9 @@ static int cpu_stop_should_run(unsigned int cpu)
6527     unsigned long flags;
6528     int run;
6529    
6530     - spin_lock_irqsave(&stopper->lock, flags);
6531     + raw_spin_lock_irqsave(&stopper->lock, flags);
6532     run = !list_empty(&stopper->works);
6533     - spin_unlock_irqrestore(&stopper->lock, flags);
6534     + raw_spin_unlock_irqrestore(&stopper->lock, flags);
6535     return run;
6536     }
6537    
6538     @@ -474,13 +474,13 @@ static void cpu_stopper_thread(unsigned int cpu)
6539    
6540     repeat:
6541     work = NULL;
6542     - spin_lock_irq(&stopper->lock);
6543     + raw_spin_lock_irq(&stopper->lock);
6544     if (!list_empty(&stopper->works)) {
6545     work = list_first_entry(&stopper->works,
6546     struct cpu_stop_work, list);
6547     list_del_init(&work->list);
6548     }
6549     - spin_unlock_irq(&stopper->lock);
6550     + raw_spin_unlock_irq(&stopper->lock);
6551    
6552     if (work) {
6553     cpu_stop_fn_t fn = work->fn;
6554     @@ -554,7 +554,7 @@ static int __init cpu_stop_init(void)
6555     for_each_possible_cpu(cpu) {
6556     struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
6557    
6558     - spin_lock_init(&stopper->lock);
6559     + raw_spin_lock_init(&stopper->lock);
6560     INIT_LIST_HEAD(&stopper->works);
6561     }
6562    
6563     diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
6564     index b413fab7d75b..43254c5e7e16 100644
6565     --- a/kernel/trace/trace_events_trigger.c
6566     +++ b/kernel/trace/trace_events_trigger.c
6567     @@ -680,6 +680,8 @@ event_trigger_callback(struct event_command *cmd_ops,
6568     goto out_free;
6569    
6570     out_reg:
6571     + /* Up the trigger_data count to make sure reg doesn't free it on failure */
6572     + event_trigger_init(trigger_ops, trigger_data);
6573     ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
6574     /*
6575     * The above returns on success the # of functions enabled,
6576     @@ -687,11 +689,13 @@ event_trigger_callback(struct event_command *cmd_ops,
6577     * Consider no functions a failure too.
6578     */
6579     if (!ret) {
6580     + cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
6581     ret = -ENOENT;
6582     - goto out_free;
6583     - } else if (ret < 0)
6584     - goto out_free;
6585     - ret = 0;
6586     + } else if (ret > 0)
6587     + ret = 0;
6588     +
6589     + /* Down the counter of trigger_data or free it if not used anymore */
6590     + event_trigger_free(trigger_ops, trigger_data);
6591     out:
6592     return ret;
6593    
6594     @@ -1392,6 +1396,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
6595     goto out;
6596     }
6597    
6598     + /* Up the trigger_data count to make sure nothing frees it on failure */
6599     + event_trigger_init(trigger_ops, trigger_data);
6600     +
6601     if (trigger) {
6602     number = strsep(&trigger, ":");
6603    
6604     @@ -1442,6 +1449,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
6605     goto out_disable;
6606     /* Just return zero, not the number of enabled functions */
6607     ret = 0;
6608     + event_trigger_free(trigger_ops, trigger_data);
6609     out:
6610     return ret;
6611    
6612     @@ -1452,7 +1460,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
6613     out_free:
6614     if (cmd_ops->set_filter)
6615     cmd_ops->set_filter(NULL, trigger_data, NULL);
6616     - kfree(trigger_data);
6617     + event_trigger_free(trigger_ops, trigger_data);
6618     kfree(enable_data);
6619     goto out;
6620     }
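The trace_events_trigger.c hunks pin trigger_data with an extra reference before calling cmd_ops->reg(), so a failing registration that drops its own reference cannot free data the caller still needs; the caller releases its pin afterwards. A minimal refcounting sketch of that get-before-call / put-after pattern (all names are stand-ins, not the tracing API):

#include <stdio.h>
#include <stdlib.h>

/* Minimal refcounted object standing in for trigger_data. */
struct trigger_data {
    int refcount;
};

static void trigger_get(struct trigger_data *d) { d->refcount++; }

static void trigger_put(struct trigger_data *d)
{
    if (--d->refcount == 0) {
        printf("freeing trigger_data\n");
        free(d);
    }
}

/* A register callback that may fail and, on failure, drops the reference
 * it was handed -- the situation the hunk above has to survive. */
static int register_trigger(struct trigger_data *d, int fail)
{
    if (fail) {
        trigger_put(d);
        return -1;
    }
    return 1; /* number of things enabled */
}

int main(void)
{
    struct trigger_data *d = calloc(1, sizeof(*d));

    if (!d)
        return 1;
    d->refcount = 1;

    trigger_get(d);                     /* pin it across registration */
    int ret = register_trigger(d, 1);   /* registration fails */
    if (ret <= 0)
        printf("registration failed, data still valid: refcount=%d\n",
               d->refcount);
    trigger_put(d);                     /* drop our pin; frees when unused */
    return 0;
}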
6621     diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
6622     index f8d3bd974bcc..ea20274a105a 100644
6623     --- a/kernel/trace/trace_kprobe.c
6624     +++ b/kernel/trace/trace_kprobe.c
6625     @@ -376,11 +376,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
6626     static int
6627     enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
6628     {
6629     + struct event_file_link *link = NULL;
6630     int ret = 0;
6631    
6632     if (file) {
6633     - struct event_file_link *link;
6634     -
6635     link = kmalloc(sizeof(*link), GFP_KERNEL);
6636     if (!link) {
6637     ret = -ENOMEM;
6638     @@ -400,6 +399,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
6639     else
6640     ret = enable_kprobe(&tk->rp.kp);
6641     }
6642     +
6643     + if (ret) {
6644     + if (file) {
6645     + /* Notice the if is true on not WARN() */
6646     + if (!WARN_ON_ONCE(!link))
6647     + list_del_rcu(&link->list);
6648     + kfree(link);
6649     + tk->tp.flags &= ~TP_FLAG_TRACE;
6650     + } else {
6651     + tk->tp.flags &= ~TP_FLAG_PROFILE;
6652     + }
6653     + }
6654     out:
6655     return ret;
6656     }
6657     diff --git a/mm/slub.c b/mm/slub.c
6658     index c38e71cea6d3..10e54c4acd19 100644
6659     --- a/mm/slub.c
6660     +++ b/mm/slub.c
6661     @@ -708,7 +708,7 @@ void object_err(struct kmem_cache *s, struct page *page,
6662     print_trailer(s, page, object);
6663     }
6664    
6665     -static void slab_err(struct kmem_cache *s, struct page *page,
6666     +static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
6667     const char *fmt, ...)
6668     {
6669     va_list args;
6670     diff --git a/mm/vmalloc.c b/mm/vmalloc.c
6671     index ebff729cc956..9ff21a12ea00 100644
6672     --- a/mm/vmalloc.c
6673     +++ b/mm/vmalloc.c
6674     @@ -1519,7 +1519,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
6675     addr))
6676     return;
6677    
6678     - area = remove_vm_area(addr);
6679     + area = find_vmap_area((unsigned long)addr)->vm;
6680     if (unlikely(!area)) {
6681     WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
6682     addr);
6683     @@ -1529,6 +1529,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
6684     debug_check_no_locks_freed(addr, get_vm_area_size(area));
6685     debug_check_no_obj_freed(addr, get_vm_area_size(area));
6686    
6687     + remove_vm_area(addr);
6688     if (deallocate_pages) {
6689     int i;
6690    
6691     diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
6692     index 67eebcb113f3..5bbdd05d0cd3 100644
6693     --- a/net/ipv4/fib_frontend.c
6694     +++ b/net/ipv4/fib_frontend.c
6695     @@ -282,19 +282,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
6696     return ip_hdr(skb)->daddr;
6697    
6698     in_dev = __in_dev_get_rcu(dev);
6699     - BUG_ON(!in_dev);
6700    
6701     net = dev_net(dev);
6702    
6703     scope = RT_SCOPE_UNIVERSE;
6704     if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
6705     + bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
6706     struct flowi4 fl4 = {
6707     .flowi4_iif = LOOPBACK_IFINDEX,
6708     .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
6709     .daddr = ip_hdr(skb)->saddr,
6710     .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
6711     .flowi4_scope = scope,
6712     - .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
6713     + .flowi4_mark = vmark ? skb->mark : 0,
6714     };
6715     if (!fib_lookup(net, &fl4, &res, 0))
6716     return FIB_RES_PREFSRC(net, res);
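The fib_frontend.c hunk drops the BUG_ON(!in_dev) and instead treats a missing in_device as "no src_vmark policy", so the flow mark falls back to 0 rather than crashing. A sketch with a stripped-down in_device structure:

#include <stdbool.h>
#include <stdio.h>

struct in_device {
    bool src_vmark;
};

/* Sketch of the defensive rewrite: a NULL in_device simply means the
 * source-vmark policy is off, so the lookup key's mark becomes 0. */
static unsigned int flow_mark(const struct in_device *in_dev,
                              unsigned int skb_mark)
{
    bool vmark = in_dev && in_dev->src_vmark;

    return vmark ? skb_mark : 0;
}

int main(void)
{
    struct in_device dev = { .src_vmark = true };

    printf("%u\n", flow_mark(&dev, 42)); /* policy active -> keep mark */
    printf("%u\n", flow_mark(NULL, 42)); /* no in_device -> 0, no crash */
    return 0;
}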
6717     diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
6718     index abdebca848c9..f0782c91514c 100644
6719     --- a/net/ipv4/ipconfig.c
6720     +++ b/net/ipv4/ipconfig.c
6721     @@ -781,6 +781,11 @@ static void __init ic_bootp_init_ext(u8 *e)
6722     */
6723     static inline void __init ic_bootp_init(void)
6724     {
6725     + /* Re-initialise all name servers to NONE, in case any were set via the
6726     + * "ip=" or "nfsaddrs=" kernel command line parameters: any IP addresses
6727     + * specified there will already have been decoded but are no longer
6728     + * needed
6729     + */
6730     ic_nameservers_predef();
6731    
6732     dev_add_pack(&bootp_packet_type);
6733     @@ -1402,6 +1407,13 @@ static int __init ip_auto_config(void)
6734     int err;
6735     unsigned int i;
6736    
6737     + /* Initialise all name servers to NONE (but only if the "ip=" or
6738     + * "nfsaddrs=" kernel command line parameters weren't decoded, otherwise
6739     + * we'll overwrite the IP addresses specified there)
6740     + */
6741     + if (ic_set_manually == 0)
6742     + ic_nameservers_predef();
6743     +
6744     #ifdef CONFIG_PROC_FS
6745     proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops);
6746     #endif /* CONFIG_PROC_FS */
6747     @@ -1622,6 +1634,7 @@ static int __init ip_auto_config_setup(char *addrs)
6748     return 1;
6749     }
6750    
6751     + /* Initialise all name servers to NONE */
6752     ic_nameservers_predef();
6753    
6754     /* Parse string for static IP assignment. */
6755     diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
6756     index 9a0b952dd09b..06f247ca9197 100644
6757     --- a/net/ipv4/tcp_bbr.c
6758     +++ b/net/ipv4/tcp_bbr.c
6759     @@ -353,6 +353,10 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
6760     /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
6761     cwnd = (cwnd + 1) & ~1U;
6762    
6763     + /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
6764     + if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
6765     + cwnd += 2;
6766     +
6767     return cwnd;
6768     }
6769    
6770     diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
6771     index c78fb53988a1..1a9b88c8cf72 100644
6772     --- a/net/ipv4/tcp_dctcp.c
6773     +++ b/net/ipv4/tcp_dctcp.c
6774     @@ -138,7 +138,7 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
6775     */
6776     if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
6777     __tcp_send_ack(sk, ca->prior_rcv_nxt);
6778     - tcp_enter_quickack_mode(sk);
6779     + tcp_enter_quickack_mode(sk, 1);
6780     }
6781    
6782     ca->prior_rcv_nxt = tp->rcv_nxt;
6783     @@ -159,7 +159,7 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
6784     */
6785     if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
6786     __tcp_send_ack(sk, ca->prior_rcv_nxt);
6787     - tcp_enter_quickack_mode(sk);
6788     + tcp_enter_quickack_mode(sk, 1);
6789     }
6790    
6791     ca->prior_rcv_nxt = tp->rcv_nxt;
6792     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
6793     index b86e7b8beb1d..bdabd748f4bc 100644
6794     --- a/net/ipv4/tcp_input.c
6795     +++ b/net/ipv4/tcp_input.c
6796     @@ -198,21 +198,23 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
6797     }
6798     }
6799    
6800     -static void tcp_incr_quickack(struct sock *sk)
6801     +static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
6802     {
6803     struct inet_connection_sock *icsk = inet_csk(sk);
6804     unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
6805    
6806     if (quickacks == 0)
6807     quickacks = 2;
6808     + quickacks = min(quickacks, max_quickacks);
6809     if (quickacks > icsk->icsk_ack.quick)
6810     - icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
6811     + icsk->icsk_ack.quick = quickacks;
6812     }
6813    
6814     -void tcp_enter_quickack_mode(struct sock *sk)
6815     +void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
6816     {
6817     struct inet_connection_sock *icsk = inet_csk(sk);
6818     - tcp_incr_quickack(sk);
6819     +
6820     + tcp_incr_quickack(sk, max_quickacks);
6821     icsk->icsk_ack.pingpong = 0;
6822     icsk->icsk_ack.ato = TCP_ATO_MIN;
6823     }
6824     @@ -248,8 +250,10 @@ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
6825     tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
6826     }
6827    
6828     -static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
6829     +static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
6830     {
6831     + struct tcp_sock *tp = tcp_sk(sk);
6832     +
6833     switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
6834     case INET_ECN_NOT_ECT:
6835     /* Funny extension: if ECT is not set on a segment,
6836     @@ -257,31 +261,31 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
6837     * it is probably a retransmit.
6838     */
6839     if (tp->ecn_flags & TCP_ECN_SEEN)
6840     - tcp_enter_quickack_mode((struct sock *)tp);
6841     + tcp_enter_quickack_mode(sk, 2);
6842     break;
6843     case INET_ECN_CE:
6844     - if (tcp_ca_needs_ecn((struct sock *)tp))
6845     - tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE);
6846     + if (tcp_ca_needs_ecn(sk))
6847     + tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
6848    
6849     if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
6850     /* Better not delay acks, sender can have a very low cwnd */
6851     - tcp_enter_quickack_mode((struct sock *)tp);
6852     + tcp_enter_quickack_mode(sk, 2);
6853     tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
6854     }
6855     tp->ecn_flags |= TCP_ECN_SEEN;
6856     break;
6857     default:
6858     - if (tcp_ca_needs_ecn((struct sock *)tp))
6859     - tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE);
6860     + if (tcp_ca_needs_ecn(sk))
6861     + tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
6862     tp->ecn_flags |= TCP_ECN_SEEN;
6863     break;
6864     }
6865     }
6866    
6867     -static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
6868     +static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
6869     {
6870     - if (tp->ecn_flags & TCP_ECN_OK)
6871     - __tcp_ecn_check_ce(tp, skb);
6872     + if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
6873     + __tcp_ecn_check_ce(sk, skb);
6874     }
6875    
6876     static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
6877     @@ -686,7 +690,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
6878     /* The _first_ data packet received, initialize
6879     * delayed ACK engine.
6880     */
6881     - tcp_incr_quickack(sk);
6882     + tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
6883     icsk->icsk_ack.ato = TCP_ATO_MIN;
6884     } else {
6885     int m = now - icsk->icsk_ack.lrcvtime;
6886     @@ -702,13 +706,13 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
6887     /* Too long gap. Apparently sender failed to
6888     * restart window, so that we send ACKs quickly.
6889     */
6890     - tcp_incr_quickack(sk);
6891     + tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
6892     sk_mem_reclaim(sk);
6893     }
6894     }
6895     icsk->icsk_ack.lrcvtime = now;
6896    
6897     - tcp_ecn_check_ce(tp, skb);
6898     + tcp_ecn_check_ce(sk, skb);
6899    
6900     if (skb->len >= 128)
6901     tcp_grow_window(sk, skb);
6902     @@ -4160,7 +4164,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
6903     if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
6904     before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
6905     NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
6906     - tcp_enter_quickack_mode(sk);
6907     + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
6908    
6909     if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
6910     u32 end_seq = TCP_SKB_CB(skb)->end_seq;
6911     @@ -4441,7 +4445,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
6912     u32 seq, end_seq;
6913     bool fragstolen;
6914    
6915     - tcp_ecn_check_ce(tp, skb);
6916     + tcp_ecn_check_ce(sk, skb);
6917    
6918     if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
6919     NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
6920     @@ -4710,7 +4714,7 @@ queue_and_out:
6921     tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
6922    
6923     out_of_window:
6924     - tcp_enter_quickack_mode(sk);
6925     + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
6926     inet_csk_schedule_ack(sk);
6927     drop:
6928     tcp_drop(sk, skb);
6929     @@ -4721,8 +4725,6 @@ drop:
6930     if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
6931     goto out_of_window;
6932    
6933     - tcp_enter_quickack_mode(sk);
6934     -
6935     if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
6936     /* Partial packet, seq < rcv_next < end_seq */
6937     SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
6938     @@ -5793,7 +5795,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
6939     * to stand against the temptation 8) --ANK
6940     */
6941     inet_csk_schedule_ack(sk);
6942     - tcp_enter_quickack_mode(sk);
6943     + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
6944     inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
6945     TCP_DELACK_MAX, TCP_RTO_MAX);
6946    
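Throughout this tcp_input.c diff, tcp_enter_quickack_mode() and tcp_incr_quickack() gain a max_quickacks argument so that ECN/CE events schedule only one or two quick ACKs while the data paths keep using TCP_MAX_QUICKACKS. A sketch of the clamping, with TCP_MAX_QUICKACKS set to the kernel's usual 16:

#include <stdio.h>

#define TCP_MAX_QUICKACKS 16  /* upper bound on scheduled quick ACKs */

/* Sketch of the reworked accounting: the caller states how many quick
 * ACKs it needs, and the window-derived estimate is clamped to that, so
 * an ECN event no longer forces a long quick-ACK run. */
static unsigned int quickacks_to_set(unsigned int rcv_wnd,
                                     unsigned int rcv_mss,
                                     unsigned int current_quick,
                                     unsigned int max_quickacks)
{
    unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

    if (quickacks == 0)
        quickacks = 2;
    if (quickacks > max_quickacks)
        quickacks = max_quickacks;
    return quickacks > current_quick ? quickacks : current_quick;
}

int main(void)
{
    /* Data path: allow a full burst. */
    printf("%u\n", quickacks_to_set(65535, 1460, 0, TCP_MAX_QUICKACKS));
    /* ECN CE event: cap at 2 as in __tcp_ecn_check_ce(). */
    printf("%u\n", quickacks_to_set(65535, 1460, 0, 2));
    return 0;
}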
6947     diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
6948     index 51063d9ed0f7..dfd268166e42 100644
6949     --- a/net/netfilter/ipset/ip_set_hash_gen.h
6950     +++ b/net/netfilter/ipset/ip_set_hash_gen.h
6951     @@ -1241,7 +1241,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
6952     pr_debug("Create set %s with family %s\n",
6953     set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
6954    
6955     -#ifndef IP_SET_PROTO_UNDEF
6956     +#ifdef IP_SET_PROTO_UNDEF
6957     + if (set->family != NFPROTO_UNSPEC)
6958     + return -IPSET_ERR_INVALID_FAMILY;
6959     +#else
6960     if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
6961     return -IPSET_ERR_INVALID_FAMILY;
6962     #endif
6963     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
6964     index 85b549e84104..9a945024a0b6 100644
6965     --- a/net/netfilter/nf_tables_api.c
6966     +++ b/net/netfilter/nf_tables_api.c
6967     @@ -2710,12 +2710,13 @@ static struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
6968     u32 id = ntohl(nla_get_be32(nla));
6969    
6970     list_for_each_entry(trans, &net->nft.commit_list, list) {
6971     - struct nft_set *set = nft_trans_set(trans);
6972     + if (trans->msg_type == NFT_MSG_NEWSET) {
6973     + struct nft_set *set = nft_trans_set(trans);
6974    
6975     - if (trans->msg_type == NFT_MSG_NEWSET &&
6976     - id == nft_trans_set_id(trans) &&
6977     - nft_active_genmask(set, genmask))
6978     - return set;
6979     + if (id == nft_trans_set_id(trans) &&
6980     + nft_active_genmask(set, genmask))
6981     + return set;
6982     + }
6983     }
6984     return ERR_PTR(-ENOENT);
6985     }
6986     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
6987     index b3932846f6c4..b2fcbf012056 100644
6988     --- a/net/netlink/af_netlink.c
6989     +++ b/net/netlink/af_netlink.c
6990     @@ -977,6 +977,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
6991     return err;
6992     }
6993    
6994     + if (nlk->ngroups == 0)
6995     + groups = 0;
6996     + else
6997     + groups &= (1ULL << nlk->ngroups) - 1;
6998     +
6999     bound = nlk->bound;
7000     if (bound) {
7001     /* Ensure nlk->portid is up-to-date. */
7002     diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
7003     index 7e334fd31c15..f8553179bdd7 100644
7004     --- a/security/integrity/ima/ima_main.c
7005     +++ b/security/integrity/ima/ima_main.c
7006     @@ -379,6 +379,7 @@ int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
7007    
7008     static int read_idmap[READING_MAX_ID] = {
7009     [READING_FIRMWARE] = FIRMWARE_CHECK,
7010     + [READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK,
7011     [READING_MODULE] = MODULE_CHECK,
7012     [READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK,
7013     [READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK,
7014     diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
7015     index 2683b9717215..56be1630bd3e 100644
7016     --- a/sound/pci/emu10k1/emupcm.c
7017     +++ b/sound/pci/emu10k1/emupcm.c
7018     @@ -1850,7 +1850,9 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device)
7019     if (!kctl)
7020     return -ENOMEM;
7021     kctl->id.device = device;
7022     - snd_ctl_add(emu->card, kctl);
7023     + err = snd_ctl_add(emu->card, kctl);
7024     + if (err < 0)
7025     + return err;
7026    
7027     snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024);
7028    
7029     diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
7030     index 4f1f69be1865..8c778fa33031 100644
7031     --- a/sound/pci/emu10k1/memory.c
7032     +++ b/sound/pci/emu10k1/memory.c
7033     @@ -237,13 +237,13 @@ __found_pages:
7034     static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
7035     {
7036     if (addr & ~emu->dma_mask) {
7037     - dev_err(emu->card->dev,
7038     + dev_err_ratelimited(emu->card->dev,
7039     "max memory size is 0x%lx (addr = 0x%lx)!!\n",
7040     emu->dma_mask, (unsigned long)addr);
7041     return 0;
7042     }
7043     if (addr & (EMUPAGESIZE-1)) {
7044     - dev_err(emu->card->dev, "page is not aligned\n");
7045     + dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
7046     return 0;
7047     }
7048     return 1;
7049     @@ -334,7 +334,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
7050     else
7051     addr = snd_pcm_sgbuf_get_addr(substream, ofs);
7052     if (! is_valid_page(emu, addr)) {
7053     - dev_err(emu->card->dev,
7054     + dev_err_ratelimited(emu->card->dev,
7055     "emu: failure page = %d\n", idx);
7056     mutex_unlock(&hdr->block_mutex);
7057     return NULL;
7058     diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
7059     index 73a67bc3586b..e3fb9c61017c 100644
7060     --- a/sound/pci/fm801.c
7061     +++ b/sound/pci/fm801.c
7062     @@ -1068,11 +1068,19 @@ static int snd_fm801_mixer(struct fm801 *chip)
7063     if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec)) < 0)
7064     return err;
7065     }
7066     - for (i = 0; i < FM801_CONTROLS; i++)
7067     - snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls[i], chip));
7068     + for (i = 0; i < FM801_CONTROLS; i++) {
7069     + err = snd_ctl_add(chip->card,
7070     + snd_ctl_new1(&snd_fm801_controls[i], chip));
7071     + if (err < 0)
7072     + return err;
7073     + }
7074     if (chip->multichannel) {
7075     - for (i = 0; i < FM801_CONTROLS_MULTI; i++)
7076     - snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls_multi[i], chip));
7077     + for (i = 0; i < FM801_CONTROLS_MULTI; i++) {
7078     + err = snd_ctl_add(chip->card,
7079     + snd_ctl_new1(&snd_fm801_controls_multi[i], chip));
7080     + if (err < 0)
7081     + return err;
7082     + }
7083     }
7084     return 0;
7085     }
7086     diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
7087     index 3e73d5c6ccfc..119f3b504765 100644
7088     --- a/sound/pci/hda/patch_ca0132.c
7089     +++ b/sound/pci/hda/patch_ca0132.c
7090     @@ -38,6 +38,10 @@
7091     /* Enable this to see controls for tuning purpose. */
7092     /*#define ENABLE_TUNING_CONTROLS*/
7093    
7094     +#ifdef ENABLE_TUNING_CONTROLS
7095     +#include <sound/tlv.h>
7096     +#endif
7097     +
7098     #define FLOAT_ZERO 0x00000000
7099     #define FLOAT_ONE 0x3f800000
7100     #define FLOAT_TWO 0x40000000
7101     @@ -3067,8 +3071,8 @@ static int equalizer_ctl_put(struct snd_kcontrol *kcontrol,
7102     return 1;
7103     }
7104    
7105     -static const DECLARE_TLV_DB_SCALE(voice_focus_db_scale, 2000, 100, 0);
7106     -static const DECLARE_TLV_DB_SCALE(eq_db_scale, -2400, 100, 0);
7107     +static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(voice_focus_db_scale, 2000, 100, 0);
7108     +static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(eq_db_scale, -2400, 100, 0);
7109    
7110     static int add_tuning_control(struct hda_codec *codec,
7111     hda_nid_t pnid, hda_nid_t nid,
7112     diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
7113     index 94b88b897c3b..3d0dab8282ad 100644
7114     --- a/sound/soc/soc-pcm.c
7115     +++ b/sound/soc/soc-pcm.c
7116     @@ -1779,8 +1779,10 @@ int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
7117     continue;
7118    
7119     if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
7120     - (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN))
7121     - continue;
7122     + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN)) {
7123     + soc_pcm_hw_free(be_substream);
7124     + be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
7125     + }
7126    
7127     dev_dbg(be->dev, "ASoC: close BE %s\n",
7128     be->dai_link->name);
7129     diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
7130     index 30cdad2eab7f..c1619860a5de 100644
7131     --- a/sound/soc/soc-topology.c
7132     +++ b/sound/soc/soc-topology.c
7133     @@ -1997,6 +1997,13 @@ static void set_link_hw_format(struct snd_soc_dai_link *link,
7134    
7135     link->dai_fmt = hw_config->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
7136    
7137     + /* clock gating */
7138     + if (hw_config->clock_gated == SND_SOC_TPLG_DAI_CLK_GATE_GATED)
7139     + link->dai_fmt |= SND_SOC_DAIFMT_GATED;
7140     + else if (hw_config->clock_gated ==
7141     + SND_SOC_TPLG_DAI_CLK_GATE_CONT)
7142     + link->dai_fmt |= SND_SOC_DAIFMT_CONT;
7143     +
7144     /* clock signal polarity */
7145     invert_bclk = hw_config->invert_bclk;
7146     invert_fsync = hw_config->invert_fsync;
7147     @@ -2010,13 +2017,15 @@ static void set_link_hw_format(struct snd_soc_dai_link *link,
7148     link->dai_fmt |= SND_SOC_DAIFMT_IB_IF;
7149    
7150     /* clock masters */
7151     - bclk_master = hw_config->bclk_master;
7152     - fsync_master = hw_config->fsync_master;
7153     - if (!bclk_master && !fsync_master)
7154     + bclk_master = (hw_config->bclk_master ==
7155     + SND_SOC_TPLG_BCLK_CM);
7156     + fsync_master = (hw_config->fsync_master ==
7157     + SND_SOC_TPLG_FSYNC_CM);
7158     + if (bclk_master && fsync_master)
7159     link->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
7160     - else if (bclk_master && !fsync_master)
7161     - link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFM;
7162     else if (!bclk_master && fsync_master)
7163     + link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFM;
7164     + else if (bclk_master && !fsync_master)
7165     link->dai_fmt |= SND_SOC_DAIFMT_CBM_CFS;
7166     else
7167     link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
7168     diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
7169     index 3cbfae6604f9..d8a46d46bcd2 100644
7170     --- a/sound/usb/pcm.c
7171     +++ b/sound/usb/pcm.c
7172     @@ -1311,7 +1311,7 @@ static void retire_capture_urb(struct snd_usb_substream *subs,
7173     if (bytes % (runtime->sample_bits >> 3) != 0) {
7174     int oldbytes = bytes;
7175     bytes = frames * stride;
7176     - dev_warn(&subs->dev->dev,
7177     + dev_warn_ratelimited(&subs->dev->dev,
7178     "Corrected urb data len. %d->%d\n",
7179     oldbytes, bytes);
7180     }
7181     diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
7182     index e81a20ea8d7d..988310cd3049 100644
7183     --- a/tools/perf/util/parse-events.y
7184     +++ b/tools/perf/util/parse-events.y
7185     @@ -72,6 +72,7 @@ static void inc_group_count(struct list_head *list,
7186     %type <num> value_sym
7187     %type <head> event_config
7188     %type <head> opt_event_config
7189     +%type <head> opt_pmu_config
7190     %type <term> event_term
7191     %type <head> event_pmu
7192     %type <head> event_legacy_symbol
7193     @@ -223,7 +224,7 @@ event_def: event_pmu |
7194     event_bpf_file
7195    
7196     event_pmu:
7197     -PE_NAME opt_event_config
7198     +PE_NAME opt_pmu_config
7199     {
7200     struct list_head *list, *orig_terms, *terms;
7201    
7202     @@ -486,6 +487,17 @@ opt_event_config:
7203     $$ = NULL;
7204     }
7205    
7206     +opt_pmu_config:
7207     +'/' event_config '/'
7208     +{
7209     + $$ = $2;
7210     +}
7211     +|
7212     +'/' '/'
7213     +{
7214     + $$ = NULL;
7215     +}
7216     +
7217     start_terms: event_config
7218     {
7219     struct parse_events_state *parse_state = _parse_state;
7220     diff --git a/tools/testing/selftests/intel_pstate/run.sh b/tools/testing/selftests/intel_pstate/run.sh
7221     index c670359becc6..928978804342 100755
7222     --- a/tools/testing/selftests/intel_pstate/run.sh
7223     +++ b/tools/testing/selftests/intel_pstate/run.sh
7224     @@ -30,9 +30,12 @@
7225    
7226     EVALUATE_ONLY=0
7227    
7228     +# Kselftest framework requirement - SKIP code is 4.
7229     +ksft_skip=4
7230     +
7231     if ! uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ | grep -q x86; then
7232     echo "$0 # Skipped: Test can only run on x86 architectures."
7233     - exit 0
7234     + exit $ksft_skip
7235     fi
7236    
7237     max_cpus=$(($(nproc)-1))
7238     @@ -48,11 +51,12 @@ function run_test () {
7239    
7240     echo "sleeping for 5 seconds"
7241     sleep 5
7242     - num_freqs=$(cat /proc/cpuinfo | grep MHz | sort -u | wc -l)
7243     - if [ $num_freqs -le 2 ]; then
7244     - cat /proc/cpuinfo | grep MHz | sort -u | tail -1 > /tmp/result.$1
7245     + grep MHz /proc/cpuinfo | sort -u > /tmp/result.freqs
7246     + num_freqs=$(wc -l /tmp/result.freqs | awk ' { print $1 } ')
7247     + if [ $num_freqs -ge 2 ]; then
7248     + tail -n 1 /tmp/result.freqs > /tmp/result.$1
7249     else
7250     - cat /proc/cpuinfo | grep MHz | sort -u > /tmp/result.$1
7251     + cp /tmp/result.freqs /tmp/result.$1
7252     fi
7253     ./msr 0 >> /tmp/result.$1
7254    
7255     @@ -82,21 +86,20 @@ _max_freq=$(cpupower frequency-info -l | tail -1 | awk ' { print $2 } ')
7256     max_freq=$(($_max_freq / 1000))
7257    
7258    
7259     -for freq in `seq $max_freq -100 $min_freq`
7260     +[ $EVALUATE_ONLY -eq 0 ] && for freq in `seq $max_freq -100 $min_freq`
7261     do
7262     echo "Setting maximum frequency to $freq"
7263     cpupower frequency-set -g powersave --max=${freq}MHz >& /dev/null
7264     - [ $EVALUATE_ONLY -eq 0 ] && run_test $freq
7265     + run_test $freq
7266     done
7267    
7268     -echo "=============================================================================="
7269     +[ $EVALUATE_ONLY -eq 0 ] && cpupower frequency-set -g powersave --max=${max_freq}MHz >& /dev/null
7270    
7271     +echo "=============================================================================="
7272     echo "The marketing frequency of the cpu is $mkt_freq MHz"
7273     echo "The maximum frequency of the cpu is $max_freq MHz"
7274     echo "The minimum frequency of the cpu is $min_freq MHz"
7275    
7276     -cpupower frequency-set -g powersave --max=${max_freq}MHz >& /dev/null
7277     -
7278     # make a pretty table
7279     echo "Target Actual Difference MSR(0x199) max_perf_pct"
7280     for freq in `seq $max_freq -100 $min_freq`
7281     @@ -104,10 +107,6 @@ do
7282     result_freq=$(cat /tmp/result.${freq} | grep "cpu MHz" | awk ' { print $4 } ' | awk -F "." ' { print $1 } ')
7283     msr=$(cat /tmp/result.${freq} | grep "msr" | awk ' { print $3 } ')
7284     max_perf_pct=$(cat /tmp/result.${freq} | grep "max_perf_pct" | awk ' { print $2 } ' )
7285     - if [ $result_freq -eq $freq ]; then
7286     - echo " $freq $result_freq 0 $msr $(($max_perf_pct*3300))"
7287     - else
7288     - echo " $freq $result_freq $(($result_freq-$freq)) $msr $(($max_perf_pct*$max_freq))"
7289     - fi
7290     + echo " $freq $result_freq $(($result_freq-$freq)) $msr $(($max_perf_pct*$max_freq))"
7291     done
7292     exit 0
7293     diff --git a/tools/testing/selftests/memfd/run_tests.sh b/tools/testing/selftests/memfd/run_tests.sh
7294     index daabb350697c..bf83db61013a 100755
7295     --- a/tools/testing/selftests/memfd/run_tests.sh
7296     +++ b/tools/testing/selftests/memfd/run_tests.sh
7297     @@ -1,6 +1,9 @@
7298     #!/bin/bash
7299     # please run as root
7300    
7301     +# Kselftest framework requirement - SKIP code is 4.
7302     +ksft_skip=4
7303     +
7304     #
7305     # Normal tests requiring no special resources
7306     #
7307     @@ -29,12 +32,13 @@ if [ -n "$freepgs" ] && [ $freepgs -lt $hpages_test ]; then
7308     nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
7309     hpages_needed=`expr $hpages_test - $freepgs`
7310    
7311     + if [ $UID != 0 ]; then
7312     + echo "Please run memfd with hugetlbfs test as root"
7313     + exit $ksft_skip
7314     + fi
7315     +
7316     echo 3 > /proc/sys/vm/drop_caches
7317     echo $(( $hpages_needed + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
7318     - if [ $? -ne 0 ]; then
7319     - echo "Please run this test as root"
7320     - exit 1
7321     - fi
7322     while read name size unit; do
7323     if [ "$name" = "HugePages_Free:" ]; then
7324     freepgs=$size
7325     @@ -53,7 +57,7 @@ if [ $freepgs -lt $hpages_test ]; then
7326     fi
7327     printf "Not enough huge pages available (%d < %d)\n" \
7328     $freepgs $needpgs
7329     - exit 1
7330     + exit $ksft_skip
7331     fi
7332    
7333     #
7334     diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
7335     index d1fc0f9f00fb..ed8c9d360c0f 100644
7336     --- a/tools/usb/usbip/libsrc/vhci_driver.c
7337     +++ b/tools/usb/usbip/libsrc/vhci_driver.c
7338     @@ -135,11 +135,11 @@ static int refresh_imported_device_list(void)
7339     return 0;
7340     }
7341    
7342     -static int get_nports(void)
7343     +static int get_nports(struct udev_device *hc_device)
7344     {
7345     const char *attr_nports;
7346    
7347     - attr_nports = udev_device_get_sysattr_value(vhci_driver->hc_device, "nports");
7348     + attr_nports = udev_device_get_sysattr_value(hc_device, "nports");
7349     if (!attr_nports) {
7350     err("udev_device_get_sysattr_value nports failed");
7351     return -1;
7352     @@ -242,35 +242,41 @@ static int read_record(int rhport, char *host, unsigned long host_len,
7353    
7354     int usbip_vhci_driver_open(void)
7355     {
7356     + int nports;
7357     + struct udev_device *hc_device;
7358     +
7359     udev_context = udev_new();
7360     if (!udev_context) {
7361     err("udev_new failed");
7362     return -1;
7363     }
7364    
7365     - vhci_driver = calloc(1, sizeof(struct usbip_vhci_driver));
7366     -
7367     /* will be freed in usbip_driver_close() */
7368     - vhci_driver->hc_device =
7369     + hc_device =
7370     udev_device_new_from_subsystem_sysname(udev_context,
7371     USBIP_VHCI_BUS_TYPE,
7372     USBIP_VHCI_DEVICE_NAME);
7373     - if (!vhci_driver->hc_device) {
7374     + if (!hc_device) {
7375     err("udev_device_new_from_subsystem_sysname failed");
7376     goto err;
7377     }
7378    
7379     - vhci_driver->nports = get_nports();
7380     - dbg("available ports: %d", vhci_driver->nports);
7381     -
7382     - if (vhci_driver->nports <= 0) {
7383     + nports = get_nports(hc_device);
7384     + if (nports <= 0) {
7385     err("no available ports");
7386     goto err;
7387     - } else if (vhci_driver->nports > MAXNPORT) {
7388     - err("port number exceeds %d", MAXNPORT);
7389     + }
7390     + dbg("available ports: %d", nports);
7391     +
7392     + vhci_driver = calloc(1, sizeof(struct usbip_vhci_driver) +
7393     + nports * sizeof(struct usbip_imported_device));
7394     + if (!vhci_driver) {
7395     + err("vhci_driver allocation failed");
7396     goto err;
7397     }
7398    
7399     + vhci_driver->nports = nports;
7400     + vhci_driver->hc_device = hc_device;
7401     vhci_driver->ncontrollers = get_ncontrollers();
7402     dbg("available controllers: %d", vhci_driver->ncontrollers);
7403    
7404     @@ -285,7 +291,7 @@ int usbip_vhci_driver_open(void)
7405     return 0;
7406    
7407     err:
7408     - udev_device_unref(vhci_driver->hc_device);
7409     + udev_device_unref(hc_device);
7410    
7411     if (vhci_driver)
7412     free(vhci_driver);
7413     diff --git a/tools/usb/usbip/libsrc/vhci_driver.h b/tools/usb/usbip/libsrc/vhci_driver.h
7414     index 418b404d5121..6c9aca216705 100644
7415     --- a/tools/usb/usbip/libsrc/vhci_driver.h
7416     +++ b/tools/usb/usbip/libsrc/vhci_driver.h
7417     @@ -13,7 +13,6 @@
7418    
7419     #define USBIP_VHCI_BUS_TYPE "platform"
7420     #define USBIP_VHCI_DEVICE_NAME "vhci_hcd.0"
7421     -#define MAXNPORT 128
7422    
7423     enum hub_speed {
7424     HUB_SPEED_HIGH = 0,
7425     @@ -41,7 +40,7 @@ struct usbip_vhci_driver {
7426    
7427     int ncontrollers;
7428     int nports;
7429     - struct usbip_imported_device idev[MAXNPORT];
7430     + struct usbip_imported_device idev[];
7431     };
7432    
7433    
7434     diff --git a/tools/usb/usbip/src/usbip_detach.c b/tools/usb/usbip/src/usbip_detach.c
7435     index 9db9d21bb2ec..6a8db858caa5 100644
7436     --- a/tools/usb/usbip/src/usbip_detach.c
7437     +++ b/tools/usb/usbip/src/usbip_detach.c
7438     @@ -43,7 +43,7 @@ void usbip_detach_usage(void)
7439    
7440     static int detach_port(char *port)
7441     {
7442     - int ret;
7443     + int ret = 0;
7444     uint8_t portnum;
7445     char path[PATH_MAX+1];
7446    
7447     @@ -73,9 +73,12 @@ static int detach_port(char *port)
7448     }
7449    
7450     ret = usbip_vhci_detach_device(portnum);
7451     - if (ret < 0)
7452     - return -1;
7453     + if (ret < 0) {
7454     + ret = -1;
7455     + goto call_driver_close;
7456     + }
7457    
7458     +call_driver_close:
7459     usbip_vhci_driver_close();
7460    
7461     return ret;