Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0172-5.4.73-all-fixes.patch



Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 449245 bytes
-sync kernel patches
1 niro 3635 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2     index 13984b6cc3225..988a0d2535b25 100644
3     --- a/Documentation/admin-guide/kernel-parameters.txt
4     +++ b/Documentation/admin-guide/kernel-parameters.txt
5     @@ -567,7 +567,7 @@
6     loops can be debugged more effectively on production
7     systems.
8    
9     - clearcpuid=BITNUM [X86]
10     + clearcpuid=BITNUM[,BITNUM...] [X86]
11     Disable CPUID feature X for the kernel. See
12     arch/x86/include/asm/cpufeatures.h for the valid bit
13     numbers. Note the Linux specific bits are not necessarily
14     diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
15     index 8d4ad1d1ae26f..8af3771a3ebf2 100644
16     --- a/Documentation/networking/ip-sysctl.txt
17     +++ b/Documentation/networking/ip-sysctl.txt
18     @@ -1000,12 +1000,14 @@ icmp_ratelimit - INTEGER
19     icmp_msgs_per_sec - INTEGER
20     Limit maximal number of ICMP packets sent per second from this host.
21     Only messages whose type matches icmp_ratemask (see below) are
22     - controlled by this limit.
23     + controlled by this limit. For security reasons, the precise count
24     + of messages per second is randomized.
25     Default: 1000
26    
27     icmp_msgs_burst - INTEGER
28     icmp_msgs_per_sec controls number of ICMP packets sent per second,
29     while icmp_msgs_burst controls the burst size of these packets.
30     + For security reasons, the precise burst size is randomized.
31     Default: 50
32    
33     icmp_ratemask - INTEGER
34     diff --git a/Makefile b/Makefile
35     index 8db75cc76ed16..f9a8d76a693eb 100644
36     --- a/Makefile
37     +++ b/Makefile
38     @@ -1,7 +1,7 @@
39     # SPDX-License-Identifier: GPL-2.0
40     VERSION = 5
41     PATCHLEVEL = 4
42     -SUBLEVEL = 72
43     +SUBLEVEL = 73
44     EXTRAVERSION =
45     NAME = Kleptomaniac Octopus
46    
47     diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
48     index ce81018345184..6b5c54576f54d 100644
49     --- a/arch/arc/plat-hsdk/Kconfig
50     +++ b/arch/arc/plat-hsdk/Kconfig
51     @@ -8,5 +8,6 @@ menuconfig ARC_SOC_HSDK
52     select ARC_HAS_ACCL_REGS
53     select ARC_IRQ_NO_AUTOSAVE
54     select CLK_HSDK
55     + select RESET_CONTROLLER
56     select RESET_HSDK
57     select HAVE_PCI
58     diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
59     index 3a96b5538a2a1..540880f0413fd 100644
60     --- a/arch/arm/boot/dts/imx6sl.dtsi
61     +++ b/arch/arm/boot/dts/imx6sl.dtsi
62     @@ -936,8 +936,10 @@
63     };
64    
65     rngb: rngb@21b4000 {
66     + compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb";
67     reg = <0x021b4000 0x4000>;
68     interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
69     + clocks = <&clks IMX6SL_CLK_DUMMY>;
70     };
71    
72     weim: weim@21b8000 {
73     diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
74     index db2033f674c67..3efe9d41c2bb6 100644
75     --- a/arch/arm/boot/dts/meson8.dtsi
76     +++ b/arch/arm/boot/dts/meson8.dtsi
77     @@ -230,8 +230,6 @@
78     <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
79     <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
80     <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
81     - <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
82     - <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
83     <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
84     <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
85     <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
86     diff --git a/arch/arm/boot/dts/owl-s500.dtsi b/arch/arm/boot/dts/owl-s500.dtsi
87     index 5ceb6cc4451d2..1dbe4e8b38ac7 100644
88     --- a/arch/arm/boot/dts/owl-s500.dtsi
89     +++ b/arch/arm/boot/dts/owl-s500.dtsi
90     @@ -84,21 +84,21 @@
91     global_timer: timer@b0020200 {
92     compatible = "arm,cortex-a9-global-timer";
93     reg = <0xb0020200 0x100>;
94     - interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
95     + interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
96     status = "disabled";
97     };
98    
99     twd_timer: timer@b0020600 {
100     compatible = "arm,cortex-a9-twd-timer";
101     reg = <0xb0020600 0x20>;
102     - interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
103     + interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
104     status = "disabled";
105     };
106    
107     twd_wdt: wdt@b0020620 {
108     compatible = "arm,cortex-a9-twd-wdt";
109     reg = <0xb0020620 0xe0>;
110     - interrupts = <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
111     + interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
112     status = "disabled";
113     };
114    
115     diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
116     index 42d62d1ba1dc7..ea15073f0c79c 100644
117     --- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
118     +++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
119     @@ -223,16 +223,16 @@
120     };
121    
122     &reg_dc1sw {
123     - regulator-min-microvolt = <3000000>;
124     - regulator-max-microvolt = <3000000>;
125     + regulator-min-microvolt = <3300000>;
126     + regulator-max-microvolt = <3300000>;
127     regulator-name = "vcc-gmac-phy";
128     };
129    
130     &reg_dcdc1 {
131     regulator-always-on;
132     - regulator-min-microvolt = <3000000>;
133     - regulator-max-microvolt = <3000000>;
134     - regulator-name = "vcc-3v0";
135     + regulator-min-microvolt = <3300000>;
136     + regulator-max-microvolt = <3300000>;
137     + regulator-name = "vcc-3v3";
138     };
139    
140     &reg_dcdc2 {
141     diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
142     index 6bc3000deb86e..676cc2a318f41 100644
143     --- a/arch/arm/mach-at91/pm.c
144     +++ b/arch/arm/mach-at91/pm.c
145     @@ -777,6 +777,7 @@ static void __init at91_pm_init(void (*pm_idle)(void))
146    
147     pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
148     soc_pm.data.pmc = of_iomap(pmc_np, 0);
149     + of_node_put(pmc_np);
150     if (!soc_pm.data.pmc) {
151     pr_err("AT91: PM not supported, PMC not found\n");
152     return;
153     diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
154     index 6f5f89711f256..a92d277f81a08 100644
155     --- a/arch/arm/mach-omap2/cpuidle44xx.c
156     +++ b/arch/arm/mach-omap2/cpuidle44xx.c
157     @@ -174,8 +174,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
158     */
159     if (mpuss_can_lose_context) {
160     error = cpu_cluster_pm_enter();
161     - if (error)
162     + if (error) {
163     + omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
164     goto cpu_cluster_pm_out;
165     + }
166     }
167     }
168    
169     diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c
170     index 58c5ef3cf1d7e..2d370f7f75fa2 100644
171     --- a/arch/arm/mach-s3c24xx/mach-at2440evb.c
172     +++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c
173     @@ -143,7 +143,7 @@ static struct gpiod_lookup_table at2440evb_mci_gpio_table = {
174     .dev_id = "s3c2410-sdi",
175     .table = {
176     /* Card detect S3C2410_GPG(10) */
177     - GPIO_LOOKUP("GPG", 10, "cd", GPIO_ACTIVE_LOW),
178     + GPIO_LOOKUP("GPIOG", 10, "cd", GPIO_ACTIVE_LOW),
179     { },
180     },
181     };
182     diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c
183     index 74d6b68e91c74..8d9d8e7c71d4c 100644
184     --- a/arch/arm/mach-s3c24xx/mach-h1940.c
185     +++ b/arch/arm/mach-s3c24xx/mach-h1940.c
186     @@ -468,9 +468,9 @@ static struct gpiod_lookup_table h1940_mmc_gpio_table = {
187     .dev_id = "s3c2410-sdi",
188     .table = {
189     /* Card detect S3C2410_GPF(5) */
190     - GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
191     + GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
192     /* Write protect S3C2410_GPH(8) */
193     - GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
194     + GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
195     { },
196     },
197     };
198     diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
199     index 9035f868fb34e..3a5b1124037b2 100644
200     --- a/arch/arm/mach-s3c24xx/mach-mini2440.c
201     +++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
202     @@ -244,9 +244,9 @@ static struct gpiod_lookup_table mini2440_mmc_gpio_table = {
203     .dev_id = "s3c2410-sdi",
204     .table = {
205     /* Card detect S3C2410_GPG(8) */
206     - GPIO_LOOKUP("GPG", 8, "cd", GPIO_ACTIVE_LOW),
207     + GPIO_LOOKUP("GPIOG", 8, "cd", GPIO_ACTIVE_LOW),
208     /* Write protect S3C2410_GPH(8) */
209     - GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_HIGH),
210     + GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_HIGH),
211     { },
212     },
213     };
214     diff --git a/arch/arm/mach-s3c24xx/mach-n30.c b/arch/arm/mach-s3c24xx/mach-n30.c
215     index d856f23939aff..ffa20f52aa832 100644
216     --- a/arch/arm/mach-s3c24xx/mach-n30.c
217     +++ b/arch/arm/mach-s3c24xx/mach-n30.c
218     @@ -359,9 +359,9 @@ static struct gpiod_lookup_table n30_mci_gpio_table = {
219     .dev_id = "s3c2410-sdi",
220     .table = {
221     /* Card detect S3C2410_GPF(1) */
222     - GPIO_LOOKUP("GPF", 1, "cd", GPIO_ACTIVE_LOW),
223     + GPIO_LOOKUP("GPIOF", 1, "cd", GPIO_ACTIVE_LOW),
224     /* Write protect S3C2410_GPG(10) */
225     - GPIO_LOOKUP("GPG", 10, "wp", GPIO_ACTIVE_LOW),
226     + GPIO_LOOKUP("GPIOG", 10, "wp", GPIO_ACTIVE_LOW),
227     { },
228     },
229     };
230     diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
231     index 29f9b345a5311..534e9c1d8161f 100644
232     --- a/arch/arm/mach-s3c24xx/mach-rx1950.c
233     +++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
234     @@ -567,9 +567,9 @@ static struct gpiod_lookup_table rx1950_mmc_gpio_table = {
235     .dev_id = "s3c2410-sdi",
236     .table = {
237     /* Card detect S3C2410_GPF(5) */
238     - GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
239     + GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
240     /* Write protect S3C2410_GPH(8) */
241     - GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
242     + GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
243     { },
244     },
245     };
246     diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
247     index 12c26eb88afbc..43d91bfd23600 100644
248     --- a/arch/arm/mm/cache-l2x0.c
249     +++ b/arch/arm/mm/cache-l2x0.c
250     @@ -1249,20 +1249,28 @@ static void __init l2c310_of_parse(const struct device_node *np,
251    
252     ret = of_property_read_u32(np, "prefetch-data", &val);
253     if (ret == 0) {
254     - if (val)
255     + if (val) {
256     prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
257     - else
258     + *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
259     + } else {
260     prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
261     + *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
262     + }
263     + *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
264     } else if (ret != -EINVAL) {
265     pr_err("L2C-310 OF prefetch-data property value is missing\n");
266     }
267    
268     ret = of_property_read_u32(np, "prefetch-instr", &val);
269     if (ret == 0) {
270     - if (val)
271     + if (val) {
272     prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
273     - else
274     + *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
275     + } else {
276     prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
277     + *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
278     + }
279     + *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
280     } else if (ret != -EINVAL) {
281     pr_err("L2C-310 OF prefetch-instr property value is missing\n");
282     }
283     diff --git a/arch/arm64/boot/dts/actions/s700.dtsi b/arch/arm64/boot/dts/actions/s700.dtsi
284     index 2006ad5424fa6..f8eb72bb41254 100644
285     --- a/arch/arm64/boot/dts/actions/s700.dtsi
286     +++ b/arch/arm64/boot/dts/actions/s700.dtsi
287     @@ -231,7 +231,7 @@
288    
289     pinctrl: pinctrl@e01b0000 {
290     compatible = "actions,s700-pinctrl";
291     - reg = <0x0 0xe01b0000 0x0 0x1000>;
292     + reg = <0x0 0xe01b0000 0x0 0x100>;
293     clocks = <&cmu CLK_GPIO>;
294     gpio-controller;
295     gpio-ranges = <&pinctrl 0 0 136>;
296     diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
297     index eaf8f83794fd9..8466d44ee0b15 100644
298     --- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
299     +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
300     @@ -155,8 +155,7 @@
301     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
302     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
303     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
304     - <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
305     - <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
306     + <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
307     interrupt-names = "gp",
308     "gpmmu",
309     "pp",
310     @@ -167,8 +166,7 @@
311     "pp2",
312     "ppmmu2",
313     "pp3",
314     - "ppmmu3",
315     - "pmu";
316     + "ppmmu3";
317     clocks = <&ccu CLK_BUS_GPU>, <&ccu CLK_GPU>;
318     clock-names = "bus", "core";
319     resets = <&ccu RST_BUS_GPU>;
320     diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
321     index 8647da7d6609b..f6694aad84db3 100644
322     --- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
323     +++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
324     @@ -43,13 +43,13 @@
325    
326     white {
327     label = "vim3:white:sys";
328     - gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>;
329     + gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>;
330     linux,default-trigger = "heartbeat";
331     };
332    
333     red {
334     label = "vim3:red";
335     - gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>;
336     + gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>;
337     };
338     };
339    
340     diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
341     index bc8540f879654..f1011bcd5ed5a 100644
342     --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
343     +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
344     @@ -516,6 +516,7 @@
345     gpc: gpc@303a0000 {
346     compatible = "fsl,imx8mq-gpc";
347     reg = <0x303a0000 0x10000>;
348     + interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
349     interrupt-parent = <&gic>;
350     interrupt-controller;
351     #interrupt-cells = <3>;
352     diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
353     index 340da154d4e37..d95273af9f1e4 100644
354     --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
355     +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
356     @@ -175,14 +175,14 @@
357     };
358    
359     thermal-zones {
360     - cpu0_1-thermal {
361     + cpu0-1-thermal {
362     polling-delay-passive = <250>;
363     polling-delay = <1000>;
364    
365     thermal-sensors = <&tsens 4>;
366    
367     trips {
368     - cpu0_1_alert0: trip-point@0 {
369     + cpu0_1_alert0: trip-point0 {
370     temperature = <75000>;
371     hysteresis = <2000>;
372     type = "passive";
373     @@ -205,7 +205,7 @@
374     };
375     };
376    
377     - cpu2_3-thermal {
378     + cpu2-3-thermal {
379     polling-delay-passive = <250>;
380     polling-delay = <1000>;
381    
382     @@ -934,7 +934,7 @@
383     reg-names = "mdp_phys";
384    
385     interrupt-parent = <&mdss>;
386     - interrupts = <0 0>;
387     + interrupts = <0>;
388    
389     clocks = <&gcc GCC_MDSS_AHB_CLK>,
390     <&gcc GCC_MDSS_AXI_CLK>,
391     @@ -966,7 +966,7 @@
392     reg-names = "dsi_ctrl";
393    
394     interrupt-parent = <&mdss>;
395     - interrupts = <4 0>;
396     + interrupts = <4>;
397    
398     assigned-clocks = <&gcc BYTE0_CLK_SRC>,
399     <&gcc PCLK0_CLK_SRC>;
400     diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
401     index 9dd2df1cbf47d..40df4d95a47ac 100644
402     --- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
403     +++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
404     @@ -113,7 +113,7 @@
405    
406     wcd_codec: codec@f000 {
407     compatible = "qcom,pm8916-wcd-analog-codec";
408     - reg = <0xf000 0x200>;
409     + reg = <0xf000>;
410     reg-names = "pmic-codec-core";
411     clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
412     clock-names = "mclk";
413     diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
414     index a1c2de90e4706..73ded80a79ba0 100644
415     --- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
416     +++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
417     @@ -1212,9 +1212,8 @@
418     reg = <0 0xe6ea0000 0 0x0064>;
419     interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
420     clocks = <&cpg CPG_MOD 210>;
421     - dmas = <&dmac1 0x43>, <&dmac1 0x42>,
422     - <&dmac2 0x43>, <&dmac2 0x42>;
423     - dma-names = "tx", "rx", "tx", "rx";
424     + dmas = <&dmac0 0x43>, <&dmac0 0x42>;
425     + dma-names = "tx", "rx";
426     power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
427     resets = <&cpg 210>;
428     #address-cells = <1>;
429     diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
430     index 455954c3d98ea..dabee157119f9 100644
431     --- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
432     +++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
433     @@ -1168,9 +1168,8 @@
434     reg = <0 0xe6ea0000 0 0x0064>;
435     interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
436     clocks = <&cpg CPG_MOD 210>;
437     - dmas = <&dmac1 0x43>, <&dmac1 0x42>,
438     - <&dmac2 0x43>, <&dmac2 0x42>;
439     - dma-names = "tx", "rx", "tx", "rx";
440     + dmas = <&dmac0 0x43>, <&dmac0 0x42>;
441     + dma-names = "tx", "rx";
442     power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
443     resets = <&cpg 210>;
444     #address-cells = <1>;
445     diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
446     index 9aa67340a4d8c..a2645262f8623 100644
447     --- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
448     +++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
449     @@ -419,7 +419,7 @@
450     };
451    
452     i2c0: i2c@ff020000 {
453     - compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
454     + compatible = "cdns,i2c-r1p14";
455     status = "disabled";
456     interrupt-parent = <&gic>;
457     interrupts = <0 17 4>;
458     @@ -429,7 +429,7 @@
459     };
460    
461     i2c1: i2c@ff030000 {
462     - compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
463     + compatible = "cdns,i2c-r1p14";
464     status = "disabled";
465     interrupt-parent = <&gic>;
466     interrupts = <0 18 4>;
467     diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
468     index 3f9ae3585ab98..80c9534148821 100644
469     --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
470     +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
471     @@ -13,20 +13,19 @@
472     */
473     #define MAX_EA_BITS_PER_CONTEXT 46
474    
475     -#define REGION_SHIFT (MAX_EA_BITS_PER_CONTEXT - 2)
476    
477     /*
478     - * Our page table limit us to 64TB. Hence for the kernel mapping,
479     - * each MAP area is limited to 16 TB.
480     - * The four map areas are: linear mapping, vmap, IO and vmemmap
481     + * Our page table limit us to 64TB. For 64TB physical memory, we only need 64GB
482     + * of vmemmap space. To better support sparse memory layout, we use 61TB
483     + * linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmememmap.
484     */
485     +#define REGION_SHIFT (40)
486     #define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT)
487    
488     /*
489     - * Define the address range of the kernel non-linear virtual area
490     - * 16TB
491     + * Define the address range of the kernel non-linear virtual area (61TB)
492     */
493     -#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000)
494     +#define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000)
495    
496     #ifndef __ASSEMBLY__
497     #define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
498     diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
499     index 28c3d936fdf32..bea7a2405ba5d 100644
500     --- a/arch/powerpc/include/asm/drmem.h
501     +++ b/arch/powerpc/include/asm/drmem.h
502     @@ -8,14 +8,13 @@
503     #ifndef _ASM_POWERPC_LMB_H
504     #define _ASM_POWERPC_LMB_H
505    
506     +#include <linux/sched.h>
507     +
508     struct drmem_lmb {
509     u64 base_addr;
510     u32 drc_index;
511     u32 aa_index;
512     u32 flags;
513     -#ifdef CONFIG_MEMORY_HOTPLUG
514     - int nid;
515     -#endif
516     };
517    
518     struct drmem_lmb_info {
519     @@ -26,8 +25,22 @@ struct drmem_lmb_info {
520    
521     extern struct drmem_lmb_info *drmem_info;
522    
523     +static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
524     + const struct drmem_lmb *start)
525     +{
526     + /*
527     + * DLPAR code paths can take several milliseconds per element
528     + * when interacting with firmware. Ensure that we don't
529     + * unfairly monopolize the CPU.
530     + */
531     + if (((++lmb - start) % 16) == 0)
532     + cond_resched();
533     +
534     + return lmb;
535     +}
536     +
537     #define for_each_drmem_lmb_in_range(lmb, start, end) \
538     - for ((lmb) = (start); (lmb) < (end); (lmb)++)
539     + for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
540    
541     #define for_each_drmem_lmb(lmb) \
542     for_each_drmem_lmb_in_range((lmb), \
543     @@ -103,22 +116,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
544     lmb->aa_index = 0xffffffff;
545     }
546    
547     -#ifdef CONFIG_MEMORY_HOTPLUG
548     -static inline void lmb_set_nid(struct drmem_lmb *lmb)
549     -{
550     - lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr);
551     -}
552     -static inline void lmb_clear_nid(struct drmem_lmb *lmb)
553     -{
554     - lmb->nid = -1;
555     -}
556     -#else
557     -static inline void lmb_set_nid(struct drmem_lmb *lmb)
558     -{
559     -}
560     -static inline void lmb_clear_nid(struct drmem_lmb *lmb)
561     -{
562     -}
563     -#endif
564     -
565     #endif /* _ASM_POWERPC_LMB_H */
566     diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
567     index b3cbb1136bce0..34d08ff21b988 100644
568     --- a/arch/powerpc/include/asm/reg.h
569     +++ b/arch/powerpc/include/asm/reg.h
570     @@ -796,7 +796,7 @@
571     #define THRM1_TIN (1 << 31)
572     #define THRM1_TIV (1 << 30)
573     #define THRM1_THRES(x) ((x&0x7f)<<23)
574     -#define THRM3_SITV(x) ((x&0x3fff)<<1)
575     +#define THRM3_SITV(x) ((x & 0x1fff) << 1)
576     #define THRM1_TID (1<<2)
577     #define THRM1_TIE (1<<1)
578     #define THRM1_V (1<<0)
579     diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
580     index 7f3a8b9023254..02a1c18cdba3d 100644
581     --- a/arch/powerpc/include/asm/tlb.h
582     +++ b/arch/powerpc/include/asm/tlb.h
583     @@ -67,19 +67,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
584     return false;
585     return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
586     }
587     -static inline void mm_reset_thread_local(struct mm_struct *mm)
588     -{
589     - WARN_ON(atomic_read(&mm->context.copros) > 0);
590     - /*
591     - * It's possible for mm_access to take a reference on mm_users to
592     - * access the remote mm from another thread, but it's not allowed
593     - * to set mm_cpumask, so mm_users may be > 1 here.
594     - */
595     - WARN_ON(current->mm != mm);
596     - atomic_set(&mm->context.active_cpus, 1);
597     - cpumask_clear(mm_cpumask(mm));
598     - cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
599     -}
600     #else /* CONFIG_PPC_BOOK3S_64 */
601     static inline int mm_is_thread_local(struct mm_struct *mm)
602     {
603     diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
604     index e2ab8a111b693..0b4694b8d2482 100644
605     --- a/arch/powerpc/kernel/tau_6xx.c
606     +++ b/arch/powerpc/kernel/tau_6xx.c
607     @@ -13,13 +13,14 @@
608     */
609    
610     #include <linux/errno.h>
611     -#include <linux/jiffies.h>
612     #include <linux/kernel.h>
613     #include <linux/param.h>
614     #include <linux/string.h>
615     #include <linux/mm.h>
616     #include <linux/interrupt.h>
617     #include <linux/init.h>
618     +#include <linux/delay.h>
619     +#include <linux/workqueue.h>
620    
621     #include <asm/io.h>
622     #include <asm/reg.h>
623     @@ -39,9 +40,7 @@ static struct tau_temp
624     unsigned char grew;
625     } tau[NR_CPUS];
626    
627     -struct timer_list tau_timer;
628     -
629     -#undef DEBUG
630     +static bool tau_int_enable;
631    
632     /* TODO: put these in a /proc interface, with some sanity checks, and maybe
633     * dynamic adjustment to minimize # of interrupts */
634     @@ -50,72 +49,49 @@ struct timer_list tau_timer;
635     #define step_size 2 /* step size when temp goes out of range */
636     #define window_expand 1 /* expand the window by this much */
637     /* configurable values for shrinking the window */
638     -#define shrink_timer 2*HZ /* period between shrinking the window */
639     +#define shrink_timer 2000 /* period between shrinking the window */
640     #define min_window 2 /* minimum window size, degrees C */
641    
642     static void set_thresholds(unsigned long cpu)
643     {
644     -#ifdef CONFIG_TAU_INT
645     - /*
646     - * setup THRM1,
647     - * threshold, valid bit, enable interrupts, interrupt when below threshold
648     - */
649     - mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
650     + u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0;
651    
652     - /* setup THRM2,
653     - * threshold, valid bit, enable interrupts, interrupt when above threshold
654     - */
655     - mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
656     -#else
657     - /* same thing but don't enable interrupts */
658     - mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
659     - mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
660     -#endif
661     + /* setup THRM1, threshold, valid bit, interrupt when below threshold */
662     + mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
663     +
664     + /* setup THRM2, threshold, valid bit, interrupt when above threshold */
665     + mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
666     }
667    
668     static void TAUupdate(int cpu)
669     {
670     - unsigned thrm;
671     -
672     -#ifdef DEBUG
673     - printk("TAUupdate ");
674     -#endif
675     + u32 thrm;
676     + u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V;
677    
678     /* if both thresholds are crossed, the step_sizes cancel out
679     * and the window winds up getting expanded twice. */
680     - if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
681     - if(thrm & THRM1_TIN){ /* crossed low threshold */
682     - if (tau[cpu].low >= step_size){
683     - tau[cpu].low -= step_size;
684     - tau[cpu].high -= (step_size - window_expand);
685     - }
686     - tau[cpu].grew = 1;
687     -#ifdef DEBUG
688     - printk("low threshold crossed ");
689     -#endif
690     + thrm = mfspr(SPRN_THRM1);
691     + if ((thrm & bits) == bits) {
692     + mtspr(SPRN_THRM1, 0);
693     +
694     + if (tau[cpu].low >= step_size) {
695     + tau[cpu].low -= step_size;
696     + tau[cpu].high -= (step_size - window_expand);
697     }
698     + tau[cpu].grew = 1;
699     + pr_debug("%s: low threshold crossed\n", __func__);
700     }
701     - if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
702     - if(thrm & THRM1_TIN){ /* crossed high threshold */
703     - if (tau[cpu].high <= 127-step_size){
704     - tau[cpu].low += (step_size - window_expand);
705     - tau[cpu].high += step_size;
706     - }
707     - tau[cpu].grew = 1;
708     -#ifdef DEBUG
709     - printk("high threshold crossed ");
710     -#endif
711     + thrm = mfspr(SPRN_THRM2);
712     + if ((thrm & bits) == bits) {
713     + mtspr(SPRN_THRM2, 0);
714     +
715     + if (tau[cpu].high <= 127 - step_size) {
716     + tau[cpu].low += (step_size - window_expand);
717     + tau[cpu].high += step_size;
718     }
719     + tau[cpu].grew = 1;
720     + pr_debug("%s: high threshold crossed\n", __func__);
721     }
722     -
723     -#ifdef DEBUG
724     - printk("grew = %d\n", tau[cpu].grew);
725     -#endif
726     -
727     -#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
728     - set_thresholds(cpu);
729     -#endif
730     -
731     }
732    
733     #ifdef CONFIG_TAU_INT
734     @@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs)
735     static void tau_timeout(void * info)
736     {
737     int cpu;
738     - unsigned long flags;
739     int size;
740     int shrink;
741    
742     - /* disabling interrupts *should* be okay */
743     - local_irq_save(flags);
744     cpu = smp_processor_id();
745    
746     -#ifndef CONFIG_TAU_INT
747     - TAUupdate(cpu);
748     -#endif
749     + if (!tau_int_enable)
750     + TAUupdate(cpu);
751     +
752     + /* Stop thermal sensor comparisons and interrupts */
753     + mtspr(SPRN_THRM3, 0);
754    
755     size = tau[cpu].high - tau[cpu].low;
756     if (size > min_window && ! tau[cpu].grew) {
757     @@ -173,32 +148,26 @@ static void tau_timeout(void * info)
758    
759     set_thresholds(cpu);
760    
761     - /*
762     - * Do the enable every time, since otherwise a bunch of (relatively)
763     - * complex sleep code needs to be added. One mtspr every time
764     - * tau_timeout is called is probably not a big deal.
765     - *
766     - * Enable thermal sensor and set up sample interval timer
767     - * need 20 us to do the compare.. until a nice 'cpu_speed' function
768     - * call is implemented, just assume a 500 mhz clock. It doesn't really
769     - * matter if we take too long for a compare since it's all interrupt
770     - * driven anyway.
771     - *
772     - * use a extra long time.. (60 us @ 500 mhz)
773     + /* Restart thermal sensor comparisons and interrupts.
774     + * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
775     + * recommends that "the maximum value be set in THRM3 under all
776     + * conditions."
777     */
778     - mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
779     -
780     - local_irq_restore(flags);
781     + mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
782     }
783    
784     -static void tau_timeout_smp(struct timer_list *unused)
785     -{
786     +static struct workqueue_struct *tau_workq;
787    
788     - /* schedule ourselves to be run again */
789     - mod_timer(&tau_timer, jiffies + shrink_timer) ;
790     +static void tau_work_func(struct work_struct *work)
791     +{
792     + msleep(shrink_timer);
793     on_each_cpu(tau_timeout, NULL, 0);
794     + /* schedule ourselves to be run again */
795     + queue_work(tau_workq, work);
796     }
797    
798     +DECLARE_WORK(tau_work, tau_work_func);
799     +
800     /*
801     * setup the TAU
802     *
803     @@ -231,21 +200,19 @@ static int __init TAU_init(void)
804     return 1;
805     }
806    
807     + tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
808     + !strcmp(cur_cpu_spec->platform, "ppc750");
809    
810     - /* first, set up the window shrinking timer */
811     - timer_setup(&tau_timer, tau_timeout_smp, 0);
812     - tau_timer.expires = jiffies + shrink_timer;
813     - add_timer(&tau_timer);
814     + tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0);
815     + if (!tau_workq)
816     + return -ENOMEM;
817    
818     on_each_cpu(TAU_init_smp, NULL, 0);
819    
820     - printk("Thermal assist unit ");
821     -#ifdef CONFIG_TAU_INT
822     - printk("using interrupts, ");
823     -#else
824     - printk("using timers, ");
825     -#endif
826     - printk("shrink_timer: %d jiffies\n", shrink_timer);
827     + queue_work(tau_workq, &tau_work);
828     +
829     + pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
830     + tau_int_enable ? "interrupts" : "workqueue", shrink_timer);
831     tau_initialized = 1;
832    
833     return 0;
834     diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
835     index 67af871190c6d..b0f240afffa22 100644
836     --- a/arch/powerpc/mm/book3s64/radix_tlb.c
837     +++ b/arch/powerpc/mm/book3s64/radix_tlb.c
838     @@ -639,19 +639,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
839     struct mm_struct *mm = arg;
840     unsigned long pid = mm->context.id;
841    
842     + /*
843     + * A kthread could have done a mmget_not_zero() after the flushing CPU
844     + * checked mm_is_singlethreaded, and be in the process of
845     + * kthread_use_mm when interrupted here. In that case, current->mm will
846     + * be set to mm, because kthread_use_mm() setting ->mm and switching to
847     + * the mm is done with interrupts off.
848     + */
849     if (current->mm == mm)
850     - return; /* Local CPU */
851     + goto out_flush;
852    
853     if (current->active_mm == mm) {
854     - /*
855     - * Must be a kernel thread because sender is single-threaded.
856     - */
857     - BUG_ON(current->mm);
858     + WARN_ON_ONCE(current->mm != NULL);
859     + /* Is a kernel thread and is using mm as the lazy tlb */
860     mmgrab(&init_mm);
861     - switch_mm(mm, &init_mm, current);
862     current->active_mm = &init_mm;
863     + switch_mm_irqs_off(mm, &init_mm, current);
864     mmdrop(mm);
865     }
866     +
867     + atomic_dec(&mm->context.active_cpus);
868     + cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
869     +
870     +out_flush:
871     _tlbiel_pid(pid, RIC_FLUSH_ALL);
872     }
873    
874     @@ -666,7 +676,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
875     */
876     smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
877     (void *)mm, 1);
878     - mm_reset_thread_local(mm);
879     }
880    
881     void radix__flush_tlb_mm(struct mm_struct *mm)
882     diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
883     index 59327cefbc6a6..873fcfc7b8756 100644
884     --- a/arch/powerpc/mm/drmem.c
885     +++ b/arch/powerpc/mm/drmem.c
886     @@ -362,10 +362,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
887     if (!drmem_info->lmbs)
888     return;
889    
890     - for_each_drmem_lmb(lmb) {
891     + for_each_drmem_lmb(lmb)
892     read_drconf_v1_cell(lmb, &prop);
893     - lmb_set_nid(lmb);
894     - }
895     }
896    
897     static void __init init_drmem_v2_lmbs(const __be32 *prop)
898     @@ -410,8 +408,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
899    
900     lmb->aa_index = dr_cell.aa_index;
901     lmb->flags = dr_cell.flags;
902     -
903     - lmb_set_nid(lmb);
904     }
905     }
906     }
907     diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
908     index e608f9db12ddc..8965b4463d433 100644
909     --- a/arch/powerpc/perf/hv-gpci-requests.h
910     +++ b/arch/powerpc/perf/hv-gpci-requests.h
911     @@ -95,7 +95,7 @@ REQUEST(__field(0, 8, partition_id)
912    
913     #define REQUEST_NAME system_performance_capabilities
914     #define REQUEST_NUM 0x40
915     -#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
916     +#define REQUEST_IDX_KIND "starting_index=0xffffffff"
917     #include I(REQUEST_BEGIN)
918     REQUEST(__field(0, 1, perf_collect_privileged)
919     __field(0x1, 1, capability_mask)
920     @@ -223,7 +223,7 @@ REQUEST(__field(0, 2, partition_id)
921    
922     #define REQUEST_NAME system_hypervisor_times
923     #define REQUEST_NUM 0xF0
924     -#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
925     +#define REQUEST_IDX_KIND "starting_index=0xffffffff"
926     #include I(REQUEST_BEGIN)
927     REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
928     __count(0x8, 8, time_spent_processing_virtual_processor_timers)
929     @@ -234,7 +234,7 @@ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
930    
931     #define REQUEST_NAME system_tlbie_count_and_time
932     #define REQUEST_NUM 0xF4
933     -#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
934     +#define REQUEST_IDX_KIND "starting_index=0xffffffff"
935     #include I(REQUEST_BEGIN)
936     REQUEST(__count(0, 8, tlbie_instructions_issued)
937     /*
938     diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
939     index 4c86da5eb28ab..0b5c8f4fbdbfd 100644
940     --- a/arch/powerpc/perf/isa207-common.c
941     +++ b/arch/powerpc/perf/isa207-common.c
942     @@ -269,6 +269,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
943    
944     mask |= CNST_PMC_MASK(pmc);
945     value |= CNST_PMC_VAL(pmc);
946     +
947     + /*
948     + * PMC5 and PMC6 are used to count cycles and instructions and
949     + * they do not support most of the constraint bits. Add a check
950     + * to exclude PMC5/6 from most of the constraints except for
951     + * EBB/BHRB.
952     + */
953     + if (pmc >= 5)
954     + goto ebb_bhrb;
955     }
956    
957     if (pmc <= 4) {
958     @@ -335,6 +344,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
959     }
960     }
961    
962     +ebb_bhrb:
963     if (!pmc && ebb)
964     /* EBB events must specify the PMC */
965     return -1;
966     diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
967     index d82e3664ffdf8..18792a5b003a0 100644
968     --- a/arch/powerpc/platforms/Kconfig
969     +++ b/arch/powerpc/platforms/Kconfig
970     @@ -219,12 +219,11 @@ config TAU
971     temperature within 2-4 degrees Celsius. This option shows the current
972     on-die temperature in /proc/cpuinfo if the cpu supports it.
973    
974     - Unfortunately, on some chip revisions, this sensor is very inaccurate
975     - and in many cases, does not work at all, so don't assume the cpu
976     - temp is actually what /proc/cpuinfo says it is.
977     + Unfortunately, this sensor is very inaccurate when uncalibrated, so
978     + don't assume the cpu temp is actually what /proc/cpuinfo says it is.
979    
980     config TAU_INT
981     - bool "Interrupt driven TAU driver (DANGEROUS)"
982     + bool "Interrupt driven TAU driver (EXPERIMENTAL)"
983     depends on TAU
984     ---help---
985     The TAU supports an interrupt driven mode which causes an interrupt
986     @@ -232,12 +231,7 @@ config TAU_INT
987     to get notified the temp has exceeded a range. With this option off,
988     a timer is used to re-check the temperature periodically.
989    
990     - However, on some cpus it appears that the TAU interrupt hardware
991     - is buggy and can cause a situation which would lead unexplained hard
992     - lockups.
993     -
994     - Unless you are extending the TAU driver, or enjoy kernel/hardware
995     - debugging, leave this option off.
996     + If in doubt, say N here.
997    
998     config TAU_AVERAGE
999     bool "Average high and low temp"
1000     diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
1001     index 543c816fa99ef..0e6693bacb7e7 100644
1002     --- a/arch/powerpc/platforms/powernv/opal-dump.c
1003     +++ b/arch/powerpc/platforms/powernv/opal-dump.c
1004     @@ -318,15 +318,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
1005     return count;
1006     }
1007    
1008     -static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
1009     - uint32_t type)
1010     +static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
1011     {
1012     struct dump_obj *dump;
1013     int rc;
1014    
1015     dump = kzalloc(sizeof(*dump), GFP_KERNEL);
1016     if (!dump)
1017     - return NULL;
1018     + return;
1019    
1020     dump->kobj.kset = dump_kset;
1021    
1022     @@ -346,21 +345,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
1023     rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
1024     if (rc) {
1025     kobject_put(&dump->kobj);
1026     - return NULL;
1027     + return;
1028     }
1029    
1030     + /*
1031     + * As soon as the sysfs file for this dump is created/activated there is
1032     + * a chance the opal_errd daemon (or any userspace) might read and
1033     + * acknowledge the dump before kobject_uevent() is called. If that
1034     + * happens then there is a potential race between
1035     + * dump_ack_store->kobject_put() and kobject_uevent() which leads to a
1036     + * use-after-free of a kernfs object resulting in a kernel crash.
1037     + *
1038     + * To avoid that, we need to take a reference on behalf of the bin file,
1039     + * so that our reference remains valid while we call kobject_uevent().
1040     + * We then drop our reference before exiting the function, leaving the
1041     + * bin file to drop the last reference (if it hasn't already).
1042     + */
1043     +
1044     + /* Take a reference for the bin file */
1045     + kobject_get(&dump->kobj);
1046     rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
1047     - if (rc) {
1048     + if (rc == 0) {
1049     + kobject_uevent(&dump->kobj, KOBJ_ADD);
1050     +
1051     + pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
1052     + __func__, dump->id, dump->size);
1053     + } else {
1054     + /* Drop reference count taken for bin file */
1055     kobject_put(&dump->kobj);
1056     - return NULL;
1057     }
1058    
1059     - pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
1060     - __func__, dump->id, dump->size);
1061     -
1062     - kobject_uevent(&dump->kobj, KOBJ_ADD);
1063     -
1064     - return dump;
1065     + /* Drop our reference */
1066     + kobject_put(&dump->kobj);
1067     + return;
1068     }
1069    
1070     static irqreturn_t process_dump(int irq, void *data)
1071     diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
1072     index e7d23a933a0d3..66b32f46702de 100644
1073     --- a/arch/powerpc/platforms/pseries/hotplug-memory.c
1074     +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
1075     @@ -376,25 +376,32 @@ static int dlpar_add_lmb(struct drmem_lmb *);
1076    
1077     static int dlpar_remove_lmb(struct drmem_lmb *lmb)
1078     {
1079     + struct memory_block *mem_block;
1080     unsigned long block_sz;
1081     int rc;
1082    
1083     if (!lmb_is_removable(lmb))
1084     return -EINVAL;
1085    
1086     + mem_block = lmb_to_memblock(lmb);
1087     + if (mem_block == NULL)
1088     + return -EINVAL;
1089     +
1090     rc = dlpar_offline_lmb(lmb);
1091     - if (rc)
1092     + if (rc) {
1093     + put_device(&mem_block->dev);
1094     return rc;
1095     + }
1096    
1097     block_sz = pseries_memory_block_size();
1098    
1099     - __remove_memory(lmb->nid, lmb->base_addr, block_sz);
1100     + __remove_memory(mem_block->nid, lmb->base_addr, block_sz);
1101     + put_device(&mem_block->dev);
1102    
1103     /* Update memory regions for memory remove */
1104     memblock_remove(lmb->base_addr, block_sz);
1105    
1106     invalidate_lmb_associativity_index(lmb);
1107     - lmb_clear_nid(lmb);
1108     lmb->flags &= ~DRCONF_MEM_ASSIGNED;
1109    
1110     return 0;
1111     @@ -651,7 +658,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
1112     static int dlpar_add_lmb(struct drmem_lmb *lmb)
1113     {
1114     unsigned long block_sz;
1115     - int rc;
1116     + int nid, rc;
1117    
1118     if (lmb->flags & DRCONF_MEM_ASSIGNED)
1119     return -EINVAL;
1120     @@ -662,11 +669,13 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
1121     return rc;
1122     }
1123    
1124     - lmb_set_nid(lmb);
1125     block_sz = memory_block_size_bytes();
1126    
1127     + /* Find the node id for this address. */
1128     + nid = memory_add_physaddr_to_nid(lmb->base_addr);
1129     +
1130     /* Add the memory */
1131     - rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
1132     + rc = __add_memory(nid, lmb->base_addr, block_sz);
1133     if (rc) {
1134     invalidate_lmb_associativity_index(lmb);
1135     return rc;
1136     @@ -674,9 +683,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
1137    
1138     rc = dlpar_online_lmb(lmb);
1139     if (rc) {
1140     - __remove_memory(lmb->nid, lmb->base_addr, block_sz);
1141     + __remove_memory(nid, lmb->base_addr, block_sz);
1142     invalidate_lmb_associativity_index(lmb);
1143     - lmb_clear_nid(lmb);
1144     } else {
1145     lmb->flags |= DRCONF_MEM_ASSIGNED;
1146     }
1147     diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
1148     index b3c4848869e52..b658fa627a34b 100644
1149     --- a/arch/powerpc/platforms/pseries/ras.c
1150     +++ b/arch/powerpc/platforms/pseries/ras.c
1151     @@ -494,18 +494,55 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
1152     return 0; /* need to perform reset */
1153     }
1154    
1155     +static int mce_handle_err_realmode(int disposition, u8 error_type)
1156     +{
1157     +#ifdef CONFIG_PPC_BOOK3S_64
1158     + if (disposition == RTAS_DISP_NOT_RECOVERED) {
1159     + switch (error_type) {
1160     + case MC_ERROR_TYPE_SLB:
1161     + case MC_ERROR_TYPE_ERAT:
1162     + /*
1163     + * Store the old slb content in paca before flushing.
1164     + * Print this when we go to virtual mode.
1165     + * There are chances that we may hit MCE again if there
1166     + * is a parity error on the SLB entry we trying to read
1167     + * for saving. Hence limit the slb saving to single
1168     + * level of recursion.
1169     + */
1170     + if (local_paca->in_mce == 1)
1171     + slb_save_contents(local_paca->mce_faulty_slbs);
1172     + flush_and_reload_slb();
1173     + disposition = RTAS_DISP_FULLY_RECOVERED;
1174     + break;
1175     + default:
1176     + break;
1177     + }
1178     + } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
1179     + /* Platform corrected itself but could be degraded */
1180     + pr_err("MCE: limited recovery, system may be degraded\n");
1181     + disposition = RTAS_DISP_FULLY_RECOVERED;
1182     + }
1183     +#endif
1184     + return disposition;
1185     +}
1186    
1187     -static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
1188     +static int mce_handle_err_virtmode(struct pt_regs *regs,
1189     + struct rtas_error_log *errp,
1190     + struct pseries_mc_errorlog *mce_log,
1191     + int disposition)
1192     {
1193     struct mce_error_info mce_err = { 0 };
1194     - unsigned long eaddr = 0, paddr = 0;
1195     - struct pseries_errorlog *pseries_log;
1196     - struct pseries_mc_errorlog *mce_log;
1197     - int disposition = rtas_error_disposition(errp);
1198     int initiator = rtas_error_initiator(errp);
1199     int severity = rtas_error_severity(errp);
1200     + unsigned long eaddr = 0, paddr = 0;
1201     u8 error_type, err_sub_type;
1202    
1203     + if (!mce_log)
1204     + goto out;
1205     +
1206     + error_type = mce_log->error_type;
1207     + err_sub_type = rtas_mc_error_sub_type(mce_log);
1208     +
1209     if (initiator == RTAS_INITIATOR_UNKNOWN)
1210     mce_err.initiator = MCE_INITIATOR_UNKNOWN;
1211     else if (initiator == RTAS_INITIATOR_CPU)
1212     @@ -544,18 +581,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
1213     mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
1214     mce_err.error_class = MCE_ECLASS_UNKNOWN;
1215    
1216     - if (!rtas_error_extended(errp))
1217     - goto out;
1218     -
1219     - pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
1220     - if (pseries_log == NULL)
1221     - goto out;
1222     -
1223     - mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
1224     - error_type = mce_log->error_type;
1225     - err_sub_type = rtas_mc_error_sub_type(mce_log);
1226     -
1227     - switch (mce_log->error_type) {
1228     + switch (error_type) {
1229     case MC_ERROR_TYPE_UE:
1230     mce_err.error_type = MCE_ERROR_TYPE_UE;
1231     switch (err_sub_type) {
1232     @@ -652,37 +678,31 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
1233     mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
1234     break;
1235     }
1236     +out:
1237     + save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
1238     + &mce_err, regs->nip, eaddr, paddr);
1239     + return disposition;
1240     +}
1241    
1242     -#ifdef CONFIG_PPC_BOOK3S_64
1243     - if (disposition == RTAS_DISP_NOT_RECOVERED) {
1244     - switch (error_type) {
1245     - case MC_ERROR_TYPE_SLB:
1246     - case MC_ERROR_TYPE_ERAT:
1247     - /*
1248     - * Store the old slb content in paca before flushing.
1249     - * Print this when we go to virtual mode.
1250     - * There are chances that we may hit MCE again if there
1251     - * is a parity error on the SLB entry we trying to read
1252     - * for saving. Hence limit the slb saving to single
1253     - * level of recursion.
1254     - */
1255     - if (local_paca->in_mce == 1)
1256     - slb_save_contents(local_paca->mce_faulty_slbs);
1257     - flush_and_reload_slb();
1258     - disposition = RTAS_DISP_FULLY_RECOVERED;
1259     - break;
1260     - default:
1261     - break;
1262     - }
1263     - } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
1264     - /* Platform corrected itself but could be degraded */
1265     - printk(KERN_ERR "MCE: limited recovery, system may "
1266     - "be degraded\n");
1267     - disposition = RTAS_DISP_FULLY_RECOVERED;
1268     - }
1269     -#endif
1270     +static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
1271     +{
1272     + struct pseries_errorlog *pseries_log;
1273     + struct pseries_mc_errorlog *mce_log = NULL;
1274     + int disposition = rtas_error_disposition(errp);
1275     + u8 error_type;
1276     +
1277     + if (!rtas_error_extended(errp))
1278     + goto out;
1279     +
1280     + pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
1281     + if (!pseries_log)
1282     + goto out;
1283     +
1284     + mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
1285     + error_type = mce_log->error_type;
1286     +
1287     + disposition = mce_handle_err_realmode(disposition, error_type);
1288    
1289     -out:
1290     /*
1291     * Enable translation as we will be accessing per-cpu variables
1292     * in save_mce_event() which may fall outside RMO region, also
1293     @@ -693,10 +713,10 @@ out:
1294     * Note: All the realmode handling like flushing SLB entries for
1295     * SLB multihit is done by now.
1296     */
1297     +out:
1298     mtmsr(mfmsr() | MSR_IR | MSR_DR);
1299     - save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
1300     - &mce_err, regs->nip, eaddr, paddr);
1301     -
1302     + disposition = mce_handle_err_virtmode(regs, errp, mce_log,
1303     + disposition);
1304     return disposition;
1305     }
1306    
1307     diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
1308     index bbb97169bf63e..6268545947b83 100644
1309     --- a/arch/powerpc/platforms/pseries/rng.c
1310     +++ b/arch/powerpc/platforms/pseries/rng.c
1311     @@ -36,6 +36,7 @@ static __init int rng_init(void)
1312    
1313     ppc_md.get_random_seed = pseries_get_random_long;
1314    
1315     + of_node_put(dn);
1316     return 0;
1317     }
1318     machine_subsys_initcall(pseries, rng_init);
1319     diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
1320     index ad8117148ea3b..21b9d1bf39ff6 100644
1321     --- a/arch/powerpc/sysdev/xics/icp-hv.c
1322     +++ b/arch/powerpc/sysdev/xics/icp-hv.c
1323     @@ -174,6 +174,7 @@ int icp_hv_init(void)
1324    
1325     icp_ops = &icp_hv_ops;
1326    
1327     + of_node_put(np);
1328     return 0;
1329     }
1330    
1331     diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
1332     index c8862696a47b9..7d0394f4ebf97 100644
1333     --- a/arch/x86/boot/compressed/pgtable_64.c
1334     +++ b/arch/x86/boot/compressed/pgtable_64.c
1335     @@ -5,15 +5,6 @@
1336     #include "pgtable.h"
1337     #include "../string.h"
1338    
1339     -/*
1340     - * __force_order is used by special_insns.h asm code to force instruction
1341     - * serialization.
1342     - *
1343     - * It is not referenced from the code, but GCC < 5 with -fPIE would fail
1344     - * due to an undefined symbol. Define it to make these ancient GCCs work.
1345     - */
1346     -unsigned long __force_order;
1347     -
1348     #define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
1349     #define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */
1350    
1351     diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
1352     index fb616203ce427..be50ef8572cce 100644
1353     --- a/arch/x86/events/amd/iommu.c
1354     +++ b/arch/x86/events/amd/iommu.c
1355     @@ -379,7 +379,7 @@ static __init int _init_events_attrs(void)
1356     while (amd_iommu_v2_event_descs[i].attr.attr.name)
1357     i++;
1358    
1359     - attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
1360     + attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
1361     if (!attrs)
1362     return -ENOMEM;
1363    
1364     diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
1365     index e5ad97a823426..1aaba2c8a9ba6 100644
1366     --- a/arch/x86/events/intel/ds.c
1367     +++ b/arch/x86/events/intel/ds.c
1368     @@ -669,9 +669,7 @@ unlock:
1369    
1370     static inline void intel_pmu_drain_pebs_buffer(void)
1371     {
1372     - struct pt_regs regs;
1373     -
1374     - x86_pmu.drain_pebs(&regs);
1375     + x86_pmu.drain_pebs(NULL);
1376     }
1377    
1378     /*
1379     @@ -1736,6 +1734,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
1380     struct x86_perf_regs perf_regs;
1381     struct pt_regs *regs = &perf_regs.regs;
1382     void *at = get_next_pebs_record_by_bit(base, top, bit);
1383     + struct pt_regs dummy_iregs;
1384    
1385     if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
1386     /*
1387     @@ -1748,6 +1747,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
1388     } else if (!intel_pmu_save_and_restart(event))
1389     return;
1390    
1391     + if (!iregs)
1392     + iregs = &dummy_iregs;
1393     +
1394     while (count > 1) {
1395     setup_sample(event, iregs, at, &data, regs);
1396     perf_event_output(event, &data, regs);
1397     @@ -1757,16 +1759,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
1398     }
1399    
1400     setup_sample(event, iregs, at, &data, regs);
1401     -
1402     - /*
1403     - * All but the last records are processed.
1404     - * The last one is left to be able to call the overflow handler.
1405     - */
1406     - if (perf_event_overflow(event, &data, regs)) {
1407     - x86_pmu_stop(event, 0);
1408     - return;
1409     + if (iregs == &dummy_iregs) {
1410     + /*
1411     + * The PEBS records may be drained in the non-overflow context,
1412     + * e.g., large PEBS + context switch. Perf should treat the
1413     + * last record the same as other PEBS records, and doesn't
1414     + * invoke the generic overflow handler.
1415     + */
1416     + perf_event_output(event, &data, regs);
1417     + } else {
1418     + /*
1419     + * All but the last records are processed.
1420     + * The last one is left to be able to call the overflow handler.
1421     + */
1422     + if (perf_event_overflow(event, &data, regs))
1423     + x86_pmu_stop(event, 0);
1424     }
1425     -
1426     }
1427    
1428     static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
1429     diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
1430     index c37cb12d0ef68..aec6e63c6a04a 100644
1431     --- a/arch/x86/events/intel/uncore_snb.c
1432     +++ b/arch/x86/events/intel/uncore_snb.c
1433     @@ -110,6 +110,10 @@
1434     #define ICL_UNC_CBO_0_PER_CTR0 0x702
1435     #define ICL_UNC_CBO_MSR_OFFSET 0x8
1436    
1437     +/* ICL ARB register */
1438     +#define ICL_UNC_ARB_PER_CTR 0x3b1
1439     +#define ICL_UNC_ARB_PERFEVTSEL 0x3b3
1440     +
1441     DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
1442     DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
1443     DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
1444     @@ -297,15 +301,21 @@ void skl_uncore_cpu_init(void)
1445     snb_uncore_arb.ops = &skl_uncore_msr_ops;
1446     }
1447    
1448     +static struct intel_uncore_ops icl_uncore_msr_ops = {
1449     + .disable_event = snb_uncore_msr_disable_event,
1450     + .enable_event = snb_uncore_msr_enable_event,
1451     + .read_counter = uncore_msr_read_counter,
1452     +};
1453     +
1454     static struct intel_uncore_type icl_uncore_cbox = {
1455     .name = "cbox",
1456     - .num_counters = 4,
1457     + .num_counters = 2,
1458     .perf_ctr_bits = 44,
1459     .perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
1460     .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
1461     .event_mask = SNB_UNC_RAW_EVENT_MASK,
1462     .msr_offset = ICL_UNC_CBO_MSR_OFFSET,
1463     - .ops = &skl_uncore_msr_ops,
1464     + .ops = &icl_uncore_msr_ops,
1465     .format_group = &snb_uncore_format_group,
1466     };
1467    
1468     @@ -334,13 +344,25 @@ static struct intel_uncore_type icl_uncore_clockbox = {
1469     .single_fixed = 1,
1470     .event_mask = SNB_UNC_CTL_EV_SEL_MASK,
1471     .format_group = &icl_uncore_clock_format_group,
1472     - .ops = &skl_uncore_msr_ops,
1473     + .ops = &icl_uncore_msr_ops,
1474     .event_descs = icl_uncore_events,
1475     };
1476    
1477     +static struct intel_uncore_type icl_uncore_arb = {
1478     + .name = "arb",
1479     + .num_counters = 1,
1480     + .num_boxes = 1,
1481     + .perf_ctr_bits = 44,
1482     + .perf_ctr = ICL_UNC_ARB_PER_CTR,
1483     + .event_ctl = ICL_UNC_ARB_PERFEVTSEL,
1484     + .event_mask = SNB_UNC_RAW_EVENT_MASK,
1485     + .ops = &icl_uncore_msr_ops,
1486     + .format_group = &snb_uncore_format_group,
1487     +};
1488     +
1489     static struct intel_uncore_type *icl_msr_uncores[] = {
1490     &icl_uncore_cbox,
1491     - &snb_uncore_arb,
1492     + &icl_uncore_arb,
1493     &icl_uncore_clockbox,
1494     NULL,
1495     };
1496     @@ -358,7 +380,6 @@ void icl_uncore_cpu_init(void)
1497     {
1498     uncore_msr_uncores = icl_msr_uncores;
1499     icl_uncore_cbox.num_boxes = icl_get_cbox_num();
1500     - snb_uncore_arb.ops = &skl_uncore_msr_ops;
1501     }
1502    
1503     enum {
1504     diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
1505     index 6d37b8fcfc778..2e0cdc64cb50d 100644
1506     --- a/arch/x86/include/asm/special_insns.h
1507     +++ b/arch/x86/include/asm/special_insns.h
1508     @@ -10,45 +10,47 @@
1509     #include <linux/jump_label.h>
1510    
1511     /*
1512     - * Volatile isn't enough to prevent the compiler from reordering the
1513     - * read/write functions for the control registers and messing everything up.
1514     - * A memory clobber would solve the problem, but would prevent reordering of
1515     - * all loads stores around it, which can hurt performance. Solution is to
1516     - * use a variable and mimic reads and writes to it to enforce serialization
1517     + * The compiler should not reorder volatile asm statements with respect to each
1518     + * other: they should execute in program order. However GCC 4.9.x and 5.x have
1519     + * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
1520     + * volatile asm. The write functions are not affected since they have memory
1521     + * clobbers preventing reordering. To prevent reads from being reordered with
1522     + * respect to writes, use a dummy memory operand.
1523     */
1524     -extern unsigned long __force_order;
1525     +
1526     +#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
1527    
1528     void native_write_cr0(unsigned long val);
1529    
1530     static inline unsigned long native_read_cr0(void)
1531     {
1532     unsigned long val;
1533     - asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
1534     + asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
1535     return val;
1536     }
1537    
1538     static inline unsigned long native_read_cr2(void)
1539     {
1540     unsigned long val;
1541     - asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
1542     + asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
1543     return val;
1544     }
1545    
1546     static inline void native_write_cr2(unsigned long val)
1547     {
1548     - asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
1549     + asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
1550     }
1551    
1552     static inline unsigned long __native_read_cr3(void)
1553     {
1554     unsigned long val;
1555     - asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
1556     + asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
1557     return val;
1558     }
1559    
1560     static inline void native_write_cr3(unsigned long val)
1561     {
1562     - asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
1563     + asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
1564     }
1565    
1566     static inline unsigned long native_read_cr4(void)
1567     @@ -63,10 +65,10 @@ static inline unsigned long native_read_cr4(void)
1568     asm volatile("1: mov %%cr4, %0\n"
1569     "2:\n"
1570     _ASM_EXTABLE(1b, 2b)
1571     - : "=r" (val), "=m" (__force_order) : "0" (0));
1572     + : "=r" (val) : "0" (0), __FORCE_ORDER);
1573     #else
1574     /* CR4 always exists on x86_64. */
1575     - asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
1576     + asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
1577     #endif
1578     return val;
1579     }
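
[Editor's note] The special_insns.h hunk replaces the __force_order dummy variable with a fake "m" input operand, so the affected GCC versions cannot reorder CR reads past writes; the write helpers instead rely on a full "memory" clobber. A minimal stand-alone sketch of the idea, assuming x86-64 GCC/Clang (it compiles as-is, though executing the CR access needs ring 0); the sketch_* names and the macro name are stand-ins:

/* dummy memory input: never dereferenced, only serializes the asm */
#define FORCE_ORDER_SKETCH "m"(*(unsigned int *)0x1000UL)

static inline unsigned long sketch_read_cr0(void)
{
        unsigned long val;

        asm volatile("mov %%cr0,%0" : "=r" (val) : FORCE_ORDER_SKETCH);
        return val;
}

static inline void sketch_write_cr0(unsigned long val)
{
        /* writes keep a full "memory" clobber, as in the patch */
        asm volatile("mov %0,%%cr0" : : "r" (val) : "memory");
}
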
1580     diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
1581     index 9b3f25e146087..8a85c2e144a6f 100644
1582     --- a/arch/x86/kernel/cpu/common.c
1583     +++ b/arch/x86/kernel/cpu/common.c
1584     @@ -377,7 +377,7 @@ void native_write_cr0(unsigned long val)
1585     unsigned long bits_missing = 0;
1586    
1587     set_register:
1588     - asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
1589     + asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
1590    
1591     if (static_branch_likely(&cr_pinning)) {
1592     if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
1593     @@ -396,7 +396,7 @@ void native_write_cr4(unsigned long val)
1594     unsigned long bits_changed = 0;
1595    
1596     set_register:
1597     - asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
1598     + asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
1599    
1600     if (static_branch_likely(&cr_pinning)) {
1601     if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
1602     diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
1603     index fd76e3733dd3d..92331de16d70e 100644
1604     --- a/arch/x86/kernel/cpu/mce/core.c
1605     +++ b/arch/x86/kernel/cpu/mce/core.c
1606     @@ -388,10 +388,28 @@ static int msr_to_offset(u32 msr)
1607     return -1;
1608     }
1609    
1610     +__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
1611     + struct pt_regs *regs, int trapnr,
1612     + unsigned long error_code,
1613     + unsigned long fault_addr)
1614     +{
1615     + pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
1616     + (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
1617     +
1618     + show_stack_regs(regs);
1619     +
1620     + panic("MCA architectural violation!\n");
1621     +
1622     + while (true)
1623     + cpu_relax();
1624     +
1625     + return true;
1626     +}
1627     +
1628     /* MSR access wrappers used for error injection */
1629     static u64 mce_rdmsrl(u32 msr)
1630     {
1631     - u64 v;
1632     + DECLARE_ARGS(val, low, high);
1633    
1634     if (__this_cpu_read(injectm.finished)) {
1635     int offset = msr_to_offset(msr);
1636     @@ -401,21 +419,43 @@ static u64 mce_rdmsrl(u32 msr)
1637     return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
1638     }
1639    
1640     - if (rdmsrl_safe(msr, &v)) {
1641     - WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
1642     - /*
1643     - * Return zero in case the access faulted. This should
1644     - * not happen normally but can happen if the CPU does
1645     - * something weird, or if the code is buggy.
1646     - */
1647     - v = 0;
1648     - }
1649     + /*
1650     + * RDMSR on MCA MSRs should not fault. If they do, this is very much an
1651     + * architectural violation and needs to be reported to hw vendor. Panic
1652     + * the box to not allow any further progress.
1653     + */
1654     + asm volatile("1: rdmsr\n"
1655     + "2:\n"
1656     + _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
1657     + : EAX_EDX_RET(val, low, high) : "c" (msr));
1658    
1659     - return v;
1660     +
1661     + return EAX_EDX_VAL(val, low, high);
1662     +}
1663     +
1664     +__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
1665     + struct pt_regs *regs, int trapnr,
1666     + unsigned long error_code,
1667     + unsigned long fault_addr)
1668     +{
1669     + pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
1670     + (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
1671     + regs->ip, (void *)regs->ip);
1672     +
1673     + show_stack_regs(regs);
1674     +
1675     + panic("MCA architectural violation!\n");
1676     +
1677     + while (true)
1678     + cpu_relax();
1679     +
1680     + return true;
1681     }
1682    
1683     static void mce_wrmsrl(u32 msr, u64 v)
1684     {
1685     + u32 low, high;
1686     +
1687     if (__this_cpu_read(injectm.finished)) {
1688     int offset = msr_to_offset(msr);
1689    
1690     @@ -423,7 +463,15 @@ static void mce_wrmsrl(u32 msr, u64 v)
1691     *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
1692     return;
1693     }
1694     - wrmsrl(msr, v);
1695     +
1696     + low = (u32)v;
1697     + high = (u32)(v >> 32);
1698     +
1699     + /* See comment in mce_rdmsrl() */
1700     + asm volatile("1: wrmsr\n"
1701     + "2:\n"
1702     + _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
1703     + : : "c" (msr), "a"(low), "d" (high) : "memory");
1704     }
1705    
1706     /*
1707     diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
1708     index 43031db429d24..231954fe5b4e6 100644
1709     --- a/arch/x86/kernel/cpu/mce/internal.h
1710     +++ b/arch/x86/kernel/cpu/mce/internal.h
1711     @@ -172,4 +172,14 @@ extern bool amd_filter_mce(struct mce *m);
1712     static inline bool amd_filter_mce(struct mce *m) { return false; };
1713     #endif
1714    
1715     +__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
1716     + struct pt_regs *regs, int trapnr,
1717     + unsigned long error_code,
1718     + unsigned long fault_addr);
1719     +
1720     +__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
1721     + struct pt_regs *regs, int trapnr,
1722     + unsigned long error_code,
1723     + unsigned long fault_addr);
1724     +
1725     #endif /* __X86_MCE_INTERNAL_H__ */
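
[Editor's note] The mce/core.c hunks open-code RDMSR/WRMSR with exception-table fixups that panic on an unexpected fault, using DECLARE_ARGS()/EAX_EDX_VAL() to build the 64-bit value from the EDX:EAX halves and splitting it the same way for the write. A small stand-alone sketch of that 32-bit split/merge, with hypothetical helper names in plain C rather than the kernel macros:

#include <stdint.h>

/* RDMSR returns the low half in EAX and the high half in EDX */
static inline uint64_t msr_merge(uint32_t low, uint32_t high)
{
        return ((uint64_t)high << 32) | low;
}

/* WRMSR takes the value split the same way, as mce_wrmsrl() now does */
static inline void msr_split(uint64_t v, uint32_t *low, uint32_t *high)
{
        *low  = (uint32_t)v;
        *high = (uint32_t)(v >> 32);
}
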
1726     diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
1727     index 87bcdc6dc2f0c..0d09eb13743b4 100644
1728     --- a/arch/x86/kernel/cpu/mce/severity.c
1729     +++ b/arch/x86/kernel/cpu/mce/severity.c
1730     @@ -9,9 +9,11 @@
1731     #include <linux/seq_file.h>
1732     #include <linux/init.h>
1733     #include <linux/debugfs.h>
1734     -#include <asm/mce.h>
1735     #include <linux/uaccess.h>
1736    
1737     +#include <asm/mce.h>
1738     +#include <asm/intel-family.h>
1739     +
1740     #include "internal.h"
1741    
1742     /*
1743     @@ -40,9 +42,14 @@ static struct severity {
1744     unsigned char context;
1745     unsigned char excp;
1746     unsigned char covered;
1747     + unsigned char cpu_model;
1748     + unsigned char cpu_minstepping;
1749     + unsigned char bank_lo, bank_hi;
1750     char *msg;
1751     } severities[] = {
1752     #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
1753     +#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
1754     +#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
1755     #define KERNEL .context = IN_KERNEL
1756     #define USER .context = IN_USER
1757     #define KERNEL_RECOV .context = IN_KERNEL_RECOV
1758     @@ -97,7 +104,6 @@ static struct severity {
1759     KEEP, "Corrected error",
1760     NOSER, BITCLR(MCI_STATUS_UC)
1761     ),
1762     -
1763     /*
1764     * known AO MCACODs reported via MCE or CMC:
1765     *
1766     @@ -113,6 +119,18 @@ static struct severity {
1767     AO, "Action optional: last level cache writeback error",
1768     SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
1769     ),
1770     + /*
1771     + * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured
1772     + * to report uncorrected errors using CMCI with a special signature.
1773     + * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported
1774     + * in one of the memory controller banks.
1775     + * Set severity to "AO" for same action as normal patrol scrub error.
1776     + */
1777     + MCESEV(
1778     + AO, "Uncorrected Patrol Scrub Error",
1779     + SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
1780     + MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
1781     + ),
1782    
1783     /* ignore OVER for UCNA */
1784     MCESEV(
1785     @@ -320,6 +338,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
1786     continue;
1787     if (s->excp && excp != s->excp)
1788     continue;
1789     + if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
1790     + continue;
1791     + if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
1792     + continue;
1793     + if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi))
1794     + continue;
1795     if (msg)
1796     *msg = s->msg;
1797     s->covered = 1;
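
[Editor's note] The severity.c hunk adds optional per-entry constraints (CPU model, minimum stepping, MCA bank range) so the new Skylake/Cascade Lake patrol-scrub quirk only matches where intended; an entry with a zero constraint field keeps the old "match anything" behaviour. A stand-alone sketch of the added filtering checks, with illustrative struct and function names:

#include <stdbool.h>

struct sev_constraints {
        unsigned char cpu_model;        /* 0 = no model constraint */
        unsigned char cpu_minstepping;  /* 0 = no stepping constraint */
        unsigned char bank_lo, bank_hi; /* bank_lo == 0 disables the range */
};

static bool sev_constraints_match(const struct sev_constraints *s,
                                  unsigned char model, unsigned char stepping,
                                  int bank)
{
        if (s->cpu_model && model != s->cpu_model)
                return false;
        if (s->cpu_minstepping && stepping < s->cpu_minstepping)
                return false;
        if (s->bank_lo && (bank < s->bank_lo || bank > s->bank_hi))
                return false;
        return true;
}
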
1798     diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
1799     index 6ce7e0a23268f..b271da0fa2193 100644
1800     --- a/arch/x86/kernel/fpu/init.c
1801     +++ b/arch/x86/kernel/fpu/init.c
1802     @@ -242,9 +242,9 @@ static void __init fpu__init_system_ctx_switch(void)
1803     */
1804     static void __init fpu__init_parse_early_param(void)
1805     {
1806     - char arg[32];
1807     + char arg[128];
1808     char *argptr = arg;
1809     - int bit;
1810     + int arglen, res, bit;
1811    
1812     #ifdef CONFIG_X86_32
1813     if (cmdline_find_option_bool(boot_command_line, "no387"))
1814     @@ -267,12 +267,26 @@ static void __init fpu__init_parse_early_param(void)
1815     if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
1816     setup_clear_cpu_cap(X86_FEATURE_XSAVES);
1817    
1818     - if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
1819     - sizeof(arg)) &&
1820     - get_option(&argptr, &bit) &&
1821     - bit >= 0 &&
1822     - bit < NCAPINTS * 32)
1823     - setup_clear_cpu_cap(bit);
1824     + arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
1825     + if (arglen <= 0)
1826     + return;
1827     +
1828     + pr_info("Clearing CPUID bits:");
1829     + do {
1830     + res = get_option(&argptr, &bit);
1831     + if (res == 0 || res == 3)
1832     + break;
1833     +
1834     + /* If the argument was too long, the last bit may be cut off */
1835     + if (res == 1 && arglen >= sizeof(arg))
1836     + break;
1837     +
1838     + if (bit >= 0 && bit < NCAPINTS * 32) {
1839     + pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
1840     + setup_clear_cpu_cap(bit);
1841     + }
1842     + } while (res == 2);
1843     + pr_cont("\n");
1844     }
1845    
1846     /*
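
[Editor's note] The fpu/init.c hunk turns clearcpuid= into a comma-separated list: it loops while get_option() signals that another value follows (res == 2), breaks on 0 or 3, and also bails out if the argument buffer was truncated. A user-space sketch of the same loop shape, with strtol() standing in for get_option(); everything here, including the bit-number bound, is a stand-in:

#include <stdio.h>
#include <stdlib.h>

#define MAX_BITS (19 * 32)   /* stand-in for NCAPINTS * 32 */

static void parse_clearcpuid(const char *arg)
{
        char *end;

        while (*arg) {
                long bit = strtol(arg, &end, 0);

                if (end == arg)                 /* no number: stop */
                        break;
                if (bit >= 0 && bit < MAX_BITS)
                        printf("clearing CPUID bit %ld\n", bit);
                if (*end != ',')                /* last element */
                        break;
                arg = end + 1;                  /* step past the comma */
        }
}

int main(void)
{
        parse_clearcpuid("120,123,130");
        return 0;
}
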
1847     diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
1848     index 54c21d6abd5ac..5bb001c0c771a 100644
1849     --- a/arch/x86/kernel/nmi.c
1850     +++ b/arch/x86/kernel/nmi.c
1851     @@ -106,7 +106,6 @@ fs_initcall(nmi_warning_debugfs);
1852    
1853     static void nmi_check_duration(struct nmiaction *action, u64 duration)
1854     {
1855     - u64 whole_msecs = READ_ONCE(action->max_duration);
1856     int remainder_ns, decimal_msecs;
1857    
1858     if (duration < nmi_longest_ns || duration < action->max_duration)
1859     @@ -114,12 +113,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
1860    
1861     action->max_duration = duration;
1862    
1863     - remainder_ns = do_div(whole_msecs, (1000 * 1000));
1864     + remainder_ns = do_div(duration, (1000 * 1000));
1865     decimal_msecs = remainder_ns / 1000;
1866    
1867     printk_ratelimited(KERN_INFO
1868     "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
1869     - action->handler, whole_msecs, decimal_msecs);
1870     + action->handler, duration, decimal_msecs);
1871     }
1872    
1873     static int nmi_handle(unsigned int type, struct pt_regs *regs)
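
[Editor's note] The nmi.c hunk drops the separate whole_msecs copy and runs do_div() directly on duration, relying on do_div() replacing its 64-bit dividend with the quotient and returning the remainder. A stand-alone sketch of that contract with a hypothetical helper and plain C division instead of the kernel macro:

/* mirrors do_div(duration, 1000 * 1000): *ns becomes whole milliseconds,
 * the return value is the leftover nanoseconds */
static unsigned int split_msecs(unsigned long long *ns)
{
        unsigned int rem = (unsigned int)(*ns % (1000 * 1000));

        *ns /= 1000 * 1000;
        return rem;
}

As in the hunk, the caller then derives the decimal part from the remainder (rem / 1000) for the "%lld.%03d msecs" message.
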
1874     diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
1875     index cc7823e7ef96c..484c32b7f79ff 100644
1876     --- a/arch/x86/kvm/emulate.c
1877     +++ b/arch/x86/kvm/emulate.c
1878     @@ -3617,7 +3617,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
1879     u64 tsc_aux = 0;
1880    
1881     if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
1882     - return emulate_gp(ctxt, 0);
1883     + return emulate_ud(ctxt);
1884     ctxt->dst.val = tsc_aux;
1885     return X86EMUL_CONTINUE;
1886     }
1887     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1888     index bb743f956c232..b90e8fd2f6ced 100644
1889     --- a/arch/x86/kvm/mmu.c
1890     +++ b/arch/x86/kvm/mmu.c
1891     @@ -6453,6 +6453,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
1892     cond_resched_lock(&kvm->mmu_lock);
1893     }
1894     }
1895     + kvm_mmu_commit_zap_page(kvm, &invalid_list);
1896    
1897     spin_unlock(&kvm->mmu_lock);
1898     srcu_read_unlock(&kvm->srcu, rcu_idx);
1899     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1900     index b58495fde2e89..c79c1a07f44b9 100644
1901     --- a/arch/x86/kvm/svm.c
1902     +++ b/arch/x86/kvm/svm.c
1903     @@ -5383,6 +5383,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
1904     * - Tell IOMMU to use legacy mode for this interrupt.
1905     * - Retrieve ga_tag of prior interrupt remapping data.
1906     */
1907     + pi.prev_ga_tag = 0;
1908     pi.is_guest_mode = false;
1909     ret = irq_set_vcpu_affinity(host_irq, &pi);
1910    
1911     diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1912     index a460ddf04d60c..08e1e7544f823 100644
1913     --- a/arch/x86/kvm/vmx/nested.c
1914     +++ b/arch/x86/kvm/vmx/nested.c
1915     @@ -2231,6 +2231,8 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
1916     vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
1917     vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
1918     vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
1919     +
1920     + vmx->segment_cache.bitmask = 0;
1921     }
1922    
1923     if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
1924     @@ -3094,8 +3096,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
1925     prepare_vmcs02_early(vmx, vmcs12);
1926    
1927     if (from_vmentry) {
1928     - if (unlikely(!nested_get_vmcs12_pages(vcpu)))
1929     + if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
1930     + vmx_switch_vmcs(vcpu, &vmx->vmcs01);
1931     return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
1932     + }
1933    
1934     if (nested_vmx_check_vmentry_hw(vcpu)) {
1935     vmx_switch_vmcs(vcpu, &vmx->vmcs01);
1936     diff --git a/block/blk-core.c b/block/blk-core.c
1937     index 81aafb601df06..d2213220099d3 100644
1938     --- a/block/blk-core.c
1939     +++ b/block/blk-core.c
1940     @@ -743,11 +743,10 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector)
1941     {
1942     char b[BDEVNAME_SIZE];
1943    
1944     - printk(KERN_INFO "attempt to access beyond end of device\n");
1945     - printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
1946     - bio_devname(bio, b), bio->bi_opf,
1947     - (unsigned long long)bio_end_sector(bio),
1948     - (long long)maxsector);
1949     + pr_info_ratelimited("attempt to access beyond end of device\n"
1950     + "%s: rw=%d, want=%llu, limit=%llu\n",
1951     + bio_devname(bio, b), bio->bi_opf,
1952     + bio_end_sector(bio), maxsector);
1953     }
1954    
1955     #ifdef CONFIG_FAIL_MAKE_REQUEST
1956     diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
1957     index a09ab0c3d074d..5dafd7a8ec913 100644
1958     --- a/block/blk-mq-sysfs.c
1959     +++ b/block/blk-mq-sysfs.c
1960     @@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
1961     struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
1962     kobj);
1963    
1964     - cancel_delayed_work_sync(&hctx->run_work);
1965     -
1966     if (hctx->flags & BLK_MQ_F_BLOCKING)
1967     cleanup_srcu_struct(hctx->srcu);
1968     blk_free_flush_queue(hctx->fq);
1969     diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
1970     index 46f5198be0173..bf33570da5ac7 100644
1971     --- a/block/blk-sysfs.c
1972     +++ b/block/blk-sysfs.c
1973     @@ -891,9 +891,16 @@ static void __blk_release_queue(struct work_struct *work)
1974    
1975     blk_free_queue_stats(q->stats);
1976    
1977     - if (queue_is_mq(q))
1978     + if (queue_is_mq(q)) {
1979     + struct blk_mq_hw_ctx *hctx;
1980     + int i;
1981     +
1982     cancel_delayed_work_sync(&q->requeue_work);
1983    
1984     + queue_for_each_hw_ctx(q, hctx, i)
1985     + cancel_delayed_work_sync(&hctx->run_work);
1986     + }
1987     +
1988     blk_exit_queue(q);
1989    
1990     blk_queue_free_zone_bitmaps(q);
1991     diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
1992     index 43c6aa784858b..e62d735ed2660 100644
1993     --- a/crypto/algif_aead.c
1994     +++ b/crypto/algif_aead.c
1995     @@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
1996     SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
1997    
1998     skcipher_request_set_sync_tfm(skreq, null_tfm);
1999     - skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
2000     + skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
2001     NULL, NULL);
2002     skcipher_request_set_crypt(skreq, src, dst, len, NULL);
2003    
2004     @@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
2005     areq->outlen = outlen;
2006    
2007     aead_request_set_callback(&areq->cra_u.aead_req,
2008     - CRYPTO_TFM_REQ_MAY_BACKLOG,
2009     + CRYPTO_TFM_REQ_MAY_SLEEP,
2010     af_alg_async_cb, areq);
2011     err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
2012     crypto_aead_decrypt(&areq->cra_u.aead_req);
2013    
2014     /* AIO operation in progress */
2015     - if (err == -EINPROGRESS || err == -EBUSY)
2016     + if (err == -EINPROGRESS)
2017     return -EIOCBQUEUED;
2018    
2019     sock_put(sk);
2020     } else {
2021     /* Synchronous operation */
2022     aead_request_set_callback(&areq->cra_u.aead_req,
2023     + CRYPTO_TFM_REQ_MAY_SLEEP |
2024     CRYPTO_TFM_REQ_MAY_BACKLOG,
2025     crypto_req_done, &ctx->wait);
2026     err = crypto_wait_req(ctx->enc ?
2027     diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
2028     index 81c4022285a7c..30069a92a9b22 100644
2029     --- a/crypto/algif_skcipher.c
2030     +++ b/crypto/algif_skcipher.c
2031     @@ -123,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
2032     crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
2033    
2034     /* AIO operation in progress */
2035     - if (err == -EINPROGRESS || err == -EBUSY)
2036     + if (err == -EINPROGRESS)
2037     return -EIOCBQUEUED;
2038    
2039     sock_put(sk);
2040     diff --git a/drivers/android/binder.c b/drivers/android/binder.c
2041     index 110dd4c2977f5..b62b1ab6bb699 100644
2042     --- a/drivers/android/binder.c
2043     +++ b/drivers/android/binder.c
2044     @@ -227,7 +227,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
2045     struct binder_work {
2046     struct list_head entry;
2047    
2048     - enum {
2049     + enum binder_work_type {
2050     BINDER_WORK_TRANSACTION = 1,
2051     BINDER_WORK_TRANSACTION_COMPLETE,
2052     BINDER_WORK_RETURN_ERROR,
2053     @@ -889,27 +889,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked(
2054     return w;
2055     }
2056    
2057     -/**
2058     - * binder_dequeue_work_head() - Dequeues the item at head of list
2059     - * @proc: binder_proc associated with list
2060     - * @list: list to dequeue head
2061     - *
2062     - * Removes the head of the list if there are items on the list
2063     - *
2064     - * Return: pointer dequeued binder_work, NULL if list was empty
2065     - */
2066     -static struct binder_work *binder_dequeue_work_head(
2067     - struct binder_proc *proc,
2068     - struct list_head *list)
2069     -{
2070     - struct binder_work *w;
2071     -
2072     - binder_inner_proc_lock(proc);
2073     - w = binder_dequeue_work_head_ilocked(list);
2074     - binder_inner_proc_unlock(proc);
2075     - return w;
2076     -}
2077     -
2078     static void
2079     binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
2080     static void binder_free_thread(struct binder_thread *thread);
2081     @@ -2347,8 +2326,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
2082     * file is done when the transaction is torn
2083     * down.
2084     */
2085     - WARN_ON(failed_at &&
2086     - proc->tsk == current->group_leader);
2087     } break;
2088     case BINDER_TYPE_PTR:
2089     /*
2090     @@ -4591,13 +4568,17 @@ static void binder_release_work(struct binder_proc *proc,
2091     struct list_head *list)
2092     {
2093     struct binder_work *w;
2094     + enum binder_work_type wtype;
2095    
2096     while (1) {
2097     - w = binder_dequeue_work_head(proc, list);
2098     + binder_inner_proc_lock(proc);
2099     + w = binder_dequeue_work_head_ilocked(list);
2100     + wtype = w ? w->type : 0;
2101     + binder_inner_proc_unlock(proc);
2102     if (!w)
2103     return;
2104    
2105     - switch (w->type) {
2106     + switch (wtype) {
2107     case BINDER_WORK_TRANSACTION: {
2108     struct binder_transaction *t;
2109    
2110     @@ -4631,9 +4612,11 @@ static void binder_release_work(struct binder_proc *proc,
2111     kfree(death);
2112     binder_stats_deleted(BINDER_STAT_DEATH);
2113     } break;
2114     + case BINDER_WORK_NODE:
2115     + break;
2116     default:
2117     pr_err("unexpected work type, %d, not freed\n",
2118     - w->type);
2119     + wtype);
2120     break;
2121     }
2122     }
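
[Editor's note] The binder.c hunks stop touching the dequeued work item after the inner lock is dropped: the type is snapshotted while the lock is still held and only the snapshot is used afterwards. A minimal sketch of that snapshot-under-lock pattern using pthreads; names and types are illustrative, not the binder code:

#include <pthread.h>
#include <stddef.h>

struct work { int type; struct work *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct work *head;

static int dequeue_type(void)
{
        struct work *w;
        int type;

        pthread_mutex_lock(&lock);
        w = head;
        if (w)
                head = w->next;
        type = w ? w->type : 0;      /* snapshot while still locked */
        pthread_mutex_unlock(&lock);

        /* safe: only the copied type is used once the lock is gone */
        return type;
}
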
2123     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
2124     index f3f0529564da0..b326eeddaadf0 100644
2125     --- a/drivers/bluetooth/btusb.c
2126     +++ b/drivers/bluetooth/btusb.c
2127     @@ -2664,6 +2664,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
2128     buf = kmalloc(size, GFP_KERNEL);
2129     if (!buf) {
2130     kfree(dr);
2131     + usb_free_urb(urb);
2132     return -ENOMEM;
2133     }
2134    
2135     diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
2136     index 85a30fb9177bb..f83d67eafc9f0 100644
2137     --- a/drivers/bluetooth/hci_ldisc.c
2138     +++ b/drivers/bluetooth/hci_ldisc.c
2139     @@ -538,6 +538,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
2140     clear_bit(HCI_UART_PROTO_READY, &hu->flags);
2141     percpu_up_write(&hu->proto_lock);
2142    
2143     + cancel_work_sync(&hu->init_ready);
2144     cancel_work_sync(&hu->write_work);
2145    
2146     if (hdev) {
2147     diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
2148     index ad2f26cb2622e..5b9aa73ff2b7f 100644
2149     --- a/drivers/bluetooth/hci_serdev.c
2150     +++ b/drivers/bluetooth/hci_serdev.c
2151     @@ -357,6 +357,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
2152     struct hci_dev *hdev = hu->hdev;
2153    
2154     clear_bit(HCI_UART_PROTO_READY, &hu->flags);
2155     +
2156     + cancel_work_sync(&hu->init_ready);
2157     if (test_bit(HCI_UART_REGISTERED, &hu->flags))
2158     hci_unregister_dev(hdev);
2159     hci_free_dev(hdev);
2160     diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
2161     index 6b9a0593d2eb7..b6e7df9e88503 100644
2162     --- a/drivers/char/ipmi/ipmi_si_intf.c
2163     +++ b/drivers/char/ipmi/ipmi_si_intf.c
2164     @@ -1977,7 +1977,7 @@ static int try_smi_init(struct smi_info *new_smi)
2165     /* Do this early so it's available for logs. */
2166     if (!new_smi->io.dev) {
2167     pr_err("IPMI interface added with no device\n");
2168     - rv = EIO;
2169     + rv = -EIO;
2170     goto out_err;
2171     }
2172    
2173     diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
2174     index 37c22667e8319..4313ecb2af5b2 100644
2175     --- a/drivers/clk/at91/clk-main.c
2176     +++ b/drivers/clk/at91/clk-main.c
2177     @@ -437,12 +437,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
2178     return -EINVAL;
2179    
2180     regmap_read(regmap, AT91_CKGR_MOR, &tmp);
2181     - tmp &= ~MOR_KEY_MASK;
2182    
2183     if (index && !(tmp & AT91_PMC_MOSCSEL))
2184     - regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
2185     + tmp = AT91_PMC_MOSCSEL;
2186     else if (!index && (tmp & AT91_PMC_MOSCSEL))
2187     - regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
2188     + tmp = 0;
2189     + else
2190     + return 0;
2191     +
2192     + regmap_update_bits(regmap, AT91_CKGR_MOR,
2193     + AT91_PMC_MOSCSEL | MOR_KEY_MASK,
2194     + tmp | AT91_PMC_KEY);
2195    
2196     while (!clk_sam9x5_main_ready(regmap))
2197     cpu_relax();
2198     diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
2199     index 45420b514149f..c5486537b9284 100644
2200     --- a/drivers/clk/bcm/clk-bcm2835.c
2201     +++ b/drivers/clk/bcm/clk-bcm2835.c
2202     @@ -1336,8 +1336,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
2203     pll->hw.init = &init;
2204    
2205     ret = devm_clk_hw_register(cprman->dev, &pll->hw);
2206     - if (ret)
2207     + if (ret) {
2208     + kfree(pll);
2209     return NULL;
2210     + }
2211     return &pll->hw;
2212     }
2213    
2214     diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
2215     index 41fc9c63356ea..1846bd879dd71 100644
2216     --- a/drivers/clk/imx/clk-imx8mq.c
2217     +++ b/drivers/clk/imx/clk-imx8mq.c
2218     @@ -157,10 +157,10 @@ static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys
2219     "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
2220    
2221     static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
2222     - "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
2223     + "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
2224    
2225     static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
2226     - "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
2227     + "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
2228    
2229     static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out",
2230     "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
2231     diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
2232     index 7edf8c8432b67..64ea895f1a7df 100644
2233     --- a/drivers/clk/keystone/sci-clk.c
2234     +++ b/drivers/clk/keystone/sci-clk.c
2235     @@ -522,7 +522,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
2236     np = of_find_node_with_property(np, *clk_name);
2237     if (!np) {
2238     clk_name++;
2239     - break;
2240     + continue;
2241     }
2242    
2243     if (!of_device_is_available(np))
2244     diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
2245     index 608a9a6621a37..00920182bbe63 100644
2246     --- a/drivers/clk/mediatek/clk-mt6779.c
2247     +++ b/drivers/clk/mediatek/clk-mt6779.c
2248     @@ -919,6 +919,8 @@ static const struct mtk_gate infra_clks[] = {
2249     "pwm_sel", 19),
2250     GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
2251     "pwm_sel", 21),
2252     + GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
2253     + "uart_sel", 22),
2254     GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
2255     "uart_sel", 23),
2256     GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
2257     diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
2258     index d2760a021301d..3143e16065de6 100644
2259     --- a/drivers/clk/meson/g12a.c
2260     +++ b/drivers/clk/meson/g12a.c
2261     @@ -298,6 +298,17 @@ static struct clk_regmap g12a_fclk_div2 = {
2262     &g12a_fclk_div2_div.hw
2263     },
2264     .num_parents = 1,
2265     + /*
2266     + * Similar to fclk_div3, it seems that this clock is used by
2267     + * the resident firmware and is required by the platform to
2268     + * operate correctly.
2269     + * Until the following condition are met, we need this clock to
2270     + * be marked as critical:
2271     + * a) Mark the clock used by a firmware resource, if possible
2272     + * b) CCF has a clock hand-off mechanism to make the sure the
2273     + * clock stays on until the proper driver comes along
2274     + */
2275     + .flags = CLK_IS_CRITICAL,
2276     },
2277     };
2278    
2279     diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
2280     index c6fb57cd576f5..aa5c0c6ead017 100644
2281     --- a/drivers/clk/qcom/gcc-sdm660.c
2282     +++ b/drivers/clk/qcom/gcc-sdm660.c
2283     @@ -666,7 +666,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
2284     .cmd_rcgr = 0x48044,
2285     .mnd_width = 0,
2286     .hid_width = 5,
2287     - .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
2288     + .parent_map = gcc_parent_map_xo_gpll0,
2289     .freq_tbl = ftbl_hmss_rbcpr_clk_src,
2290     .clkr.hw.init = &(struct clk_init_data){
2291     .name = "hmss_rbcpr_clk_src",
2292     diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
2293     index ba9f00dc9740c..7dd2e0b1a5866 100644
2294     --- a/drivers/clk/rockchip/clk-half-divider.c
2295     +++ b/drivers/clk/rockchip/clk-half-divider.c
2296     @@ -167,7 +167,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
2297     unsigned long flags,
2298     spinlock_t *lock)
2299     {
2300     - struct clk *clk;
2301     + struct clk *clk = ERR_PTR(-ENOMEM);
2302     struct clk_mux *mux = NULL;
2303     struct clk_gate *gate = NULL;
2304     struct clk_divider *div = NULL;
2305     diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
2306     index df1c941260d14..b4af4094309b0 100644
2307     --- a/drivers/cpufreq/armada-37xx-cpufreq.c
2308     +++ b/drivers/cpufreq/armada-37xx-cpufreq.c
2309     @@ -484,6 +484,12 @@ remove_opp:
2310     /* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
2311     late_initcall(armada37xx_cpufreq_driver_init);
2312    
2313     +static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
2314     + { .compatible = "marvell,armada-3700-nb-pm" },
2315     + { },
2316     +};
2317     +MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
2318     +
2319     MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
2320     MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
2321     MODULE_LICENSE("GPL");
2322     diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
2323     index 3a2f022f6bde2..bc6ccf2c7aae0 100644
2324     --- a/drivers/cpufreq/powernv-cpufreq.c
2325     +++ b/drivers/cpufreq/powernv-cpufreq.c
2326     @@ -884,12 +884,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
2327     unsigned long action, void *unused)
2328     {
2329     int cpu;
2330     - struct cpufreq_policy cpu_policy;
2331     + struct cpufreq_policy *cpu_policy;
2332    
2333     rebooting = true;
2334     for_each_online_cpu(cpu) {
2335     - cpufreq_get_policy(&cpu_policy, cpu);
2336     - powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
2337     + cpu_policy = cpufreq_cpu_get(cpu);
2338     + if (!cpu_policy)
2339     + continue;
2340     + powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
2341     + cpufreq_cpu_put(cpu_policy);
2342     }
2343    
2344     return NOTIFY_DONE;
2345     diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
2346     index 137ed3df0c74d..9612da122ceba 100644
2347     --- a/drivers/crypto/caam/Kconfig
2348     +++ b/drivers/crypto/caam/Kconfig
2349     @@ -112,6 +112,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
2350     select CRYPTO_AUTHENC
2351     select CRYPTO_BLKCIPHER
2352     select CRYPTO_DES
2353     + select CRYPTO_XTS
2354     help
2355     Selecting this will use CAAM Queue Interface (QI) for sending
2356     & receiving crypto jobs to/from CAAM. This gives better performance
2357     diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
2358     index 8e3449670d2f0..2a605a419df8b 100644
2359     --- a/drivers/crypto/caam/caamalg_qi.c
2360     +++ b/drivers/crypto/caam/caamalg_qi.c
2361     @@ -18,6 +18,7 @@
2362     #include "qi.h"
2363     #include "jr.h"
2364     #include "caamalg_desc.h"
2365     +#include <asm/unaligned.h>
2366    
2367     /*
2368     * crypto alg
2369     @@ -67,6 +68,11 @@ struct caam_ctx {
2370     struct device *qidev;
2371     spinlock_t lock; /* Protects multiple init of driver context */
2372     struct caam_drv_ctx *drv_ctx[NUM_OP];
2373     + struct crypto_skcipher *fallback;
2374     +};
2375     +
2376     +struct caam_skcipher_req_ctx {
2377     + struct skcipher_request fallback_req;
2378     };
2379    
2380     static int aead_set_sh_desc(struct crypto_aead *aead)
2381     @@ -745,12 +751,17 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
2382     struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
2383     struct device *jrdev = ctx->jrdev;
2384     int ret = 0;
2385     + int err;
2386    
2387     if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
2388     dev_err(jrdev, "key size mismatch\n");
2389     goto badkey;
2390     }
2391    
2392     + err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
2393     + if (err)
2394     + return err;
2395     +
2396     ctx->cdata.keylen = keylen;
2397     ctx->cdata.key_virt = key;
2398     ctx->cdata.key_inline = true;
2399     @@ -1395,6 +1406,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
2400     return edesc;
2401     }
2402    
2403     +static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
2404     +{
2405     + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
2406     + unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
2407     +
2408     + return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
2409     +}
2410     +
2411     static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
2412     {
2413     struct skcipher_edesc *edesc;
2414     @@ -1405,6 +1424,21 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
2415     if (!req->cryptlen)
2416     return 0;
2417    
2418     + if (ctx->fallback && xts_skcipher_ivsize(req)) {
2419     + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
2420     +
2421     + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
2422     + skcipher_request_set_callback(&rctx->fallback_req,
2423     + req->base.flags,
2424     + req->base.complete,
2425     + req->base.data);
2426     + skcipher_request_set_crypt(&rctx->fallback_req, req->src,
2427     + req->dst, req->cryptlen, req->iv);
2428     +
2429     + return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
2430     + crypto_skcipher_decrypt(&rctx->fallback_req);
2431     + }
2432     +
2433     if (unlikely(caam_congested))
2434     return -EAGAIN;
2435    
2436     @@ -1529,6 +1563,7 @@ static struct caam_skcipher_alg driver_algs[] = {
2437     .base = {
2438     .cra_name = "xts(aes)",
2439     .cra_driver_name = "xts-aes-caam-qi",
2440     + .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
2441     .cra_blocksize = AES_BLOCK_SIZE,
2442     },
2443     .setkey = xts_skcipher_setkey,
2444     @@ -2462,9 +2497,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
2445     struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
2446     struct caam_skcipher_alg *caam_alg =
2447     container_of(alg, typeof(*caam_alg), skcipher);
2448     + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
2449     + u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
2450     + int ret = 0;
2451     +
2452     + if (alg_aai == OP_ALG_AAI_XTS) {
2453     + const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
2454     + struct crypto_skcipher *fallback;
2455     +
2456     + fallback = crypto_alloc_skcipher(tfm_name, 0,
2457     + CRYPTO_ALG_NEED_FALLBACK);
2458     + if (IS_ERR(fallback)) {
2459     + dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
2460     + tfm_name, PTR_ERR(fallback));
2461     + return PTR_ERR(fallback);
2462     + }
2463    
2464     - return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
2465     - false);
2466     + ctx->fallback = fallback;
2467     + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
2468     + crypto_skcipher_reqsize(fallback));
2469     + }
2470     +
2471     + ret = caam_init_common(ctx, &caam_alg->caam, false);
2472     + if (ret && ctx->fallback)
2473     + crypto_free_skcipher(ctx->fallback);
2474     +
2475     + return ret;
2476     }
2477    
2478     static int caam_aead_init(struct crypto_aead *tfm)
2479     @@ -2490,7 +2548,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
2480    
2481     static void caam_cra_exit(struct crypto_skcipher *tfm)
2482     {
2483     - caam_exit_common(crypto_skcipher_ctx(tfm));
2484     + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
2485     +
2486     + if (ctx->fallback)
2487     + crypto_free_skcipher(ctx->fallback);
2488     + caam_exit_common(ctx);
2489     }
2490    
2491     static void caam_aead_exit(struct crypto_aead *tfm)
2492     @@ -2524,7 +2586,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2493     alg->base.cra_module = THIS_MODULE;
2494     alg->base.cra_priority = CAAM_CRA_PRIORITY;
2495     alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2496     - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2497     + alg->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2498    
2499     alg->init = caam_cra_init;
2500     alg->exit = caam_cra_exit;
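
[Editor's note] The caamalg_qi.c hunks add a software fallback for xts(aes) whenever the second 64-bit half of the 16-byte IV is non-zero; xts_skcipher_ivsize() just tests that half with an unaligned load and skcipher_crypt() then routes the request to the fallback tfm. A stand-alone sketch of that check; the helper name and the memcpy-based unaligned access stand in for get_unaligned():

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* true when the second half of the XTS IV is set, i.e. the request is
 * handed to the software fallback rather than the accelerator */
static bool xts_iv_high_half_set(const uint8_t iv[16])
{
        uint64_t hi;

        memcpy(&hi, iv + 8, sizeof(hi));   /* unaligned-safe read */
        return hi != 0;
}
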
2501     diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
2502     index 64112c736810e..7234b95241e91 100644
2503     --- a/drivers/crypto/ccp/ccp-ops.c
2504     +++ b/drivers/crypto/ccp/ccp-ops.c
2505     @@ -1746,7 +1746,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2506     break;
2507     default:
2508     ret = -EINVAL;
2509     - goto e_ctx;
2510     + goto e_data;
2511     }
2512     } else {
2513     /* Stash the context */
2514     diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
2515     index dffa2aa855fdd..9b410ffafc4dd 100644
2516     --- a/drivers/crypto/chelsio/chtls/chtls_cm.c
2517     +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
2518     @@ -1053,6 +1053,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
2519     ndev = n->dev;
2520     if (!ndev)
2521     goto free_dst;
2522     + if (is_vlan_dev(ndev))
2523     + ndev = vlan_dev_real_dev(ndev);
2524     +
2525     port_id = cxgb4_port_idx(ndev);
2526    
2527     csk = chtls_sock_create(cdev);
2528     diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
2529     index c403d6b64e087..a5903cfc83523 100644
2530     --- a/drivers/crypto/chelsio/chtls/chtls_io.c
2531     +++ b/drivers/crypto/chelsio/chtls/chtls_io.c
2532     @@ -910,9 +910,9 @@ static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
2533     return (__force int)cpu_to_be16(thdr->length);
2534     }
2535    
2536     -static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
2537     +static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
2538     {
2539     - return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
2540     + return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
2541     }
2542    
2543     static int csk_wait_memory(struct chtls_dev *cdev,
2544     @@ -1210,6 +1210,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
2545     copied = 0;
2546     csk = rcu_dereference_sk_user_data(sk);
2547     cdev = csk->cdev;
2548     + lock_sock(sk);
2549     timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
2550    
2551     err = sk_stream_wait_connect(sk, &timeo);
2552     diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
2553     index 9181523ba7607..acaa504d5a798 100644
2554     --- a/drivers/crypto/ixp4xx_crypto.c
2555     +++ b/drivers/crypto/ixp4xx_crypto.c
2556     @@ -527,7 +527,7 @@ static void release_ixp_crypto(struct device *dev)
2557    
2558     if (crypt_virt) {
2559     dma_free_coherent(dev,
2560     - NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
2561     + NPE_QLEN * sizeof(struct crypt_ctl),
2562     crypt_virt, crypt_phys);
2563     }
2564     }
2565     diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
2566     index 7e3ad085b5bdd..efce3a83b35a8 100644
2567     --- a/drivers/crypto/mediatek/mtk-platform.c
2568     +++ b/drivers/crypto/mediatek/mtk-platform.c
2569     @@ -442,7 +442,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
2570     static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
2571     {
2572     struct mtk_ring **ring = cryp->ring;
2573     - int i, err = ENOMEM;
2574     + int i;
2575    
2576     for (i = 0; i < MTK_RING_MAX; i++) {
2577     ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
2578     @@ -469,14 +469,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
2579     return 0;
2580    
2581     err_cleanup:
2582     - for (; i--; ) {
2583     + do {
2584     dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
2585     ring[i]->res_base, ring[i]->res_dma);
2586     dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
2587     ring[i]->cmd_base, ring[i]->cmd_dma);
2588     kfree(ring[i]);
2589     - }
2590     - return err;
2591     + } while (i--);
2592     + return -ENOMEM;
2593     }
2594    
2595     static int mtk_crypto_probe(struct platform_device *pdev)
2596     diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
2597     index aba5db3c0588f..d7c0c982ba433 100644
2598     --- a/drivers/crypto/omap-sham.c
2599     +++ b/drivers/crypto/omap-sham.c
2600     @@ -453,6 +453,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
2601     struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
2602     u32 val, mask;
2603    
2604     + if (likely(ctx->digcnt))
2605     + omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
2606     +
2607     /*
2608     * Setting ALGO_CONST only for the first iteration and
2609     * CLOSE_HASH only for the last one. Note that flags mode bits
2610     diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
2611     index 2680e1525db58..13ecbb0e58528 100644
2612     --- a/drivers/crypto/picoxcell_crypto.c
2613     +++ b/drivers/crypto/picoxcell_crypto.c
2614     @@ -1697,11 +1697,6 @@ static int spacc_probe(struct platform_device *pdev)
2615     goto err_clk_put;
2616     }
2617    
2618     - ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
2619     - if (ret)
2620     - goto err_clk_disable;
2621     -
2622     -
2623     /*
2624     * Use an IRQ threshold of 50% as a default. This seems to be a
2625     * reasonable trade off of latency against throughput but can be
2626     @@ -1709,6 +1704,10 @@ static int spacc_probe(struct platform_device *pdev)
2627     */
2628     engine->stat_irq_thresh = (engine->fifo_sz / 2);
2629    
2630     + ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
2631     + if (ret)
2632     + goto err_clk_disable;
2633     +
2634     /*
2635     * Configure the interrupts. We only use the STAT_CNT interrupt as we
2636     * only submit a new packet for processing when we complete another in
2637     diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
2638     index 62d9825a49e9d..238936e2dfe2d 100644
2639     --- a/drivers/dma/dmatest.c
2640     +++ b/drivers/dma/dmatest.c
2641     @@ -1218,15 +1218,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
2642     add_threaded_test(info);
2643    
2644     /* Check if channel was added successfully */
2645     - dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
2646     -
2647     - if (dtc->chan) {
2648     + if (!list_empty(&info->channels)) {
2649     /*
2650     * if new channel was not successfully added, revert the
2651     * "test_channel" string to the name of the last successfully
2652     * added channel. exception for when users issues empty string
2653     * to channel parameter.
2654     */
2655     + dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
2656     if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
2657     && (strcmp("", strim(test_channel)) != 0)) {
2658     ret = -EINVAL;
2659     diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
2660     index a1b56f52db2f2..5e7fdc0b6e3db 100644
2661     --- a/drivers/dma/dw/core.c
2662     +++ b/drivers/dma/dw/core.c
2663     @@ -772,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
2664     if (dws->dma_dev != chan->device->dev)
2665     return false;
2666    
2667     + /* permit channels in accordance with the channels mask */
2668     + if (dws->channels && !(dws->channels & dwc->mask))
2669     + return false;
2670     +
2671     /* We have to copy data since dws can be temporary storage */
2672     memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
2673    
2674     diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
2675     index 7a085b3c1854c..d9810980920a1 100644
2676     --- a/drivers/dma/dw/dw.c
2677     +++ b/drivers/dma/dw/dw.c
2678     @@ -14,7 +14,7 @@
2679     static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
2680     {
2681     struct dw_dma *dw = to_dw_dma(dwc->chan.device);
2682     - u32 cfghi = DWC_CFGH_FIFO_MODE;
2683     + u32 cfghi = is_slave_direction(dwc->direction) ? 0 : DWC_CFGH_FIFO_MODE;
2684     u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
2685     bool hs_polarity = dwc->dws.hs_polarity;
2686    
2687     diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
2688     index 9e27831dee324..43e975fb67142 100644
2689     --- a/drivers/dma/dw/of.c
2690     +++ b/drivers/dma/dw/of.c
2691     @@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
2692     };
2693     dma_cap_mask_t cap;
2694    
2695     - if (dma_spec->args_count != 3)
2696     + if (dma_spec->args_count < 3 || dma_spec->args_count > 4)
2697     return NULL;
2698    
2699     slave.src_id = dma_spec->args[0];
2700     slave.dst_id = dma_spec->args[0];
2701     slave.m_master = dma_spec->args[1];
2702     slave.p_master = dma_spec->args[2];
2703     + if (dma_spec->args_count >= 4)
2704     + slave.channels = dma_spec->args[3];
2705    
2706     if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
2707     slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
2708     slave.m_master >= dw->pdata->nr_masters ||
2709     - slave.p_master >= dw->pdata->nr_masters))
2710     + slave.p_master >= dw->pdata->nr_masters ||
2711     + slave.channels >= BIT(dw->pdata->nr_channels)))
2712     return NULL;
2713    
2714     dma_cap_zero(cap);
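
[Editor's note] The dw/of.c and dw/core.c hunks add an optional fourth DT cell that restricts a slave to a subset of channels: dw_dma_filter() now rejects a channel whose bit is not set in the mask, while an empty mask keeps the old "any channel" behaviour. A stand-alone sketch of that filter with hypothetical names (the second argument corresponds to the channel's own one-bit mask, dwc->mask):

#include <stdbool.h>
#include <stdint.h>

static bool channel_allowed(uint32_t channels_mask, uint32_t chan_bit_mask)
{
        /* no mask given: any channel may be used */
        if (!channels_mask)
                return true;

        /* otherwise the candidate channel's bit must be present */
        return (channels_mask & chan_bit_mask) != 0;
}
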
2715     diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
2716     index 5634437bb39d2..66669f9d690be 100644
2717     --- a/drivers/edac/aspeed_edac.c
2718     +++ b/drivers/edac/aspeed_edac.c
2719     @@ -209,8 +209,8 @@ static int config_irq(void *ctx, struct platform_device *pdev)
2720     /* register interrupt handler */
2721     irq = platform_get_irq(pdev, 0);
2722     dev_dbg(&pdev->dev, "got irq %d\n", irq);
2723     - if (!irq)
2724     - return -ENODEV;
2725     + if (irq < 0)
2726     + return irq;
2727    
2728     rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
2729     DRV_NAME, ctx);
2730     diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
2731     index 251f2b692785d..0c72daa519ffa 100644
2732     --- a/drivers/edac/i5100_edac.c
2733     +++ b/drivers/edac/i5100_edac.c
2734     @@ -1074,16 +1074,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
2735     PCI_DEVICE_ID_INTEL_5100_19, 0);
2736     if (!einj) {
2737     ret = -ENODEV;
2738     - goto bail_einj;
2739     + goto bail_mc_free;
2740     }
2741    
2742     rc = pci_enable_device(einj);
2743     if (rc < 0) {
2744     ret = rc;
2745     - goto bail_disable_einj;
2746     + goto bail_einj;
2747     }
2748    
2749     -
2750     mci->pdev = &pdev->dev;
2751    
2752     priv = mci->pvt_info;
2753     @@ -1149,14 +1148,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
2754     bail_scrub:
2755     priv->scrub_enable = 0;
2756     cancel_delayed_work_sync(&(priv->i5100_scrubbing));
2757     - edac_mc_free(mci);
2758     -
2759     -bail_disable_einj:
2760     pci_disable_device(einj);
2761    
2762     bail_einj:
2763     pci_dev_put(einj);
2764    
2765     +bail_mc_free:
2766     + edac_mc_free(mci);
2767     +
2768     bail_disable_ch1:
2769     pci_disable_device(ch1mm);
2770    
2771     diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
2772     index 6ac26d1b929f0..3247689467435 100644
2773     --- a/drivers/edac/ti_edac.c
2774     +++ b/drivers/edac/ti_edac.c
2775     @@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev)
2776    
2777     /* add EMIF ECC error handler */
2778     error_irq = platform_get_irq(pdev, 0);
2779     - if (!error_irq) {
2780     + if (error_irq < 0) {
2781     + ret = error_irq;
2782     edac_printk(KERN_ERR, EDAC_MOD_NAME,
2783     "EMIF irq number not defined.\n");
2784     goto err;
2785     diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2786     index 2384aa018993d..7c58085031732 100644
2787     --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2788     +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2789     @@ -6984,8 +6984,7 @@ static int dm_update_plane_state(struct dc *dc,
2790     dm_old_plane_state->dc_state,
2791     dm_state->context)) {
2792    
2793     - ret = EINVAL;
2794     - return ret;
2795     + return -EINVAL;
2796     }
2797    
2798    
2799     diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
2800     index 6a626c82e264b..f6598c5a9a879 100644
2801     --- a/drivers/gpu/drm/drm_debugfs_crc.c
2802     +++ b/drivers/gpu/drm/drm_debugfs_crc.c
2803     @@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
2804     source[len - 1] = '\0';
2805    
2806     ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
2807     - if (ret)
2808     + if (ret) {
2809     + kfree(source);
2810     return ret;
2811     + }
2812    
2813     spin_lock_irq(&crc->lock);
2814    
2815     diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
2816     index 570b59520fd13..2ff4b35151bf8 100644
2817     --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
2818     +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
2819     @@ -2120,7 +2120,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
2820     intel_dp->dpcd,
2821     sizeof(intel_dp->dpcd));
2822     cdv_intel_edp_panel_vdd_off(gma_encoder);
2823     - if (ret == 0) {
2824     + if (ret <= 0) {
2825     /* if this fails, presume the device is a ghost */
2826     DRM_INFO("failed to retrieve link info, disabling eDP\n");
2827     cdv_intel_dp_encoder_destroy(encoder);
2828     diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
2829     index 691c1a277d91b..dfcbb2b7cdda3 100644
2830     --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
2831     +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
2832     @@ -834,7 +834,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
2833     int i;
2834    
2835     a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
2836     - sizeof(a6xx_state->indexed_regs));
2837     + sizeof(*a6xx_state->indexed_regs));
2838     if (!a6xx_state->indexed_regs)
2839     return;
2840    
2841     diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
2842     index 36c85c05b7cf7..4aed5e9a84a45 100644
2843     --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
2844     +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
2845     @@ -819,7 +819,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
2846     struct drm_plane *plane;
2847     struct drm_display_mode *mode;
2848    
2849     - int cnt = 0, rc = 0, mixer_width, i, z_pos;
2850     + int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
2851    
2852     struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
2853     int multirect_count = 0;
2854     @@ -852,9 +852,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
2855    
2856     memset(pipe_staged, 0, sizeof(pipe_staged));
2857    
2858     - mixer_width = mode->hdisplay / cstate->num_mixers;
2859     + if (cstate->num_mixers) {
2860     + mixer_width = mode->hdisplay / cstate->num_mixers;
2861    
2862     - _dpu_crtc_setup_lm_bounds(crtc, state);
2863     + _dpu_crtc_setup_lm_bounds(crtc, state);
2864     + }
2865    
2866     crtc_rect.x2 = mode->hdisplay;
2867     crtc_rect.y2 = mode->vdisplay;
2868     diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
2869     index e8506335cd155..1694a7deb9133 100644
2870     --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
2871     +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
2872     @@ -26,6 +26,7 @@
2873     #include <drm/drm_drv.h>
2874     #include <drm/drm_fb_cma_helper.h>
2875     #include <drm/drm_fb_helper.h>
2876     +#include <drm/drm_fourcc.h>
2877     #include <drm/drm_gem_cma_helper.h>
2878     #include <drm/drm_gem_framebuffer_helper.h>
2879     #include <drm/drm_irq.h>
2880     @@ -87,8 +88,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
2881     clk_disable_unprepare(mxsfb->clk_axi);
2882     }
2883    
2884     +static struct drm_framebuffer *
2885     +mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
2886     + const struct drm_mode_fb_cmd2 *mode_cmd)
2887     +{
2888     + const struct drm_format_info *info;
2889     +
2890     + info = drm_get_format_info(dev, mode_cmd);
2891     + if (!info)
2892     + return ERR_PTR(-EINVAL);
2893     +
2894     + if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
2895     + dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
2896     + return ERR_PTR(-EINVAL);
2897     + }
2898     +
2899     + return drm_gem_fb_create(dev, file_priv, mode_cmd);
2900     +}
2901     +
2902     static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
2903     - .fb_create = drm_gem_fb_create,
2904     + .fb_create = mxsfb_fb_create,
2905     .atomic_check = drm_atomic_helper_check,
2906     .atomic_commit = drm_atomic_helper_commit,
2907     };
2908     diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
2909     index 6d9656323a3f4..f0ea782df836d 100644
2910     --- a/drivers/gpu/drm/panel/panel-simple.c
2911     +++ b/drivers/gpu/drm/panel/panel-simple.c
2912     @@ -2382,12 +2382,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
2913     static const struct panel_desc ortustech_com43h4m85ulc = {
2914     .modes = &ortustech_com43h4m85ulc_mode,
2915     .num_modes = 1,
2916     - .bpc = 8,
2917     + .bpc = 6,
2918     .size = {
2919     .width = 56,
2920     .height = 93,
2921     },
2922     - .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
2923     + .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
2924     .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
2925     };
2926    
2927     diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
2928     index 8822ec13a0d61..0d39a201c7591 100644
2929     --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
2930     +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
2931     @@ -75,6 +75,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
2932     return 0;
2933     }
2934    
2935     +void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
2936     +{
2937     + /*
2938     + * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need
2939     + * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
2940     + * to operate correctly.
2941     + */
2942     + gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
2943     + gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
2944     +}
2945     +
2946     static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
2947     {
2948     u32 quirks = 0;
2949     @@ -304,6 +315,8 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
2950     int ret;
2951     u32 val;
2952    
2953     + panfrost_gpu_init_quirks(pfdev);
2954     +
2955     /* Just turn on everything for now */
2956     gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
2957     ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
2958     @@ -357,7 +370,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
2959     return err;
2960     }
2961    
2962     - panfrost_gpu_init_quirks(pfdev);
2963     panfrost_gpu_power_on(pfdev);
2964    
2965     return 0;
2966     diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
2967     index 4112412087b27..468c51e7e46db 100644
2968     --- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
2969     +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
2970     @@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
2971     void panfrost_gpu_power_on(struct panfrost_device *pfdev);
2972     void panfrost_gpu_power_off(struct panfrost_device *pfdev);
2973    
2974     +void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev);
2975     +
2976     #endif
2977     diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
2978     index ea38ac60581c6..eddaa62ad8b0e 100644
2979     --- a/drivers/gpu/drm/panfrost/panfrost_regs.h
2980     +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
2981     @@ -51,6 +51,10 @@
2982     #define GPU_STATUS 0x34
2983     #define GPU_STATUS_PRFCNT_ACTIVE BIT(2)
2984     #define GPU_LATEST_FLUSH_ID 0x38
2985     +#define GPU_PWR_KEY 0x50 /* (WO) Power manager key register */
2986     +#define GPU_PWR_KEY_UNLOCK 0x2968A819
2987     +#define GPU_PWR_OVERRIDE0 0x54 /* (RW) Power manager override settings */
2988     +#define GPU_PWR_OVERRIDE1 0x58 /* (RW) Power manager override settings */
2989     #define GPU_FAULT_STATUS 0x3C
2990     #define GPU_FAULT_ADDRESS_LO 0x40
2991     #define GPU_FAULT_ADDRESS_HI 0x44
2992     diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
2993     index c190702fab726..6dcc05ab31eba 100644
2994     --- a/drivers/gpu/drm/virtio/virtgpu_kms.c
2995     +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
2996     @@ -96,8 +96,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
2997     vgdev->capsets[i].id > 0, 5 * HZ);
2998     if (ret == 0) {
2999     DRM_ERROR("timed out waiting for cap set %d\n", i);
3000     + spin_lock(&vgdev->display_info_lock);
3001     kfree(vgdev->capsets);
3002     vgdev->capsets = NULL;
3003     + spin_unlock(&vgdev->display_info_lock);
3004     return;
3005     }
3006     DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
3007     diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
3008     index 7ac20490e1b4c..92022a83bbd5e 100644
3009     --- a/drivers/gpu/drm/virtio/virtgpu_vq.c
3010     +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
3011     @@ -572,9 +572,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
3012     int i = le32_to_cpu(cmd->capset_index);
3013    
3014     spin_lock(&vgdev->display_info_lock);
3015     - vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
3016     - vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
3017     - vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
3018     + if (vgdev->capsets) {
3019     + vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
3020     + vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
3021     + vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
3022     + } else {
3023     + DRM_ERROR("invalid capset memory.");
3024     + }
3025     spin_unlock(&vgdev->display_info_lock);
3026     wake_up(&vgdev->resp_wq);
3027     }
3028     diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
3029     index d5585695c64d1..45d6ebbdbdb22 100644
3030     --- a/drivers/gpu/drm/vkms/vkms_composer.c
3031     +++ b/drivers/gpu/drm/vkms/vkms_composer.c
3032     @@ -33,7 +33,7 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
3033     + (i * composer->pitch)
3034     + (j * composer->cpp);
3035     /* XRGB format ignores Alpha channel */
3036     - memset(vaddr_out + src_offset + 24, 0, 8);
3037     + bitmap_clear(vaddr_out + src_offset, 24, 8);
3038     crc = crc32_le(crc, vaddr_out + src_offset,
3039     sizeof(u32));
3040     }
3041     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
3042     index e03a4d794240c..7363d0b488bd8 100644
3043     --- a/drivers/hid/hid-ids.h
3044     +++ b/drivers/hid/hid-ids.h
3045     @@ -1119,6 +1119,7 @@
3046     #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
3047     #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
3048     #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
3049     +#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
3050     #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
3051    
3052     #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
3053     diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
3054     index e3d475f4baf66..b2bff932c524f 100644
3055     --- a/drivers/hid/hid-input.c
3056     +++ b/drivers/hid/hid-input.c
3057     @@ -797,7 +797,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
3058     case 0x3b: /* Battery Strength */
3059     hidinput_setup_battery(device, HID_INPUT_REPORT, field);
3060     usage->type = EV_PWR;
3061     - goto ignore;
3062     + return;
3063    
3064     case 0x3c: /* Invert */
3065     map_key_clear(BTN_TOOL_RUBBER);
3066     @@ -1059,7 +1059,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
3067     case HID_DC_BATTERYSTRENGTH:
3068     hidinput_setup_battery(device, HID_INPUT_REPORT, field);
3069     usage->type = EV_PWR;
3070     - goto ignore;
3071     + return;
3072     }
3073     goto unknown;
3074    
3075     diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
3076     index 6c55682c59740..044a93f3c1178 100644
3077     --- a/drivers/hid/hid-ite.c
3078     +++ b/drivers/hid/hid-ite.c
3079     @@ -44,6 +44,10 @@ static const struct hid_device_id ite_devices[] = {
3080     { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
3081     USB_VENDOR_ID_SYNAPTICS,
3082     USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
3083     + /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
3084     + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
3085     + USB_VENDOR_ID_SYNAPTICS,
3086     + USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
3087     { }
3088     };
3089     MODULE_DEVICE_TABLE(hid, ite_devices);
3090     diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
3091     index 1a6e600197d0b..509b9bb1362cb 100644
3092     --- a/drivers/hid/hid-roccat-kone.c
3093     +++ b/drivers/hid/hid-roccat-kone.c
3094     @@ -294,31 +294,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
3095     struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
3096     struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
3097     int retval = 0, difference, old_profile;
3098     + struct kone_settings *settings = (struct kone_settings *)buf;
3099    
3100     /* I need to get my data in one piece */
3101     if (off != 0 || count != sizeof(struct kone_settings))
3102     return -EINVAL;
3103    
3104     mutex_lock(&kone->kone_lock);
3105     - difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
3106     + difference = memcmp(settings, &kone->settings,
3107     + sizeof(struct kone_settings));
3108     if (difference) {
3109     - retval = kone_set_settings(usb_dev,
3110     - (struct kone_settings const *)buf);
3111     - if (retval) {
3112     - mutex_unlock(&kone->kone_lock);
3113     - return retval;
3114     + if (settings->startup_profile < 1 ||
3115     + settings->startup_profile > 5) {
3116     + retval = -EINVAL;
3117     + goto unlock;
3118     }
3119    
3120     + retval = kone_set_settings(usb_dev, settings);
3121     + if (retval)
3122     + goto unlock;
3123     +
3124     old_profile = kone->settings.startup_profile;
3125     - memcpy(&kone->settings, buf, sizeof(struct kone_settings));
3126     + memcpy(&kone->settings, settings, sizeof(struct kone_settings));
3127    
3128     kone_profile_activated(kone, kone->settings.startup_profile);
3129    
3130     if (kone->settings.startup_profile != old_profile)
3131     kone_profile_report(kone, kone->settings.startup_profile);
3132     }
3133     +unlock:
3134     mutex_unlock(&kone->kone_lock);
3135    
3136     + if (retval)
3137     + return retval;
3138     +
3139     return sizeof(struct kone_settings);
3140     }
3141     static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
3142     diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
3143     index 5c63a66007293..e22617def70bf 100644
3144     --- a/drivers/hwmon/pmbus/max34440.c
3145     +++ b/drivers/hwmon/pmbus/max34440.c
3146     @@ -387,7 +387,6 @@ static struct pmbus_driver_info max34440_info[] = {
3147     .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
3148     .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
3149     .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
3150     - .read_byte_data = max34440_read_byte_data,
3151     .read_word_data = max34440_read_word_data,
3152     .write_word_data = max34440_write_word_data,
3153     },
3154     @@ -418,7 +417,6 @@ static struct pmbus_driver_info max34440_info[] = {
3155     .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
3156     .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
3157     .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
3158     - .read_byte_data = max34440_read_byte_data,
3159     .read_word_data = max34440_read_word_data,
3160     .write_word_data = max34440_write_word_data,
3161     },
3162     @@ -454,7 +452,6 @@ static struct pmbus_driver_info max34440_info[] = {
3163     .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
3164     .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
3165     .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
3166     - .read_byte_data = max34440_read_byte_data,
3167     .read_word_data = max34440_read_word_data,
3168     .write_word_data = max34440_write_word_data,
3169     },
3170     diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
3171     index 84f1dcb698272..9b0c5d719232f 100644
3172     --- a/drivers/hwtracing/coresight/coresight-etm-perf.c
3173     +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
3174     @@ -126,10 +126,10 @@ static void free_sink_buffer(struct etm_event_data *event_data)
3175     cpumask_t *mask = &event_data->mask;
3176     struct coresight_device *sink;
3177    
3178     - if (WARN_ON(cpumask_empty(mask)))
3179     + if (!event_data->snk_config)
3180     return;
3181    
3182     - if (!event_data->snk_config)
3183     + if (WARN_ON(cpumask_empty(mask)))
3184     return;
3185    
3186     cpu = cpumask_first(mask);
3187     @@ -310,6 +310,16 @@ static void etm_event_start(struct perf_event *event, int flags)
3188     if (!event_data)
3189     goto fail;
3190    
3191     + /*
3192     + * Check if this ETM is allowed to trace, as decided
3193     + * at etm_setup_aux(). This could be due to an unreachable
3194     + * sink from this ETM. We can't do much in this case if
3195     + * the sink was specified or hinted to the driver. For
3196     + * now, simply don't record anything on this ETM.
3197     + */
3198     + if (!cpumask_test_cpu(cpu, &event_data->mask))
3199     + goto fail_end_stop;
3200     +
3201     path = etm_event_cpu_path(event_data, cpu);
3202     /* We need a sink, no need to continue without one */
3203     sink = coresight_get_sink(path);
3204     diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
3205     index 146ce40d8e0aa..2d08a8719506c 100644
3206     --- a/drivers/i2c/busses/Kconfig
3207     +++ b/drivers/i2c/busses/Kconfig
3208     @@ -1162,6 +1162,7 @@ config I2C_RCAR
3209     tristate "Renesas R-Car I2C Controller"
3210     depends on ARCH_RENESAS || COMPILE_TEST
3211     select I2C_SLAVE
3212     + select RESET_CONTROLLER if ARCH_RCAR_GEN3
3213     help
3214     If you say yes to this option, support will be included for the
3215     R-Car I2C controller.
3216     diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
3217     index ce70b5288472c..c70983780ae79 100644
3218     --- a/drivers/i2c/i2c-core-acpi.c
3219     +++ b/drivers/i2c/i2c-core-acpi.c
3220     @@ -264,6 +264,7 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
3221     void i2c_acpi_register_devices(struct i2c_adapter *adap)
3222     {
3223     acpi_status status;
3224     + acpi_handle handle;
3225    
3226     if (!has_acpi_companion(&adap->dev))
3227     return;
3228     @@ -274,6 +275,15 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
3229     adap, NULL);
3230     if (ACPI_FAILURE(status))
3231     dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
3232     +
3233     + if (!adap->dev.parent)
3234     + return;
3235     +
3236     + handle = ACPI_HANDLE(adap->dev.parent);
3237     + if (!handle)
3238     + return;
3239     +
3240     + acpi_walk_dep_device_list(handle);
3241     }
3242    
3243     const struct acpi_device_id *
3244     @@ -737,7 +747,6 @@ int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
3245     return -ENOMEM;
3246     }
3247    
3248     - acpi_walk_dep_device_list(handle);
3249     return 0;
3250     }
3251    
3252     diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
3253     index 5c051dba32a51..6cc71c90f85ea 100644
3254     --- a/drivers/i3c/master.c
3255     +++ b/drivers/i3c/master.c
3256     @@ -1760,6 +1760,21 @@ static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
3257     i3c_master_detach_free_devs(master);
3258     }
3259    
3260     +static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev)
3261     +{
3262     + struct i3c_master_controller *master = i3cdev->common.master;
3263     + struct i3c_dev_boardinfo *i3cboardinfo;
3264     +
3265     + list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
3266     + if (i3cdev->info.pid != i3cboardinfo->pid)
3267     + continue;
3268     +
3269     + i3cdev->boardinfo = i3cboardinfo;
3270     + i3cdev->info.static_addr = i3cboardinfo->static_addr;
3271     + return;
3272     + }
3273     +}
3274     +
3275     static struct i3c_dev_desc *
3276     i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
3277     {
3278     @@ -1815,10 +1830,10 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
3279     if (ret)
3280     goto err_detach_dev;
3281    
3282     + i3c_master_attach_boardinfo(newdev);
3283     +
3284     olddev = i3c_master_search_i3c_dev_duplicate(newdev);
3285     if (olddev) {
3286     - newdev->boardinfo = olddev->boardinfo;
3287     - newdev->info.static_addr = olddev->info.static_addr;
3288     newdev->dev = olddev->dev;
3289     if (newdev->dev)
3290     newdev->dev->desc = newdev;
3291     diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
3292     index 10db0bf0655a9..6d5719cea9f53 100644
3293     --- a/drivers/i3c/master/i3c-master-cdns.c
3294     +++ b/drivers/i3c/master/i3c-master-cdns.c
3295     @@ -1593,8 +1593,10 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
3296     master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
3297     sizeof(*master->ibi.slots),
3298     GFP_KERNEL);
3299     - if (!master->ibi.slots)
3300     + if (!master->ibi.slots) {
3301     + ret = -ENOMEM;
3302     goto err_disable_sysclk;
3303     + }
3304    
3305     writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
3306     writel(MST_INT_IBIR_THR, master->regs + MST_IER);
3307     diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
3308     index 74f3a2be17a64..14d6a537289cb 100644
3309     --- a/drivers/iio/adc/stm32-adc-core.c
3310     +++ b/drivers/iio/adc/stm32-adc-core.c
3311     @@ -780,6 +780,13 @@ static int stm32_adc_core_runtime_resume(struct device *dev)
3312     {
3313     return stm32_adc_core_hw_start(dev);
3314     }
3315     +
3316     +static int stm32_adc_core_runtime_idle(struct device *dev)
3317     +{
3318     + pm_runtime_mark_last_busy(dev);
3319     +
3320     + return 0;
3321     +}
3322     #endif
3323    
3324     static const struct dev_pm_ops stm32_adc_core_pm_ops = {
3325     @@ -787,7 +794,7 @@ static const struct dev_pm_ops stm32_adc_core_pm_ops = {
3326     pm_runtime_force_resume)
3327     SET_RUNTIME_PM_OPS(stm32_adc_core_runtime_suspend,
3328     stm32_adc_core_runtime_resume,
3329     - NULL)
3330     + stm32_adc_core_runtime_idle)
3331     };
3332    
3333     static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
3334     diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
3335     index e3cd9d2b0dd2b..98d2d74b96f78 100644
3336     --- a/drivers/infiniband/core/cma.c
3337     +++ b/drivers/infiniband/core/cma.c
3338     @@ -1803,19 +1803,30 @@ static void cma_release_port(struct rdma_id_private *id_priv)
3339     mutex_unlock(&lock);
3340     }
3341    
3342     -static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
3343     - struct cma_multicast *mc)
3344     +static void destroy_mc(struct rdma_id_private *id_priv,
3345     + struct cma_multicast *mc)
3346     {
3347     - struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3348     - struct net_device *ndev = NULL;
3349     + if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) {
3350     + ib_sa_free_multicast(mc->multicast.ib);
3351     + kfree(mc);
3352     + return;
3353     + }
3354    
3355     - if (dev_addr->bound_dev_if)
3356     - ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
3357     - if (ndev) {
3358     - cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
3359     - dev_put(ndev);
3360     + if (rdma_protocol_roce(id_priv->id.device,
3361     + id_priv->id.port_num)) {
3362     + struct rdma_dev_addr *dev_addr =
3363     + &id_priv->id.route.addr.dev_addr;
3364     + struct net_device *ndev = NULL;
3365     +
3366     + if (dev_addr->bound_dev_if)
3367     + ndev = dev_get_by_index(dev_addr->net,
3368     + dev_addr->bound_dev_if);
3369     + if (ndev) {
3370     + cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
3371     + dev_put(ndev);
3372     + }
3373     + kref_put(&mc->mcref, release_mc);
3374     }
3375     - kref_put(&mc->mcref, release_mc);
3376     }
3377    
3378     static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
3379     @@ -1823,16 +1834,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
3380     struct cma_multicast *mc;
3381    
3382     while (!list_empty(&id_priv->mc_list)) {
3383     - mc = container_of(id_priv->mc_list.next,
3384     - struct cma_multicast, list);
3385     + mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
3386     + list);
3387     list_del(&mc->list);
3388     - if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
3389     - id_priv->id.port_num)) {
3390     - ib_sa_free_multicast(mc->multicast.ib);
3391     - kfree(mc);
3392     - } else {
3393     - cma_leave_roce_mc_group(id_priv, mc);
3394     - }
3395     + destroy_mc(id_priv, mc);
3396     }
3397     }
3398    
3399     @@ -4182,16 +4187,6 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
3400     else
3401     pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
3402     status);
3403     - mutex_lock(&id_priv->qp_mutex);
3404     - if (!status && id_priv->id.qp) {
3405     - status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
3406     - be16_to_cpu(multicast->rec.mlid));
3407     - if (status)
3408     - pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
3409     - status);
3410     - }
3411     - mutex_unlock(&id_priv->qp_mutex);
3412     -
3413     event.status = status;
3414     event.param.ud.private_data = mc->context;
3415     if (!status) {
3416     @@ -4446,6 +4441,10 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
3417     struct cma_multicast *mc;
3418     int ret;
3419    
3420     + /* Not supported for kernel QPs */
3421     + if (WARN_ON(id->qp))
3422     + return -EINVAL;
3423     +
3424     if (!id->device)
3425     return -EINVAL;
3426    
3427     @@ -4496,25 +4495,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
3428     id_priv = container_of(id, struct rdma_id_private, id);
3429     spin_lock_irq(&id_priv->lock);
3430     list_for_each_entry(mc, &id_priv->mc_list, list) {
3431     - if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
3432     - list_del(&mc->list);
3433     - spin_unlock_irq(&id_priv->lock);
3434     -
3435     - if (id->qp)
3436     - ib_detach_mcast(id->qp,
3437     - &mc->multicast.ib->rec.mgid,
3438     - be16_to_cpu(mc->multicast.ib->rec.mlid));
3439     -
3440     - BUG_ON(id_priv->cma_dev->device != id->device);
3441     -
3442     - if (rdma_cap_ib_mcast(id->device, id->port_num)) {
3443     - ib_sa_free_multicast(mc->multicast.ib);
3444     - kfree(mc);
3445     - } else if (rdma_protocol_roce(id->device, id->port_num)) {
3446     - cma_leave_roce_mc_group(id_priv, mc);
3447     - }
3448     - return;
3449     - }
3450     + if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
3451     + continue;
3452     + list_del(&mc->list);
3453     + spin_unlock_irq(&id_priv->lock);
3454     +
3455     + WARN_ON(id_priv->cma_dev->device != id->device);
3456     + destroy_mc(id_priv, mc);
3457     + return;
3458     }
3459     spin_unlock_irq(&id_priv->lock);
3460     }
3461     diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
3462     index f4f79f1292b91..ef4be14af3bb9 100644
3463     --- a/drivers/infiniband/core/ucma.c
3464     +++ b/drivers/infiniband/core/ucma.c
3465     @@ -581,6 +581,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
3466     list_move_tail(&uevent->list, &list);
3467     }
3468     list_del(&ctx->list);
3469     + events_reported = ctx->events_reported;
3470     mutex_unlock(&ctx->file->mut);
3471    
3472     list_for_each_entry_safe(uevent, tmp, &list, list) {
3473     @@ -590,7 +591,6 @@ static int ucma_free_ctx(struct ucma_context *ctx)
3474     kfree(uevent);
3475     }
3476    
3477     - events_reported = ctx->events_reported;
3478     mutex_destroy(&ctx->mutex);
3479     kfree(ctx);
3480     return events_reported;
3481     @@ -1473,7 +1473,9 @@ static ssize_t ucma_process_join(struct ucma_file *file,
3482     return 0;
3483    
3484     err3:
3485     + mutex_lock(&ctx->mutex);
3486     rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
3487     + mutex_unlock(&ctx->mutex);
3488     ucma_cleanup_mc_events(mc);
3489     err2:
3490     xa_erase(&multicast_table, mc->id);
3491     @@ -1639,7 +1641,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
3492    
3493     cur_file = ctx->file;
3494     if (cur_file == new_file) {
3495     + mutex_lock(&cur_file->mut);
3496     resp.events_reported = ctx->events_reported;
3497     + mutex_unlock(&cur_file->mut);
3498     goto response;
3499     }
3500    
3501     diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
3502     index 0d42ba8c0b696..650f71dd4ab93 100644
3503     --- a/drivers/infiniband/core/umem.c
3504     +++ b/drivers/infiniband/core/umem.c
3505     @@ -151,13 +151,24 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
3506     dma_addr_t mask;
3507     int i;
3508    
3509     + /* rdma_for_each_block() has a bug if the page size is smaller than the
3510     + * page size used to build the umem. For now prevent smaller page sizes
3511     + * from being returned.
3512     + */
3513     + pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
3514     +
3515     /* At minimum, drivers must support PAGE_SIZE or smaller */
3516     if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
3517     return 0;
3518    
3519     va = virt;
3520     - /* max page size not to exceed MR length */
3521     - mask = roundup_pow_of_two(umem->length);
3522     + /* The best result is the smallest page size that results in the minimum
3523     + * number of required pages. Compute the largest page size that could
3524     + * work based on VA address bits that don't change.
3525     + */
3526     + mask = pgsz_bitmap &
3527     + GENMASK(BITS_PER_LONG - 1,
3528     + bits_per((umem->length - 1 + virt) ^ virt));
3529     /* offset into first SGL */
3530     pgoff = umem->address & ~PAGE_MASK;
3531    
3532     diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
3533     index a79fa67df8715..a405c64d2a82b 100644
3534     --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
3535     +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
3536     @@ -271,7 +271,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
3537     ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
3538     break;
3539     case IB_WR_LOCAL_INV:
3540     - break;
3541     case IB_WR_ATOMIC_CMP_AND_SWP:
3542     case IB_WR_ATOMIC_FETCH_AND_ADD:
3543     case IB_WR_LSO:
3544     diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
3545     index 0502c90c83edd..bb75328193957 100644
3546     --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
3547     +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
3548     @@ -4616,7 +4616,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3549     qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
3550     V2_QPC_BYTE_212_RETRY_CNT_M,
3551     V2_QPC_BYTE_212_RETRY_CNT_S);
3552     - qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
3553     + qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
3554     + V2_QPC_BYTE_244_RNR_CNT_M,
3555     + V2_QPC_BYTE_244_RNR_CNT_S);
3556    
3557     done:
3558     qp_attr->cur_qp_state = qp_attr->qp_state;
3559     @@ -4632,6 +4634,7 @@ done:
3560     }
3561    
3562     qp_init_attr->cap = qp_attr->cap;
3563     + qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
3564    
3565     out:
3566     mutex_unlock(&hr_qp->mutex);
3567     diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
3568     index 8dd2d666f6875..730e50c87a760 100644
3569     --- a/drivers/infiniband/hw/hns/hns_roce_qp.c
3570     +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
3571     @@ -1181,8 +1181,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3572    
3573     mutex_lock(&hr_qp->mutex);
3574    
3575     - cur_state = attr_mask & IB_QP_CUR_STATE ?
3576     - attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
3577     + if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
3578     + goto out;
3579     +
3580     + cur_state = hr_qp->state;
3581     new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
3582    
3583     if (ibqp->uobject &&
3584     diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
3585     index 8feec35f95a7c..6d6719fa7e46a 100644
3586     --- a/drivers/infiniband/hw/i40iw/i40iw.h
3587     +++ b/drivers/infiniband/hw/i40iw/i40iw.h
3588     @@ -398,8 +398,8 @@ static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
3589     }
3590    
3591     /* i40iw.c */
3592     -void i40iw_add_ref(struct ib_qp *);
3593     -void i40iw_rem_ref(struct ib_qp *);
3594     +void i40iw_qp_add_ref(struct ib_qp *ibqp);
3595     +void i40iw_qp_rem_ref(struct ib_qp *ibqp);
3596     struct ib_qp *i40iw_get_qp(struct ib_device *, int);
3597    
3598     void i40iw_flush_wqes(struct i40iw_device *iwdev,
3599     @@ -543,9 +543,8 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
3600     bool wait);
3601     void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
3602     void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
3603     -void i40iw_free_qp_resources(struct i40iw_device *iwdev,
3604     - struct i40iw_qp *iwqp,
3605     - u32 qp_num);
3606     +void i40iw_free_qp_resources(struct i40iw_qp *iwqp);
3607     +
3608     enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
3609     struct i40iw_dma_mem *memptr,
3610     u32 size, u32 mask);
3611     diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
3612     index fa7a5ff498c73..56c1e9abc52dc 100644
3613     --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
3614     +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
3615     @@ -2322,7 +2322,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
3616     iwqp = cm_node->iwqp;
3617     if (iwqp) {
3618     iwqp->cm_node = NULL;
3619     - i40iw_rem_ref(&iwqp->ibqp);
3620     + i40iw_qp_rem_ref(&iwqp->ibqp);
3621     cm_node->iwqp = NULL;
3622     } else if (cm_node->qhash_set) {
3623     i40iw_get_addr_info(cm_node, &nfo);
3624     @@ -3452,7 +3452,7 @@ void i40iw_cm_disconn(struct i40iw_qp *iwqp)
3625     kfree(work);
3626     return;
3627     }
3628     - i40iw_add_ref(&iwqp->ibqp);
3629     + i40iw_qp_add_ref(&iwqp->ibqp);
3630     spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
3631    
3632     work->iwqp = iwqp;
3633     @@ -3623,7 +3623,7 @@ static void i40iw_disconnect_worker(struct work_struct *work)
3634    
3635     kfree(dwork);
3636     i40iw_cm_disconn_true(iwqp);
3637     - i40iw_rem_ref(&iwqp->ibqp);
3638     + i40iw_qp_rem_ref(&iwqp->ibqp);
3639     }
3640    
3641     /**
3642     @@ -3745,7 +3745,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3643     cm_node->lsmm_size = accept.size + conn_param->private_data_len;
3644     i40iw_cm_init_tsa_conn(iwqp, cm_node);
3645     cm_id->add_ref(cm_id);
3646     - i40iw_add_ref(&iwqp->ibqp);
3647     + i40iw_qp_add_ref(&iwqp->ibqp);
3648    
3649     attr.qp_state = IB_QPS_RTS;
3650     cm_node->qhash_set = false;
3651     @@ -3908,7 +3908,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3652     iwqp->cm_node = cm_node;
3653     cm_node->iwqp = iwqp;
3654     iwqp->cm_id = cm_id;
3655     - i40iw_add_ref(&iwqp->ibqp);
3656     + i40iw_qp_add_ref(&iwqp->ibqp);
3657    
3658     if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
3659     cm_node->state = I40IW_CM_STATE_SYN_SENT;
3660     diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
3661     index ae8b97c306657..a7512508f7e60 100644
3662     --- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
3663     +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
3664     @@ -313,7 +313,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
3665     __func__, info->qp_cq_id);
3666     continue;
3667     }
3668     - i40iw_add_ref(&iwqp->ibqp);
3669     + i40iw_qp_add_ref(&iwqp->ibqp);
3670     spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
3671     qp = &iwqp->sc_qp;
3672     spin_lock_irqsave(&iwqp->lock, flags);
3673     @@ -427,7 +427,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
3674     break;
3675     }
3676     if (info->qp)
3677     - i40iw_rem_ref(&iwqp->ibqp);
3678     + i40iw_qp_rem_ref(&iwqp->ibqp);
3679     } while (1);
3680    
3681     if (aeqcnt)
3682     diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
3683     index 016524683e17e..72db7c1dc2998 100644
3684     --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
3685     +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
3686     @@ -479,25 +479,6 @@ void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
3687     }
3688     }
3689    
3690     -/**
3691     - * i40iw_free_qp - callback after destroy cqp completes
3692     - * @cqp_request: cqp request for destroy qp
3693     - * @num: not used
3694     - */
3695     -static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
3696     -{
3697     - struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
3698     - struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
3699     - struct i40iw_device *iwdev;
3700     - u32 qp_num = iwqp->ibqp.qp_num;
3701     -
3702     - iwdev = iwqp->iwdev;
3703     -
3704     - i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
3705     - i40iw_free_qp_resources(iwdev, iwqp, qp_num);
3706     - i40iw_rem_devusecount(iwdev);
3707     -}
3708     -
3709     /**
3710     * i40iw_wait_event - wait for completion
3711     * @iwdev: iwarp device
3712     @@ -618,26 +599,23 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
3713     }
3714    
3715     /**
3716     - * i40iw_add_ref - add refcount for qp
3717     + * i40iw_qp_add_ref - add refcount for qp
3718     * @ibqp: iqarp qp
3719     */
3720     -void i40iw_add_ref(struct ib_qp *ibqp)
3721     +void i40iw_qp_add_ref(struct ib_qp *ibqp)
3722     {
3723     struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
3724    
3725     - atomic_inc(&iwqp->refcount);
3726     + refcount_inc(&iwqp->refcount);
3727     }
3728    
3729     /**
3730     - * i40iw_rem_ref - rem refcount for qp and free if 0
3731     + * i40iw_qp_rem_ref - rem refcount for qp and free if 0
3732     * @ibqp: iqarp qp
3733     */
3734     -void i40iw_rem_ref(struct ib_qp *ibqp)
3735     +void i40iw_qp_rem_ref(struct ib_qp *ibqp)
3736     {
3737     struct i40iw_qp *iwqp;
3738     - enum i40iw_status_code status;
3739     - struct i40iw_cqp_request *cqp_request;
3740     - struct cqp_commands_info *cqp_info;
3741     struct i40iw_device *iwdev;
3742     u32 qp_num;
3743     unsigned long flags;
3744     @@ -645,7 +623,7 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
3745     iwqp = to_iwqp(ibqp);
3746     iwdev = iwqp->iwdev;
3747     spin_lock_irqsave(&iwdev->qptable_lock, flags);
3748     - if (!atomic_dec_and_test(&iwqp->refcount)) {
3749     + if (!refcount_dec_and_test(&iwqp->refcount)) {
3750     spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
3751     return;
3752     }
3753     @@ -653,25 +631,8 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
3754     qp_num = iwqp->ibqp.qp_num;
3755     iwdev->qp_table[qp_num] = NULL;
3756     spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
3757     - cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
3758     - if (!cqp_request)
3759     - return;
3760     -
3761     - cqp_request->callback_fcn = i40iw_free_qp;
3762     - cqp_request->param = (void *)&iwqp->sc_qp;
3763     - cqp_info = &cqp_request->info;
3764     - cqp_info->cqp_cmd = OP_QP_DESTROY;
3765     - cqp_info->post_sq = 1;
3766     - cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
3767     - cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
3768     - cqp_info->in.u.qp_destroy.remove_hash_idx = true;
3769     - status = i40iw_handle_cqp_op(iwdev, cqp_request);
3770     - if (!status)
3771     - return;
3772     + complete(&iwqp->free_qp);
3773    
3774     - i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
3775     - i40iw_free_qp_resources(iwdev, iwqp, qp_num);
3776     - i40iw_rem_devusecount(iwdev);
3777     }
3778    
3779     /**
3780     @@ -938,7 +899,7 @@ static void i40iw_terminate_timeout(struct timer_list *t)
3781     struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
3782    
3783     i40iw_terminate_done(qp, 1);
3784     - i40iw_rem_ref(&iwqp->ibqp);
3785     + i40iw_qp_rem_ref(&iwqp->ibqp);
3786     }
3787    
3788     /**
3789     @@ -950,7 +911,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
3790     struct i40iw_qp *iwqp;
3791    
3792     iwqp = (struct i40iw_qp *)qp->back_qp;
3793     - i40iw_add_ref(&iwqp->ibqp);
3794     + i40iw_qp_add_ref(&iwqp->ibqp);
3795     timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
3796     iwqp->terminate_timer.expires = jiffies + HZ;
3797     add_timer(&iwqp->terminate_timer);
3798     @@ -966,7 +927,7 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
3799    
3800     iwqp = (struct i40iw_qp *)qp->back_qp;
3801     if (del_timer(&iwqp->terminate_timer))
3802     - i40iw_rem_ref(&iwqp->ibqp);
3803     + i40iw_qp_rem_ref(&iwqp->ibqp);
3804     }
3805    
3806     /**
3807     diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
3808     index cd9ee1664a69e..22bf4f09c0647 100644
3809     --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
3810     +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
3811     @@ -366,11 +366,11 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
3812     * @iwqp: qp ptr (user or kernel)
3813     * @qp_num: qp number assigned
3814     */
3815     -void i40iw_free_qp_resources(struct i40iw_device *iwdev,
3816     - struct i40iw_qp *iwqp,
3817     - u32 qp_num)
3818     +void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
3819     {
3820     struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
3821     + struct i40iw_device *iwdev = iwqp->iwdev;
3822     + u32 qp_num = iwqp->ibqp.qp_num;
3823    
3824     i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
3825     i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
3826     @@ -404,6 +404,10 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
3827     static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3828     {
3829     struct i40iw_qp *iwqp = to_iwqp(ibqp);
3830     + struct ib_qp_attr attr;
3831     + struct i40iw_device *iwdev = iwqp->iwdev;
3832     +
3833     + memset(&attr, 0, sizeof(attr));
3834    
3835     iwqp->destroyed = 1;
3836    
3837     @@ -418,7 +422,15 @@ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3838     }
3839     }
3840    
3841     - i40iw_rem_ref(&iwqp->ibqp);
3842     + attr.qp_state = IB_QPS_ERR;
3843     + i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
3844     + i40iw_qp_rem_ref(&iwqp->ibqp);
3845     + wait_for_completion(&iwqp->free_qp);
3846     + i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
3847     + i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
3848     + i40iw_free_qp_resources(iwqp);
3849     + i40iw_rem_devusecount(iwdev);
3850     +
3851     return 0;
3852     }
3853    
3854     @@ -579,6 +591,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
3855     qp->back_qp = (void *)iwqp;
3856     qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
3857    
3858     + iwqp->iwdev = iwdev;
3859     iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
3860    
3861     if (i40iw_allocate_dma_mem(dev->hw,
3862     @@ -603,7 +616,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
3863     goto error;
3864     }
3865    
3866     - iwqp->iwdev = iwdev;
3867     iwqp->iwpd = iwpd;
3868     iwqp->ibqp.qp_num = qp_num;
3869     qp = &iwqp->sc_qp;
3870     @@ -717,7 +729,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
3871     goto error;
3872     }
3873    
3874     - i40iw_add_ref(&iwqp->ibqp);
3875     + refcount_set(&iwqp->refcount, 1);
3876     spin_lock_init(&iwqp->lock);
3877     iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
3878     iwdev->qp_table[qp_num] = iwqp;
3879     @@ -739,10 +751,11 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
3880     }
3881     init_completion(&iwqp->sq_drained);
3882     init_completion(&iwqp->rq_drained);
3883     + init_completion(&iwqp->free_qp);
3884    
3885     return &iwqp->ibqp;
3886     error:
3887     - i40iw_free_qp_resources(iwdev, iwqp, qp_num);
3888     + i40iw_free_qp_resources(iwqp);
3889     return ERR_PTR(err_code);
3890     }
3891    
3892     @@ -2654,13 +2667,13 @@ static const struct ib_device_ops i40iw_dev_ops = {
3893     .get_hw_stats = i40iw_get_hw_stats,
3894     .get_port_immutable = i40iw_port_immutable,
3895     .iw_accept = i40iw_accept,
3896     - .iw_add_ref = i40iw_add_ref,
3897     + .iw_add_ref = i40iw_qp_add_ref,
3898     .iw_connect = i40iw_connect,
3899     .iw_create_listen = i40iw_create_listen,
3900     .iw_destroy_listen = i40iw_destroy_listen,
3901     .iw_get_qp = i40iw_get_qp,
3902     .iw_reject = i40iw_reject,
3903     - .iw_rem_ref = i40iw_rem_ref,
3904     + .iw_rem_ref = i40iw_qp_rem_ref,
3905     .map_mr_sg = i40iw_map_mr_sg,
3906     .mmap = i40iw_mmap,
3907     .modify_qp = i40iw_modify_qp,
3908     diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
3909     index 3a413752ccc38..ad7d81041bc9a 100644
3910     --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
3911     +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
3912     @@ -140,7 +140,7 @@ struct i40iw_qp {
3913     struct i40iw_qp_host_ctx_info ctx_info;
3914     struct i40iwarp_offload_info iwarp_info;
3915     void *allocated_buffer;
3916     - atomic_t refcount;
3917     + refcount_t refcount;
3918     struct iw_cm_id *cm_id;
3919     void *cm_node;
3920     struct ib_mr *lsmm_mr;
3921     @@ -175,5 +175,6 @@ struct i40iw_qp {
3922     struct i40iw_dma_mem ietf_mem;
3923     struct completion sq_drained;
3924     struct completion rq_drained;
3925     + struct completion free_qp;
3926     };
3927     #endif
3928     diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
3929     index b591861934b3c..81d6a3460b55d 100644
3930     --- a/drivers/infiniband/hw/mlx4/cm.c
3931     +++ b/drivers/infiniband/hw/mlx4/cm.c
3932     @@ -280,6 +280,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
3933     if (!sriov->is_going_down && !id->scheduled_delete) {
3934     id->scheduled_delete = 1;
3935     schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
3936     + } else if (id->scheduled_delete) {
3937     + /* Adjust timeout if already scheduled */
3938     + mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
3939     }
3940     spin_unlock_irqrestore(&sriov->going_down_lock, flags);
3941     spin_unlock(&sriov->id_map_lock);
3942     diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
3943     index 57079110af9b5..08eccf2b6967d 100644
3944     --- a/drivers/infiniband/hw/mlx4/mad.c
3945     +++ b/drivers/infiniband/hw/mlx4/mad.c
3946     @@ -1307,6 +1307,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
3947     spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
3948     }
3949    
3950     +static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
3951     +{
3952     + unsigned long flags;
3953     + struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
3954     + struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
3955     +
3956     + spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
3957     + if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
3958     + queue_work(ctx->wi_wq, &ctx->work);
3959     + spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
3960     +}
3961     +
3962     static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
3963     struct mlx4_ib_demux_pv_qp *tun_qp,
3964     int index)
3965     @@ -2009,7 +2021,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
3966     cq_size *= 2;
3967    
3968     cq_attr.cqe = cq_size;
3969     - ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
3970     + ctx->cq = ib_create_cq(ctx->ib_dev,
3971     + create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
3972     NULL, ctx, &cq_attr);
3973     if (IS_ERR(ctx->cq)) {
3974     ret = PTR_ERR(ctx->cq);
3975     @@ -2046,6 +2059,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
3976     INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
3977    
3978     ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
3979     + ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
3980    
3981     ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
3982     if (ret) {
3983     @@ -2189,7 +2203,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
3984     goto err_mcg;
3985     }
3986    
3987     - snprintf(name, sizeof name, "mlx4_ibt%d", port);
3988     + snprintf(name, sizeof(name), "mlx4_ibt%d", port);
3989     ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3990     if (!ctx->wq) {
3991     pr_err("Failed to create tunnelling WQ for port %d\n", port);
3992     @@ -2197,7 +2211,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
3993     goto err_wq;
3994     }
3995    
3996     - snprintf(name, sizeof name, "mlx4_ibud%d", port);
3997     + snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
3998     + ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3999     + if (!ctx->wi_wq) {
4000     + pr_err("Failed to create wire WQ for port %d\n", port);
4001     + ret = -ENOMEM;
4002     + goto err_wiwq;
4003     + }
4004     +
4005     + snprintf(name, sizeof(name), "mlx4_ibud%d", port);
4006     ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
4007     if (!ctx->ud_wq) {
4008     pr_err("Failed to create up/down WQ for port %d\n", port);
4009     @@ -2208,6 +2230,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
4010     return 0;
4011    
4012     err_udwq:
4013     + destroy_workqueue(ctx->wi_wq);
4014     + ctx->wi_wq = NULL;
4015     +
4016     +err_wiwq:
4017     destroy_workqueue(ctx->wq);
4018     ctx->wq = NULL;
4019    
4020     @@ -2255,12 +2281,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
4021     ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
4022     }
4023     flush_workqueue(ctx->wq);
4024     + flush_workqueue(ctx->wi_wq);
4025     for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
4026     destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
4027     free_pv_object(dev, i, ctx->port);
4028     }
4029     kfree(ctx->tun);
4030     destroy_workqueue(ctx->ud_wq);
4031     + destroy_workqueue(ctx->wi_wq);
4032     destroy_workqueue(ctx->wq);
4033     }
4034     }
4035     diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
4036     index eb53bb4c0c91c..0173e3931cc7f 100644
4037     --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
4038     +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
4039     @@ -459,6 +459,7 @@ struct mlx4_ib_demux_pv_ctx {
4040     struct ib_pd *pd;
4041     struct work_struct work;
4042     struct workqueue_struct *wq;
4043     + struct workqueue_struct *wi_wq;
4044     struct mlx4_ib_demux_pv_qp qp[2];
4045     };
4046    
4047     @@ -466,6 +467,7 @@ struct mlx4_ib_demux_ctx {
4048     struct ib_device *ib_dev;
4049     int port;
4050     struct workqueue_struct *wq;
4051     + struct workqueue_struct *wi_wq;
4052     struct workqueue_struct *ud_wq;
4053     spinlock_t ud_lock;
4054     atomic64_t subnet_prefix;
4055     diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
4056     index ff664355de550..73d5b8dc74d86 100644
4057     --- a/drivers/infiniband/hw/mlx5/cq.c
4058     +++ b/drivers/infiniband/hw/mlx5/cq.c
4059     @@ -167,7 +167,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
4060     {
4061     enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
4062     struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
4063     - struct mlx5_ib_srq *srq;
4064     + struct mlx5_ib_srq *srq = NULL;
4065     struct mlx5_ib_wq *wq;
4066     u16 wqe_ctr;
4067     u8 roce_packet_type;
4068     @@ -179,7 +179,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
4069    
4070     if (qp->ibqp.xrcd) {
4071     msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
4072     - srq = to_mibsrq(msrq);
4073     + if (msrq)
4074     + srq = to_mibsrq(msrq);
4075     } else {
4076     srq = to_msrq(qp->ibqp.srq);
4077     }
4078     diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
4079     index b781ad74e6de4..40c1a05c2445d 100644
4080     --- a/drivers/infiniband/hw/mlx5/main.c
4081     +++ b/drivers/infiniband/hw/mlx5/main.c
4082     @@ -888,7 +888,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4083     /* We support 'Gappy' memory registration too */
4084     props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
4085     }
4086     - props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
4087     + /* IB_WR_REG_MR always requires changing the entity size with UMR */
4088     + if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
4089     + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
4090     if (MLX5_CAP_GEN(mdev, sho)) {
4091     props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
4092     /* At this stage no support for signature handover */
4093     diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
4094     index 4494dab8c3d83..93040c994e2e3 100644
4095     --- a/drivers/infiniband/hw/qedr/main.c
4096     +++ b/drivers/infiniband/hw/qedr/main.c
4097     @@ -601,7 +601,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
4098     qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
4099    
4100     /* Part 2 - check capabilities */
4101     - page_size = ~dev->attr.page_size_caps + 1;
4102     + page_size = ~qed_attr->page_size_caps + 1;
4103     if (page_size > PAGE_SIZE) {
4104     DP_ERR(dev,
4105     "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
4106     diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
4107     index 6dea49e11f5f0..e521f3c3dbbf1 100644
4108     --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
4109     +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
4110     @@ -736,7 +736,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
4111     struct qedr_dev *dev = ep->dev;
4112     struct qedr_qp *qp;
4113     struct qed_iwarp_accept_in params;
4114     - int rc = 0;
4115     + int rc;
4116    
4117     DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
4118    
4119     @@ -759,8 +759,10 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
4120     params.ord = conn_param->ord;
4121    
4122     if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
4123     - &qp->iwarp_cm_flags))
4124     + &qp->iwarp_cm_flags)) {
4125     + rc = -EINVAL;
4126     goto err; /* QP already destroyed */
4127     + }
4128    
4129     rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
4130     if (rc) {
4131     diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
4132     index 16a994fd7d0a7..4408d33646647 100644
4133     --- a/drivers/infiniband/hw/qedr/verbs.c
4134     +++ b/drivers/infiniband/hw/qedr/verbs.c
4135     @@ -2405,7 +2405,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
4136     qp_attr->cap.max_recv_wr = qp->rq.max_wr;
4137     qp_attr->cap.max_send_sge = qp->sq.max_sges;
4138     qp_attr->cap.max_recv_sge = qp->rq.max_sges;
4139     - qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
4140     + qp_attr->cap.max_inline_data = dev->attr.max_inline;
4141     qp_init_attr->cap = qp_attr->cap;
4142    
4143     qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
4144     @@ -2518,6 +2518,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
4145    
4146     if (rdma_protocol_iwarp(&dev->ibdev, 1))
4147     qedr_iw_qp_rem_ref(&qp->ibqp);
4148     + else
4149     + kfree(qp);
4150    
4151     return 0;
4152     }
4153     diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
4154     index 18da1e1ea9797..833f3f1b87f5e 100644
4155     --- a/drivers/infiniband/sw/rdmavt/vt.c
4156     +++ b/drivers/infiniband/sw/rdmavt/vt.c
4157     @@ -95,9 +95,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
4158     if (!rdi)
4159     return rdi;
4160    
4161     - rdi->ports = kcalloc(nports,
4162     - sizeof(struct rvt_ibport **),
4163     - GFP_KERNEL);
4164     + rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
4165     if (!rdi->ports)
4166     ib_dealloc_device(&rdi->ibdev);
4167    
4168     diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
4169     index 46e111c218fd4..9bfb98056fc2a 100644
4170     --- a/drivers/infiniband/sw/rxe/rxe_recv.c
4171     +++ b/drivers/infiniband/sw/rxe/rxe_recv.c
4172     @@ -281,6 +281,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
4173     struct rxe_mc_elem *mce;
4174     struct rxe_qp *qp;
4175     union ib_gid dgid;
4176     + struct sk_buff *per_qp_skb;
4177     + struct rxe_pkt_info *per_qp_pkt;
4178     int err;
4179    
4180     if (skb->protocol == htons(ETH_P_IP))
4181     @@ -309,21 +311,29 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
4182     if (err)
4183     continue;
4184    
4185     - /* if *not* the last qp in the list
4186     - * increase the users of the skb then post to the next qp
4187     + /* for all but the last qp create a new clone of the
4188     + * skb and pass to the qp.
4189     */
4190     if (mce->qp_list.next != &mcg->qp_list)
4191     - skb_get(skb);
4192     + per_qp_skb = skb_clone(skb, GFP_ATOMIC);
4193     + else
4194     + per_qp_skb = skb;
4195     +
4196     + if (unlikely(!per_qp_skb))
4197     + continue;
4198    
4199     - pkt->qp = qp;
4200     + per_qp_pkt = SKB_TO_PKT(per_qp_skb);
4201     + per_qp_pkt->qp = qp;
4202     rxe_add_ref(qp);
4203     - rxe_rcv_pkt(pkt, skb);
4204     + rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
4205     }
4206    
4207     spin_unlock_bh(&mcg->mcg_lock);
4208    
4209     rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
4210    
4211     + return;
4212     +
4213     err1:
4214     kfree_skb(skb);
4215     }
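Editor's note: in the rxe multicast hunk above, the per-packet metadata returned by SKB_TO_PKT() lives in the skb's control buffer, so taking an extra reference with skb_get() made every QP on the list share, and overwrite, the same pkt->qp. Cloning for all but the last consumer gives each QP its own skb and cb area. A driver-agnostic sketch of that fan-out pattern, with illustrative names only:

        #include <linux/skbuff.h>

        /* Hand the original skb to the last consumer and a clone to every
         * other one; if an atomic clone fails, only that copy is skipped.
         * 'deliver' and 'nr_consumers' are stand-ins, not driver symbols.
         */
        static void fan_out(struct sk_buff *skb, int nr_consumers,
                            void (*deliver)(struct sk_buff *skb, int idx))
        {
                int i;

                for (i = 0; i < nr_consumers; i++) {
                        struct sk_buff *s;

                        if (i < nr_consumers - 1)
                                s = skb_clone(skb, GFP_ATOMIC);
                        else
                                s = skb;        /* last one takes the original */

                        if (unlikely(!s))
                                continue;       /* clone failed, drop this copy */

                        deliver(s, i);          /* consumer now owns 's' */
                }
        }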
4216     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
4217     index 044bcacad6e48..69ecf37053a81 100644
4218     --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
4219     +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
4220     @@ -2463,6 +2463,8 @@ static struct net_device *ipoib_add_port(const char *format,
4221     /* call event handler to ensure pkey in sync */
4222     queue_work(ipoib_workqueue, &priv->flush_heavy);
4223    
4224     + ndev->rtnl_link_ops = ipoib_get_link_ops();
4225     +
4226     result = register_netdev(ndev);
4227     if (result) {
4228     pr_warn("%s: couldn't register ipoib port %d; error %d\n",
4229     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
4230     index 38c984d16996d..d5a90a66b45cf 100644
4231     --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
4232     +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
4233     @@ -144,6 +144,16 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
4234     return 0;
4235     }
4236    
4237     +static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
4238     +{
4239     + struct ipoib_dev_priv *priv = ipoib_priv(dev);
4240     +
4241     + if (!priv->parent)
4242     + return;
4243     +
4244     + unregister_netdevice_queue(dev, head);
4245     +}
4246     +
4247     static size_t ipoib_get_size(const struct net_device *dev)
4248     {
4249     return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
4250     @@ -158,6 +168,7 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
4251     .priv_size = sizeof(struct ipoib_dev_priv),
4252     .setup = ipoib_setup_common,
4253     .newlink = ipoib_new_child_link,
4254     + .dellink = ipoib_del_child_link,
4255     .changelink = ipoib_changelink,
4256     .get_size = ipoib_get_size,
4257     .fill_info = ipoib_fill_info,
4258     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
4259     index 8ac8e18fbe0c3..58ca5e9c6079c 100644
4260     --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
4261     +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
4262     @@ -192,6 +192,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
4263     }
4264     priv = ipoib_priv(ndev);
4265    
4266     + ndev->rtnl_link_ops = ipoib_get_link_ops();
4267     +
4268     result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
4269    
4270     if (result && ndev->reg_state == NETREG_UNINITIALIZED)
4271     diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
4272     index 7c70492d9d6b5..f831f01501d58 100644
4273     --- a/drivers/input/keyboard/ep93xx_keypad.c
4274     +++ b/drivers/input/keyboard/ep93xx_keypad.c
4275     @@ -250,8 +250,8 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
4276     }
4277    
4278     keypad->irq = platform_get_irq(pdev, 0);
4279     - if (!keypad->irq) {
4280     - err = -ENXIO;
4281     + if (keypad->irq < 0) {
4282     + err = keypad->irq;
4283     goto failed_free;
4284     }
4285    
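Editor's note: this ep93xx hunk, and the omap4-keypad, twl4030, sun4i-ps2, mtk-cmdq and fsl-corenet-cf hunks further down, all fix the same probe-time check. platform_get_irq() reports failure as a negative errno (recent kernels also log the failure themselves), so testing the result for zero misses real errors and replaces useful codes such as -EPROBE_DEFER with a made-up -ENXIO or -EINVAL. A minimal sketch of the corrected pattern, with a hypothetical foo_probe():

        #include <linux/platform_device.h>

        /* Only the platform_get_irq() handling matters here; the rest of
         * the probe is elided.
         */
        static int foo_probe(struct platform_device *pdev)
        {
                int irq;

                irq = platform_get_irq(pdev, 0);
                if (irq < 0)
                        return irq;     /* negative errno, e.g. -EPROBE_DEFER */

                /* devm_request_irq(&pdev->dev, irq, ...); */
                return 0;
        }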
4286     diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
4287     index 94c94d7f5155f..d6c924032aaa8 100644
4288     --- a/drivers/input/keyboard/omap4-keypad.c
4289     +++ b/drivers/input/keyboard/omap4-keypad.c
4290     @@ -240,10 +240,8 @@ static int omap4_keypad_probe(struct platform_device *pdev)
4291     }
4292    
4293     irq = platform_get_irq(pdev, 0);
4294     - if (!irq) {
4295     - dev_err(&pdev->dev, "no keyboard irq assigned\n");
4296     - return -EINVAL;
4297     - }
4298     + if (irq < 0)
4299     + return irq;
4300    
4301     keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
4302     if (!keypad_data) {
4303     diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
4304     index af3a6824f1a4d..77e0743a3cf85 100644
4305     --- a/drivers/input/keyboard/twl4030_keypad.c
4306     +++ b/drivers/input/keyboard/twl4030_keypad.c
4307     @@ -50,7 +50,7 @@ struct twl4030_keypad {
4308     bool autorepeat;
4309     unsigned int n_rows;
4310     unsigned int n_cols;
4311     - unsigned int irq;
4312     + int irq;
4313    
4314     struct device *dbg_dev;
4315     struct input_dev *input;
4316     @@ -376,10 +376,8 @@ static int twl4030_kp_probe(struct platform_device *pdev)
4317     }
4318    
4319     kp->irq = platform_get_irq(pdev, 0);
4320     - if (!kp->irq) {
4321     - dev_err(&pdev->dev, "no keyboard irq assigned\n");
4322     - return -EINVAL;
4323     - }
4324     + if (kp->irq < 0)
4325     + return kp->irq;
4326    
4327     error = matrix_keypad_build_keymap(keymap_data, NULL,
4328     TWL4030_MAX_ROWS,
4329     diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
4330     index a681a2c04e399..f15ed3dcdb9b2 100644
4331     --- a/drivers/input/serio/sun4i-ps2.c
4332     +++ b/drivers/input/serio/sun4i-ps2.c
4333     @@ -211,7 +211,6 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
4334     struct sun4i_ps2data *drvdata;
4335     struct serio *serio;
4336     struct device *dev = &pdev->dev;
4337     - unsigned int irq;
4338     int error;
4339    
4340     drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL);
4341     @@ -264,14 +263,12 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
4342     writel(0, drvdata->reg_base + PS2_REG_GCTL);
4343    
4344     /* Get IRQ for the device */
4345     - irq = platform_get_irq(pdev, 0);
4346     - if (!irq) {
4347     - dev_err(dev, "no IRQ found\n");
4348     - error = -ENXIO;
4349     + drvdata->irq = platform_get_irq(pdev, 0);
4350     + if (drvdata->irq < 0) {
4351     + error = drvdata->irq;
4352     goto err_disable_clk;
4353     }
4354    
4355     - drvdata->irq = irq;
4356     drvdata->serio = serio;
4357     drvdata->dev = dev;
4358    
4359     diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
4360     index 9ed258854349b..5e6ba5c4eca2a 100644
4361     --- a/drivers/input/touchscreen/imx6ul_tsc.c
4362     +++ b/drivers/input/touchscreen/imx6ul_tsc.c
4363     @@ -530,20 +530,25 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
4364    
4365     mutex_lock(&input_dev->mutex);
4366    
4367     - if (input_dev->users) {
4368     - retval = clk_prepare_enable(tsc->adc_clk);
4369     - if (retval)
4370     - goto out;
4371     -
4372     - retval = clk_prepare_enable(tsc->tsc_clk);
4373     - if (retval) {
4374     - clk_disable_unprepare(tsc->adc_clk);
4375     - goto out;
4376     - }
4377     + if (!input_dev->users)
4378     + goto out;
4379    
4380     - retval = imx6ul_tsc_init(tsc);
4381     + retval = clk_prepare_enable(tsc->adc_clk);
4382     + if (retval)
4383     + goto out;
4384     +
4385     + retval = clk_prepare_enable(tsc->tsc_clk);
4386     + if (retval) {
4387     + clk_disable_unprepare(tsc->adc_clk);
4388     + goto out;
4389     }
4390    
4391     + retval = imx6ul_tsc_init(tsc);
4392     + if (retval) {
4393     + clk_disable_unprepare(tsc->tsc_clk);
4394     + clk_disable_unprepare(tsc->adc_clk);
4395     + goto out;
4396     + }
4397     out:
4398     mutex_unlock(&input_dev->mutex);
4399     return retval;
4400     diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
4401     index b6f95f20f9244..cd8805d71d977 100644
4402     --- a/drivers/input/touchscreen/stmfts.c
4403     +++ b/drivers/input/touchscreen/stmfts.c
4404     @@ -479,7 +479,7 @@ static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev,
4405    
4406     mutex_lock(&sdata->mutex);
4407    
4408     - if (value & sdata->hover_enabled)
4409     + if (value && sdata->hover_enabled)
4410     goto out;
4411    
4412     if (sdata->running)
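Editor's note: the one-character stmfts fix above swaps a bitwise '&' for the logical '&&' that was intended; the sysfs write is meant to bail out when hover is already enabled and the user asks to enable it again, but a bitwise AND of the raw user value against a boolean only compares bit 0. A tiny standalone illustration (values chosen for the example, not taken from the driver):

        #include <stdbool.h>
        #include <stdio.h>

        /* Once the "truthy" user value is anything other than exactly 1,
         * '&' and '&&' disagree.
         */
        int main(void)
        {
                unsigned long value = 2;        /* non-zero: an "enable" request */
                bool enabled = true;            /* feature already enabled */

                printf("value & enabled  -> %lu\n", value & enabled);  /* 0: check skipped */
                printf("value && enabled -> %d\n", value && enabled);  /* 1: check taken  */
                return 0;
        }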
4413     diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
4414     index 7543e395a2c64..a2ebc75af8c79 100644
4415     --- a/drivers/lightnvm/core.c
4416     +++ b/drivers/lightnvm/core.c
4417     @@ -1316,8 +1316,9 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
4418     strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
4419     i++;
4420    
4421     - if (i > 31) {
4422     - pr_err("max 31 devices can be reported.\n");
4423     + if (i >= ARRAY_SIZE(devices->info)) {
4424     + pr_err("max %zd devices can be reported.\n",
4425     + ARRAY_SIZE(devices->info));
4426     break;
4427     }
4428     }
4429     diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
4430     index 0b821a5b2db84..3e7d4b20ab34f 100644
4431     --- a/drivers/mailbox/mailbox.c
4432     +++ b/drivers/mailbox/mailbox.c
4433     @@ -82,9 +82,12 @@ static void msg_submit(struct mbox_chan *chan)
4434     exit:
4435     spin_unlock_irqrestore(&chan->lock, flags);
4436    
4437     - if (!err && (chan->txdone_method & TXDONE_BY_POLL))
4438     - /* kick start the timer immediately to avoid delays */
4439     - hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
4440     + /* kick start the timer immediately to avoid delays */
4441     + if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
4442     + /* but only if not already active */
4443     + if (!hrtimer_active(&chan->mbox->poll_hrt))
4444     + hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
4445     + }
4446     }
4447    
4448     static void tx_tick(struct mbox_chan *chan, int r)
4449     @@ -122,11 +125,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
4450     struct mbox_chan *chan = &mbox->chans[i];
4451    
4452     if (chan->active_req && chan->cl) {
4453     + resched = true;
4454     txdone = chan->mbox->ops->last_tx_done(chan);
4455     if (txdone)
4456     tx_tick(chan, 0);
4457     - else
4458     - resched = true;
4459     }
4460     }
4461    
4462     diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
4463     index 9a6ce9f5a7db5..3c8b365ce635a 100644
4464     --- a/drivers/mailbox/mtk-cmdq-mailbox.c
4465     +++ b/drivers/mailbox/mtk-cmdq-mailbox.c
4466     @@ -70,7 +70,7 @@ struct cmdq_task {
4467     struct cmdq {
4468     struct mbox_controller mbox;
4469     void __iomem *base;
4470     - u32 irq;
4471     + int irq;
4472     u32 thread_nr;
4473     u32 irq_mask;
4474     struct cmdq_thread *thread;
4475     @@ -474,10 +474,8 @@ static int cmdq_probe(struct platform_device *pdev)
4476     }
4477    
4478     cmdq->irq = platform_get_irq(pdev, 0);
4479     - if (!cmdq->irq) {
4480     - dev_err(dev, "failed to get irq\n");
4481     - return -EINVAL;
4482     - }
4483     + if (cmdq->irq < 0)
4484     + return cmdq->irq;
4485    
4486     cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev);
4487     cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
4488     diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
4489     index 3ad18246fcb3c..7227d03dbbea7 100644
4490     --- a/drivers/md/md-bitmap.c
4491     +++ b/drivers/md/md-bitmap.c
4492     @@ -1954,6 +1954,7 @@ out:
4493     }
4494     EXPORT_SYMBOL_GPL(md_bitmap_load);
4495    
4496     +/* caller need to free returned bitmap with md_bitmap_free() */
4497     struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
4498     {
4499     int rv = 0;
4500     @@ -2017,6 +2018,7 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
4501     md_bitmap_unplug(mddev->bitmap);
4502     *low = lo;
4503     *high = hi;
4504     + md_bitmap_free(bitmap);
4505    
4506     return rv;
4507     }
4508     @@ -2620,4 +2622,3 @@ struct attribute_group md_bitmap_group = {
4509     .name = "bitmap",
4510     .attrs = md_bitmap_attrs,
4511     };
4512     -
4513     diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
4514     index d50737ec40394..afbbc552c3275 100644
4515     --- a/drivers/md/md-cluster.c
4516     +++ b/drivers/md/md-cluster.c
4517     @@ -1166,6 +1166,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
4518     * can't resize bitmap
4519     */
4520     goto out;
4521     + md_bitmap_free(bitmap);
4522     }
4523    
4524     return 0;
4525     diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c
4526     index 3f1ca40b9b987..8a8585261bb80 100644
4527     --- a/drivers/media/firewire/firedtv-fw.c
4528     +++ b/drivers/media/firewire/firedtv-fw.c
4529     @@ -272,8 +272,10 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
4530    
4531     name_len = fw_csr_string(unit->directory, CSR_MODEL,
4532     name, sizeof(name));
4533     - if (name_len < 0)
4534     - return name_len;
4535     + if (name_len < 0) {
4536     + err = name_len;
4537     + goto fail_free;
4538     + }
4539     for (i = ARRAY_SIZE(model_names); --i; )
4540     if (strlen(model_names[i]) <= name_len &&
4541     strncmp(name, model_names[i], name_len) == 0)
4542     diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
4543     index de295114ca482..21666d705e372 100644
4544     --- a/drivers/media/i2c/m5mols/m5mols_core.c
4545     +++ b/drivers/media/i2c/m5mols/m5mols_core.c
4546     @@ -764,7 +764,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
4547    
4548     ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
4549     if (ret) {
4550     - info->set_power(&client->dev, 0);
4551     + if (info->set_power)
4552     + info->set_power(&client->dev, 0);
4553     return ret;
4554     }
4555    
4556     diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
4557     index 266e947572c1e..be6c882dd1d54 100644
4558     --- a/drivers/media/i2c/ov5640.c
4559     +++ b/drivers/media/i2c/ov5640.c
4560     @@ -34,6 +34,8 @@
4561     #define OV5640_REG_SYS_RESET02 0x3002
4562     #define OV5640_REG_SYS_CLOCK_ENABLE02 0x3006
4563     #define OV5640_REG_SYS_CTRL0 0x3008
4564     +#define OV5640_REG_SYS_CTRL0_SW_PWDN 0x42
4565     +#define OV5640_REG_SYS_CTRL0_SW_PWUP 0x02
4566     #define OV5640_REG_CHIP_ID 0x300a
4567     #define OV5640_REG_IO_MIPI_CTRL00 0x300e
4568     #define OV5640_REG_PAD_OUTPUT_ENABLE01 0x3017
4569     @@ -272,8 +274,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
4570     /* YUV422 UYVY VGA@30fps */
4571     static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
4572     {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
4573     - {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0},
4574     - {0x3630, 0x36, 0, 0},
4575     + {0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
4576     {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
4577     {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
4578     {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
4579     @@ -740,7 +741,7 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
4580     * +->| PLL Root Div | - reg 0x3037, bit 4
4581     * +-+------------+
4582     * | +---------+
4583     - * +->| Bit Div | - reg 0x3035, bits 0-3
4584     + * +->| Bit Div | - reg 0x3034, bits 0-3
4585     * +-+-------+
4586     * | +-------------+
4587     * +->| SCLK Div | - reg 0x3108, bits 0-1
4588     @@ -1109,6 +1110,12 @@ static int ov5640_load_regs(struct ov5640_dev *sensor,
4589     val = regs->val;
4590     mask = regs->mask;
4591    
4592     + /* remain in power down mode for DVP */
4593     + if (regs->reg_addr == OV5640_REG_SYS_CTRL0 &&
4594     + val == OV5640_REG_SYS_CTRL0_SW_PWUP &&
4595     + sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
4596     + continue;
4597     +
4598     if (mask)
4599     ret = ov5640_mod_reg(sensor, reg_addr, mask, val);
4600     else
4601     @@ -1264,31 +1271,9 @@ static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on)
4602     if (ret)
4603     return ret;
4604    
4605     - /*
4606     - * enable VSYNC/HREF/PCLK DVP control lines
4607     - * & D[9:6] DVP data lines
4608     - *
4609     - * PAD OUTPUT ENABLE 01
4610     - * - 6: VSYNC output enable
4611     - * - 5: HREF output enable
4612     - * - 4: PCLK output enable
4613     - * - [3:0]: D[9:6] output enable
4614     - */
4615     - ret = ov5640_write_reg(sensor,
4616     - OV5640_REG_PAD_OUTPUT_ENABLE01,
4617     - on ? 0x7f : 0);
4618     - if (ret)
4619     - return ret;
4620     -
4621     - /*
4622     - * enable D[5:0] DVP data lines
4623     - *
4624     - * PAD OUTPUT ENABLE 02
4625     - * - [7:2]: D[5:0] output enable
4626     - */
4627     - return ov5640_write_reg(sensor,
4628     - OV5640_REG_PAD_OUTPUT_ENABLE02,
4629     - on ? 0xfc : 0);
4630     + return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ?
4631     + OV5640_REG_SYS_CTRL0_SW_PWUP :
4632     + OV5640_REG_SYS_CTRL0_SW_PWDN);
4633     }
4634    
4635     static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on)
4636     @@ -1987,6 +1972,95 @@ static void ov5640_set_power_off(struct ov5640_dev *sensor)
4637     clk_disable_unprepare(sensor->xclk);
4638     }
4639    
4640     +static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
4641     +{
4642     + int ret;
4643     +
4644     + if (!on) {
4645     + /* Reset MIPI bus settings to their default values. */
4646     + ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
4647     + ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x04);
4648     + ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x00);
4649     + return 0;
4650     + }
4651     +
4652     + /*
4653     + * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
4654     + *
4655     + * 0x300e = 0x40
4656     + * [7:5] = 010 : 2 data lanes mode (see FIXME note in
4657     + * "ov5640_set_stream_mipi()")
4658     + * [4] = 0 : Power up MIPI HS Tx
4659     + * [3] = 0 : Power up MIPI LS Rx
4660     + * [2] = 0 : MIPI interface disabled
4661     + */
4662     + ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
4663     + if (ret)
4664     + return ret;
4665     +
4666     + /*
4667     + * Gate clock and set LP11 in 'no packets mode' (idle)
4668     + *
4669     + * 0x4800 = 0x24
4670     + * [5] = 1 : Gate clock when 'no packets'
4671     + * [2] = 1 : MIPI bus in LP11 when 'no packets'
4672     + */
4673     + ret = ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x24);
4674     + if (ret)
4675     + return ret;
4676     +
4677     + /*
4678     + * Set data lanes and clock in LP11 when 'sleeping'
4679     + *
4680     + * 0x3019 = 0x70
4681     + * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
4682     + * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
4683     + * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
4684     + */
4685     + ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x70);
4686     + if (ret)
4687     + return ret;
4688     +
4689     + /* Give lanes some time to coax into LP11 state. */
4690     + usleep_range(500, 1000);
4691     +
4692     + return 0;
4693     +}
4694     +
4695     +static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on)
4696     +{
4697     + int ret;
4698     +
4699     + if (!on) {
4700     + /* Reset settings to their default values. */
4701     + ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00);
4702     + ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0x00);
4703     + return 0;
4704     + }
4705     +
4706     + /*
4707     + * enable VSYNC/HREF/PCLK DVP control lines
4708     + * & D[9:6] DVP data lines
4709     + *
4710     + * PAD OUTPUT ENABLE 01
4711     + * - 6: VSYNC output enable
4712     + * - 5: HREF output enable
4713     + * - 4: PCLK output enable
4714     + * - [3:0]: D[9:6] output enable
4715     + */
4716     + ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x7f);
4717     + if (ret)
4718     + return ret;
4719     +
4720     + /*
4721     + * enable D[5:0] DVP data lines
4722     + *
4723     + * PAD OUTPUT ENABLE 02
4724     + * - [7:2]: D[5:0] output enable
4725     + */
4726     + return ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0xfc);
4727     +}
4728     +
4729     static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
4730     {
4731     int ret = 0;
4732     @@ -1999,67 +2073,17 @@ static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
4733     ret = ov5640_restore_mode(sensor);
4734     if (ret)
4735     goto power_off;
4736     + }
4737    
4738     - /* We're done here for DVP bus, while CSI-2 needs setup. */
4739     - if (sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
4740     - return 0;
4741     -
4742     - /*
4743     - * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
4744     - *
4745     - * 0x300e = 0x40
4746     - * [7:5] = 010 : 2 data lanes mode (see FIXME note in
4747     - * "ov5640_set_stream_mipi()")
4748     - * [4] = 0 : Power up MIPI HS Tx
4749     - * [3] = 0 : Power up MIPI LS Rx
4750     - * [2] = 0 : MIPI interface disabled
4751     - */
4752     - ret = ov5640_write_reg(sensor,
4753     - OV5640_REG_IO_MIPI_CTRL00, 0x40);
4754     - if (ret)
4755     - goto power_off;
4756     -
4757     - /*
4758     - * Gate clock and set LP11 in 'no packets mode' (idle)
4759     - *
4760     - * 0x4800 = 0x24
4761     - * [5] = 1 : Gate clock when 'no packets'
4762     - * [2] = 1 : MIPI bus in LP11 when 'no packets'
4763     - */
4764     - ret = ov5640_write_reg(sensor,
4765     - OV5640_REG_MIPI_CTRL00, 0x24);
4766     - if (ret)
4767     - goto power_off;
4768     -
4769     - /*
4770     - * Set data lanes and clock in LP11 when 'sleeping'
4771     - *
4772     - * 0x3019 = 0x70
4773     - * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
4774     - * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
4775     - * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
4776     - */
4777     - ret = ov5640_write_reg(sensor,
4778     - OV5640_REG_PAD_OUTPUT00, 0x70);
4779     - if (ret)
4780     - goto power_off;
4781     -
4782     - /* Give lanes some time to coax into LP11 state. */
4783     - usleep_range(500, 1000);
4784     -
4785     - } else {
4786     - if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
4787     - /* Reset MIPI bus settings to their default values. */
4788     - ov5640_write_reg(sensor,
4789     - OV5640_REG_IO_MIPI_CTRL00, 0x58);
4790     - ov5640_write_reg(sensor,
4791     - OV5640_REG_MIPI_CTRL00, 0x04);
4792     - ov5640_write_reg(sensor,
4793     - OV5640_REG_PAD_OUTPUT00, 0x00);
4794     - }
4795     + if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
4796     + ret = ov5640_set_power_mipi(sensor, on);
4797     + else
4798     + ret = ov5640_set_power_dvp(sensor, on);
4799     + if (ret)
4800     + goto power_off;
4801    
4802     + if (!on)
4803     ov5640_set_power_off(sensor);
4804     - }
4805    
4806     return 0;
4807    
4808     diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
4809     index dbbab75f135ec..cff99cf61ed4d 100644
4810     --- a/drivers/media/i2c/tc358743.c
4811     +++ b/drivers/media/i2c/tc358743.c
4812     @@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = {
4813     .adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
4814     };
4815    
4816     -static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
4817     - bool *handled)
4818     +static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus,
4819     + bool *handled)
4820     {
4821     struct tc358743_state *state = to_state(sd);
4822     unsigned int cec_rxint, cec_txint;
4823     @@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
4824     cec_transmit_attempt_done(state->cec_adap,
4825     CEC_TX_STATUS_ERROR);
4826     }
4827     - *handled = true;
4828     + if (handled)
4829     + *handled = true;
4830     }
4831     if ((intstatus & MASK_CEC_RINT) &&
4832     (cec_rxint & MASK_CECRIEND)) {
4833     @@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
4834     msg.msg[i] = v & 0xff;
4835     }
4836     cec_received_msg(state->cec_adap, &msg);
4837     - *handled = true;
4838     + if (handled)
4839     + *handled = true;
4840     }
4841     i2c_wr16(sd, INTSTATUS,
4842     intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
4843     @@ -1432,7 +1434,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
4844    
4845     #ifdef CONFIG_VIDEO_TC358743_CEC
4846     if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
4847     - tc358743_cec_isr(sd, intstatus, handled);
4848     + tc358743_cec_handler(sd, intstatus, handled);
4849     i2c_wr16(sd, INTSTATUS,
4850     intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
4851     intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
4852     @@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
4853     static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
4854     {
4855     struct tc358743_state *state = dev_id;
4856     - bool handled;
4857     + bool handled = false;
4858    
4859     tc358743_isr(&state->sd, 0, &handled);
4860    
4861     diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
4862     index a359da7773a90..ff2962cea6164 100644
4863     --- a/drivers/media/pci/bt8xx/bttv-driver.c
4864     +++ b/drivers/media/pci/bt8xx/bttv-driver.c
4865     @@ -4013,11 +4013,13 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
4866     btv->id = dev->device;
4867     if (pci_enable_device(dev)) {
4868     pr_warn("%d: Can't enable device\n", btv->c.nr);
4869     - return -EIO;
4870     + result = -EIO;
4871     + goto free_mem;
4872     }
4873     if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
4874     pr_warn("%d: No suitable DMA available\n", btv->c.nr);
4875     - return -EIO;
4876     + result = -EIO;
4877     + goto free_mem;
4878     }
4879     if (!request_mem_region(pci_resource_start(dev,0),
4880     pci_resource_len(dev,0),
4881     @@ -4025,7 +4027,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
4882     pr_warn("%d: can't request iomem (0x%llx)\n",
4883     btv->c.nr,
4884     (unsigned long long)pci_resource_start(dev, 0));
4885     - return -EBUSY;
4886     + result = -EBUSY;
4887     + goto free_mem;
4888     }
4889     pci_set_master(dev);
4890     pci_set_command(dev);
4891     @@ -4211,6 +4214,10 @@ fail0:
4892     release_mem_region(pci_resource_start(btv->c.pci,0),
4893     pci_resource_len(btv->c.pci,0));
4894     pci_disable_device(btv->c.pci);
4895     +
4896     +free_mem:
4897     + bttvs[btv->c.nr] = NULL;
4898     + kfree(btv);
4899     return result;
4900     }
4901    
4902     diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
4903     index 79e1afb710758..5cc4ef21f9d37 100644
4904     --- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
4905     +++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
4906     @@ -683,7 +683,8 @@ int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value)
4907     {
4908     int err;
4909    
4910     - audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", reg << 2, value);
4911     + audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n",
4912     + (reg << 2) & 0xffffffff, value);
4913     err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR);
4914     if (err < 0)
4915     return err;
4916     diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
4917     index cde0d254ec1c4..a77c49b185115 100644
4918     --- a/drivers/media/platform/exynos4-is/fimc-isp.c
4919     +++ b/drivers/media/platform/exynos4-is/fimc-isp.c
4920     @@ -305,8 +305,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
4921    
4922     if (on) {
4923     ret = pm_runtime_get_sync(&is->pdev->dev);
4924     - if (ret < 0)
4925     + if (ret < 0) {
4926     + pm_runtime_put(&is->pdev->dev);
4927     return ret;
4928     + }
4929     set_bit(IS_ST_PWR_ON, &is->state);
4930    
4931     ret = fimc_is_start_firmware(is);
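Editor's note: the fimc-isp change above is the first of a long run of media-driver hunks (media-dev, mipi-csis, camss-csiphy, rcar-fcp, rcar-dma, rga, s5p-mfc, hva, vsp1 and others) that close the same leak. pm_runtime_get_sync() raises the device's usage count even when it returns an error, so bailing out without a matching put leaves the reference, and potentially the power domain, held forever. The idiomatic shape of the fix, sketched with hypothetical names:

        #include <linux/pm_runtime.h>

        /* 'bar_power_on' and 'dev' are illustrative; the point is the
         * put on the error path.
         */
        static int bar_power_on(struct device *dev)
        {
                int ret;

                ret = pm_runtime_get_sync(dev);
                if (ret < 0) {
                        /* the usage count was bumped even though we failed */
                        pm_runtime_put_noidle(dev);
                        return ret;
                }

                return 0;
        }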
4932     diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
4933     index e87c6a09205bd..efd06621951c7 100644
4934     --- a/drivers/media/platform/exynos4-is/fimc-lite.c
4935     +++ b/drivers/media/platform/exynos4-is/fimc-lite.c
4936     @@ -470,7 +470,7 @@ static int fimc_lite_open(struct file *file)
4937     set_bit(ST_FLITE_IN_USE, &fimc->state);
4938     ret = pm_runtime_get_sync(&fimc->pdev->dev);
4939     if (ret < 0)
4940     - goto unlock;
4941     + goto err_pm;
4942    
4943     ret = v4l2_fh_open(file);
4944     if (ret < 0)
4945     diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
4946     index 9c31d950cddf7..a07d796f63df0 100644
4947     --- a/drivers/media/platform/exynos4-is/media-dev.c
4948     +++ b/drivers/media/platform/exynos4-is/media-dev.c
4949     @@ -484,8 +484,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
4950     return -ENXIO;
4951    
4952     ret = pm_runtime_get_sync(fmd->pmf);
4953     - if (ret < 0)
4954     + if (ret < 0) {
4955     + pm_runtime_put(fmd->pmf);
4956     return ret;
4957     + }
4958    
4959     fmd->num_sensors = 0;
4960    
4961     @@ -1268,11 +1270,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd)
4962     if (IS_ERR(pctl->state_default))
4963     return PTR_ERR(pctl->state_default);
4964    
4965     + /* PINCTRL_STATE_IDLE is optional */
4966     pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
4967     PINCTRL_STATE_IDLE);
4968     - if (IS_ERR(pctl->state_idle))
4969     - return PTR_ERR(pctl->state_idle);
4970     -
4971     return 0;
4972     }
4973    
4974     diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
4975     index 540151bbf58f2..1aac167abb175 100644
4976     --- a/drivers/media/platform/exynos4-is/mipi-csis.c
4977     +++ b/drivers/media/platform/exynos4-is/mipi-csis.c
4978     @@ -510,8 +510,10 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
4979     if (enable) {
4980     s5pcsis_clear_counters(state);
4981     ret = pm_runtime_get_sync(&state->pdev->dev);
4982     - if (ret && ret != 1)
4983     + if (ret && ret != 1) {
4984     + pm_runtime_put_noidle(&state->pdev->dev);
4985     return ret;
4986     + }
4987     }
4988    
4989     mutex_lock(&state->lock);
4990     diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
4991     index 27779b75df543..ac112cf06ab31 100644
4992     --- a/drivers/media/platform/mx2_emmaprp.c
4993     +++ b/drivers/media/platform/mx2_emmaprp.c
4994     @@ -852,8 +852,11 @@ static int emmaprp_probe(struct platform_device *pdev)
4995     platform_set_drvdata(pdev, pcdev);
4996    
4997     irq = platform_get_irq(pdev, 0);
4998     - if (irq < 0)
4999     - return irq;
5000     + if (irq < 0) {
5001     + ret = irq;
5002     + goto rel_vdev;
5003     + }
5004     +
5005     ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
5006     dev_name(&pdev->dev), pcdev);
5007     if (ret)
5008     diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
5009     index 327c5716922ac..dce6b3685e135 100644
5010     --- a/drivers/media/platform/omap3isp/isp.c
5011     +++ b/drivers/media/platform/omap3isp/isp.c
5012     @@ -2330,8 +2330,10 @@ static int isp_probe(struct platform_device *pdev)
5013     mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
5014     isp->mmio_base[map_idx] =
5015     devm_ioremap_resource(isp->dev, mem);
5016     - if (IS_ERR(isp->mmio_base[map_idx]))
5017     - return PTR_ERR(isp->mmio_base[map_idx]);
5018     + if (IS_ERR(isp->mmio_base[map_idx])) {
5019     + ret = PTR_ERR(isp->mmio_base[map_idx]);
5020     + goto error;
5021     + }
5022     }
5023    
5024     ret = isp_get_clocks(isp);
5025     diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
5026     index 008afb85023be..3c5b9082ad723 100644
5027     --- a/drivers/media/platform/qcom/camss/camss-csiphy.c
5028     +++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
5029     @@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on)
5030     int ret;
5031    
5032     ret = pm_runtime_get_sync(dev);
5033     - if (ret < 0)
5034     + if (ret < 0) {
5035     + pm_runtime_put_sync(dev);
5036     return ret;
5037     + }
5038    
5039     ret = csiphy_set_clock_rates(csiphy);
5040     if (ret < 0) {
5041     diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
5042     index 84e982f259a06..bbc430a003443 100644
5043     --- a/drivers/media/platform/qcom/venus/core.c
5044     +++ b/drivers/media/platform/qcom/venus/core.c
5045     @@ -316,8 +316,10 @@ static int venus_probe(struct platform_device *pdev)
5046     goto err_core_deinit;
5047    
5048     ret = pm_runtime_put_sync(dev);
5049     - if (ret)
5050     + if (ret) {
5051     + pm_runtime_get_noresume(dev);
5052     goto err_dev_unregister;
5053     + }
5054    
5055     return 0;
5056    
5057     @@ -328,6 +330,7 @@ err_core_deinit:
5058     err_venus_shutdown:
5059     venus_shutdown(core);
5060     err_runtime_disable:
5061     + pm_runtime_put_noidle(dev);
5062     pm_runtime_set_suspended(dev);
5063     pm_runtime_disable(dev);
5064     hfi_destroy(core);
5065     diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
5066     index 05b80a66e80ed..658825b4c4e8d 100644
5067     --- a/drivers/media/platform/qcom/venus/vdec.c
5068     +++ b/drivers/media/platform/qcom/venus/vdec.c
5069     @@ -993,8 +993,6 @@ static int vdec_stop_capture(struct venus_inst *inst)
5070     break;
5071     }
5072    
5073     - INIT_LIST_HEAD(&inst->registeredbufs);
5074     -
5075     return ret;
5076     }
5077    
5078     @@ -1091,6 +1089,14 @@ static int vdec_buf_init(struct vb2_buffer *vb)
5079     static void vdec_buf_cleanup(struct vb2_buffer *vb)
5080     {
5081     struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
5082     + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
5083     + struct venus_buffer *buf = to_venus_buffer(vbuf);
5084     +
5085     + mutex_lock(&inst->lock);
5086     + if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
5087     + if (!list_empty(&inst->registeredbufs))
5088     + list_del_init(&buf->reg_list);
5089     + mutex_unlock(&inst->lock);
5090    
5091     inst->buf_count--;
5092     if (!inst->buf_count)
5093     diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
5094     index 5c6b00737fe75..05c712e00a2a7 100644
5095     --- a/drivers/media/platform/rcar-fcp.c
5096     +++ b/drivers/media/platform/rcar-fcp.c
5097     @@ -103,8 +103,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp)
5098     return 0;
5099    
5100     ret = pm_runtime_get_sync(fcp->dev);
5101     - if (ret < 0)
5102     + if (ret < 0) {
5103     + pm_runtime_put_noidle(fcp->dev);
5104     return ret;
5105     + }
5106    
5107     return 0;
5108     }
5109     diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
5110     index c14af1b929dff..d27eccfa57cae 100644
5111     --- a/drivers/media/platform/rcar-vin/rcar-csi2.c
5112     +++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
5113     @@ -361,7 +361,6 @@ struct rcar_csi2 {
5114     struct media_pad pads[NR_OF_RCAR_CSI2_PAD];
5115    
5116     struct v4l2_async_notifier notifier;
5117     - struct v4l2_async_subdev asd;
5118     struct v4l2_subdev *remote;
5119    
5120     struct v4l2_mbus_framefmt mf;
5121     @@ -810,6 +809,8 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
5122    
5123     static int rcsi2_parse_dt(struct rcar_csi2 *priv)
5124     {
5125     + struct v4l2_async_subdev *asd;
5126     + struct fwnode_handle *fwnode;
5127     struct device_node *ep;
5128     struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
5129     int ret;
5130     @@ -833,24 +834,19 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
5131     return ret;
5132     }
5133    
5134     - priv->asd.match.fwnode =
5135     - fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
5136     - priv->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
5137     -
5138     + fwnode = fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
5139     of_node_put(ep);
5140    
5141     - v4l2_async_notifier_init(&priv->notifier);
5142     -
5143     - ret = v4l2_async_notifier_add_subdev(&priv->notifier, &priv->asd);
5144     - if (ret) {
5145     - fwnode_handle_put(priv->asd.match.fwnode);
5146     - return ret;
5147     - }
5148     + dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
5149    
5150     + v4l2_async_notifier_init(&priv->notifier);
5151     priv->notifier.ops = &rcar_csi2_notify_ops;
5152    
5153     - dev_dbg(priv->dev, "Found '%pOF'\n",
5154     - to_of_node(priv->asd.match.fwnode));
5155     + asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
5156     + sizeof(*asd));
5157     + fwnode_handle_put(fwnode);
5158     + if (IS_ERR(asd))
5159     + return PTR_ERR(asd);
5160    
5161     ret = v4l2_async_subdev_notifier_register(&priv->subdev,
5162     &priv->notifier);
5163     diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
5164     index 3cb29b2e0b2b1..e5f6360801082 100644
5165     --- a/drivers/media/platform/rcar-vin/rcar-dma.c
5166     +++ b/drivers/media/platform/rcar-vin/rcar-dma.c
5167     @@ -1334,8 +1334,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
5168     int ret;
5169    
5170     ret = pm_runtime_get_sync(vin->dev);
5171     - if (ret < 0)
5172     + if (ret < 0) {
5173     + pm_runtime_put_noidle(vin->dev);
5174     return ret;
5175     + }
5176    
5177     /* Make register writes take effect immediately. */
5178     vnmc = rvin_read(vin, VNMC_REG);
5179     diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
5180     index 0f267a237b424..af3c8d405509e 100644
5181     --- a/drivers/media/platform/rcar_drif.c
5182     +++ b/drivers/media/platform/rcar_drif.c
5183     @@ -185,7 +185,6 @@ struct rcar_drif_frame_buf {
5184     /* OF graph endpoint's V4L2 async data */
5185     struct rcar_drif_graph_ep {
5186     struct v4l2_subdev *subdev; /* Async matched subdev */
5187     - struct v4l2_async_subdev asd; /* Async sub-device descriptor */
5188     };
5189    
5190     /* DMA buffer */
5191     @@ -1105,12 +1104,6 @@ static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
5192     struct rcar_drif_sdr *sdr =
5193     container_of(notifier, struct rcar_drif_sdr, notifier);
5194    
5195     - if (sdr->ep.asd.match.fwnode !=
5196     - of_fwnode_handle(subdev->dev->of_node)) {
5197     - rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name);
5198     - return -EINVAL;
5199     - }
5200     -
5201     v4l2_set_subdev_hostdata(subdev, sdr);
5202     sdr->ep.subdev = subdev;
5203     rdrif_dbg(sdr, "bound asd %s\n", subdev->name);
5204     @@ -1214,7 +1207,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
5205     {
5206     struct v4l2_async_notifier *notifier = &sdr->notifier;
5207     struct fwnode_handle *fwnode, *ep;
5208     - int ret;
5209     + struct v4l2_async_subdev *asd;
5210    
5211     v4l2_async_notifier_init(notifier);
5212    
5213     @@ -1223,26 +1216,21 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
5214     if (!ep)
5215     return 0;
5216    
5217     + /* Get the endpoint properties */
5218     + rcar_drif_get_ep_properties(sdr, ep);
5219     +
5220     fwnode = fwnode_graph_get_remote_port_parent(ep);
5221     + fwnode_handle_put(ep);
5222     if (!fwnode) {
5223     dev_warn(sdr->dev, "bad remote port parent\n");
5224     - fwnode_handle_put(ep);
5225     return -EINVAL;
5226     }
5227    
5228     - sdr->ep.asd.match.fwnode = fwnode;
5229     - sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
5230     - ret = v4l2_async_notifier_add_subdev(notifier, &sdr->ep.asd);
5231     - if (ret) {
5232     - fwnode_handle_put(fwnode);
5233     - return ret;
5234     - }
5235     -
5236     - /* Get the endpoint properties */
5237     - rcar_drif_get_ep_properties(sdr, ep);
5238     -
5239     + asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
5240     + sizeof(*asd));
5241     fwnode_handle_put(fwnode);
5242     - fwnode_handle_put(ep);
5243     + if (IS_ERR(asd))
5244     + return PTR_ERR(asd);
5245    
5246     return 0;
5247     }
5248     diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
5249     index 36b821ccc1dba..bf9a75b75083b 100644
5250     --- a/drivers/media/platform/rockchip/rga/rga-buf.c
5251     +++ b/drivers/media/platform/rockchip/rga/rga-buf.c
5252     @@ -81,6 +81,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
5253    
5254     ret = pm_runtime_get_sync(rga->dev);
5255     if (ret < 0) {
5256     + pm_runtime_put_noidle(rga->dev);
5257     rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
5258     return ret;
5259     }
5260     diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
5261     index c6fbcd7036d6d..ee624804862e2 100644
5262     --- a/drivers/media/platform/s3c-camif/camif-core.c
5263     +++ b/drivers/media/platform/s3c-camif/camif-core.c
5264     @@ -464,7 +464,7 @@ static int s3c_camif_probe(struct platform_device *pdev)
5265    
5266     ret = camif_media_dev_init(camif);
5267     if (ret < 0)
5268     - goto err_alloc;
5269     + goto err_pm;
5270    
5271     ret = camif_register_sensor(camif);
5272     if (ret < 0)
5273     @@ -498,10 +498,9 @@ err_sens:
5274     media_device_unregister(&camif->media_dev);
5275     media_device_cleanup(&camif->media_dev);
5276     camif_unregister_media_entities(camif);
5277     -err_alloc:
5278     +err_pm:
5279     pm_runtime_put(dev);
5280     pm_runtime_disable(dev);
5281     -err_pm:
5282     camif_clk_put(camif);
5283     err_clk:
5284     s3c_camif_unregister_subdev(camif);
5285     diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
5286     index 7d52431c2c837..62d2320a72186 100644
5287     --- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
5288     +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
5289     @@ -79,8 +79,10 @@ int s5p_mfc_power_on(void)
5290     int i, ret = 0;
5291    
5292     ret = pm_runtime_get_sync(pm->device);
5293     - if (ret < 0)
5294     + if (ret < 0) {
5295     + pm_runtime_put_noidle(pm->device);
5296     return ret;
5297     + }
5298    
5299     /* clock control */
5300     for (i = 0; i < pm->num_clocks; i++) {
5301     diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
5302     index 675b5f2b4c2ee..a55ddf8d185d5 100644
5303     --- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
5304     +++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
5305     @@ -1367,7 +1367,7 @@ static int bdisp_probe(struct platform_device *pdev)
5306     ret = pm_runtime_get_sync(dev);
5307     if (ret < 0) {
5308     dev_err(dev, "failed to set PM\n");
5309     - goto err_dbg;
5310     + goto err_pm;
5311     }
5312    
5313     /* Filters */
5314     @@ -1395,7 +1395,6 @@ err_filter:
5315     bdisp_hw_free_filters(bdisp->dev);
5316     err_pm:
5317     pm_runtime_put(dev);
5318     -err_dbg:
5319     bdisp_debugfs_remove(bdisp);
5320     err_v4l2:
5321     v4l2_device_unregister(&bdisp->v4l2_dev);
5322     diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
5323     index 91369fb3ffaa4..2791107e641bc 100644
5324     --- a/drivers/media/platform/sti/delta/delta-v4l2.c
5325     +++ b/drivers/media/platform/sti/delta/delta-v4l2.c
5326     @@ -954,8 +954,10 @@ static void delta_run_work(struct work_struct *work)
5327     /* enable the hardware */
5328     if (!dec->pm) {
5329     ret = delta_get_sync(ctx);
5330     - if (ret)
5331     + if (ret) {
5332     + delta_put_autosuspend(ctx);
5333     goto err;
5334     + }
5335     }
5336    
5337     /* decode this access unit */
5338     diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
5339     index 401aaafa17109..43f279e2a6a38 100644
5340     --- a/drivers/media/platform/sti/hva/hva-hw.c
5341     +++ b/drivers/media/platform/sti/hva/hva-hw.c
5342     @@ -272,6 +272,7 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
5343    
5344     if (pm_runtime_get_sync(dev) < 0) {
5345     dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
5346     + pm_runtime_put_noidle(dev);
5347     mutex_unlock(&hva->protect_mutex);
5348     return -EFAULT;
5349     }
5350     @@ -388,7 +389,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
5351     ret = pm_runtime_get_sync(dev);
5352     if (ret < 0) {
5353     dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
5354     - goto err_clk;
5355     + goto err_pm;
5356     }
5357    
5358     /* check IP hardware version */
5359     @@ -553,6 +554,7 @@ void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
5360    
5361     if (pm_runtime_get_sync(dev) < 0) {
5362     seq_puts(s, "Cannot wake up IP\n");
5363     + pm_runtime_put_noidle(dev);
5364     mutex_unlock(&hva->protect_mutex);
5365     return;
5366     }
5367     diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
5368     index 9392e3409fba0..d41475f56ab54 100644
5369     --- a/drivers/media/platform/stm32/stm32-dcmi.c
5370     +++ b/drivers/media/platform/stm32/stm32-dcmi.c
5371     @@ -733,7 +733,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
5372     if (ret < 0) {
5373     dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
5374     __func__, ret);
5375     - goto err_release_buffers;
5376     + goto err_pm_put;
5377     }
5378    
5379     ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
5380     @@ -837,8 +837,6 @@ err_media_pipeline_stop:
5381    
5382     err_pm_put:
5383     pm_runtime_put(dcmi->dev);
5384     -
5385     -err_release_buffers:
5386     spin_lock_irq(&dcmi->irqlock);
5387     /*
5388     * Return all buffers to vb2 in QUEUED state.
5389     diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
5390     index 8b14ba4a3d9ea..817bd13370eff 100644
5391     --- a/drivers/media/platform/ti-vpe/vpe.c
5392     +++ b/drivers/media/platform/ti-vpe/vpe.c
5393     @@ -2435,6 +2435,8 @@ static int vpe_runtime_get(struct platform_device *pdev)
5394    
5395     r = pm_runtime_get_sync(&pdev->dev);
5396     WARN_ON(r < 0);
5397     + if (r)
5398     + pm_runtime_put_noidle(&pdev->dev);
5399     return r < 0 ? r : 0;
5400     }
5401    
5402     diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
5403     index c650e45bb0ad1..dc62533cf32ce 100644
5404     --- a/drivers/media/platform/vsp1/vsp1_drv.c
5405     +++ b/drivers/media/platform/vsp1/vsp1_drv.c
5406     @@ -562,7 +562,12 @@ int vsp1_device_get(struct vsp1_device *vsp1)
5407     int ret;
5408    
5409     ret = pm_runtime_get_sync(vsp1->dev);
5410     - return ret < 0 ? ret : 0;
5411     + if (ret < 0) {
5412     + pm_runtime_put_noidle(vsp1->dev);
5413     + return ret;
5414     + }
5415     +
5416     + return 0;
5417     }
5418    
5419     /*
5420     @@ -845,12 +850,12 @@ static int vsp1_probe(struct platform_device *pdev)
5421     /* Configure device parameters based on the version register. */
5422     pm_runtime_enable(&pdev->dev);
5423    
5424     - ret = pm_runtime_get_sync(&pdev->dev);
5425     + ret = vsp1_device_get(vsp1);
5426     if (ret < 0)
5427     goto done;
5428    
5429     vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
5430     - pm_runtime_put_sync(&pdev->dev);
5431     + vsp1_device_put(vsp1);
5432    
5433     for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
5434     if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
5435     diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
5436     index 9cdef17b4793f..c12dda73cdd53 100644
5437     --- a/drivers/media/rc/ati_remote.c
5438     +++ b/drivers/media/rc/ati_remote.c
5439     @@ -835,6 +835,10 @@ static int ati_remote_probe(struct usb_interface *interface,
5440     err("%s: endpoint_in message size==0? \n", __func__);
5441     return -ENODEV;
5442     }
5443     + if (!usb_endpoint_is_int_out(endpoint_out)) {
5444     + err("%s: Unexpected endpoint_out\n", __func__);
5445     + return -ENODEV;
5446     + }
5447    
5448     ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
5449     rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
5450     diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
5451     index b6e70fada3fb2..8fb186b25d6af 100644
5452     --- a/drivers/media/tuners/tuner-simple.c
5453     +++ b/drivers/media/tuners/tuner-simple.c
5454     @@ -500,7 +500,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
5455     case TUNER_TENA_9533_DI:
5456     case TUNER_YMEC_TVF_5533MF:
5457     tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
5458     - return 0;
5459     + return -EINVAL;
5460     case TUNER_PHILIPS_FM1216ME_MK3:
5461     case TUNER_PHILIPS_FM1236_MK3:
5462     case TUNER_PHILIPS_FMD1216ME_MK3:
5463     @@ -702,7 +702,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
5464     TUNER_RATIO_SELECT_50; /* 50 kHz step */
5465    
5466     /* Bandswitch byte */
5467     - simple_radio_bandswitch(fe, &buffer[0]);
5468     + if (simple_radio_bandswitch(fe, &buffer[0]))
5469     + return 0;
5470    
5471     /* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps
5472     freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) =
5473     diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
5474     index e399b9fad7574..a30a8a731eda8 100644
5475     --- a/drivers/media/usb/uvc/uvc_ctrl.c
5476     +++ b/drivers/media/usb/uvc/uvc_ctrl.c
5477     @@ -773,12 +773,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
5478     offset &= 7;
5479     mask = ((1LL << bits) - 1) << offset;
5480    
5481     - for (; bits > 0; data++) {
5482     + while (1) {
5483     u8 byte = *data & mask;
5484     value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
5485     bits -= 8 - (offset > 0 ? offset : 0);
5486     + if (bits <= 0)
5487     + break;
5488     +
5489     offset -= 8;
5490     mask = (1 << bits) - 1;
5491     + data++;
5492     }
5493    
5494     /* Sign-extend the value if needed. */
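Editor's note: the uvc_get_le_value() rework above keeps the same semantics, extracting a bit field that may straddle several bytes of a little-endian control payload, but restructures the loop so the per-iteration bookkeeping (advancing the data pointer and recomputing the mask) only happens while bits remain. A simplified, bit-at-a-time userspace equivalent of the operation, not the driver's byte-wise code:

        #include <stdint.h>
        #include <stdio.h>

        /* Extract 'bits' bits starting at bit offset 'off' from a
         * little-endian byte buffer, LSB first.
         */
        static uint32_t get_le_bits(const uint8_t *data, unsigned int off,
                                    unsigned int bits)
        {
                uint32_t value = 0;
                unsigned int i;

                for (i = 0; i < bits; i++) {
                        unsigned int bit = off + i;

                        value |= (uint32_t)((data[bit / 8] >> (bit % 8)) & 1) << i;
                }
                return value;
        }

        int main(void)
        {
                const uint8_t buf[] = { 0xab, 0xcd };      /* 0xcdab little-endian */

                printf("0x%x\n", get_le_bits(buf, 4, 8));  /* prints 0xda */
                return 0;
        }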
5495     diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
5496     index b4499cddeffe5..ca3a9c2eec271 100644
5497     --- a/drivers/media/usb/uvc/uvc_entity.c
5498     +++ b/drivers/media/usb/uvc/uvc_entity.c
5499     @@ -73,10 +73,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain,
5500     int ret;
5501    
5502     if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
5503     + u32 function;
5504     +
5505     v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
5506     strscpy(entity->subdev.name, entity->name,
5507     sizeof(entity->subdev.name));
5508    
5509     + switch (UVC_ENTITY_TYPE(entity)) {
5510     + case UVC_VC_SELECTOR_UNIT:
5511     + function = MEDIA_ENT_F_VID_MUX;
5512     + break;
5513     + case UVC_VC_PROCESSING_UNIT:
5514     + case UVC_VC_EXTENSION_UNIT:
5515     + /* For lack of a better option. */
5516     + function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
5517     + break;
5518     + case UVC_COMPOSITE_CONNECTOR:
5519     + case UVC_COMPONENT_CONNECTOR:
5520     + function = MEDIA_ENT_F_CONN_COMPOSITE;
5521     + break;
5522     + case UVC_SVIDEO_CONNECTOR:
5523     + function = MEDIA_ENT_F_CONN_SVIDEO;
5524     + break;
5525     + case UVC_ITT_CAMERA:
5526     + function = MEDIA_ENT_F_CAM_SENSOR;
5527     + break;
5528     + case UVC_TT_VENDOR_SPECIFIC:
5529     + case UVC_ITT_VENDOR_SPECIFIC:
5530     + case UVC_ITT_MEDIA_TRANSPORT_INPUT:
5531     + case UVC_OTT_VENDOR_SPECIFIC:
5532     + case UVC_OTT_DISPLAY:
5533     + case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
5534     + case UVC_EXTERNAL_VENDOR_SPECIFIC:
5535     + default:
5536     + function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
5537     + break;
5538     + }
5539     +
5540     + entity->subdev.entity.function = function;
5541     +
5542     ret = media_entity_pads_init(&entity->subdev.entity,
5543     entity->num_pads, entity->pads);
5544    
5545     diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
5546     index 0335e69b70abe..5e6f3153b5ff8 100644
5547     --- a/drivers/media/usb/uvc/uvc_v4l2.c
5548     +++ b/drivers/media/usb/uvc/uvc_v4l2.c
5549     @@ -247,11 +247,41 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
5550     if (ret < 0)
5551     goto done;
5552    
5553     + /* After the probe, update fmt with the values returned from
5554     + * negotiation with the device.
5555     + */
5556     + for (i = 0; i < stream->nformats; ++i) {
5557     + if (probe->bFormatIndex == stream->format[i].index) {
5558     + format = &stream->format[i];
5559     + break;
5560     + }
5561     + }
5562     +
5563     + if (i == stream->nformats) {
5564     + uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
5565     + probe->bFormatIndex);
5566     + return -EINVAL;
5567     + }
5568     +
5569     + for (i = 0; i < format->nframes; ++i) {
5570     + if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
5571     + frame = &format->frame[i];
5572     + break;
5573     + }
5574     + }
5575     +
5576     + if (i == format->nframes) {
5577     + uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
5578     + probe->bFrameIndex);
5579     + return -EINVAL;
5580     + }
5581     +
5582     fmt->fmt.pix.width = frame->wWidth;
5583     fmt->fmt.pix.height = frame->wHeight;
5584     fmt->fmt.pix.field = V4L2_FIELD_NONE;
5585     fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
5586     fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize;
5587     + fmt->fmt.pix.pixelformat = format->fcc;
5588     fmt->fmt.pix.colorspace = format->colorspace;
5589    
5590     if (uvc_format != NULL)
5591     diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
5592     index 0b0ed72016da8..0309bd5a18008 100644
5593     --- a/drivers/memory/fsl-corenet-cf.c
5594     +++ b/drivers/memory/fsl-corenet-cf.c
5595     @@ -211,10 +211,8 @@ static int ccf_probe(struct platform_device *pdev)
5596     dev_set_drvdata(&pdev->dev, ccf);
5597    
5598     irq = platform_get_irq(pdev, 0);
5599     - if (!irq) {
5600     - dev_err(&pdev->dev, "%s: no irq\n", __func__);
5601     - return -ENXIO;
5602     - }
5603     + if (irq < 0)
5604     + return irq;
5605    
5606     ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf);
5607     if (ret) {
5608     diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
5609     index eff26c1b13940..27bc417029e11 100644
5610     --- a/drivers/memory/omap-gpmc.c
5611     +++ b/drivers/memory/omap-gpmc.c
5612     @@ -949,7 +949,7 @@ static int gpmc_cs_remap(int cs, u32 base)
5613     int ret;
5614     u32 old_base, size;
5615    
5616     - if (cs > gpmc_cs_num) {
5617     + if (cs >= gpmc_cs_num) {
5618     pr_err("%s: requested chip-select is disabled\n", __func__);
5619     return -ENODEV;
5620     }
5621     @@ -984,7 +984,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
5622     struct resource *res = &gpmc->mem;
5623     int r = -1;
5624    
5625     - if (cs > gpmc_cs_num) {
5626     + if (cs >= gpmc_cs_num) {
5627     pr_err("%s: requested chip-select is disabled\n", __func__);
5628     return -ENODEV;
5629     }
5630     @@ -2274,6 +2274,10 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
5631     }
5632     }
5633     #else
5634     +void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
5635     +{
5636     + memset(p, 0, sizeof(*p));
5637     +}
5638     static int gpmc_probe_dt(struct platform_device *pdev)
5639     {
5640     return 0;
5641     diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
5642     index 154270f8d8d78..bbcde58e2a11e 100644
5643     --- a/drivers/mfd/sm501.c
5644     +++ b/drivers/mfd/sm501.c
5645     @@ -1424,8 +1424,14 @@ static int sm501_plat_probe(struct platform_device *dev)
5646     goto err_claim;
5647     }
5648    
5649     - return sm501_init_dev(sm);
5650     + ret = sm501_init_dev(sm);
5651     + if (ret)
5652     + goto err_unmap;
5653     +
5654     + return 0;
5655    
5656     + err_unmap:
5657     + iounmap(sm->regs);
5658     err_claim:
5659     release_resource(sm->regs_claim);
5660     kfree(sm->regs_claim);
5661     diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
5662     index 1958833b3b74e..4fd57052ddd3d 100644
5663     --- a/drivers/misc/cardreader/rtsx_pcr.c
5664     +++ b/drivers/misc/cardreader/rtsx_pcr.c
5665     @@ -1534,12 +1534,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
5666     ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
5667     ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
5668     if (ret < 0)
5669     - goto disable_irq;
5670     + goto free_slots;
5671    
5672     schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
5673    
5674     return 0;
5675    
5676     +free_slots:
5677     + kfree(pcr->slots);
5678     disable_irq:
5679     free_irq(pcr->irq, (void *)pcr);
5680     disable_msi:
5681     diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
5682     index cde9a2fc13250..490ff49d11ede 100644
5683     --- a/drivers/misc/eeprom/at25.c
5684     +++ b/drivers/misc/eeprom/at25.c
5685     @@ -358,7 +358,7 @@ static int at25_probe(struct spi_device *spi)
5686     at25->nvmem_config.reg_read = at25_ee_read;
5687     at25->nvmem_config.reg_write = at25_ee_write;
5688     at25->nvmem_config.priv = at25;
5689     - at25->nvmem_config.stride = 4;
5690     + at25->nvmem_config.stride = 1;
5691     at25->nvmem_config.word_size = 1;
5692     at25->nvmem_config.size = chip.byte_len;
5693    
5694     diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
5695     index 01e27682ea303..a486c6c7f4077 100644
5696     --- a/drivers/misc/mic/scif/scif_rma.c
5697     +++ b/drivers/misc/mic/scif/scif_rma.c
5698     @@ -1381,6 +1381,8 @@ retry:
5699     (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
5700     pinned_pages->pages);
5701     if (nr_pages != pinned_pages->nr_pages) {
5702     + if (pinned_pages->nr_pages < 0)
5703     + pinned_pages->nr_pages = 0;
5704     if (try_upgrade) {
5705     if (ulimit)
5706     __scif_dec_pinned_vm_lock(mm, nr_pages);
5707     @@ -1400,7 +1402,6 @@ retry:
5708    
5709     if (pinned_pages->nr_pages < nr_pages) {
5710     err = -EFAULT;
5711     - pinned_pages->nr_pages = nr_pages;
5712     goto dec_pinned;
5713     }
5714    
5715     @@ -1413,7 +1414,6 @@ dec_pinned:
5716     __scif_dec_pinned_vm_lock(mm, nr_pages);
5717     /* Something went wrong! Rollback */
5718     error_unmap:
5719     - pinned_pages->nr_pages = nr_pages;
5720     scif_destroy_pinned_pages(pinned_pages);
5721     *pages = NULL;
5722     dev_dbg(scif_info.mdev.this_device,
5723     diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
5724     index 85942f6717c57..8aadc6055df17 100644
5725     --- a/drivers/misc/mic/vop/vop_main.c
5726     +++ b/drivers/misc/mic/vop/vop_main.c
5727     @@ -320,7 +320,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
5728     /* First assign the vring's allocated in host memory */
5729     vqconfig = _vop_vq_config(vdev->desc) + index;
5730     memcpy_fromio(&config, vqconfig, sizeof(config));
5731     - _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
5732     + _vr_size = round_up(vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN), 4);
5733     vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
5734     va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
5735     if (!va)
5736     diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
5737     index 30eac172f0170..7014ffe88632e 100644
5738     --- a/drivers/misc/mic/vop/vop_vringh.c
5739     +++ b/drivers/misc/mic/vop/vop_vringh.c
5740     @@ -296,7 +296,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
5741    
5742     num = le16_to_cpu(vqconfig[i].num);
5743     mutex_init(&vvr->vr_mutex);
5744     - vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
5745     + vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) +
5746     sizeof(struct _mic_vring_info));
5747     vr->va = (void *)
5748     __get_free_pages(GFP_KERNEL | __GFP_ZERO,
5749     @@ -308,7 +308,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
5750     goto err;
5751     }
5752     vr->len = vr_size;
5753     - vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
5754     + vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4);
5755     vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
5756     vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
5757     DMA_BIDIRECTIONAL);
5758     @@ -602,6 +602,7 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
5759     size_t partlen;
5760     bool dma = VOP_USE_DMA && vi->dma_ch;
5761     int err = 0;
5762     + size_t offset = 0;
5763    
5764     if (dma) {
5765     dma_alignment = 1 << vi->dma_ch->device->copy_align;
5766     @@ -655,13 +656,20 @@ memcpy:
5767     * We are copying to IO below and should ideally use something
5768     * like copy_from_user_toio(..) if it existed.
5769     */
5770     - if (copy_from_user((void __force *)dbuf, ubuf, len)) {
5771     - err = -EFAULT;
5772     - dev_err(vop_dev(vdev), "%s %d err %d\n",
5773     - __func__, __LINE__, err);
5774     - goto err;
5775     + while (len) {
5776     + partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
5777     +
5778     + if (copy_from_user(vvr->buf, ubuf + offset, partlen)) {
5779     + err = -EFAULT;
5780     + dev_err(vop_dev(vdev), "%s %d err %d\n",
5781     + __func__, __LINE__, err);
5782     + goto err;
5783     + }
5784     + memcpy_toio(dbuf + offset, vvr->buf, partlen);
5785     + offset += partlen;
5786     + vdev->out_bytes += partlen;
5787     + len -= partlen;
5788     }
5789     - vdev->out_bytes += len;
5790     err = 0;
5791     err:
5792     vpdev->hw_ops->unmap(vpdev, dbuf);
5793     diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
5794     index 8531ae7811956..c49065887e8f5 100644
5795     --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
5796     +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
5797     @@ -657,8 +657,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
5798     if (retval < (int)produce_q->kernel_if->num_pages) {
5799     pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
5800     retval);
5801     - qp_release_pages(produce_q->kernel_if->u.h.header_page,
5802     - retval, false);
5803     + if (retval > 0)
5804     + qp_release_pages(produce_q->kernel_if->u.h.header_page,
5805     + retval, false);
5806     err = VMCI_ERROR_NO_MEM;
5807     goto out;
5808     }
5809     @@ -670,8 +671,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
5810     if (retval < (int)consume_q->kernel_if->num_pages) {
5811     pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
5812     retval);
5813     - qp_release_pages(consume_q->kernel_if->u.h.header_page,
5814     - retval, false);
5815     + if (retval > 0)
5816     + qp_release_pages(consume_q->kernel_if->u.h.header_page,
5817     + retval, false);
5818     qp_release_pages(produce_q->kernel_if->u.h.header_page,
5819     produce_q->kernel_if->num_pages, false);
5820     err = VMCI_ERROR_NO_MEM;
5821     diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
5822     index e0655278c5c32..3efaa9534a777 100644
5823     --- a/drivers/mmc/core/sdio_cis.c
5824     +++ b/drivers/mmc/core/sdio_cis.c
5825     @@ -26,6 +26,9 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
5826     unsigned i, nr_strings;
5827     char **buffer, *string;
5828    
5829     + if (size < 2)
5830     + return 0;
5831     +
5832     /* Find all null-terminated (including zero length) strings in
5833     the TPLLV1_INFO field. Trailing garbage is ignored. */
5834     buf += 2;
5835     diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
5836     index 0f1547f09d08b..72f5c7b300790 100644
5837     --- a/drivers/mtd/lpddr/lpddr2_nvm.c
5838     +++ b/drivers/mtd/lpddr/lpddr2_nvm.c
5839     @@ -393,6 +393,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
5840     return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
5841     }
5842    
5843     +static const struct mtd_info lpddr2_nvm_mtd_info = {
5844     + .type = MTD_RAM,
5845     + .writesize = 1,
5846     + .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
5847     + ._read = lpddr2_nvm_read,
5848     + ._write = lpddr2_nvm_write,
5849     + ._erase = lpddr2_nvm_erase,
5850     + ._unlock = lpddr2_nvm_unlock,
5851     + ._lock = lpddr2_nvm_lock,
5852     +};
5853     +
5854     /*
5855     * lpddr2_nvm driver probe method
5856     */
5857     @@ -433,6 +444,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
5858     .pfow_base = OW_BASE_ADDRESS,
5859     .fldrv_priv = pcm_data,
5860     };
5861     +
5862     if (IS_ERR(map->virt))
5863     return PTR_ERR(map->virt);
5864    
5865     @@ -444,22 +456,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
5866     return PTR_ERR(pcm_data->ctl_regs);
5867    
5868     /* Populate mtd_info data structure */
5869     - *mtd = (struct mtd_info) {
5870     - .dev = { .parent = &pdev->dev },
5871     - .name = pdev->dev.init_name,
5872     - .type = MTD_RAM,
5873     - .priv = map,
5874     - .size = resource_size(add_range),
5875     - .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width,
5876     - .writesize = 1,
5877     - .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
5878     - .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
5879     - ._read = lpddr2_nvm_read,
5880     - ._write = lpddr2_nvm_write,
5881     - ._erase = lpddr2_nvm_erase,
5882     - ._unlock = lpddr2_nvm_unlock,
5883     - ._lock = lpddr2_nvm_lock,
5884     - };
5885     + *mtd = lpddr2_nvm_mtd_info;
5886     + mtd->dev.parent = &pdev->dev;
5887     + mtd->name = pdev->dev.init_name;
5888     + mtd->priv = map;
5889     + mtd->size = resource_size(add_range);
5890     + mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width;
5891     + mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width;
5892    
5893     /* Verify the presence of the device looking for PFOW string */
5894     if (!lpddr2_nvm_pfow_present(map)) {
5895     diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
5896     index 4ced68be7ed7e..774970bfcf859 100644
5897     --- a/drivers/mtd/mtdoops.c
5898     +++ b/drivers/mtd/mtdoops.c
5899     @@ -279,12 +279,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
5900     kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
5901     record_size - MTDOOPS_HEADER_SIZE, NULL);
5902    
5903     - /* Panics must be written immediately */
5904     - if (reason != KMSG_DUMP_OOPS)
5905     + if (reason != KMSG_DUMP_OOPS) {
5906     + /* Panics must be written immediately */
5907     mtdoops_write(cxt, 1);
5908     -
5909     - /* For other cases, schedule work to write it "nicely" */
5910     - schedule_work(&cxt->work_write);
5911     + } else {
5912     + /* For other cases, schedule work to write it "nicely" */
5913     + schedule_work(&cxt->work_write);
5914     + }
5915     }
5916    
5917     static void mtdoops_notify_add(struct mtd_info *mtd)
5918     diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
5919     index 6b399a75f9aec..b6f114da57143 100644
5920     --- a/drivers/mtd/nand/raw/vf610_nfc.c
5921     +++ b/drivers/mtd/nand/raw/vf610_nfc.c
5922     @@ -850,8 +850,10 @@ static int vf610_nfc_probe(struct platform_device *pdev)
5923     }
5924    
5925     of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
5926     - if (!of_id)
5927     - return -ENODEV;
5928     + if (!of_id) {
5929     + err = -ENODEV;
5930     + goto err_disable_clk;
5931     + }
5932    
5933     nfc->variant = (enum vf610_nfc_variant)of_id->data;
5934    
5935     diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
5936     index e99d425aa93f5..b13b39763a405 100644
5937     --- a/drivers/mtd/nand/spi/gigadevice.c
5938     +++ b/drivers/mtd/nand/spi/gigadevice.c
5939     @@ -21,7 +21,7 @@
5940     #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
5941    
5942     static SPINAND_OP_VARIANTS(read_cache_variants,
5943     - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
5944     + SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
5945     SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
5946     SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
5947     SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
5948     @@ -29,7 +29,7 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
5949     SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
5950    
5951     static SPINAND_OP_VARIANTS(read_cache_variants_f,
5952     - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
5953     + SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
5954     SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
5955     SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
5956     SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
5957     @@ -201,7 +201,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
5958     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
5959     &write_cache_variants,
5960     &update_cache_variants),
5961     - 0,
5962     + SPINAND_HAS_QE_BIT,
5963     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
5964     gd5fxgq4xa_ecc_get_status)),
5965     SPINAND_INFO("GD5F2GQ4xA", 0xF2,
5966     @@ -210,7 +210,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
5967     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
5968     &write_cache_variants,
5969     &update_cache_variants),
5970     - 0,
5971     + SPINAND_HAS_QE_BIT,
5972     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
5973     gd5fxgq4xa_ecc_get_status)),
5974     SPINAND_INFO("GD5F4GQ4xA", 0xF4,
5975     @@ -219,7 +219,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
5976     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
5977     &write_cache_variants,
5978     &update_cache_variants),
5979     - 0,
5980     + SPINAND_HAS_QE_BIT,
5981     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
5982     gd5fxgq4xa_ecc_get_status)),
5983     SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
5984     @@ -228,7 +228,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
5985     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
5986     &write_cache_variants,
5987     &update_cache_variants),
5988     - 0,
5989     + SPINAND_HAS_QE_BIT,
5990     SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
5991     gd5fxgq4uexxg_ecc_get_status)),
5992     SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148,
5993     @@ -237,7 +237,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
5994     SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
5995     &write_cache_variants,
5996     &update_cache_variants),
5997     - 0,
5998     + SPINAND_HAS_QE_BIT,
5999     SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
6000     gd5fxgq4ufxxg_ecc_get_status)),
6001     };
6002     diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
6003     index e5c207ad3c77d..aaa7ed1dc97ee 100644
6004     --- a/drivers/net/can/flexcan.c
6005     +++ b/drivers/net/can/flexcan.c
6006     @@ -1232,18 +1232,23 @@ static int flexcan_chip_start(struct net_device *dev)
6007     return err;
6008     }
6009    
6010     -/* flexcan_chip_stop
6011     +/* __flexcan_chip_stop
6012     *
6013     - * this functions is entered with clocks enabled
6014     + * this function is entered with clocks enabled
6015     */
6016     -static void flexcan_chip_stop(struct net_device *dev)
6017     +static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
6018     {
6019     struct flexcan_priv *priv = netdev_priv(dev);
6020     struct flexcan_regs __iomem *regs = priv->regs;
6021     + int err;
6022    
6023     /* freeze + disable module */
6024     - flexcan_chip_freeze(priv);
6025     - flexcan_chip_disable(priv);
6026     + err = flexcan_chip_freeze(priv);
6027     + if (err && !disable_on_error)
6028     + return err;
6029     + err = flexcan_chip_disable(priv);
6030     + if (err && !disable_on_error)
6031     + goto out_chip_unfreeze;
6032    
6033     /* Disable all interrupts */
6034     priv->write(0, &regs->imask2);
6035     @@ -1253,6 +1258,23 @@ static void flexcan_chip_stop(struct net_device *dev)
6036    
6037     flexcan_transceiver_disable(priv);
6038     priv->can.state = CAN_STATE_STOPPED;
6039     +
6040     + return 0;
6041     +
6042     + out_chip_unfreeze:
6043     + flexcan_chip_unfreeze(priv);
6044     +
6045     + return err;
6046     +}
6047     +
6048     +static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev)
6049     +{
6050     + return __flexcan_chip_stop(dev, true);
6051     +}
6052     +
6053     +static inline int flexcan_chip_stop(struct net_device *dev)
6054     +{
6055     + return __flexcan_chip_stop(dev, false);
6056     }
6057    
6058     static int flexcan_open(struct net_device *dev)
6059     @@ -1341,7 +1363,7 @@ static int flexcan_close(struct net_device *dev)
6060    
6061     netif_stop_queue(dev);
6062     can_rx_offload_disable(&priv->offload);
6063     - flexcan_chip_stop(dev);
6064     + flexcan_chip_stop_disable_on_error(dev);
6065    
6066     can_rx_offload_del(&priv->offload);
6067     free_irq(dev->irq, dev);
6068     diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
6069     index 38ea5e600fb84..e6d0cb9ee02f0 100644
6070     --- a/drivers/net/can/m_can/m_can_platform.c
6071     +++ b/drivers/net/can/m_can/m_can_platform.c
6072     @@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev)
6073     struct net_device *ndev = dev_get_drvdata(dev);
6074     struct m_can_classdev *mcan_class = netdev_priv(ndev);
6075    
6076     - m_can_class_suspend(dev);
6077     -
6078     clk_disable_unprepare(mcan_class->cclk);
6079     clk_disable_unprepare(mcan_class->hclk);
6080    
6081     diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
6082     index 9a63b51e1d82f..6f2dab7e33d65 100644
6083     --- a/drivers/net/dsa/realtek-smi-core.h
6084     +++ b/drivers/net/dsa/realtek-smi-core.h
6085     @@ -25,6 +25,9 @@ struct rtl8366_mib_counter {
6086     const char *name;
6087     };
6088    
6089     +/**
6090     + * struct rtl8366_vlan_mc - Virtual LAN member configuration
6091     + */
6092     struct rtl8366_vlan_mc {
6093     u16 vid;
6094     u16 untag;
6095     @@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi);
6096     int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
6097     int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
6098     u32 untag, u32 fid);
6099     -int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
6100     int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
6101     unsigned int vid);
6102     int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
6103     diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
6104     index 99cdb2f18fa2f..49c626a336803 100644
6105     --- a/drivers/net/dsa/rtl8366.c
6106     +++ b/drivers/net/dsa/rtl8366.c
6107     @@ -36,12 +36,113 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
6108     }
6109     EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
6110    
6111     +/**
6112     + * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
6113     + * @smi: the Realtek SMI device instance
6114     + * @vid: the VLAN ID to look up or allocate
6115     + * @vlanmc: the pointer will be assigned to a pointer to a valid member config
6116     + * if successful
6117     + * @return: index of a new member config or negative error number
6118     + */
6119     +static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
6120     + struct rtl8366_vlan_mc *vlanmc)
6121     +{
6122     + struct rtl8366_vlan_4k vlan4k;
6123     + int ret;
6124     + int i;
6125     +
6126     + /* Try to find an existing member config entry for this VID */
6127     + for (i = 0; i < smi->num_vlan_mc; i++) {
6128     + ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
6129     + if (ret) {
6130     + dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
6131     + i, vid);
6132     + return ret;
6133     + }
6134     +
6135     + if (vid == vlanmc->vid)
6136     + return i;
6137     + }
6138     +
6139     + /* We have no MC entry for this VID, try to find an empty one */
6140     + for (i = 0; i < smi->num_vlan_mc; i++) {
6141     + ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
6142     + if (ret) {
6143     + dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
6144     + i, vid);
6145     + return ret;
6146     + }
6147     +
6148     + if (vlanmc->vid == 0 && vlanmc->member == 0) {
6149     + /* Update the entry from the 4K table */
6150     + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
6151     + if (ret) {
6152     + dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
6153     + i, vid);
6154     + return ret;
6155     + }
6156     +
6157     + vlanmc->vid = vid;
6158     + vlanmc->member = vlan4k.member;
6159     + vlanmc->untag = vlan4k.untag;
6160     + vlanmc->fid = vlan4k.fid;
6161     + ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
6162     + if (ret) {
6163     + dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
6164     + i, vid);
6165     + return ret;
6166     + }
6167     +
6168     + dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
6169     + i, vid);
6170     + return i;
6171     + }
6172     + }
6173     +
6174     + /* MC table is full, try to find an unused entry and replace it */
6175     + for (i = 0; i < smi->num_vlan_mc; i++) {
6176     + int used;
6177     +
6178     + ret = rtl8366_mc_is_used(smi, i, &used);
6179     + if (ret)
6180     + return ret;
6181     +
6182     + if (!used) {
6183     + /* Update the entry from the 4K table */
6184     + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
6185     + if (ret)
6186     + return ret;
6187     +
6188     + vlanmc->vid = vid;
6189     + vlanmc->member = vlan4k.member;
6190     + vlanmc->untag = vlan4k.untag;
6191     + vlanmc->fid = vlan4k.fid;
6192     + ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
6193     + if (ret) {
6194     + dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
6195     + i, vid);
6196     + return ret;
6197     + }
6198     + dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
6199     + i, vid);
6200     + return i;
6201     + }
6202     + }
6203     +
6204     + dev_err(smi->dev, "all VLAN member configurations are in use\n");
6205     + return -ENOSPC;
6206     +}
6207     +
6208     int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
6209     u32 untag, u32 fid)
6210     {
6211     + struct rtl8366_vlan_mc vlanmc;
6212     struct rtl8366_vlan_4k vlan4k;
6213     + int mc;
6214     int ret;
6215     - int i;
6216     +
6217     + if (!smi->ops->is_vlan_valid(smi, vid))
6218     + return -EINVAL;
6219    
6220     dev_dbg(smi->dev,
6221     "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
6222     @@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
6223     "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
6224     vid, vlan4k.member, vlan4k.untag);
6225    
6226     - /* Try to find an existing MC entry for this VID */
6227     - for (i = 0; i < smi->num_vlan_mc; i++) {
6228     - struct rtl8366_vlan_mc vlanmc;
6229     -
6230     - ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
6231     - if (ret)
6232     - return ret;
6233     -
6234     - if (vid == vlanmc.vid) {
6235     - /* update the MC entry */
6236     - vlanmc.member |= member;
6237     - vlanmc.untag |= untag;
6238     - vlanmc.fid = fid;
6239     -
6240     - ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
6241     + /* Find or allocate a member config for this VID */
6242     + ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
6243     + if (ret < 0)
6244     + return ret;
6245     + mc = ret;
6246    
6247     - dev_dbg(smi->dev,
6248     - "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
6249     - vid, vlanmc.member, vlanmc.untag);
6250     + /* Update the MC entry */
6251     + vlanmc.member |= member;
6252     + vlanmc.untag |= untag;
6253     + vlanmc.fid = fid;
6254    
6255     - break;
6256     - }
6257     - }
6258     + /* Commit updates to the MC entry */
6259     + ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
6260     + if (ret)
6261     + dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
6262     + mc, vid);
6263     + else
6264     + dev_dbg(smi->dev,
6265     + "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
6266     + vid, vlanmc.member, vlanmc.untag);
6267    
6268     return ret;
6269     }
6270     EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
6271    
6272     -int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
6273     -{
6274     - struct rtl8366_vlan_mc vlanmc;
6275     - int ret;
6276     - int index;
6277     -
6278     - ret = smi->ops->get_mc_index(smi, port, &index);
6279     - if (ret)
6280     - return ret;
6281     -
6282     - ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
6283     - if (ret)
6284     - return ret;
6285     -
6286     - *val = vlanmc.vid;
6287     - return 0;
6288     -}
6289     -EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
6290     -
6291     int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
6292     unsigned int vid)
6293     {
6294     struct rtl8366_vlan_mc vlanmc;
6295     - struct rtl8366_vlan_4k vlan4k;
6296     + int mc;
6297     int ret;
6298     - int i;
6299     -
6300     - /* Try to find an existing MC entry for this VID */
6301     - for (i = 0; i < smi->num_vlan_mc; i++) {
6302     - ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
6303     - if (ret)
6304     - return ret;
6305     -
6306     - if (vid == vlanmc.vid) {
6307     - ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
6308     - if (ret)
6309     - return ret;
6310     -
6311     - ret = smi->ops->set_mc_index(smi, port, i);
6312     - return ret;
6313     - }
6314     - }
6315     -
6316     - /* We have no MC entry for this VID, try to find an empty one */
6317     - for (i = 0; i < smi->num_vlan_mc; i++) {
6318     - ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
6319     - if (ret)
6320     - return ret;
6321     -
6322     - if (vlanmc.vid == 0 && vlanmc.member == 0) {
6323     - /* Update the entry from the 4K table */
6324     - ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
6325     - if (ret)
6326     - return ret;
6327    
6328     - vlanmc.vid = vid;
6329     - vlanmc.member = vlan4k.member;
6330     - vlanmc.untag = vlan4k.untag;
6331     - vlanmc.fid = vlan4k.fid;
6332     - ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
6333     - if (ret)
6334     - return ret;
6335     -
6336     - ret = smi->ops->set_mc_index(smi, port, i);
6337     - return ret;
6338     - }
6339     - }
6340     -
6341     - /* MC table is full, try to find an unused entry and replace it */
6342     - for (i = 0; i < smi->num_vlan_mc; i++) {
6343     - int used;
6344     -
6345     - ret = rtl8366_mc_is_used(smi, i, &used);
6346     - if (ret)
6347     - return ret;
6348     -
6349     - if (!used) {
6350     - /* Update the entry from the 4K table */
6351     - ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
6352     - if (ret)
6353     - return ret;
6354     + if (!smi->ops->is_vlan_valid(smi, vid))
6355     + return -EINVAL;
6356    
6357     - vlanmc.vid = vid;
6358     - vlanmc.member = vlan4k.member;
6359     - vlanmc.untag = vlan4k.untag;
6360     - vlanmc.fid = vlan4k.fid;
6361     - ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
6362     - if (ret)
6363     - return ret;
6364     + /* Find or allocate a member config for this VID */
6365     + ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
6366     + if (ret < 0)
6367     + return ret;
6368     + mc = ret;
6369    
6370     - ret = smi->ops->set_mc_index(smi, port, i);
6371     - return ret;
6372     - }
6373     + ret = smi->ops->set_mc_index(smi, port, mc);
6374     + if (ret) {
6375     + dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
6376     + mc, port);
6377     + return ret;
6378     }
6379    
6380     - dev_err(smi->dev,
6381     - "all VLAN member configurations are in use\n");
6382     + dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
6383     + port, vid, mc);
6384    
6385     - return -ENOSPC;
6386     + return 0;
6387     }
6388     EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
6389    
6390     @@ -389,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
6391     if (!smi->ops->is_vlan_valid(smi, vid))
6392     return;
6393    
6394     - dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
6395     + dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
6396     + vlan->vid_begin,
6397     port,
6398     untagged ? "untagged" : "tagged",
6399     pvid ? " PVID" : "no PVID");
6400     @@ -398,34 +425,29 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
6401     dev_err(smi->dev, "port is DSA or CPU port\n");
6402    
6403     for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
6404     - int pvid_val = 0;
6405     -
6406     - dev_info(smi->dev, "add VLAN %04x\n", vid);
6407     member |= BIT(port);
6408    
6409     if (untagged)
6410     untag |= BIT(port);
6411    
6412     - /* To ensure that we have a valid MC entry for this VLAN,
6413     - * initialize the port VLAN ID here.
6414     - */
6415     - ret = rtl8366_get_pvid(smi, port, &pvid_val);
6416     - if (ret < 0) {
6417     - dev_err(smi->dev, "could not lookup PVID for port %d\n",
6418     - port);
6419     - return;
6420     - }
6421     - if (pvid_val == 0) {
6422     - ret = rtl8366_set_pvid(smi, port, vid);
6423     - if (ret < 0)
6424     - return;
6425     - }
6426     -
6427     ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
6428     if (ret)
6429     dev_err(smi->dev,
6430     "failed to set up VLAN %04x",
6431     vid);
6432     +
6433     + if (!pvid)
6434     + continue;
6435     +
6436     + ret = rtl8366_set_pvid(smi, port, vid);
6437     + if (ret)
6438     + dev_err(smi->dev,
6439     + "failed to set PVID on port %d to VLAN %04x",
6440     + port, vid);
6441     +
6442     + if (!ret)
6443     + dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
6444     + vid, port);
6445     }
6446     }
6447     EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
6448     diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
6449     index f5cc8b0a7c74c..7f731bf369980 100644
6450     --- a/drivers/net/dsa/rtl8366rb.c
6451     +++ b/drivers/net/dsa/rtl8366rb.c
6452     @@ -1269,7 +1269,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
6453     if (smi->vlan4k_enabled)
6454     max = RTL8366RB_NUM_VIDS - 1;
6455    
6456     - if (vlan == 0 || vlan >= max)
6457     + if (vlan == 0 || vlan > max)
6458     return false;
6459    
6460     return true;
6461     diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
6462     index 16a939f9b04d5..22d634111b818 100644
6463     --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
6464     +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
6465     @@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = {
6466     PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
6467     };
6468    
6469     +static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
6470     + /* Default supported NAT modes */
6471     + {
6472     + .chip = CHELSIO_T5,
6473     + .flags = CXGB4_ACTION_NATMODE_NONE,
6474     + .natmode = NAT_MODE_NONE,
6475     + },
6476     + {
6477     + .chip = CHELSIO_T5,
6478     + .flags = CXGB4_ACTION_NATMODE_DIP,
6479     + .natmode = NAT_MODE_DIP,
6480     + },
6481     + {
6482     + .chip = CHELSIO_T5,
6483     + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
6484     + .natmode = NAT_MODE_DIP_DP,
6485     + },
6486     + {
6487     + .chip = CHELSIO_T5,
6488     + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
6489     + CXGB4_ACTION_NATMODE_SIP,
6490     + .natmode = NAT_MODE_DIP_DP_SIP,
6491     + },
6492     + {
6493     + .chip = CHELSIO_T5,
6494     + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
6495     + CXGB4_ACTION_NATMODE_SPORT,
6496     + .natmode = NAT_MODE_DIP_DP_SP,
6497     + },
6498     + {
6499     + .chip = CHELSIO_T5,
6500     + .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
6501     + .natmode = NAT_MODE_SIP_SP,
6502     + },
6503     + {
6504     + .chip = CHELSIO_T5,
6505     + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
6506     + CXGB4_ACTION_NATMODE_SPORT,
6507     + .natmode = NAT_MODE_DIP_SIP_SP,
6508     + },
6509     + {
6510     + .chip = CHELSIO_T5,
6511     + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
6512     + CXGB4_ACTION_NATMODE_DPORT |
6513     + CXGB4_ACTION_NATMODE_SPORT,
6514     + .natmode = NAT_MODE_ALL,
6515     + },
6516     + /* T6+ can ignore L4 ports when they're disabled. */
6517     + {
6518     + .chip = CHELSIO_T6,
6519     + .flags = CXGB4_ACTION_NATMODE_SIP,
6520     + .natmode = NAT_MODE_SIP_SP,
6521     + },
6522     + {
6523     + .chip = CHELSIO_T6,
6524     + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
6525     + .natmode = NAT_MODE_DIP_DP_SP,
6526     + },
6527     + {
6528     + .chip = CHELSIO_T6,
6529     + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
6530     + .natmode = NAT_MODE_ALL,
6531     + },
6532     +};
6533     +
6534     +static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
6535     + u8 natmode_flags)
6536     +{
6537     + u8 i = 0;
6538     +
6539     + /* Translate the enabled NAT 4-tuple fields to one of the
6540     + * hardware supported NAT mode configurations. This ensures
6541     + * that we pick a valid combination, where the disabled fields
6542     + * do not get overwritten to 0.
6543     + */
6544     + for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
6545     + if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
6546     + fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
6547     + return;
6548     + }
6549     + }
6550     +}
6551     +
6552     static struct ch_tc_flower_entry *allocate_flower_entry(void)
6553     {
6554     struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
6555     @@ -287,7 +370,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
6556     }
6557    
6558     static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
6559     - u32 mask, u32 offset, u8 htype)
6560     + u32 mask, u32 offset, u8 htype,
6561     + u8 *natmode_flags)
6562     {
6563     switch (htype) {
6564     case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
6565     @@ -312,60 +396,95 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
6566     switch (offset) {
6567     case PEDIT_IP4_SRC:
6568     offload_pedit(fs, val, mask, IP4_SRC);
6569     + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
6570     break;
6571     case PEDIT_IP4_DST:
6572     offload_pedit(fs, val, mask, IP4_DST);
6573     + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
6574     }
6575     - fs->nat_mode = NAT_MODE_ALL;
6576     break;
6577     case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
6578     switch (offset) {
6579     case PEDIT_IP6_SRC_31_0:
6580     offload_pedit(fs, val, mask, IP6_SRC_31_0);
6581     + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
6582     break;
6583     case PEDIT_IP6_SRC_63_32:
6584     offload_pedit(fs, val, mask, IP6_SRC_63_32);
6585     + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
6586     break;
6587     case PEDIT_IP6_SRC_95_64:
6588     offload_pedit(fs, val, mask, IP6_SRC_95_64);
6589     + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
6590     break;
6591     case PEDIT_IP6_SRC_127_96:
6592     offload_pedit(fs, val, mask, IP6_SRC_127_96);
6593     + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
6594     break;
6595     case PEDIT_IP6_DST_31_0:
6596     offload_pedit(fs, val, mask, IP6_DST_31_0);
6597     + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
6598     break;
6599     case PEDIT_IP6_DST_63_32:
6600     offload_pedit(fs, val, mask, IP6_DST_63_32);
6601     + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
6602     break;
6603     case PEDIT_IP6_DST_95_64:
6604     offload_pedit(fs, val, mask, IP6_DST_95_64);
6605     + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
6606     break;
6607     case PEDIT_IP6_DST_127_96:
6608     offload_pedit(fs, val, mask, IP6_DST_127_96);
6609     + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
6610     }
6611     - fs->nat_mode = NAT_MODE_ALL;
6612     break;
6613     case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
6614     switch (offset) {
6615     case PEDIT_TCP_SPORT_DPORT:
6616     - if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
6617     + if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
6618     fs->nat_fport = val;
6619     - else
6620     + *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
6621     + } else {
6622     fs->nat_lport = val >> 16;
6623     + *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
6624     + }
6625     }
6626     - fs->nat_mode = NAT_MODE_ALL;
6627     break;
6628     case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
6629     switch (offset) {
6630     case PEDIT_UDP_SPORT_DPORT:
6631     - if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
6632     + if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
6633     fs->nat_fport = val;
6634     - else
6635     + *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
6636     + } else {
6637     fs->nat_lport = val >> 16;
6638     + *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
6639     + }
6640     }
6641     - fs->nat_mode = NAT_MODE_ALL;
6642     + break;
6643     + }
6644     +}
6645     +
6646     +static int cxgb4_action_natmode_validate(struct net_device *dev,
6647     + struct adapter *adap, u8 natmode_flags)
6648     +{
6649     + u8 i = 0;
6650     +
6651     + /* Extract the NAT mode to enable based on what 4-tuple fields
6652     + * are enabled to be overwritten. This ensures that the
6653     + * disabled fields don't get overwritten to 0.
6654     + */
6655     + for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
6656     + const struct cxgb4_natmode_config *c;
6657     +
6658     + c = &cxgb4_natmode_config_array[i];
6659     + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
6660     + natmode_flags == c->flags)
6661     + return 0;
6662     }
6663     + netdev_err(dev, "%s: Unsupported NAT mode 4-tuple combination\n",
6664     + __func__);
6665     + return -EOPNOTSUPP;
6666     }
6667    
6668     static void cxgb4_process_flow_actions(struct net_device *in,
6669     @@ -374,6 +493,7 @@ static void cxgb4_process_flow_actions(struct net_device *in,
6670     {
6671     struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
6672     struct flow_action_entry *act;
6673     + u8 natmode_flags = 0;
6674     int i;
6675    
6676     flow_action_for_each(i, act, &rule->action) {
6677     @@ -424,13 +544,17 @@ static void cxgb4_process_flow_actions(struct net_device *in,
6678     val = act->mangle.val;
6679     offset = act->mangle.offset;
6680    
6681     - process_pedit_field(fs, val, mask, offset, htype);
6682     + process_pedit_field(fs, val, mask, offset, htype,
6683     + &natmode_flags);
6684     }
6685     break;
6686     default:
6687     break;
6688     }
6689     }
6690     + if (natmode_flags)
6691     + cxgb4_action_natmode_tweak(fs, natmode_flags);
6692     +
6693     }
6694    
6695     static bool valid_l4_mask(u32 mask)
6696     @@ -447,7 +571,8 @@ static bool valid_l4_mask(u32 mask)
6697     }
6698    
6699     static bool valid_pedit_action(struct net_device *dev,
6700     - const struct flow_action_entry *act)
6701     + const struct flow_action_entry *act,
6702     + u8 *natmode_flags)
6703     {
6704     u32 mask, offset;
6705     u8 htype;
6706     @@ -472,7 +597,10 @@ static bool valid_pedit_action(struct net_device *dev,
6707     case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
6708     switch (offset) {
6709     case PEDIT_IP4_SRC:
6710     + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
6711     + break;
6712     case PEDIT_IP4_DST:
6713     + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
6714     break;
6715     default:
6716     netdev_err(dev, "%s: Unsupported pedit field\n",
6717     @@ -486,10 +614,13 @@ static bool valid_pedit_action(struct net_device *dev,
6718     case PEDIT_IP6_SRC_63_32:
6719     case PEDIT_IP6_SRC_95_64:
6720     case PEDIT_IP6_SRC_127_96:
6721     + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
6722     + break;
6723     case PEDIT_IP6_DST_31_0:
6724     case PEDIT_IP6_DST_63_32:
6725     case PEDIT_IP6_DST_95_64:
6726     case PEDIT_IP6_DST_127_96:
6727     + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
6728     break;
6729     default:
6730     netdev_err(dev, "%s: Unsupported pedit field\n",
6731     @@ -505,6 +636,10 @@ static bool valid_pedit_action(struct net_device *dev,
6732     __func__);
6733     return false;
6734     }
6735     + if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
6736     + *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
6737     + else
6738     + *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
6739     break;
6740     default:
6741     netdev_err(dev, "%s: Unsupported pedit field\n",
6742     @@ -520,6 +655,10 @@ static bool valid_pedit_action(struct net_device *dev,
6743     __func__);
6744     return false;
6745     }
6746     + if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
6747     + *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
6748     + else
6749     + *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
6750     break;
6751     default:
6752     netdev_err(dev, "%s: Unsupported pedit field\n",
6753     @@ -538,10 +677,12 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
6754     struct flow_cls_offload *cls)
6755     {
6756     struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
6757     + struct adapter *adap = netdev2adap(dev);
6758     struct flow_action_entry *act;
6759     bool act_redir = false;
6760     bool act_pedit = false;
6761     bool act_vlan = false;
6762     + u8 natmode_flags = 0;
6763     int i;
6764    
6765     flow_action_for_each(i, act, &rule->action) {
6766     @@ -551,7 +692,6 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
6767     /* Do nothing */
6768     break;
6769     case FLOW_ACTION_REDIRECT: {
6770     - struct adapter *adap = netdev2adap(dev);
6771     struct net_device *n_dev, *target_dev;
6772     unsigned int i;
6773     bool found = false;
6774     @@ -601,7 +741,8 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
6775     }
6776     break;
6777     case FLOW_ACTION_MANGLE: {
6778     - bool pedit_valid = valid_pedit_action(dev, act);
6779     + bool pedit_valid = valid_pedit_action(dev, act,
6780     + &natmode_flags);
6781    
6782     if (!pedit_valid)
6783     return -EOPNOTSUPP;
6784     @@ -620,6 +761,14 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
6785     return -EINVAL;
6786     }
6787    
6788     + if (act_pedit) {
6789     + int ret;
6790     +
6791     + ret = cxgb4_action_natmode_validate(dev, adap, natmode_flags);
6792     + if (ret)
6793     + return ret;
6794     + }
6795     +
6796     return 0;
6797     }
6798    
6799     diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
6800     index eb4c95248baf6..c905debe6f7ac 100644
6801     --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
6802     +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
6803     @@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
6804     #define PEDIT_TCP_SPORT_DPORT 0x0
6805     #define PEDIT_UDP_SPORT_DPORT 0x0
6806    
6807     +enum cxgb4_action_natmode_flags {
6808     + CXGB4_ACTION_NATMODE_NONE = 0,
6809     + CXGB4_ACTION_NATMODE_DIP = (1 << 0),
6810     + CXGB4_ACTION_NATMODE_SIP = (1 << 1),
6811     + CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
6812     + CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
6813     +};
6814     +
6815     +/* TC PEDIT action to NATMODE translation entry */
6816     +struct cxgb4_natmode_config {
6817     + enum chip_type chip;
6818     + u8 flags;
6819     + u8 natmode;
6820     +};
6821     +
6822     int cxgb4_tc_flower_replace(struct net_device *dev,
6823     struct flow_cls_offload *cls);
6824     int cxgb4_tc_flower_destroy(struct net_device *dev,
6825     diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
6826     index 0dd64acd2a3fb..08cac1bfacafb 100644
6827     --- a/drivers/net/ethernet/cisco/enic/enic.h
6828     +++ b/drivers/net/ethernet/cisco/enic/enic.h
6829     @@ -171,6 +171,7 @@ struct enic {
6830     u16 num_vfs;
6831     #endif
6832     spinlock_t enic_api_lock;
6833     + bool enic_api_busy;
6834     struct enic_port_profile *pp;
6835    
6836     /* work queue cache line section */
6837     diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
6838     index b161f24522b87..b028ea2dec2b9 100644
6839     --- a/drivers/net/ethernet/cisco/enic/enic_api.c
6840     +++ b/drivers/net/ethernet/cisco/enic/enic_api.c
6841     @@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
6842     struct vnic_dev *vdev = enic->vdev;
6843    
6844     spin_lock(&enic->enic_api_lock);
6845     + while (enic->enic_api_busy) {
6846     + spin_unlock(&enic->enic_api_lock);
6847     + cpu_relax();
6848     + spin_lock(&enic->enic_api_lock);
6849     + }
6850     +
6851     spin_lock_bh(&enic->devcmd_lock);
6852    
6853     vnic_dev_cmd_proxy_by_index_start(vdev, vf);
6854     diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
6855     index 6e2ab10ad2e6f..8314102002b0f 100644
6856     --- a/drivers/net/ethernet/cisco/enic/enic_main.c
6857     +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
6858     @@ -2142,8 +2142,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
6859     int done;
6860     int err;
6861    
6862     - BUG_ON(in_interrupt());
6863     -
6864     err = start(vdev, arg);
6865     if (err)
6866     return err;
6867     @@ -2331,6 +2329,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
6868     rss_hash_bits, rss_base_cpu, rss_enable);
6869     }
6870    
6871     +static void enic_set_api_busy(struct enic *enic, bool busy)
6872     +{
6873     + spin_lock(&enic->enic_api_lock);
6874     + enic->enic_api_busy = busy;
6875     + spin_unlock(&enic->enic_api_lock);
6876     +}
6877     +
6878     static void enic_reset(struct work_struct *work)
6879     {
6880     struct enic *enic = container_of(work, struct enic, reset);
6881     @@ -2340,7 +2345,9 @@ static void enic_reset(struct work_struct *work)
6882    
6883     rtnl_lock();
6884    
6885     - spin_lock(&enic->enic_api_lock);
6886     + /* Stop any activity from infiniband */
6887     + enic_set_api_busy(enic, true);
6888     +
6889     enic_stop(enic->netdev);
6890     enic_dev_soft_reset(enic);
6891     enic_reset_addr_lists(enic);
6892     @@ -2348,7 +2355,10 @@ static void enic_reset(struct work_struct *work)
6893     enic_set_rss_nic_cfg(enic);
6894     enic_dev_set_ig_vlan_rewrite_mode(enic);
6895     enic_open(enic->netdev);
6896     - spin_unlock(&enic->enic_api_lock);
6897     +
6898     + /* Allow infiniband to fiddle with the device again */
6899     + enic_set_api_busy(enic, false);
6900     +
6901     call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
6902    
6903     rtnl_unlock();
6904     @@ -2360,7 +2370,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
6905    
6906     rtnl_lock();
6907    
6908     - spin_lock(&enic->enic_api_lock);
6909     + /* Stop any activity from infiniband */
6910     + enic_set_api_busy(enic, true);
6911     +
6912     enic_dev_hang_notify(enic);
6913     enic_stop(enic->netdev);
6914     enic_dev_hang_reset(enic);
6915     @@ -2369,7 +2381,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
6916     enic_set_rss_nic_cfg(enic);
6917     enic_dev_set_ig_vlan_rewrite_mode(enic);
6918     enic_open(enic->netdev);
6919     - spin_unlock(&enic->enic_api_lock);
6920     +
6921     + /* Allow infiniband to fiddle with the device again */
6922     + enic_set_api_busy(enic, false);
6923     +
6924     call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
6925    
6926     rtnl_unlock();
6927     diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
6928     index 96e9565f1e08a..1fbc243fc3f4c 100644
6929     --- a/drivers/net/ethernet/faraday/ftgmac100.c
6930     +++ b/drivers/net/ethernet/faraday/ftgmac100.c
6931     @@ -1807,6 +1807,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
6932     priv->rxdes0_edorr_mask = BIT(30);
6933     priv->txdes0_edotr_mask = BIT(30);
6934     priv->is_aspeed = true;
6935     + /* Disable ast2600 problematic HW arbitration */
6936     + if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
6937     + iowrite32(FTGMAC100_TM_DEFAULT,
6938     + priv->base + FTGMAC100_OFFSET_TM);
6939     + }
6940     } else {
6941     priv->rxdes0_edorr_mask = BIT(15);
6942     priv->txdes0_edotr_mask = BIT(15);
6943     diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
6944     index e5876a3fda91d..63b3e02fab162 100644
6945     --- a/drivers/net/ethernet/faraday/ftgmac100.h
6946     +++ b/drivers/net/ethernet/faraday/ftgmac100.h
6947     @@ -169,6 +169,14 @@
6948     #define FTGMAC100_MACCR_FAST_MODE (1 << 19)
6949     #define FTGMAC100_MACCR_SW_RST (1 << 31)
6950    
6951     +/*
6952     + * test mode control register
6953     + */
6954     +#define FTGMAC100_TM_RQ_TX_VALID_DIS (1 << 28)
6955     +#define FTGMAC100_TM_RQ_RR_IDLE_PREV (1 << 27)
6956     +#define FTGMAC100_TM_DEFAULT \
6957     + (FTGMAC100_TM_RQ_TX_VALID_DIS | FTGMAC100_TM_RQ_RR_IDLE_PREV)
6958     +
6959     /*
6960     * PHY control register
6961     */
6962     diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
6963     index a0e4b12ac4ea2..fd7fc6f20c9da 100644
6964     --- a/drivers/net/ethernet/freescale/fec_main.c
6965     +++ b/drivers/net/ethernet/freescale/fec_main.c
6966     @@ -1945,6 +1945,27 @@ out:
6967     return ret;
6968     }
6969    
6970     +static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
6971     +{
6972     + struct fec_enet_private *fep = netdev_priv(ndev);
6973     + struct phy_device *phy_dev = ndev->phydev;
6974     +
6975     + if (phy_dev) {
6976     + phy_reset_after_clk_enable(phy_dev);
6977     + } else if (fep->phy_node) {
6978     + /*
6979     + * If the PHY still is not bound to the MAC, but there is
6980     + * OF PHY node and a matching PHY device instance already,
6981     + * use the OF PHY node to obtain the PHY device instance,
6982     + * and then use that PHY device instance when triggering
6983     + * the PHY reset.
6984     + */
6985     + phy_dev = of_phy_find_device(fep->phy_node);
6986     + phy_reset_after_clk_enable(phy_dev);
6987     + put_device(&phy_dev->mdio.dev);
6988     + }
6989     +}
6990     +
6991     static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
6992     {
6993     struct fec_enet_private *fep = netdev_priv(ndev);
6994     @@ -1971,7 +1992,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
6995     if (ret)
6996     goto failed_clk_ref;
6997    
6998     - phy_reset_after_clk_enable(ndev->phydev);
6999     + fec_enet_phy_reset_after_clk_enable(ndev);
7000     } else {
7001     clk_disable_unprepare(fep->clk_enet_out);
7002     if (fep->clk_ptp) {
7003     @@ -2982,16 +3003,16 @@ fec_enet_open(struct net_device *ndev)
7004     /* Init MAC prior to mii bus probe */
7005     fec_restart(ndev);
7006    
7007     - /* Probe and connect to PHY when open the interface */
7008     - ret = fec_enet_mii_probe(ndev);
7009     - if (ret)
7010     - goto err_enet_mii_probe;
7011     -
7012     /* Call phy_reset_after_clk_enable() again if it failed during
7013     * phy_reset_after_clk_enable() before because the PHY wasn't probed.
7014     */
7015     if (reset_again)
7016     - phy_reset_after_clk_enable(ndev->phydev);
7017     + fec_enet_phy_reset_after_clk_enable(ndev);
7018     +
7019     + /* Probe and connect to PHY when open the interface */
7020     + ret = fec_enet_mii_probe(ndev);
7021     + if (ret)
7022     + goto err_enet_mii_probe;
7023    
7024     if (fep->quirks & FEC_QUIRK_ERR006687)
7025     imx6q_cpuidle_fec_irqs_used();
7026     diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
7027     index aa32a5b041129..a20d9147d5f22 100644
7028     --- a/drivers/net/ethernet/ibm/ibmveth.c
7029     +++ b/drivers/net/ethernet/ibm/ibmveth.c
7030     @@ -1317,6 +1317,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
7031     int offset = ibmveth_rxq_frame_offset(adapter);
7032     int csum_good = ibmveth_rxq_csum_good(adapter);
7033     int lrg_pkt = ibmveth_rxq_large_packet(adapter);
7034     + __sum16 iph_check = 0;
7035    
7036     skb = ibmveth_rxq_get_buffer(adapter);
7037    
7038     @@ -1353,16 +1354,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
7039     skb_put(skb, length);
7040     skb->protocol = eth_type_trans(skb, netdev);
7041    
7042     - if (csum_good) {
7043     - skb->ip_summed = CHECKSUM_UNNECESSARY;
7044     - ibmveth_rx_csum_helper(skb, adapter);
7045     + /* PHYP without PLSO support places a -1 in the ip
7046     + * checksum for large send frames.
7047     + */
7048     + if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
7049     + struct iphdr *iph = (struct iphdr *)skb->data;
7050     +
7051     + iph_check = iph->check;
7052     }
7053    
7054     - if (length > netdev->mtu + ETH_HLEN) {
7055     + if ((length > netdev->mtu + ETH_HLEN) ||
7056     + lrg_pkt || iph_check == 0xffff) {
7057     ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
7058     adapter->rx_large_packets++;
7059     }
7060    
7061     + if (csum_good) {
7062     + skb->ip_summed = CHECKSUM_UNNECESSARY;
7063     + ibmveth_rx_csum_helper(skb, adapter);
7064     + }
7065     +
7066     napi_gro_receive(napi, skb); /* send it up */
7067    
7068     netdev->stats.rx_packets++;
7069     diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
7070     index 5329af2337a91..48105a2eebe4d 100644
7071     --- a/drivers/net/ethernet/ibm/ibmvnic.c
7072     +++ b/drivers/net/ethernet/ibm/ibmvnic.c
7073     @@ -4074,8 +4074,13 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
7074     dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
7075     goto out;
7076     }
7077     + /* crq->change_mac_addr.mac_addr is the requested one
7078     + * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
7079     + */
7080     ether_addr_copy(netdev->dev_addr,
7081     &crq->change_mac_addr_rsp.mac_addr[0]);
7082     + ether_addr_copy(adapter->mac_addr,
7083     + &crq->change_mac_addr_rsp.mac_addr[0]);
7084     out:
7085     complete(&adapter->fw_done);
7086     return rc;
7087     @@ -4472,7 +4477,7 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
7088     case IBMVNIC_1GBPS:
7089     adapter->speed = SPEED_1000;
7090     break;
7091     - case IBMVNIC_10GBP:
7092     + case IBMVNIC_10GBPS:
7093     adapter->speed = SPEED_10000;
7094     break;
7095     case IBMVNIC_25GBPS:
7096     @@ -4487,6 +4492,9 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
7097     case IBMVNIC_100GBPS:
7098     adapter->speed = SPEED_100000;
7099     break;
7100     + case IBMVNIC_200GBPS:
7101     + adapter->speed = SPEED_200000;
7102     + break;
7103     default:
7104     if (netif_carrier_ok(netdev))
7105     netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
7106     diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
7107     index ebc39248b334a..0da20f19bb17c 100644
7108     --- a/drivers/net/ethernet/ibm/ibmvnic.h
7109     +++ b/drivers/net/ethernet/ibm/ibmvnic.h
7110     @@ -373,7 +373,7 @@ struct ibmvnic_phys_parms {
7111     #define IBMVNIC_10MBPS 0x40000000
7112     #define IBMVNIC_100MBPS 0x20000000
7113     #define IBMVNIC_1GBPS 0x10000000
7114     -#define IBMVNIC_10GBP 0x08000000
7115     +#define IBMVNIC_10GBPS 0x08000000
7116     #define IBMVNIC_40GBPS 0x04000000
7117     #define IBMVNIC_100GBPS 0x02000000
7118     #define IBMVNIC_25GBPS 0x01000000
7119     diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
7120     index ae195f8adff58..993f495e2bf7b 100644
7121     --- a/drivers/net/ethernet/korina.c
7122     +++ b/drivers/net/ethernet/korina.c
7123     @@ -1113,7 +1113,7 @@ out:
7124     return rc;
7125    
7126     probe_err_register:
7127     - kfree(lp->td_ring);
7128     + kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
7129     probe_err_td_ring:
7130     iounmap(lp->tx_dma_regs);
7131     probe_err_dma_tx:
7132     @@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev)
7133     iounmap(lp->eth_regs);
7134     iounmap(lp->rx_dma_regs);
7135     iounmap(lp->tx_dma_regs);
7136     + kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
7137    
7138     unregister_netdev(bif->dev);
7139     free_netdev(bif->dev);
7140     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
7141     index db3552f2d0877..f9797e5038841 100644
7142     --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
7143     +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
7144     @@ -942,6 +942,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
7145     bool clean_complete = true;
7146     int done;
7147    
7148     + if (!budget)
7149     + return 0;
7150     +
7151     if (priv->tx_ring_num[TX_XDP]) {
7152     xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
7153     if (xdp_tx_cq->xdp_busy) {
7154     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
7155     index a30edb436f4af..191ead7a7fa59 100644
7156     --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
7157     +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
7158     @@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
7159     .dma = tx_info->map0_dma,
7160     };
7161    
7162     - if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
7163     + if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
7164     dma_unmap_page(priv->ddev, tx_info->map0_dma,
7165     PAGE_SIZE, priv->dma_dir);
7166     put_page(tx_info->page);
7167     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
7168     index 75fc283cacc36..492ff2ef9a404 100644
7169     --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
7170     +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
7171     @@ -498,8 +498,9 @@ static int mlx5_pps_event(struct notifier_block *nb,
7172     switch (clock->ptp_info.pin_config[pin].func) {
7173     case PTP_PF_EXTTS:
7174     ptp_event.index = pin;
7175     - ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
7176     - be64_to_cpu(eqe->data.pps.time_stamp));
7177     + ptp_event.timestamp =
7178     + mlx5_timecounter_cyc2time(clock,
7179     + be64_to_cpu(eqe->data.pps.time_stamp));
7180     if (clock->pps_info.enabled) {
7181     ptp_event.type = PTP_CLOCK_PPSUSR;
7182     ptp_event.pps_times.ts_real =
7183     diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
7184     index 66c97049f52b7..f838abdb35e1d 100644
7185     --- a/drivers/net/ethernet/realtek/r8169_main.c
7186     +++ b/drivers/net/ethernet/realtek/r8169_main.c
7187     @@ -5514,6 +5514,10 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
7188     dev->mtu = new_mtu;
7189     netdev_update_features(dev);
7190    
7191     + /* Reportedly at least Asus X453MA truncates packets otherwise */
7192     + if (tp->mac_version == RTL_GIGA_MAC_VER_37)
7193     + rtl_init_rxcfg(tp);
7194     +
7195     return 0;
7196     }
7197    
7198     @@ -6414,7 +6418,7 @@ static int rtl8169_close(struct net_device *dev)
7199    
7200     phy_disconnect(tp->phydev);
7201    
7202     - pci_free_irq(pdev, 0, tp);
7203     + free_irq(pci_irq_vector(pdev, 0), tp);
7204    
7205     dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
7206     tp->RxPhyAddr);
7207     @@ -6465,8 +6469,8 @@ static int rtl_open(struct net_device *dev)
7208    
7209     rtl_request_firmware(tp);
7210    
7211     - retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
7212     - dev->name);
7213     + retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
7214     + IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
7215     if (retval < 0)
7216     goto err_release_fw_2;
7217    
7218     @@ -6499,7 +6503,7 @@ out:
7219     return retval;
7220    
7221     err_free_irq:
7222     - pci_free_irq(pdev, 0, tp);
7223     + free_irq(pci_irq_vector(pdev, 0), tp);
7224     err_release_fw_2:
7225     rtl_release_firmware(tp);
7226     rtl8169_rx_clear(tp);
7227     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7228     index 982be75fde833..189cdb7633671 100644
7229     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7230     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7231     @@ -175,32 +175,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
7232     }
7233     }
7234    
7235     -/**
7236     - * stmmac_stop_all_queues - Stop all queues
7237     - * @priv: driver private structure
7238     - */
7239     -static void stmmac_stop_all_queues(struct stmmac_priv *priv)
7240     -{
7241     - u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
7242     - u32 queue;
7243     -
7244     - for (queue = 0; queue < tx_queues_cnt; queue++)
7245     - netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
7246     -}
7247     -
7248     -/**
7249     - * stmmac_start_all_queues - Start all queues
7250     - * @priv: driver private structure
7251     - */
7252     -static void stmmac_start_all_queues(struct stmmac_priv *priv)
7253     -{
7254     - u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
7255     - u32 queue;
7256     -
7257     - for (queue = 0; queue < tx_queues_cnt; queue++)
7258     - netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
7259     -}
7260     -
7261     static void stmmac_service_event_schedule(struct stmmac_priv *priv)
7262     {
7263     if (!test_bit(STMMAC_DOWN, &priv->state) &&
7264     @@ -2737,7 +2711,7 @@ static int stmmac_open(struct net_device *dev)
7265     }
7266    
7267     stmmac_enable_all_queues(priv);
7268     - stmmac_start_all_queues(priv);
7269     + netif_tx_start_all_queues(priv->dev);
7270    
7271     return 0;
7272    
7273     @@ -2778,8 +2752,6 @@ static int stmmac_release(struct net_device *dev)
7274     phylink_stop(priv->phylink);
7275     phylink_disconnect_phy(priv->phylink);
7276    
7277     - stmmac_stop_all_queues(priv);
7278     -
7279     stmmac_disable_all_queues(priv);
7280    
7281     for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7282     @@ -4770,7 +4742,6 @@ int stmmac_suspend(struct device *dev)
7283     mutex_lock(&priv->lock);
7284    
7285     netif_device_detach(ndev);
7286     - stmmac_stop_all_queues(priv);
7287    
7288     stmmac_disable_all_queues(priv);
7289    
7290     @@ -4883,8 +4854,6 @@ int stmmac_resume(struct device *dev)
7291    
7292     stmmac_enable_all_queues(priv);
7293    
7294     - stmmac_start_all_queues(priv);
7295     -
7296     mutex_unlock(&priv->lock);
7297    
7298     if (!device_may_wakeup(priv->device)) {
7299     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
7300     index e57d59b0a7ae9..21d905d90650b 100644
7301     --- a/drivers/net/usb/qmi_wwan.c
7302     +++ b/drivers/net/usb/qmi_wwan.c
7303     @@ -1375,6 +1375,7 @@ static const struct usb_device_id products[] = {
7304     {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
7305     {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
7306     {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
7307     + {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
7308    
7309     /* 4. Gobi 1000 devices */
7310     {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
7311     diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
7312     index dfc16770458d8..8b6598a3713d1 100644
7313     --- a/drivers/net/wan/hdlc.c
7314     +++ b/drivers/net/wan/hdlc.c
7315     @@ -46,7 +46,15 @@ static struct hdlc_proto *first_proto;
7316     static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
7317     struct packet_type *p, struct net_device *orig_dev)
7318     {
7319     - struct hdlc_device *hdlc = dev_to_hdlc(dev);
7320     + struct hdlc_device *hdlc;
7321     +
7322     + /* First make sure "dev" is an HDLC device */
7323     + if (!(dev->priv_flags & IFF_WAN_HDLC)) {
7324     + kfree_skb(skb);
7325     + return NET_RX_SUCCESS;
7326     + }
7327     +
7328     + hdlc = dev_to_hdlc(dev);
7329    
7330     if (!net_eq(dev_net(dev), &init_net)) {
7331     kfree_skb(skb);
7332     diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
7333     index 08e0a46501dec..c70a518b8b478 100644
7334     --- a/drivers/net/wan/hdlc_raw_eth.c
7335     +++ b/drivers/net/wan/hdlc_raw_eth.c
7336     @@ -99,6 +99,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
7337     old_qlen = dev->tx_queue_len;
7338     ether_setup(dev);
7339     dev->tx_queue_len = old_qlen;
7340     + dev->priv_flags &= ~IFF_TX_SKB_SHARING;
7341     eth_hw_addr_random(dev);
7342     call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
7343     netif_dormant_off(dev);
7344     diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
7345     index eca87f7c5b6c1..01e05af5ae085 100644
7346     --- a/drivers/net/wireless/ath/ath10k/ce.c
7347     +++ b/drivers/net/wireless/ath/ath10k/ce.c
7348     @@ -1555,7 +1555,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
7349     ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
7350     if (ret) {
7351     dma_free_coherent(ar->dev,
7352     - (nentries * sizeof(struct ce_desc_64) +
7353     + (nentries * sizeof(struct ce_desc) +
7354     CE_DESC_RING_ALIGN),
7355     src_ring->base_addr_owner_space_unaligned,
7356     base_addr);
7357     diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
7358     index 9f0e7b4943ec6..8ca0a808a644d 100644
7359     --- a/drivers/net/wireless/ath/ath10k/htt_rx.c
7360     +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
7361     @@ -142,6 +142,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
7362     BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
7363    
7364     idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
7365     +
7366     + if (idx < 0 || idx >= htt->rx_ring.size) {
7367     + ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
7368     + idx &= htt->rx_ring.size_mask;
7369     + ret = -ENOMEM;
7370     + goto fail;
7371     + }
7372     +
7373     while (num > 0) {
7374     skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
7375     if (!skb) {
7376     diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
7377     index d373602a80145..915ba2a7f7448 100644
7378     --- a/drivers/net/wireless/ath/ath10k/mac.c
7379     +++ b/drivers/net/wireless/ath/ath10k/mac.c
7380     @@ -7131,7 +7131,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
7381     struct ieee80211_channel *channel)
7382     {
7383     int ret;
7384     - enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
7385     + enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
7386    
7387     lockdep_assert_held(&ar->conf_mutex);
7388    
7389     diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
7390     index 5e7ea838a9218..814131a0680a4 100644
7391     --- a/drivers/net/wireless/ath/ath6kl/main.c
7392     +++ b/drivers/net/wireless/ath/ath6kl/main.c
7393     @@ -430,6 +430,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
7394    
7395     ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
7396    
7397     + if (aid < 1 || aid > AP_MAX_NUM_STA)
7398     + return;
7399     +
7400     if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
7401     struct ieee80211_mgmt *mgmt =
7402     (struct ieee80211_mgmt *) assoc_info;
7403     diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
7404     index 2382c6c46851e..c610fe21c85c0 100644
7405     --- a/drivers/net/wireless/ath/ath6kl/wmi.c
7406     +++ b/drivers/net/wireless/ath/ath6kl/wmi.c
7407     @@ -2645,6 +2645,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
7408     return -EINVAL;
7409     }
7410    
7411     + if (tsid >= 16) {
7412     + ath6kl_err("invalid tsid: %d\n", tsid);
7413     + return -EINVAL;
7414     + }
7415     +
7416     skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
7417     if (!skb)
7418     return -ENOMEM;
7419     diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
7420     index 3f563e02d17da..2ed98aaed6fb5 100644
7421     --- a/drivers/net/wireless/ath/ath9k/hif_usb.c
7422     +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
7423     @@ -449,10 +449,19 @@ static void hif_usb_stop(void *hif_handle)
7424     spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
7425    
7426     /* The pending URBs have to be canceled. */
7427     + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
7428     list_for_each_entry_safe(tx_buf, tx_buf_tmp,
7429     &hif_dev->tx.tx_pending, list) {
7430     + usb_get_urb(tx_buf->urb);
7431     + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
7432     usb_kill_urb(tx_buf->urb);
7433     + list_del(&tx_buf->list);
7434     + usb_free_urb(tx_buf->urb);
7435     + kfree(tx_buf->buf);
7436     + kfree(tx_buf);
7437     + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
7438     }
7439     + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
7440    
7441     usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
7442     }
7443     @@ -762,27 +771,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
7444     struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
7445     unsigned long flags;
7446    
7447     + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
7448     list_for_each_entry_safe(tx_buf, tx_buf_tmp,
7449     &hif_dev->tx.tx_buf, list) {
7450     + usb_get_urb(tx_buf->urb);
7451     + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
7452     usb_kill_urb(tx_buf->urb);
7453     list_del(&tx_buf->list);
7454     usb_free_urb(tx_buf->urb);
7455     kfree(tx_buf->buf);
7456     kfree(tx_buf);
7457     + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
7458     }
7459     + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
7460    
7461     spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
7462     hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
7463     spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
7464    
7465     + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
7466     list_for_each_entry_safe(tx_buf, tx_buf_tmp,
7467     &hif_dev->tx.tx_pending, list) {
7468     + usb_get_urb(tx_buf->urb);
7469     + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
7470     usb_kill_urb(tx_buf->urb);
7471     list_del(&tx_buf->list);
7472     usb_free_urb(tx_buf->urb);
7473     kfree(tx_buf->buf);
7474     kfree(tx_buf);
7475     + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
7476     }
7477     + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
7478    
7479     usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
7480     }
7481     diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
7482     index d2e062eaf5614..510e61e97dbcb 100644
7483     --- a/drivers/net/wireless/ath/ath9k/htc_hst.c
7484     +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
7485     @@ -339,6 +339,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
7486    
7487     if (skb) {
7488     htc_hdr = (struct htc_frame_hdr *) skb->data;
7489     + if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint))
7490     + goto ret;
7491     endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
7492     skb_pull(skb, sizeof(struct htc_frame_hdr));
7493    
7494     diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
7495     index ad051f34e65b2..46ae4ec4ad47d 100644
7496     --- a/drivers/net/wireless/ath/wcn36xx/main.c
7497     +++ b/drivers/net/wireless/ath/wcn36xx/main.c
7498     @@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
7499     .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
7500     .mcs = {
7501     .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
7502     - .rx_highest = cpu_to_le16(72),
7503     + .rx_highest = cpu_to_le16(150),
7504     .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
7505     }
7506     }
7507     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
7508     index 85cf96461ddeb..e9bb8dbdc9aa8 100644
7509     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
7510     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
7511     @@ -483,7 +483,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
7512     ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
7513    
7514     if (ret || !(*ifp) || !(*ifp)->ndev) {
7515     - if (ret != -ENODATA && *ifp)
7516     + if (ret != -ENODATA && *ifp && (*ifp)->ndev)
7517     (*ifp)->ndev->stats.rx_errors++;
7518     brcmu_pkt_buf_free_skb(skb);
7519     return -ENODATA;
7520     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
7521     index e3dd8623be4ec..c2705d7a4247e 100644
7522     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
7523     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
7524     @@ -1619,6 +1619,8 @@ fail:
7525     BRCMF_TX_IOCTL_MAX_MSG_SIZE,
7526     msgbuf->ioctbuf,
7527     msgbuf->ioctbuf_handle);
7528     + if (msgbuf->txflow_wq)
7529     + destroy_workqueue(msgbuf->txflow_wq);
7530     kfree(msgbuf);
7531     }
7532     return -ENOMEM;
7533     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
7534     index 7ef36234a25dc..66797dc5e90d5 100644
7535     --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
7536     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
7537     @@ -5065,8 +5065,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
7538     pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
7539     pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
7540    
7541     - if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
7542     + if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) {
7543     + kfree(pi->u.pi_lcnphy);
7544     return false;
7545     + }
7546    
7547     if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
7548     if (pi_lcn->lcnphy_tempsense_option == 3) {
7549     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
7550     index ed92a8e8cd519..01b26b3327b01 100644
7551     --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
7552     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
7553     @@ -3650,9 +3650,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
7554     tail->apply_time_max_delay = cpu_to_le32(delay);
7555    
7556     IWL_DEBUG_TE(mvm,
7557     - "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
7558     - channel->hw_value, req_dur, duration, delay,
7559     - dtim_interval);
7560     + "ROC: Requesting to remain on channel %u for %ums\n",
7561     + channel->hw_value, req_dur);
7562     + IWL_DEBUG_TE(mvm,
7563     + "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
7564     + duration, delay, dtim_interval);
7565     +
7566     /* Set the node address */
7567     memcpy(tail->node_addr, vif->addr, ETH_ALEN);
7568    
7569     diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
7570     index 59f0651d148bb..629af26675cf1 100644
7571     --- a/drivers/net/wireless/marvell/mwifiex/scan.c
7572     +++ b/drivers/net/wireless/marvell/mwifiex/scan.c
7573     @@ -1891,7 +1891,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
7574     chan, CFG80211_BSS_FTYPE_UNKNOWN,
7575     bssid, timestamp,
7576     cap_info_bitmap, beacon_period,
7577     - ie_buf, ie_len, rssi, GFP_KERNEL);
7578     + ie_buf, ie_len, rssi, GFP_ATOMIC);
7579     if (bss) {
7580     bss_priv = (struct mwifiex_bss_priv *)bss->priv;
7581     bss_priv->band = band;
7582     diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
7583     index fec38b6e86ffd..b322c2755e9a4 100644
7584     --- a/drivers/net/wireless/marvell/mwifiex/sdio.c
7585     +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
7586     @@ -1996,6 +1996,8 @@ error:
7587     kfree(card->mpa_rx.buf);
7588     card->mpa_tx.buf_size = 0;
7589     card->mpa_rx.buf_size = 0;
7590     + card->mpa_tx.buf = NULL;
7591     + card->mpa_rx.buf = NULL;
7592     }
7593    
7594     return ret;
7595     diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
7596     index c2365eeb70168..528107d70c1cb 100644
7597     --- a/drivers/net/wireless/marvell/mwifiex/usb.c
7598     +++ b/drivers/net/wireless/marvell/mwifiex/usb.c
7599     @@ -1353,7 +1353,8 @@ static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter)
7600     skb_dequeue(&port->tx_aggr.aggr_list)))
7601     mwifiex_write_data_complete(adapter, skb_tmp,
7602     0, -1);
7603     - del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
7604     + if (port->tx_aggr.timer_cnxt.hold_timer.function)
7605     + del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
7606     port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
7607     port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
7608     }
7609     diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
7610     index c0c32805fb8de..106f1a846f499 100644
7611     --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
7612     +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
7613     @@ -834,6 +834,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
7614     default:
7615     pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
7616     vif->vifid, vif->wdev.iftype);
7617     + dev_kfree_skb(cmd_skb);
7618     ret = -EINVAL;
7619     goto out;
7620     }
7621     @@ -1996,6 +1997,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
7622     break;
7623     default:
7624     pr_err("unsupported iftype %d\n", vif->wdev.iftype);
7625     + dev_kfree_skb(cmd_skb);
7626     ret = -EINVAL;
7627     goto out;
7628     }
7629     diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
7630     index 3499b211dad51..048984ca81fdb 100644
7631     --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
7632     +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
7633     @@ -5447,7 +5447,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
7634     ret = usb_submit_urb(urb, GFP_KERNEL);
7635     if (ret) {
7636     usb_unanchor_urb(urb);
7637     - usb_free_urb(urb);
7638     goto error;
7639     }
7640    
7641     @@ -5456,6 +5455,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
7642     rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
7643    
7644     error:
7645     + usb_free_urb(urb);
7646     return ret;
7647     }
7648    
7649     @@ -5781,6 +5781,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
7650     struct rtl8xxxu_priv *priv = hw->priv;
7651     struct rtl8xxxu_rx_urb *rx_urb;
7652     struct rtl8xxxu_tx_urb *tx_urb;
7653     + struct sk_buff *skb;
7654     unsigned long flags;
7655     int ret, i;
7656    
7657     @@ -5831,6 +5832,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
7658     rx_urb->hw = hw;
7659    
7660     ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
7661     + if (ret) {
7662     + if (ret != -ENOMEM) {
7663     + skb = (struct sk_buff *)rx_urb->urb.context;
7664     + dev_kfree_skb(skb);
7665     + }
7666     + rtl8xxxu_queue_rx_urb(priv, rx_urb);
7667     + }
7668     }
7669     exit:
7670     /*
7671     diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
7672     index 87824a4caba98..a47d871ae506a 100644
7673     --- a/drivers/net/wireless/realtek/rtw88/pci.h
7674     +++ b/drivers/net/wireless/realtek/rtw88/pci.h
7675     @@ -13,8 +13,8 @@
7676     #define RTK_BEQ_TX_DESC_NUM 256
7677    
7678     #define RTK_MAX_RX_DESC_NUM 512
7679     -/* 8K + rx desc size */
7680     -#define RTK_PCI_RX_BUF_SIZE (8192 + 24)
7681     +/* 11K + rx desc size */
7682     +#define RTK_PCI_RX_BUF_SIZE (11454 + 24)
7683    
7684     #define RTK_PCI_CTRL 0x300
7685     #define BIT_RST_TRXDMA_INTF BIT(20)
7686     diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
7687     index 156c2a18a2394..abb37659de343 100644
7688     --- a/drivers/ntb/hw/amd/ntb_hw_amd.c
7689     +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
7690     @@ -1036,6 +1036,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
7691    
7692     err_dma_mask:
7693     pci_clear_master(pdev);
7694     + pci_release_regions(pdev);
7695     err_pci_regions:
7696     pci_disable_device(pdev);
7697     err_pci_enable:
7698     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
7699     index af0b51d1d43e8..f5d12bf109c78 100644
7700     --- a/drivers/nvme/host/pci.c
7701     +++ b/drivers/nvme/host/pci.c
7702     @@ -3110,7 +3110,8 @@ static const struct pci_device_id nvme_id_table[] = {
7703     NVME_QUIRK_DEALLOCATE_ZEROES, },
7704     { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
7705     .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
7706     - NVME_QUIRK_MEDIUM_PRIO_SQ },
7707     + NVME_QUIRK_MEDIUM_PRIO_SQ |
7708     + NVME_QUIRK_DISABLE_WRITE_ZEROES, },
7709     { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
7710     .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
7711     { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
7712     diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
7713     index 7d7176369edf7..6b2f1e290fa73 100644
7714     --- a/drivers/nvme/target/core.c
7715     +++ b/drivers/nvme/target/core.c
7716     @@ -1048,7 +1048,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
7717     * in case a host died before it enabled the controller. Hence, simply
7718     * reset the keep alive timer when the controller is enabled.
7719     */
7720     - mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
7721     + if (ctrl->kato)
7722     + mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
7723     }
7724    
7725     static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
7726     diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
7727     index 960542dea5adb..84f4078216a36 100644
7728     --- a/drivers/nvmem/core.c
7729     +++ b/drivers/nvmem/core.c
7730     @@ -130,16 +130,14 @@ static void nvmem_cell_add(struct nvmem_cell *cell)
7731     blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
7732     }
7733    
7734     -static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
7735     - const struct nvmem_cell_info *info,
7736     - struct nvmem_cell *cell)
7737     +static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
7738     + const struct nvmem_cell_info *info,
7739     + struct nvmem_cell *cell)
7740     {
7741     cell->nvmem = nvmem;
7742     cell->offset = info->offset;
7743     cell->bytes = info->bytes;
7744     - cell->name = kstrdup_const(info->name, GFP_KERNEL);
7745     - if (!cell->name)
7746     - return -ENOMEM;
7747     + cell->name = info->name;
7748    
7749     cell->bit_offset = info->bit_offset;
7750     cell->nbits = info->nbits;
7751     @@ -151,13 +149,30 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
7752     if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
7753     dev_err(&nvmem->dev,
7754     "cell %s unaligned to nvmem stride %d\n",
7755     - cell->name, nvmem->stride);
7756     + cell->name ?: "<unknown>", nvmem->stride);
7757     return -EINVAL;
7758     }
7759    
7760     return 0;
7761     }
7762    
7763     +static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
7764     + const struct nvmem_cell_info *info,
7765     + struct nvmem_cell *cell)
7766     +{
7767     + int err;
7768     +
7769     + err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
7770     + if (err)
7771     + return err;
7772     +
7773     + cell->name = kstrdup_const(info->name, GFP_KERNEL);
7774     + if (!cell->name)
7775     + return -ENOMEM;
7776     +
7777     + return 0;
7778     +}
7779     +
7780     /**
7781     * nvmem_add_cells() - Add cell information to an nvmem device
7782     *
7783     @@ -1174,7 +1189,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
7784     if (!nvmem)
7785     return -EINVAL;
7786    
7787     - rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
7788     + rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
7789     if (rc)
7790     return rc;
7791    
7792     @@ -1204,7 +1219,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
7793     if (!nvmem)
7794     return -EINVAL;
7795    
7796     - rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
7797     + rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
7798     if (rc)
7799     return rc;
7800    
7801     diff --git a/drivers/opp/core.c b/drivers/opp/core.c
7802     index 29dfaa591f8b0..8867bab72e171 100644
7803     --- a/drivers/opp/core.c
7804     +++ b/drivers/opp/core.c
7805     @@ -1796,6 +1796,9 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
7806     {
7807     int index;
7808    
7809     + if (!opp_table->genpd_virt_devs)
7810     + return;
7811     +
7812     for (index = 0; index < opp_table->required_opp_count; index++) {
7813     if (!opp_table->genpd_virt_devs[index])
7814     continue;
7815     @@ -1842,6 +1845,9 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
7816     if (!opp_table)
7817     return ERR_PTR(-ENOMEM);
7818    
7819     + if (opp_table->genpd_virt_devs)
7820     + return opp_table;
7821     +
7822     /*
7823     * If the genpd's OPP table isn't already initialized, parsing of the
7824     * required-opps fail for dev. We should retry this after genpd's OPP
7825     diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
7826     index f2481e80e2723..d0e60441dc8f2 100644
7827     --- a/drivers/pci/controller/pci-aardvark.c
7828     +++ b/drivers/pci/controller/pci-aardvark.c
7829     @@ -503,7 +503,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
7830     * Initialize the configuration space of the PCI-to-PCI bridge
7831     * associated with the given PCIe interface.
7832     */
7833     -static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
7834     +static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
7835     {
7836     struct pci_bridge_emul *bridge = &pcie->bridge;
7837    
7838     @@ -527,8 +527,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
7839     bridge->data = pcie;
7840     bridge->ops = &advk_pci_bridge_emul_ops;
7841    
7842     - pci_bridge_emul_init(bridge, 0);
7843     -
7844     + return pci_bridge_emul_init(bridge, 0);
7845     }
7846    
7847     static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
7848     @@ -1027,7 +1026,11 @@ static int advk_pcie_probe(struct platform_device *pdev)
7849    
7850     advk_pcie_setup_hw(pcie);
7851    
7852     - advk_sw_pci_bridge_init(pcie);
7853     + ret = advk_sw_pci_bridge_init(pcie);
7854     + if (ret) {
7855     + dev_err(dev, "Failed to register emulated root PCI bridge\n");
7856     + return ret;
7857     + }
7858    
7859     ret = advk_pcie_init_irq_domain(pcie);
7860     if (ret) {
7861     diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
7862     index 0a3f61be5625b..a1298f6784ac9 100644
7863     --- a/drivers/pci/controller/pcie-iproc-msi.c
7864     +++ b/drivers/pci/controller/pcie-iproc-msi.c
7865     @@ -209,15 +209,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data,
7866     struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
7867     int target_cpu = cpumask_first(mask);
7868     int curr_cpu;
7869     + int ret;
7870    
7871     curr_cpu = hwirq_to_cpu(msi, data->hwirq);
7872     if (curr_cpu == target_cpu)
7873     - return IRQ_SET_MASK_OK_DONE;
7874     + ret = IRQ_SET_MASK_OK_DONE;
7875     + else {
7876     + /* steer MSI to the target CPU */
7877     + data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
7878     + ret = IRQ_SET_MASK_OK;
7879     + }
7880    
7881     - /* steer MSI to the target CPU */
7882     - data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
7883     + irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
7884    
7885     - return IRQ_SET_MASK_OK;
7886     + return ret;
7887     }
7888    
7889     static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
7890     diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
7891     index 9c116cbaa95d8..e15220385666f 100644
7892     --- a/drivers/pci/iov.c
7893     +++ b/drivers/pci/iov.c
7894     @@ -158,6 +158,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
7895     virtfn->device = iov->vf_device;
7896     virtfn->is_virtfn = 1;
7897     virtfn->physfn = pci_dev_get(dev);
7898     + virtfn->no_command_memory = 1;
7899    
7900     if (id == 0)
7901     pci_read_vf_config_common(virtfn);
7902     diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
7903     index 9e1c3c7eeba9b..170ccb164c604 100644
7904     --- a/drivers/perf/thunderx2_pmu.c
7905     +++ b/drivers/perf/thunderx2_pmu.c
7906     @@ -627,14 +627,17 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
7907     list_for_each_entry(rentry, &list, node) {
7908     if (resource_type(rentry->res) == IORESOURCE_MEM) {
7909     res = *rentry->res;
7910     + rentry = NULL;
7911     break;
7912     }
7913     }
7914     + acpi_dev_free_resource_list(&list);
7915    
7916     - if (!rentry->res)
7917     + if (rentry) {
7918     + dev_err(dev, "PMU type %d: Fail to find resource\n", type);
7919     return NULL;
7920     + }
7921    
7922     - acpi_dev_free_resource_list(&list);
7923     base = devm_ioremap_resource(dev, &res);
7924     if (IS_ERR(base)) {
7925     dev_err(dev, "PMU type %d: Fail to map resource\n", type);
7926     diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
7927     index 328aea9f6be32..50b37f8f5c7ff 100644
7928     --- a/drivers/perf/xgene_pmu.c
7929     +++ b/drivers/perf/xgene_pmu.c
7930     @@ -1459,17 +1459,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
7931     }
7932    
7933     #if defined(CONFIG_ACPI)
7934     -static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
7935     -{
7936     - struct resource *res = data;
7937     -
7938     - if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
7939     - acpi_dev_resource_memory(ares, res);
7940     -
7941     - /* Always tell the ACPI core to skip this resource */
7942     - return 1;
7943     -}
7944     -
7945     static struct
7946     xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
7947     struct acpi_device *adev, u32 type)
7948     @@ -1481,6 +1470,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
7949     struct hw_pmu_info *inf;
7950     void __iomem *dev_csr;
7951     struct resource res;
7952     + struct resource_entry *rentry;
7953     int enable_bit;
7954     int rc;
7955    
7956     @@ -1489,11 +1479,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
7957     return NULL;
7958    
7959     INIT_LIST_HEAD(&resource_list);
7960     - rc = acpi_dev_get_resources(adev, &resource_list,
7961     - acpi_pmu_dev_add_resource, &res);
7962     + rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
7963     + if (rc <= 0) {
7964     + dev_err(dev, "PMU type %d: No resources found\n", type);
7965     + return NULL;
7966     + }
7967     +
7968     + list_for_each_entry(rentry, &resource_list, node) {
7969     + if (resource_type(rentry->res) == IORESOURCE_MEM) {
7970     + res = *rentry->res;
7971     + rentry = NULL;
7972     + break;
7973     + }
7974     + }
7975     acpi_dev_free_resource_list(&resource_list);
7976     - if (rc < 0) {
7977     - dev_err(dev, "PMU type %d: No resource address found\n", type);
7978     +
7979     + if (rentry) {
7980     + dev_err(dev, "PMU type %d: No memory resource found\n", type);
7981     return NULL;
7982     }
7983    
7984     diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
7985     index dcf7df797af75..0ed14de0134cf 100644
7986     --- a/drivers/pinctrl/bcm/Kconfig
7987     +++ b/drivers/pinctrl/bcm/Kconfig
7988     @@ -23,6 +23,7 @@ config PINCTRL_BCM2835
7989     select PINMUX
7990     select PINCONF
7991     select GENERIC_PINCONF
7992     + select GPIOLIB
7993     select GPIOLIB_IRQCHIP
7994     default ARCH_BCM2835 || ARCH_BRCMSTB
7995     help
7996     diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
7997     index 3a235487e38d7..d8bcbefcba890 100644
7998     --- a/drivers/pinctrl/pinctrl-mcp23s08.c
7999     +++ b/drivers/pinctrl/pinctrl-mcp23s08.c
8000     @@ -122,7 +122,7 @@ static const struct regmap_config mcp23x08_regmap = {
8001     .max_register = MCP_OLAT,
8002     };
8003    
8004     -static const struct reg_default mcp23x16_defaults[] = {
8005     +static const struct reg_default mcp23x17_defaults[] = {
8006     {.reg = MCP_IODIR << 1, .def = 0xffff},
8007     {.reg = MCP_IPOL << 1, .def = 0x0000},
8008     {.reg = MCP_GPINTEN << 1, .def = 0x0000},
8009     @@ -133,23 +133,23 @@ static const struct reg_default mcp23x16_defaults[] = {
8010     {.reg = MCP_OLAT << 1, .def = 0x0000},
8011     };
8012    
8013     -static const struct regmap_range mcp23x16_volatile_range = {
8014     +static const struct regmap_range mcp23x17_volatile_range = {
8015     .range_min = MCP_INTF << 1,
8016     .range_max = MCP_GPIO << 1,
8017     };
8018    
8019     -static const struct regmap_access_table mcp23x16_volatile_table = {
8020     - .yes_ranges = &mcp23x16_volatile_range,
8021     +static const struct regmap_access_table mcp23x17_volatile_table = {
8022     + .yes_ranges = &mcp23x17_volatile_range,
8023     .n_yes_ranges = 1,
8024     };
8025    
8026     -static const struct regmap_range mcp23x16_precious_range = {
8027     - .range_min = MCP_GPIO << 1,
8028     +static const struct regmap_range mcp23x17_precious_range = {
8029     + .range_min = MCP_INTCAP << 1,
8030     .range_max = MCP_GPIO << 1,
8031     };
8032    
8033     -static const struct regmap_access_table mcp23x16_precious_table = {
8034     - .yes_ranges = &mcp23x16_precious_range,
8035     +static const struct regmap_access_table mcp23x17_precious_table = {
8036     + .yes_ranges = &mcp23x17_precious_range,
8037     .n_yes_ranges = 1,
8038     };
8039    
8040     @@ -159,10 +159,10 @@ static const struct regmap_config mcp23x17_regmap = {
8041    
8042     .reg_stride = 2,
8043     .max_register = MCP_OLAT << 1,
8044     - .volatile_table = &mcp23x16_volatile_table,
8045     - .precious_table = &mcp23x16_precious_table,
8046     - .reg_defaults = mcp23x16_defaults,
8047     - .num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults),
8048     + .volatile_table = &mcp23x17_volatile_table,
8049     + .precious_table = &mcp23x17_precious_table,
8050     + .reg_defaults = mcp23x17_defaults,
8051     + .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
8052     .cache_type = REGCACHE_FLAT,
8053     .val_format_endian = REGMAP_ENDIAN_LITTLE,
8054     };
8055     diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
8056     index 8fe51e43f1bc1..59b5b7eebb05a 100644
8057     --- a/drivers/platform/x86/mlx-platform.c
8058     +++ b/drivers/platform/x86/mlx-platform.c
8059     @@ -243,15 +243,6 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
8060     },
8061     };
8062    
8063     -static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = {
8064     - {
8065     - I2C_BOARD_INFO("24c32", 0x51),
8066     - },
8067     - {
8068     - I2C_BOARD_INFO("24c32", 0x50),
8069     - },
8070     -};
8071     -
8072     static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
8073     {
8074     I2C_BOARD_INFO("dps460", 0x59),
8075     @@ -611,15 +602,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
8076     .label = "psu1",
8077     .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
8078     .mask = BIT(0),
8079     - .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0],
8080     - .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
8081     + .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
8082     },
8083     {
8084     .label = "psu2",
8085     .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
8086     .mask = BIT(1),
8087     - .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1],
8088     - .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
8089     + .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
8090     },
8091     };
8092    
8093     diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
8094     index 599a0f66a3845..a34d95ed70b20 100644
8095     --- a/drivers/pwm/pwm-img.c
8096     +++ b/drivers/pwm/pwm-img.c
8097     @@ -277,6 +277,8 @@ static int img_pwm_probe(struct platform_device *pdev)
8098     return PTR_ERR(pwm->pwm_clk);
8099     }
8100    
8101     + platform_set_drvdata(pdev, pwm);
8102     +
8103     pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
8104     pm_runtime_use_autosuspend(&pdev->dev);
8105     pm_runtime_enable(&pdev->dev);
8106     @@ -313,7 +315,6 @@ static int img_pwm_probe(struct platform_device *pdev)
8107     goto err_suspend;
8108     }
8109    
8110     - platform_set_drvdata(pdev, pwm);
8111     return 0;
8112    
8113     err_suspend:
8114     diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
8115     index 75bbfe5f3bc29..d77cec2769b76 100644
8116     --- a/drivers/pwm/pwm-lpss.c
8117     +++ b/drivers/pwm/pwm-lpss.c
8118     @@ -93,10 +93,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
8119     * The equation is:
8120     * base_unit = round(base_unit_range * freq / c)
8121     */
8122     - base_unit_range = BIT(lpwm->info->base_unit_bits) - 1;
8123     + base_unit_range = BIT(lpwm->info->base_unit_bits);
8124     freq *= base_unit_range;
8125    
8126     base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
8127     + /* base_unit must not be 0 and we also want to avoid overflowing it */
8128     + base_unit = clamp_val(base_unit, 1, base_unit_range - 1);
8129    
8130     on_time_div = 255ULL * duty_ns;
8131     do_div(on_time_div, period_ns);
8132     @@ -104,8 +106,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
8133    
8134     orig_ctrl = ctrl = pwm_lpss_read(pwm);
8135     ctrl &= ~PWM_ON_TIME_DIV_MASK;
8136     - ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
8137     - base_unit &= base_unit_range;
8138     + ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT);
8139     ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
8140     ctrl |= on_time_div;
8141    
8142     diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
8143     index 0b85a80ae7ef6..2b08fdeb87c18 100644
8144     --- a/drivers/rapidio/devices/rio_mport_cdev.c
8145     +++ b/drivers/rapidio/devices/rio_mport_cdev.c
8146     @@ -873,15 +873,16 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
8147     rmcd_error("get_user_pages_unlocked err=%ld",
8148     pinned);
8149     nr_pages = 0;
8150     - } else
8151     + } else {
8152     rmcd_error("pinned %ld out of %ld pages",
8153     pinned, nr_pages);
8154     + /*
8155     + * Set nr_pages up to mean "how many pages to unpin, in
8156     + * the error handler:
8157     + */
8158     + nr_pages = pinned;
8159     + }
8160     ret = -EFAULT;
8161     - /*
8162     - * Set nr_pages up to mean "how many pages to unpin, in
8163     - * the error handler:
8164     - */
8165     - nr_pages = pinned;
8166     goto err_pg;
8167     }
8168    
8169     @@ -1682,6 +1683,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
8170     struct rio_dev *rdev;
8171     struct rio_switch *rswitch = NULL;
8172     struct rio_mport *mport;
8173     + struct device *dev;
8174     size_t size;
8175     u32 rval;
8176     u32 swpinfo = 0;
8177     @@ -1696,8 +1698,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
8178     rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
8179     dev_info.comptag, dev_info.destid, dev_info.hopcount);
8180    
8181     - if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
8182     + dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
8183     + if (dev) {
8184     rmcd_debug(RDEV, "device %s already exists", dev_info.name);
8185     + put_device(dev);
8186     return -EEXIST;
8187     }
8188    
8189     diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
8190     index 55fc80de5ef16..ee850cffe1542 100644
8191     --- a/drivers/regulator/core.c
8192     +++ b/drivers/regulator/core.c
8193     @@ -5158,15 +5158,20 @@ regulator_register(const struct regulator_desc *regulator_desc,
8194     else if (regulator_desc->supply_name)
8195     rdev->supply_name = regulator_desc->supply_name;
8196    
8197     - /*
8198     - * Attempt to resolve the regulator supply, if specified,
8199     - * but don't return an error if we fail because we will try
8200     - * to resolve it again later as more regulators are added.
8201     - */
8202     - if (regulator_resolve_supply(rdev))
8203     - rdev_dbg(rdev, "unable to resolve supply\n");
8204     -
8205     ret = set_machine_constraints(rdev, constraints);
8206     + if (ret == -EPROBE_DEFER) {
8207     + /* Regulator might be in bypass mode and so needs its supply
8208     + * to set the constraints */
8209     + /* FIXME: this currently triggers a chicken-and-egg problem
8210     + * when creating -SUPPLY symlink in sysfs to a regulator
8211     + * that is just being created */
8212     + ret = regulator_resolve_supply(rdev);
8213     + if (!ret)
8214     + ret = set_machine_constraints(rdev, constraints);
8215     + else
8216     + rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
8217     + ERR_PTR(ret));
8218     + }
8219     if (ret < 0)
8220     goto wash;
8221    
8222     diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
8223     index 4abbeea782fa4..19903de6268db 100644
8224     --- a/drivers/rpmsg/qcom_smd.c
8225     +++ b/drivers/rpmsg/qcom_smd.c
8226     @@ -1338,7 +1338,7 @@ static int qcom_smd_parse_edge(struct device *dev,
8227     ret = of_property_read_u32(node, key, &edge->edge_id);
8228     if (ret) {
8229     dev_err(dev, "edge missing %s property\n", key);
8230     - return -EINVAL;
8231     + goto put_node;
8232     }
8233    
8234     edge->remote_pid = QCOM_SMEM_HOST_ANY;
8235     @@ -1349,32 +1349,37 @@ static int qcom_smd_parse_edge(struct device *dev,
8236     edge->mbox_client.knows_txdone = true;
8237     edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
8238     if (IS_ERR(edge->mbox_chan)) {
8239     - if (PTR_ERR(edge->mbox_chan) != -ENODEV)
8240     - return PTR_ERR(edge->mbox_chan);
8241     + if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
8242     + ret = PTR_ERR(edge->mbox_chan);
8243     + goto put_node;
8244     + }
8245    
8246     edge->mbox_chan = NULL;
8247    
8248     syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
8249     if (!syscon_np) {
8250     dev_err(dev, "no qcom,ipc node\n");
8251     - return -ENODEV;
8252     + ret = -ENODEV;
8253     + goto put_node;
8254     }
8255    
8256     edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
8257     - if (IS_ERR(edge->ipc_regmap))
8258     - return PTR_ERR(edge->ipc_regmap);
8259     + if (IS_ERR(edge->ipc_regmap)) {
8260     + ret = PTR_ERR(edge->ipc_regmap);
8261     + goto put_node;
8262     + }
8263    
8264     key = "qcom,ipc";
8265     ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
8266     if (ret < 0) {
8267     dev_err(dev, "no offset in %s\n", key);
8268     - return -EINVAL;
8269     + goto put_node;
8270     }
8271    
8272     ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
8273     if (ret < 0) {
8274     dev_err(dev, "no bit in %s\n", key);
8275     - return -EINVAL;
8276     + goto put_node;
8277     }
8278     }
8279    
8280     @@ -1385,7 +1390,8 @@ static int qcom_smd_parse_edge(struct device *dev,
8281     irq = irq_of_parse_and_map(node, 0);
8282     if (irq < 0) {
8283     dev_err(dev, "required smd interrupt missing\n");
8284     - return -EINVAL;
8285     + ret = irq;
8286     + goto put_node;
8287     }
8288    
8289     ret = devm_request_irq(dev, irq,
8290     @@ -1393,12 +1399,18 @@ static int qcom_smd_parse_edge(struct device *dev,
8291     node->name, edge);
8292     if (ret) {
8293     dev_err(dev, "failed to request smd irq\n");
8294     - return ret;
8295     + goto put_node;
8296     }
8297    
8298     edge->irq = irq;
8299    
8300     return 0;
8301     +
8302     +put_node:
8303     + of_node_put(node);
8304     + edge->of_node = NULL;
8305     +
8306     + return ret;
8307     }
8308    
8309     /*
8310     diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
8311     index 4ce28aa490cdb..8c4613617ef11 100644
8312     --- a/drivers/s390/net/qeth_l2_main.c
8313     +++ b/drivers/s390/net/qeth_l2_main.c
8314     @@ -1168,12 +1168,6 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
8315     NULL
8316     };
8317    
8318     - /* Role should not change by itself, but if it did, */
8319     - /* information from the hardware is authoritative. */
8320     - mutex_lock(&data->card->sbp_lock);
8321     - data->card->options.sbp.role = entry->role;
8322     - mutex_unlock(&data->card->sbp_lock);
8323     -
8324     snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
8325     snprintf(env_role, sizeof(env_role), "ROLE=%s",
8326     (entry->role == QETH_SBP_ROLE_NONE) ? "none" :
8327     diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
8328     index 0760d0bd8a10b..0fa455357594e 100644
8329     --- a/drivers/scsi/be2iscsi/be_main.c
8330     +++ b/drivers/scsi/be2iscsi/be_main.c
8331     @@ -3020,6 +3020,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
8332     goto create_eq_error;
8333     }
8334    
8335     + mem->dma = paddr;
8336     mem->va = eq_vaddress;
8337     ret = be_fill_queue(eq, phba->params.num_eq_entries,
8338     sizeof(struct be_eq_entry), eq_vaddress);
8339     @@ -3029,7 +3030,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
8340     goto create_eq_error;
8341     }
8342    
8343     - mem->dma = paddr;
8344     ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
8345     BEISCSI_EQ_DELAY_DEF);
8346     if (ret) {
8347     @@ -3086,6 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
8348     goto create_cq_error;
8349     }
8350    
8351     + mem->dma = paddr;
8352     ret = be_fill_queue(cq, phba->params.num_cq_entries,
8353     sizeof(struct sol_cqe), cq_vaddress);
8354     if (ret) {
8355     @@ -3095,7 +3096,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
8356     goto create_cq_error;
8357     }
8358    
8359     - mem->dma = paddr;
8360     ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
8361     false, 0);
8362     if (ret) {
8363     diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
8364     index 2f9213b257a4a..93e4011809919 100644
8365     --- a/drivers/scsi/bfa/bfad.c
8366     +++ b/drivers/scsi/bfa/bfad.c
8367     @@ -749,6 +749,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
8368    
8369     if (bfad->pci_bar0_kva == NULL) {
8370     printk(KERN_ERR "Fail to map bar0\n");
8371     + rc = -ENODEV;
8372     goto out_release_region;
8373     }
8374    
8375     diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
8376     index e519238864758..1b6f9351b43f9 100644
8377     --- a/drivers/scsi/csiostor/csio_hw.c
8378     +++ b/drivers/scsi/csiostor/csio_hw.c
8379     @@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
8380     FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
8381     FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
8382     FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
8383     - ret = EINVAL;
8384     + ret = -EINVAL;
8385     goto bye;
8386     }
8387    
8388     diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
8389     index df897df5cafee..8a76284b59b08 100644
8390     --- a/drivers/scsi/ibmvscsi/ibmvfc.c
8391     +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
8392     @@ -4788,6 +4788,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
8393     if (IS_ERR(vhost->work_thread)) {
8394     dev_err(dev, "Couldn't create kernel thread: %ld\n",
8395     PTR_ERR(vhost->work_thread));
8396     + rc = PTR_ERR(vhost->work_thread);
8397     goto free_host_mem;
8398     }
8399    
8400     diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
8401     index b7e44634d0dc2..3d58d24de6b61 100644
8402     --- a/drivers/scsi/mpt3sas/mpt3sas_base.c
8403     +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
8404     @@ -1708,18 +1708,22 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
8405     /* TMs are on msix_index == 0 */
8406     if (reply_q->msix_index == 0)
8407     continue;
8408     + synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
8409     if (reply_q->irq_poll_scheduled) {
8410     /* Calling irq_poll_disable will wait for any pending
8411     * callbacks to have completed.
8412     */
8413     irq_poll_disable(&reply_q->irqpoll);
8414     irq_poll_enable(&reply_q->irqpoll);
8415     - reply_q->irq_poll_scheduled = false;
8416     - reply_q->irq_line_enable = true;
8417     - enable_irq(reply_q->os_irq);
8418     - continue;
8419     + /* check how the scheduled poll has ended,
8420     + * clean up only if necessary
8421     + */
8422     + if (reply_q->irq_poll_scheduled) {
8423     + reply_q->irq_poll_scheduled = false;
8424     + reply_q->irq_line_enable = true;
8425     + enable_irq(reply_q->os_irq);
8426     + }
8427     }
8428     - synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
8429     }
8430     }
8431    
8432     diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
8433     index 8906aceda4c43..0354898d7cac1 100644
8434     --- a/drivers/scsi/mvumi.c
8435     +++ b/drivers/scsi/mvumi.c
8436     @@ -2425,6 +2425,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
8437     if (IS_ERR(mhba->dm_thread)) {
8438     dev_err(&mhba->pdev->dev,
8439     "failed to create device scan thread\n");
8440     + ret = PTR_ERR(mhba->dm_thread);
8441     mutex_unlock(&mhba->sas_discovery_mutex);
8442     goto fail_create_thread;
8443     }
8444     diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
8445     index 3d0e345947c1f..9c0955c334e3e 100644
8446     --- a/drivers/scsi/qedf/qedf_main.c
8447     +++ b/drivers/scsi/qedf/qedf_main.c
8448     @@ -668,7 +668,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
8449     rdata = fcport->rdata;
8450     if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
8451     QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
8452     - rc = 1;
8453     + rc = SUCCESS;
8454     goto out;
8455     }
8456    
8457     diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
8458     index 946cebc4c9322..90aa64604ad78 100644
8459     --- a/drivers/scsi/qedi/qedi_fw.c
8460     +++ b/drivers/scsi/qedi/qedi_fw.c
8461     @@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
8462     "Freeing tid=0x%x for cid=0x%x\n",
8463     cmd->task_id, qedi_conn->iscsi_conn_id);
8464    
8465     + spin_lock(&qedi_conn->list_lock);
8466     if (likely(cmd->io_cmd_in_list)) {
8467     cmd->io_cmd_in_list = false;
8468     list_del_init(&cmd->io_cmd);
8469     @@ -69,6 +70,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
8470     cmd->task_id, qedi_conn->iscsi_conn_id,
8471     &cmd->io_cmd);
8472     }
8473     + spin_unlock(&qedi_conn->list_lock);
8474    
8475     cmd->state = RESPONSE_RECEIVED;
8476     qedi_clear_task_idx(qedi, cmd->task_id);
8477     @@ -122,6 +124,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
8478     "Freeing tid=0x%x for cid=0x%x\n",
8479     cmd->task_id, qedi_conn->iscsi_conn_id);
8480    
8481     + spin_lock(&qedi_conn->list_lock);
8482     if (likely(cmd->io_cmd_in_list)) {
8483     cmd->io_cmd_in_list = false;
8484     list_del_init(&cmd->io_cmd);
8485     @@ -132,6 +135,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
8486     cmd->task_id, qedi_conn->iscsi_conn_id,
8487     &cmd->io_cmd);
8488     }
8489     + spin_unlock(&qedi_conn->list_lock);
8490    
8491     cmd->state = RESPONSE_RECEIVED;
8492     qedi_clear_task_idx(qedi, cmd->task_id);
8493     @@ -222,11 +226,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
8494    
8495     tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
8496    
8497     + spin_lock(&qedi_conn->list_lock);
8498     if (likely(qedi_cmd->io_cmd_in_list)) {
8499     qedi_cmd->io_cmd_in_list = false;
8500     list_del_init(&qedi_cmd->io_cmd);
8501     qedi_conn->active_cmd_count--;
8502     }
8503     + spin_unlock(&qedi_conn->list_lock);
8504    
8505     if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
8506     ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
8507     @@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
8508     ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
8509     qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
8510    
8511     + spin_lock(&qedi_conn->list_lock);
8512     if (likely(cmd->io_cmd_in_list)) {
8513     cmd->io_cmd_in_list = false;
8514     list_del_init(&cmd->io_cmd);
8515     qedi_conn->active_cmd_count--;
8516     }
8517     + spin_unlock(&qedi_conn->list_lock);
8518    
8519     memset(task_ctx, '\0', sizeof(*task_ctx));
8520    
8521     @@ -817,8 +825,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
8522     qedi_clear_task_idx(qedi_conn->qedi, rtid);
8523    
8524     spin_lock(&qedi_conn->list_lock);
8525     - list_del_init(&dbg_cmd->io_cmd);
8526     - qedi_conn->active_cmd_count--;
8527     + if (likely(dbg_cmd->io_cmd_in_list)) {
8528     + dbg_cmd->io_cmd_in_list = false;
8529     + list_del_init(&dbg_cmd->io_cmd);
8530     + qedi_conn->active_cmd_count--;
8531     + }
8532     spin_unlock(&qedi_conn->list_lock);
8533     qedi_cmd->state = CLEANUP_RECV;
8534     wake_up_interruptible(&qedi_conn->wait_queue);
8535     @@ -1236,6 +1247,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
8536     qedi_conn->cmd_cleanup_req++;
8537     qedi_iscsi_cleanup_task(ctask, true);
8538    
8539     + cmd->io_cmd_in_list = false;
8540     list_del_init(&cmd->io_cmd);
8541     qedi_conn->active_cmd_count--;
8542     QEDI_WARN(&qedi->dbg_ctx,
8543     @@ -1447,8 +1459,11 @@ ldel_exit:
8544     spin_unlock_bh(&qedi_conn->tmf_work_lock);
8545    
8546     spin_lock(&qedi_conn->list_lock);
8547     - list_del_init(&cmd->io_cmd);
8548     - qedi_conn->active_cmd_count--;
8549     + if (likely(cmd->io_cmd_in_list)) {
8550     + cmd->io_cmd_in_list = false;
8551     + list_del_init(&cmd->io_cmd);
8552     + qedi_conn->active_cmd_count--;
8553     + }
8554     spin_unlock(&qedi_conn->list_lock);
8555    
8556     clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
8557     diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
8558     index 0f2622a48311c..755f66b1ff9c7 100644
8559     --- a/drivers/scsi/qedi/qedi_iscsi.c
8560     +++ b/drivers/scsi/qedi/qedi_iscsi.c
8561     @@ -972,11 +972,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
8562     {
8563     struct qedi_cmd *cmd, *cmd_tmp;
8564    
8565     + spin_lock(&qedi_conn->list_lock);
8566     list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
8567     io_cmd) {
8568     list_del_init(&cmd->io_cmd);
8569     qedi_conn->active_cmd_count--;
8570     }
8571     + spin_unlock(&qedi_conn->list_lock);
8572     }
8573    
8574     static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
8575     diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
8576     index 62d2ee825c97a..b300e11095828 100644
8577     --- a/drivers/scsi/qla2xxx/qla_init.c
8578     +++ b/drivers/scsi/qla2xxx/qla_init.c
8579     @@ -71,6 +71,16 @@ void qla2x00_sp_free(srb_t *sp)
8580     qla2x00_rel_sp(sp);
8581     }
8582    
8583     +void qla2xxx_rel_done_warning(srb_t *sp, int res)
8584     +{
8585     + WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
8586     +}
8587     +
8588     +void qla2xxx_rel_free_warning(srb_t *sp)
8589     +{
8590     + WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
8591     +}
8592     +
8593     /* Asynchronous Login/Logout Routines -------------------------------------- */
8594    
8595     unsigned long
8596     diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
8597     index 0c3d907af7692..6dfde42d799b5 100644
8598     --- a/drivers/scsi/qla2xxx/qla_inline.h
8599     +++ b/drivers/scsi/qla2xxx/qla_inline.h
8600     @@ -183,10 +183,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
8601     return sp;
8602     }
8603    
8604     +void qla2xxx_rel_done_warning(srb_t *sp, int res);
8605     +void qla2xxx_rel_free_warning(srb_t *sp);
8606     +
8607     static inline void
8608     qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
8609     {
8610     sp->qpair = NULL;
8611     + sp->done = qla2xxx_rel_done_warning;
8612     + sp->free = qla2xxx_rel_free_warning;
8613     mempool_free(sp, qpair->srb_mempool);
8614     QLA_QPAIR_MARK_NOT_BUSY(qpair);
8615     }
8616     diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
8617     index f4815a4084d8c..11656e864fca9 100644
8618     --- a/drivers/scsi/qla2xxx/qla_nvme.c
8619     +++ b/drivers/scsi/qla2xxx/qla_nvme.c
8620     @@ -682,7 +682,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
8621     struct nvme_fc_port_template *tmpl;
8622     struct qla_hw_data *ha;
8623     struct nvme_fc_port_info pinfo;
8624     - int ret = EINVAL;
8625     + int ret = -EINVAL;
8626    
8627     if (!IS_ENABLED(CONFIG_NVME_FC))
8628     return ret;
8629     diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
8630     index a7acc266cec06..d84d95cac2a13 100644
8631     --- a/drivers/scsi/qla2xxx/qla_target.c
8632     +++ b/drivers/scsi/qla2xxx/qla_target.c
8633     @@ -5677,7 +5677,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
8634     /* found existing exchange */
8635     qpair->retry_term_cnt++;
8636     if (qpair->retry_term_cnt >= 5) {
8637     - rc = EIO;
8638     + rc = -EIO;
8639     qpair->retry_term_cnt = 0;
8640     ql_log(ql_log_warn, vha, 0xffff,
8641     "Unable to send ABTS Respond. Dumping firmware.\n");
8642     diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
8643     index 5504ab11decc7..df43cf6405a8e 100644
8644     --- a/drivers/scsi/qla4xxx/ql4_os.c
8645     +++ b/drivers/scsi/qla4xxx/ql4_os.c
8646     @@ -1220,7 +1220,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
8647     le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
8648     exit_host_stats:
8649     if (ql_iscsi_stats)
8650     - dma_free_coherent(&ha->pdev->dev, host_stats_size,
8651     + dma_free_coherent(&ha->pdev->dev, stats_size,
8652     ql_iscsi_stats, iscsi_stats_dma);
8653    
8654     ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
8655     diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
8656     index 7a3a942b40df0..dd2175e9bfa17 100644
8657     --- a/drivers/scsi/smartpqi/smartpqi.h
8658     +++ b/drivers/scsi/smartpqi/smartpqi.h
8659     @@ -357,7 +357,7 @@ struct pqi_event_response {
8660     struct pqi_iu_header header;
8661     u8 event_type;
8662     u8 reserved2 : 7;
8663     - u8 request_acknowlege : 1;
8664     + u8 request_acknowledge : 1;
8665     __le16 event_id;
8666     __le32 additional_event_id;
8667     union {
8668     diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
8669     index 5ae074505386a..093ed5d1eef20 100644
8670     --- a/drivers/scsi/smartpqi/smartpqi_init.c
8671     +++ b/drivers/scsi/smartpqi/smartpqi_init.c
8672     @@ -527,8 +527,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
8673     put_unaligned_be16(cdb_length, &cdb[7]);
8674     break;
8675     default:
8676     - dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
8677     - cmd);
8678     + dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
8679     break;
8680     }
8681    
8682     @@ -2450,7 +2449,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
8683     offload_to_mirror =
8684     (offload_to_mirror >= layout_map_count - 1) ?
8685     0 : offload_to_mirror + 1;
8686     - WARN_ON(offload_to_mirror >= layout_map_count);
8687     device->offload_to_mirror = offload_to_mirror;
8688     /*
8689     * Avoid direct use of device->offload_to_mirror within this
8690     @@ -2903,10 +2901,14 @@ static int pqi_interpret_task_management_response(
8691     return rc;
8692     }
8693    
8694     -static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
8695     - struct pqi_queue_group *queue_group)
8696     +static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
8697     +{
8698     + pqi_take_ctrl_offline(ctrl_info);
8699     +}
8700     +
8701     +static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
8702     {
8703     - unsigned int num_responses;
8704     + int num_responses;
8705     pqi_index_t oq_pi;
8706     pqi_index_t oq_ci;
8707     struct pqi_io_request *io_request;
8708     @@ -2918,6 +2920,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
8709    
8710     while (1) {
8711     oq_pi = readl(queue_group->oq_pi);
8712     + if (oq_pi >= ctrl_info->num_elements_per_oq) {
8713     + pqi_invalid_response(ctrl_info);
8714     + dev_err(&ctrl_info->pci_dev->dev,
8715     + "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
8716     + oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
8717     + return -1;
8718     + }
8719     if (oq_pi == oq_ci)
8720     break;
8721    
8722     @@ -2926,10 +2935,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
8723     (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
8724    
8725     request_id = get_unaligned_le16(&response->request_id);
8726     - WARN_ON(request_id >= ctrl_info->max_io_slots);
8727     + if (request_id >= ctrl_info->max_io_slots) {
8728     + pqi_invalid_response(ctrl_info);
8729     + dev_err(&ctrl_info->pci_dev->dev,
8730     + "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
8731     + request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
8732     + return -1;
8733     + }
8734    
8735     io_request = &ctrl_info->io_request_pool[request_id];
8736     - WARN_ON(atomic_read(&io_request->refcount) == 0);
8737     + if (atomic_read(&io_request->refcount) == 0) {
8738     + pqi_invalid_response(ctrl_info);
8739     + dev_err(&ctrl_info->pci_dev->dev,
8740     + "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
8741     + request_id, oq_pi, oq_ci);
8742     + return -1;
8743     + }
8744    
8745     switch (response->header.iu_type) {
8746     case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
8747     @@ -2959,24 +2980,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
8748     io_request->error_info = ctrl_info->error_buffer +
8749     (get_unaligned_le16(&response->error_index) *
8750     PQI_ERROR_BUFFER_ELEMENT_LENGTH);
8751     - pqi_process_io_error(response->header.iu_type,
8752     - io_request);
8753     + pqi_process_io_error(response->header.iu_type, io_request);
8754     break;
8755     default:
8756     + pqi_invalid_response(ctrl_info);
8757     dev_err(&ctrl_info->pci_dev->dev,
8758     - "unexpected IU type: 0x%x\n",
8759     - response->header.iu_type);
8760     - break;
8761     + "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
8762     + response->header.iu_type, oq_pi, oq_ci);
8763     + return -1;
8764     }
8765    
8766     - io_request->io_complete_callback(io_request,
8767     - io_request->context);
8768     + io_request->io_complete_callback(io_request, io_request->context);
8769    
8770     /*
8771     * Note that the I/O request structure CANNOT BE TOUCHED after
8772     * returning from the I/O completion callback!
8773     */
8774     -
8775     oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
8776     }
8777    
8778     @@ -3289,9 +3308,9 @@ static void pqi_ofa_capture_event_payload(struct pqi_event *event,
8779     }
8780     }
8781    
8782     -static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
8783     +static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
8784     {
8785     - unsigned int num_events;
8786     + int num_events;
8787     pqi_index_t oq_pi;
8788     pqi_index_t oq_ci;
8789     struct pqi_event_queue *event_queue;
8790     @@ -3305,26 +3324,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
8791    
8792     while (1) {
8793     oq_pi = readl(event_queue->oq_pi);
8794     + if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
8795     + pqi_invalid_response(ctrl_info);
8796     + dev_err(&ctrl_info->pci_dev->dev,
8797     + "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
8798     + oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
8799     + return -1;
8800     + }
8801     +
8802     if (oq_pi == oq_ci)
8803     break;
8804    
8805     num_events++;
8806     - response = event_queue->oq_element_array +
8807     - (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
8808     + response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
8809    
8810     event_index =
8811     pqi_event_type_to_event_index(response->event_type);
8812    
8813     - if (event_index >= 0) {
8814     - if (response->request_acknowlege) {
8815     - event = &ctrl_info->events[event_index];
8816     - event->pending = true;
8817     - event->event_type = response->event_type;
8818     - event->event_id = response->event_id;
8819     - event->additional_event_id =
8820     - response->additional_event_id;
8821     + if (event_index >= 0 && response->request_acknowledge) {
8822     + event = &ctrl_info->events[event_index];
8823     + event->pending = true;
8824     + event->event_type = response->event_type;
8825     + event->event_id = response->event_id;
8826     + event->additional_event_id = response->additional_event_id;
8827     + if (event->event_type == PQI_EVENT_TYPE_OFA)
8828     pqi_ofa_capture_event_payload(event, response);
8829     - }
8830     }
8831    
8832     oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
8833     @@ -3439,7 +3463,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
8834     {
8835     struct pqi_ctrl_info *ctrl_info;
8836     struct pqi_queue_group *queue_group;
8837     - unsigned int num_responses_handled;
8838     + int num_io_responses_handled;
8839     + int num_events_handled;
8840    
8841     queue_group = data;
8842     ctrl_info = queue_group->ctrl_info;
8843     @@ -3447,17 +3472,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
8844     if (!pqi_is_valid_irq(ctrl_info))
8845     return IRQ_NONE;
8846    
8847     - num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
8848     + num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
8849     + if (num_io_responses_handled < 0)
8850     + goto out;
8851    
8852     - if (irq == ctrl_info->event_irq)
8853     - num_responses_handled += pqi_process_event_intr(ctrl_info);
8854     + if (irq == ctrl_info->event_irq) {
8855     + num_events_handled = pqi_process_event_intr(ctrl_info);
8856     + if (num_events_handled < 0)
8857     + goto out;
8858     + } else {
8859     + num_events_handled = 0;
8860     + }
8861    
8862     - if (num_responses_handled)
8863     + if (num_io_responses_handled + num_events_handled > 0)
8864     atomic_inc(&ctrl_info->num_interrupts);
8865    
8866     pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
8867     pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
8868    
8869     +out:
8870     return IRQ_HANDLED;
8871     }
8872    
8873     diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
8874     index c49e9f6c46f87..4f066e3b19af1 100644
8875     --- a/drivers/scsi/ufs/ufs-qcom.c
8876     +++ b/drivers/scsi/ufs/ufs-qcom.c
8877     @@ -1492,9 +1492,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
8878     */
8879     }
8880     mask <<= offset;
8881     -
8882     - pm_runtime_get_sync(host->hba->dev);
8883     - ufshcd_hold(host->hba, false);
8884     ufshcd_rmwl(host->hba, TEST_BUS_SEL,
8885     (u32)host->testbus.select_major << 19,
8886     REG_UFS_CFG1);
8887     @@ -1507,8 +1504,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
8888     * committed before returning.
8889     */
8890     mb();
8891     - ufshcd_release(host->hba);
8892     - pm_runtime_put_sync(host->hba->dev);
8893    
8894     return 0;
8895     }
8896     diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
8897     index 63ee96eb58c68..130c798921b5d 100644
8898     --- a/drivers/slimbus/core.c
8899     +++ b/drivers/slimbus/core.c
8900     @@ -302,8 +302,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
8901     {
8902     /* Remove all clients */
8903     device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
8904     - /* Enter Clock Pause */
8905     - slim_ctrl_clk_pause(ctrl, false, 0);
8906     ida_simple_remove(&ctrl_ida, ctrl->id);
8907    
8908     return 0;
8909     @@ -327,8 +325,8 @@ void slim_report_absent(struct slim_device *sbdev)
8910     mutex_lock(&ctrl->lock);
8911     sbdev->is_laddr_valid = false;
8912     mutex_unlock(&ctrl->lock);
8913     -
8914     - ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
8915     + if (!ctrl->get_laddr)
8916     + ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
8917     slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
8918     }
8919     EXPORT_SYMBOL_GPL(slim_report_absent);
8920     diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
8921     index 01a17d84b6064..ce265bf7de868 100644
8922     --- a/drivers/slimbus/qcom-ngd-ctrl.c
8923     +++ b/drivers/slimbus/qcom-ngd-ctrl.c
8924     @@ -1273,9 +1273,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
8925     {
8926     struct qcom_slim_ngd_qmi *qmi =
8927     container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
8928     + struct qcom_slim_ngd_ctrl *ctrl =
8929     + container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
8930    
8931     qmi->svc_info.sq_node = 0;
8932     qmi->svc_info.sq_port = 0;
8933     +
8934     + qcom_slim_ngd_enable(ctrl, false);
8935     }
8936    
8937     static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
8938     diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
8939     index f4fb527d83018..c5dd026fe889f 100644
8940     --- a/drivers/soc/fsl/qbman/bman.c
8941     +++ b/drivers/soc/fsl/qbman/bman.c
8942     @@ -660,7 +660,7 @@ int bm_shutdown_pool(u32 bpid)
8943     }
8944     done:
8945     put_affine_portal();
8946     - return 0;
8947     + return err;
8948     }
8949    
8950     struct gen_pool *bm_bpalloc;
8951     diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
8952     index 4433cb4de564e..7646b4b56bed9 100644
8953     --- a/drivers/spi/spi-omap2-mcspi.c
8954     +++ b/drivers/spi/spi-omap2-mcspi.c
8955     @@ -24,7 +24,6 @@
8956     #include <linux/of.h>
8957     #include <linux/of_device.h>
8958     #include <linux/gcd.h>
8959     -#include <linux/iopoll.h>
8960    
8961     #include <linux/spi/spi.h>
8962     #include <linux/gpio.h>
8963     @@ -348,9 +347,19 @@ disable_fifo:
8964    
8965     static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
8966     {
8967     - u32 val;
8968     -
8969     - return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC);
8970     + unsigned long timeout;
8971     +
8972     + timeout = jiffies + msecs_to_jiffies(1000);
8973     + while (!(readl_relaxed(reg) & bit)) {
8974     + if (time_after(jiffies, timeout)) {
8975     + if (!(readl_relaxed(reg) & bit))
8976     + return -ETIMEDOUT;
8977     + else
8978     + return 0;
8979     + }
8980     + cpu_relax();
8981     + }
8982     + return 0;
8983     }
8984    
8985     static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
8986     diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
8987     index 7b7151ec14c8a..1d948fee1a039 100644
8988     --- a/drivers/spi/spi-s3c64xx.c
8989     +++ b/drivers/spi/spi-s3c64xx.c
8990     @@ -122,6 +122,7 @@
8991    
8992     struct s3c64xx_spi_dma_data {
8993     struct dma_chan *ch;
8994     + dma_cookie_t cookie;
8995     enum dma_transfer_direction direction;
8996     };
8997    
8998     @@ -264,12 +265,13 @@ static void s3c64xx_spi_dmacb(void *data)
8999     spin_unlock_irqrestore(&sdd->lock, flags);
9000     }
9001    
9002     -static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
9003     +static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
9004     struct sg_table *sgt)
9005     {
9006     struct s3c64xx_spi_driver_data *sdd;
9007     struct dma_slave_config config;
9008     struct dma_async_tx_descriptor *desc;
9009     + int ret;
9010    
9011     memset(&config, 0, sizeof(config));
9012    
9013     @@ -293,12 +295,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
9014    
9015     desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
9016     dma->direction, DMA_PREP_INTERRUPT);
9017     + if (!desc) {
9018     + dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
9019     + dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
9020     + return -ENOMEM;
9021     + }
9022    
9023     desc->callback = s3c64xx_spi_dmacb;
9024     desc->callback_param = dma;
9025    
9026     - dmaengine_submit(desc);
9027     + dma->cookie = dmaengine_submit(desc);
9028     + ret = dma_submit_error(dma->cookie);
9029     + if (ret) {
9030     + dev_err(&sdd->pdev->dev, "DMA submission failed");
9031     + return -EIO;
9032     + }
9033     +
9034     dma_async_issue_pending(dma->ch);
9035     + return 0;
9036     }
9037    
9038     static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
9039     @@ -348,11 +362,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
9040     return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
9041     }
9042    
9043     -static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
9044     +static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
9045     struct spi_transfer *xfer, int dma_mode)
9046     {
9047     void __iomem *regs = sdd->regs;
9048     u32 modecfg, chcfg;
9049     + int ret = 0;
9050    
9051     modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
9052     modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
9053     @@ -378,7 +393,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
9054     chcfg |= S3C64XX_SPI_CH_TXCH_ON;
9055     if (dma_mode) {
9056     modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
9057     - prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
9058     + ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
9059     } else {
9060     switch (sdd->cur_bpw) {
9061     case 32:
9062     @@ -410,12 +425,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
9063     writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
9064     | S3C64XX_SPI_PACKET_CNT_EN,
9065     regs + S3C64XX_SPI_PACKET_CNT);
9066     - prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
9067     + ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
9068     }
9069     }
9070    
9071     + if (ret)
9072     + return ret;
9073     +
9074     writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
9075     writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
9076     +
9077     + return 0;
9078     }
9079    
9080     static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
9081     @@ -548,9 +568,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
9082     return 0;
9083     }
9084    
9085     -static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
9086     +static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
9087     {
9088     void __iomem *regs = sdd->regs;
9089     + int ret;
9090     u32 val;
9091    
9092     /* Disable Clock */
9093     @@ -598,7 +619,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
9094    
9095     if (sdd->port_conf->clk_from_cmu) {
9096     /* The src_clk clock is divided internally by 2 */
9097     - clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
9098     + ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
9099     + if (ret)
9100     + return ret;
9101     } else {
9102     /* Configure Clock */
9103     val = readl(regs + S3C64XX_SPI_CLK_CFG);
9104     @@ -612,6 +635,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
9105     val |= S3C64XX_SPI_ENCLK_ENABLE;
9106     writel(val, regs + S3C64XX_SPI_CLK_CFG);
9107     }
9108     +
9109     + return 0;
9110     }
9111    
9112     #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
9113     @@ -654,7 +679,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
9114     sdd->cur_bpw = bpw;
9115     sdd->cur_speed = speed;
9116     sdd->cur_mode = spi->mode;
9117     - s3c64xx_spi_config(sdd);
9118     + status = s3c64xx_spi_config(sdd);
9119     + if (status)
9120     + return status;
9121     }
9122    
9123     if (!is_polling(sdd) && (xfer->len > fifo_len) &&
9124     @@ -678,13 +705,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
9125     sdd->state &= ~RXBUSY;
9126     sdd->state &= ~TXBUSY;
9127    
9128     - s3c64xx_enable_datapath(sdd, xfer, use_dma);
9129     -
9130     /* Start the signals */
9131     s3c64xx_spi_set_cs(spi, true);
9132    
9133     + status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
9134     +
9135     spin_unlock_irqrestore(&sdd->lock, flags);
9136    
9137     + if (status) {
9138     + dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
9139     + break;
9140     + }
9141     +
9142     if (use_dma)
9143     status = s3c64xx_wait_for_dma(sdd, xfer);
9144     else
9145     diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
9146     index 147481bf680c3..a6c893ddbf280 100644
9147     --- a/drivers/staging/emxx_udc/emxx_udc.c
9148     +++ b/drivers/staging/emxx_udc/emxx_udc.c
9149     @@ -2594,7 +2594,7 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
9150    
9151     if (req->unaligned) {
9152     if (!ep->virt_buf)
9153     - ep->virt_buf = dma_alloc_coherent(NULL, PAGE_SIZE,
9154     + ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
9155     &ep->phys_buf,
9156     GFP_ATOMIC | GFP_DMA);
9157     if (ep->epnum > 0) {
9158     @@ -3153,7 +3153,7 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
9159     for (i = 0; i < NUM_ENDPOINTS; i++) {
9160     ep = &udc->ep[i];
9161     if (ep->virt_buf)
9162     - dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf,
9163     + dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf,
9164     ep->phys_buf);
9165     }
9166    
9167     diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
9168     index 4533dacad4beb..ef3b5d07137a1 100644
9169     --- a/drivers/staging/media/ipu3/ipu3-css-params.c
9170     +++ b/drivers/staging/media/ipu3/ipu3-css-params.c
9171     @@ -161,7 +161,7 @@ imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
9172    
9173     memset(&cfg->scaler_coeffs_chroma, 0,
9174     sizeof(cfg->scaler_coeffs_chroma));
9175     - memset(&cfg->scaler_coeffs_luma, 0, sizeof(*cfg->scaler_coeffs_luma));
9176     + memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma));
9177     do {
9178     phase_step_correction++;
9179    
9180     diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
9181     index 5c33bcb0db2ee..00e34c392a388 100644
9182     --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
9183     +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
9184     @@ -585,7 +585,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
9185    
9186     prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
9187     sizeof(struct ieee80211_rxb *),
9188     - GFP_KERNEL);
9189     + GFP_ATOMIC);
9190     if (!prxbIndicateArray)
9191     return;
9192    
9193     diff --git a/drivers/staging/wilc1000/wilc_mon.c b/drivers/staging/wilc1000/wilc_mon.c
9194     index d6f14f69ad64e..017e8e91334f1 100644
9195     --- a/drivers/staging/wilc1000/wilc_mon.c
9196     +++ b/drivers/staging/wilc1000/wilc_mon.c
9197     @@ -236,11 +236,10 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
9198    
9199     if (register_netdevice(wl->monitor_dev)) {
9200     netdev_err(real_dev, "register_netdevice failed\n");
9201     + free_netdev(wl->monitor_dev);
9202     return NULL;
9203     }
9204     priv = netdev_priv(wl->monitor_dev);
9205     - if (!priv)
9206     - return NULL;
9207    
9208     priv->real_ndev = real_dev;
9209    
9210     diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
9211     index ea925b102b322..d6634baebb474 100644
9212     --- a/drivers/target/target_core_user.c
9213     +++ b/drivers/target/target_core_user.c
9214     @@ -669,7 +669,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
9215     void *from, *to = NULL;
9216     size_t copy_bytes, to_offset, offset;
9217     struct scatterlist *sg;
9218     - struct page *page;
9219     + struct page *page = NULL;
9220    
9221     for_each_sg(data_sg, sg, data_nents, i) {
9222     int sg_remaining = sg->length;
9223     diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
9224     index ee0604cd9c6b2..0c498b20d8cb5 100644
9225     --- a/drivers/tty/hvc/hvcs.c
9226     +++ b/drivers/tty/hvc/hvcs.c
9227     @@ -1218,13 +1218,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
9228    
9229     tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
9230    
9231     - /*
9232     - * This line is important because it tells hvcs_open that this
9233     - * device needs to be re-configured the next time hvcs_open is
9234     - * called.
9235     - */
9236     - tty->driver_data = NULL;
9237     -
9238     free_irq(irq, hvcsd);
9239     return;
9240     } else if (hvcsd->port.count < 0) {
9241     @@ -1239,6 +1232,13 @@ static void hvcs_cleanup(struct tty_struct * tty)
9242     {
9243     struct hvcs_struct *hvcsd = tty->driver_data;
9244    
9245     + /*
9246     + * This line is important because it tells hvcs_open that this
9247     + * device needs to be re-configured the next time hvcs_open is
9248     + * called.
9249     + */
9250     + tty->driver_data = NULL;
9251     +
9252     tty_port_put(&hvcsd->port);
9253     }
9254    
9255     diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
9256     index cf20616340a1a..fe569f6294a24 100644
9257     --- a/drivers/tty/ipwireless/network.c
9258     +++ b/drivers/tty/ipwireless/network.c
9259     @@ -117,7 +117,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
9260     skb->len,
9261     notify_packet_sent,
9262     network);
9263     - if (ret == -1) {
9264     + if (ret < 0) {
9265     skb_pull(skb, 2);
9266     return 0;
9267     }
9268     @@ -134,7 +134,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
9269     notify_packet_sent,
9270     network);
9271     kfree(buf);
9272     - if (ret == -1)
9273     + if (ret < 0)
9274     return 0;
9275     }
9276     kfree_skb(skb);
9277     diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
9278     index fad3401e604d9..23584769fc292 100644
9279     --- a/drivers/tty/ipwireless/tty.c
9280     +++ b/drivers/tty/ipwireless/tty.c
9281     @@ -218,7 +218,7 @@ static int ipw_write(struct tty_struct *linux_tty,
9282     ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
9283     buf, count,
9284     ipw_write_packet_sent_callback, tty);
9285     - if (ret == -1) {
9286     + if (ret < 0) {
9287     mutex_unlock(&tty->ipw_tty_mutex);
9288     return 0;
9289     }
9290     diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
9291     index 00099a8439d21..c6a1d8c4e6894 100644
9292     --- a/drivers/tty/pty.c
9293     +++ b/drivers/tty/pty.c
9294     @@ -120,10 +120,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
9295     spin_lock_irqsave(&to->port->lock, flags);
9296     /* Stuff the data into the input queue of the other end */
9297     c = tty_insert_flip_string(to->port, buf, c);
9298     + spin_unlock_irqrestore(&to->port->lock, flags);
9299     /* And shovel */
9300     if (c)
9301     tty_flip_buffer_push(to->port);
9302     - spin_unlock_irqrestore(&to->port->lock, flags);
9303     }
9304     return c;
9305     }
9306     diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
9307     index 67a9eb3f94cec..a9751a83d5dbb 100644
9308     --- a/drivers/tty/serial/Kconfig
9309     +++ b/drivers/tty/serial/Kconfig
9310     @@ -10,6 +10,7 @@ menu "Serial drivers"
9311    
9312     config SERIAL_EARLYCON
9313     bool
9314     + depends on SERIAL_CORE
9315     help
9316     Support for early consoles with the earlycon parameter. This enables
9317     the console before standard serial driver is probed. The console is
9318     diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
9319     index d2fc050a34454..ec8e608b46baa 100644
9320     --- a/drivers/tty/serial/fsl_lpuart.c
9321     +++ b/drivers/tty/serial/fsl_lpuart.c
9322     @@ -635,26 +635,24 @@ static int lpuart32_poll_init(struct uart_port *port)
9323     spin_lock_irqsave(&sport->port.lock, flags);
9324    
9325     /* Disable Rx & Tx */
9326     - lpuart32_write(&sport->port, UARTCTRL, 0);
9327     + lpuart32_write(&sport->port, 0, UARTCTRL);
9328    
9329     temp = lpuart32_read(&sport->port, UARTFIFO);
9330    
9331     /* Enable Rx and Tx FIFO */
9332     - lpuart32_write(&sport->port, UARTFIFO,
9333     - temp | UARTFIFO_RXFE | UARTFIFO_TXFE);
9334     + lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
9335    
9336     /* flush Tx and Rx FIFO */
9337     - lpuart32_write(&sport->port, UARTFIFO,
9338     - UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH);
9339     + lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
9340    
9341     /* explicitly clear RDRF */
9342     if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
9343     lpuart32_read(&sport->port, UARTDATA);
9344     - lpuart32_write(&sport->port, UARTFIFO, UARTFIFO_RXUF);
9345     + lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
9346     }
9347    
9348     /* Enable Rx and Tx */
9349     - lpuart32_write(&sport->port, UARTCTRL, UARTCTRL_RE | UARTCTRL_TE);
9350     + lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
9351     spin_unlock_irqrestore(&sport->port.lock, flags);
9352    
9353     return 0;
9354     @@ -663,12 +661,12 @@ static int lpuart32_poll_init(struct uart_port *port)
9355     static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
9356     {
9357     lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
9358     - lpuart32_write(port, UARTDATA, c);
9359     + lpuart32_write(port, c, UARTDATA);
9360     }
9361    
9362     static int lpuart32_poll_get_char(struct uart_port *port)
9363     {
9364     - if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF))
9365     + if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF))
9366     return NO_POLL_CHAR;
9367    
9368     return lpuart32_read(port, UARTDATA);
9369     diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
9370     index 9900888afbcd8..c0c39cf303871 100644
9371     --- a/drivers/usb/cdns3/gadget.c
9372     +++ b/drivers/usb/cdns3/gadget.c
9373     @@ -2545,12 +2545,12 @@ void cdns3_gadget_exit(struct cdns3 *cdns)
9374    
9375     priv_dev = cdns->gadget_dev;
9376    
9377     - devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
9378    
9379     pm_runtime_mark_last_busy(cdns->dev);
9380     pm_runtime_put_autosuspend(cdns->dev);
9381    
9382     usb_del_gadget_udc(&priv_dev->gadget);
9383     + devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
9384    
9385     cdns3_free_all_eps(priv_dev);
9386    
9387     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
9388     index 7499ba118665a..808722b8294a4 100644
9389     --- a/drivers/usb/class/cdc-acm.c
9390     +++ b/drivers/usb/class/cdc-acm.c
9391     @@ -1243,9 +1243,21 @@ static int acm_probe(struct usb_interface *intf,
9392     }
9393     }
9394     } else {
9395     + int class = -1;
9396     +
9397     data_intf_num = union_header->bSlaveInterface0;
9398     control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
9399     data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
9400     +
9401     + if (control_interface)
9402     + class = control_interface->cur_altsetting->desc.bInterfaceClass;
9403     +
9404     + if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) {
9405     + dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n");
9406     + combined_interfaces = 1;
9407     + control_interface = data_interface = intf;
9408     + goto look_for_collapsed_interface;
9409     + }
9410     }
9411    
9412     if (!control_interface || !data_interface) {
9413     @@ -1900,6 +1912,17 @@ static const struct usb_device_id acm_ids[] = {
9414     .driver_info = IGNORE_DEVICE,
9415     },
9416    
9417     + /* Exclude ETAS ES58x */
9418     + { USB_DEVICE(0x108c, 0x0159), /* ES581.4 */
9419     + .driver_info = IGNORE_DEVICE,
9420     + },
9421     + { USB_DEVICE(0x108c, 0x0168), /* ES582.1 */
9422     + .driver_info = IGNORE_DEVICE,
9423     + },
9424     + { USB_DEVICE(0x108c, 0x0169), /* ES584.1 */
9425     + .driver_info = IGNORE_DEVICE,
9426     + },
9427     +
9428     { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
9429     .driver_info = SEND_ZERO_PACKET,
9430     },
9431     diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
9432     index 70afb2ca1eabd..9875e2fe33db2 100644
9433     --- a/drivers/usb/class/cdc-wdm.c
9434     +++ b/drivers/usb/class/cdc-wdm.c
9435     @@ -58,6 +58,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
9436    
9437     #define WDM_MAX 16
9438    
9439     +/* we cannot wait forever at flush() */
9440     +#define WDM_FLUSH_TIMEOUT (30 * HZ)
9441     +
9442     /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
9443     #define WDM_DEFAULT_BUFSIZE 256
9444    
9445     @@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb)
9446     kfree(desc->outbuf);
9447     desc->outbuf = NULL;
9448     clear_bit(WDM_IN_USE, &desc->flags);
9449     - wake_up(&desc->wait);
9450     + wake_up_all(&desc->wait);
9451     }
9452    
9453     static void wdm_in_callback(struct urb *urb)
9454     @@ -393,6 +396,9 @@ static ssize_t wdm_write
9455     if (test_bit(WDM_RESETTING, &desc->flags))
9456     r = -EIO;
9457    
9458     + if (test_bit(WDM_DISCONNECTING, &desc->flags))
9459     + r = -ENODEV;
9460     +
9461     if (r < 0) {
9462     rv = r;
9463     goto out_free_mem_pm;
9464     @@ -424,6 +430,7 @@ static ssize_t wdm_write
9465     if (rv < 0) {
9466     desc->outbuf = NULL;
9467     clear_bit(WDM_IN_USE, &desc->flags);
9468     + wake_up_all(&desc->wait); /* for wdm_wait_for_response() */
9469     dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
9470     rv = usb_translate_errors(rv);
9471     goto out_free_mem_pm;
9472     @@ -583,28 +590,58 @@ err:
9473     return rv;
9474     }
9475    
9476     -static int wdm_flush(struct file *file, fl_owner_t id)
9477     +static int wdm_wait_for_response(struct file *file, long timeout)
9478     {
9479     struct wdm_device *desc = file->private_data;
9480     + long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */
9481     +
9482     + /*
9483     + * Needs both flags. We cannot do with one because resetting it would
9484     + * cause a race with write() yet we need to signal a disconnect.
9485     + */
9486     + rv = wait_event_interruptible_timeout(desc->wait,
9487     + !test_bit(WDM_IN_USE, &desc->flags) ||
9488     + test_bit(WDM_DISCONNECTING, &desc->flags),
9489     + timeout);
9490    
9491     - wait_event(desc->wait,
9492     - /*
9493     - * needs both flags. We cannot do with one
9494     - * because resetting it would cause a race
9495     - * with write() yet we need to signal
9496     - * a disconnect
9497     - */
9498     - !test_bit(WDM_IN_USE, &desc->flags) ||
9499     - test_bit(WDM_DISCONNECTING, &desc->flags));
9500     -
9501     - /* cannot dereference desc->intf if WDM_DISCONNECTING */
9502     + /*
9503     + * To report the correct error. This is best effort.
9504     + * We are inevitably racing with the hardware.
9505     + */
9506     if (test_bit(WDM_DISCONNECTING, &desc->flags))
9507     return -ENODEV;
9508     - if (desc->werr < 0)
9509     - dev_err(&desc->intf->dev, "Error in flush path: %d\n",
9510     - desc->werr);
9511     + if (!rv)
9512     + return -EIO;
9513     + if (rv < 0)
9514     + return -EINTR;
9515     +
9516     + spin_lock_irq(&desc->iuspin);
9517     + rv = desc->werr;
9518     + desc->werr = 0;
9519     + spin_unlock_irq(&desc->iuspin);
9520     +
9521     + return usb_translate_errors(rv);
9522     +
9523     +}
9524     +
9525     +/*
9526     + * You need to send a signal when you react to malicious or defective hardware.
9527     + * Also, don't abort when fsync() returned -EINVAL, for older kernels which do
9528     + * not implement wdm_flush() will return -EINVAL.
9529     + */
9530     +static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
9531     +{
9532     + return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT);
9533     +}
9534    
9535     - return usb_translate_errors(desc->werr);
9536     +/*
9537     + * Same with wdm_fsync(), except it uses finite timeout in order to react to
9538     + * malicious or defective hardware which ceased communication after close() was
9539     + * implicitly called due to process termination.
9540     + */
9541     +static int wdm_flush(struct file *file, fl_owner_t id)
9542     +{
9543     + return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT);
9544     }
9545    
9546     static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
9547     @@ -729,6 +766,7 @@ static const struct file_operations wdm_fops = {
9548     .owner = THIS_MODULE,
9549     .read = wdm_read,
9550     .write = wdm_write,
9551     + .fsync = wdm_fsync,
9552     .open = wdm_open,
9553     .flush = wdm_flush,
9554     .release = wdm_release,
9555     diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
9556     index da923ec176122..31ca5abb4c12a 100644
9557     --- a/drivers/usb/core/urb.c
9558     +++ b/drivers/usb/core/urb.c
9559     @@ -772,11 +772,12 @@ void usb_block_urb(struct urb *urb)
9560     EXPORT_SYMBOL_GPL(usb_block_urb);
9561    
9562     /**
9563     - * usb_kill_anchored_urbs - cancel transfer requests en masse
9564     + * usb_kill_anchored_urbs - kill all URBs associated with an anchor
9565     * @anchor: anchor the requests are bound to
9566     *
9567     - * this allows all outstanding URBs to be killed starting
9568     - * from the back of the queue
9569     + * This kills all outstanding URBs starting from the back of the queue,
9570     + * with guarantee that no completer callbacks will take place from the
9571     + * anchor after this function returns.
9572     *
9573     * This routine should not be called by a driver after its disconnect
9574     * method has returned.
9575     @@ -784,20 +785,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb);
9576     void usb_kill_anchored_urbs(struct usb_anchor *anchor)
9577     {
9578     struct urb *victim;
9579     + int surely_empty;
9580    
9581     - spin_lock_irq(&anchor->lock);
9582     - while (!list_empty(&anchor->urb_list)) {
9583     - victim = list_entry(anchor->urb_list.prev, struct urb,
9584     - anchor_list);
9585     - /* we must make sure the URB isn't freed before we kill it*/
9586     - usb_get_urb(victim);
9587     - spin_unlock_irq(&anchor->lock);
9588     - /* this will unanchor the URB */
9589     - usb_kill_urb(victim);
9590     - usb_put_urb(victim);
9591     + do {
9592     spin_lock_irq(&anchor->lock);
9593     - }
9594     - spin_unlock_irq(&anchor->lock);
9595     + while (!list_empty(&anchor->urb_list)) {
9596     + victim = list_entry(anchor->urb_list.prev,
9597     + struct urb, anchor_list);
9598     + /* make sure the URB isn't freed before we kill it */
9599     + usb_get_urb(victim);
9600     + spin_unlock_irq(&anchor->lock);
9601     + /* this will unanchor the URB */
9602     + usb_kill_urb(victim);
9603     + usb_put_urb(victim);
9604     + spin_lock_irq(&anchor->lock);
9605     + }
9606     + surely_empty = usb_anchor_check_wakeup(anchor);
9607     +
9608     + spin_unlock_irq(&anchor->lock);
9609     + cpu_relax();
9610     + } while (!surely_empty);
9611     }
9612     EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
9613    
9614     @@ -816,21 +823,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
9615     void usb_poison_anchored_urbs(struct usb_anchor *anchor)
9616     {
9617     struct urb *victim;
9618     + int surely_empty;
9619    
9620     - spin_lock_irq(&anchor->lock);
9621     - anchor->poisoned = 1;
9622     - while (!list_empty(&anchor->urb_list)) {
9623     - victim = list_entry(anchor->urb_list.prev, struct urb,
9624     - anchor_list);
9625     - /* we must make sure the URB isn't freed before we kill it*/
9626     - usb_get_urb(victim);
9627     - spin_unlock_irq(&anchor->lock);
9628     - /* this will unanchor the URB */
9629     - usb_poison_urb(victim);
9630     - usb_put_urb(victim);
9631     + do {
9632     spin_lock_irq(&anchor->lock);
9633     - }
9634     - spin_unlock_irq(&anchor->lock);
9635     + anchor->poisoned = 1;
9636     + while (!list_empty(&anchor->urb_list)) {
9637     + victim = list_entry(anchor->urb_list.prev,
9638     + struct urb, anchor_list);
9639     + /* make sure the URB isn't freed before we kill it */
9640     + usb_get_urb(victim);
9641     + spin_unlock_irq(&anchor->lock);
9642     + /* this will unanchor the URB */
9643     + usb_poison_urb(victim);
9644     + usb_put_urb(victim);
9645     + spin_lock_irq(&anchor->lock);
9646     + }
9647     + surely_empty = usb_anchor_check_wakeup(anchor);
9648     +
9649     + spin_unlock_irq(&anchor->lock);
9650     + cpu_relax();
9651     + } while (!surely_empty);
9652     }
9653     EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
9654    
9655     @@ -970,14 +983,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
9656     {
9657     struct urb *victim;
9658     unsigned long flags;
9659     + int surely_empty;
9660     +
9661     + do {
9662     + spin_lock_irqsave(&anchor->lock, flags);
9663     + while (!list_empty(&anchor->urb_list)) {
9664     + victim = list_entry(anchor->urb_list.prev,
9665     + struct urb, anchor_list);
9666     + __usb_unanchor_urb(victim, anchor);
9667     + }
9668     + surely_empty = usb_anchor_check_wakeup(anchor);
9669    
9670     - spin_lock_irqsave(&anchor->lock, flags);
9671     - while (!list_empty(&anchor->urb_list)) {
9672     - victim = list_entry(anchor->urb_list.prev, struct urb,
9673     - anchor_list);
9674     - __usb_unanchor_urb(victim, anchor);
9675     - }
9676     - spin_unlock_irqrestore(&anchor->lock, flags);
9677     + spin_unlock_irqrestore(&anchor->lock, flags);
9678     + cpu_relax();
9679     + } while (!surely_empty);
9680     }
9681    
9682     EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
9683     diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
9684     index f7528f732b2aa..70ac47a341ac2 100644
9685     --- a/drivers/usb/dwc2/gadget.c
9686     +++ b/drivers/usb/dwc2/gadget.c
9687     @@ -712,8 +712,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
9688     */
9689     static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
9690     {
9691     + const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
9692     int is_isoc = hs_ep->isochronous;
9693     unsigned int maxsize;
9694     + u32 mps = hs_ep->ep.maxpacket;
9695     + int dir_in = hs_ep->dir_in;
9696    
9697     if (is_isoc)
9698     maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
9699     @@ -722,6 +725,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
9700     else
9701     maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
9702    
9703     + /* Interrupt OUT EP with mps not multiple of 4 */
9704     + if (hs_ep->index)
9705     + if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
9706     + maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
9707     +
9708     return maxsize;
9709     }
9710    
9711     @@ -737,11 +745,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
9712     * Isochronous - descriptor rx/tx bytes bitfield limit,
9713     * Control In/Bulk/Interrupt - multiple of mps. This will allow to not
9714     * have concatenations from various descriptors within one packet.
9715     + * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
9716     + * to a single descriptor.
9717     *
9718     * Selects corresponding mask for RX/TX bytes as well.
9719     */
9720     static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
9721     {
9722     + const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
9723     u32 mps = hs_ep->ep.maxpacket;
9724     int dir_in = hs_ep->dir_in;
9725     u32 desc_size = 0;
9726     @@ -765,6 +776,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
9727     desc_size -= desc_size % mps;
9728     }
9729    
9730     + /* Interrupt OUT EP with mps not multiple of 4 */
9731     + if (hs_ep->index)
9732     + if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
9733     + desc_size = mps;
9734     + *mask = DEV_DMA_NBYTES_MASK;
9735     + }
9736     +
9737     return desc_size;
9738     }
9739    
9740     @@ -1123,13 +1141,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
9741     length += (mps - (length % mps));
9742     }
9743    
9744     - /*
9745     - * If more data to send, adjust DMA for EP0 out data stage.
9746     - * ureq->dma stays unchanged, hence increment it by already
9747     - * passed passed data count before starting new transaction.
9748     - */
9749     - if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
9750     - continuing)
9751     + if (continuing)
9752     offset = ureq->actual;
9753    
9754     /* Fill DDMA chain entries */
9755     @@ -2319,22 +2331,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
9756     */
9757     static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
9758     {
9759     + const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
9760     struct dwc2_hsotg *hsotg = hs_ep->parent;
9761     unsigned int bytes_rem = 0;
9762     + unsigned int bytes_rem_correction = 0;
9763     struct dwc2_dma_desc *desc = hs_ep->desc_list;
9764     int i;
9765     u32 status;
9766     + u32 mps = hs_ep->ep.maxpacket;
9767     + int dir_in = hs_ep->dir_in;
9768    
9769     if (!desc)
9770     return -EINVAL;
9771    
9772     + /* Interrupt OUT EP with mps not multiple of 4 */
9773     + if (hs_ep->index)
9774     + if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
9775     + bytes_rem_correction = 4 - (mps % 4);
9776     +
9777     for (i = 0; i < hs_ep->desc_count; ++i) {
9778     status = desc->status;
9779     bytes_rem += status & DEV_DMA_NBYTES_MASK;
9780     + bytes_rem -= bytes_rem_correction;
9781    
9782     if (status & DEV_DMA_STS_MASK)
9783     dev_err(hsotg->dev, "descriptor %d closed with %x\n",
9784     i, status & DEV_DMA_STS_MASK);
9785     +
9786     + if (status & DEV_DMA_L)
9787     + break;
9788     +
9789     desc++;
9790     }
9791    
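
A minimal userspace sketch of the interrupt-OUT alignment arithmetic used in the dwc2 hunks above, assuming a placeholder value for MAX_DMA_DESC_NUM_GENERIC and an invented maxpacket; this is not the driver code, it only reproduces the mps-not-multiple-of-4 chain limit and the 4 - (mps % 4) residue correction term.

#include <stdio.h>

#define MAX_DMA_DESC_NUM_GENERIC 64     /* placeholder value for the sketch only */

int main(void)
{
        unsigned int mps = 42;          /* hypothetical interrupt-OUT maxpacket */

        if (mps % 4) {
                /* one packet per descriptor, so the chain limit shrinks */
                unsigned int chain_limit = mps * MAX_DMA_DESC_NUM_GENERIC;
                /* per-descriptor term subtracted from the residue count */
                unsigned int correction = 4 - (mps % 4);

                printf("chain limit %u bytes, correction %u bytes\n",
                       chain_limit, correction);
        }
        return 0;
}
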
9792     diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
9793     index 31e090ac9f1ec..6d3812678b8c6 100644
9794     --- a/drivers/usb/dwc2/params.c
9795     +++ b/drivers/usb/dwc2/params.c
9796     @@ -846,7 +846,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
9797     int dwc2_init_params(struct dwc2_hsotg *hsotg)
9798     {
9799     const struct of_device_id *match;
9800     - void (*set_params)(void *data);
9801     + void (*set_params)(struct dwc2_hsotg *data);
9802    
9803     dwc2_set_default_params(hsotg);
9804     dwc2_get_device_properties(hsotg);
9805     diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
9806     index 526c275ad0bc5..4cbf295390062 100644
9807     --- a/drivers/usb/dwc3/core.c
9808     +++ b/drivers/usb/dwc3/core.c
9809     @@ -117,6 +117,7 @@ static void __dwc3_set_mode(struct work_struct *work)
9810     struct dwc3 *dwc = work_to_dwc(work);
9811     unsigned long flags;
9812     int ret;
9813     + u32 reg;
9814    
9815     if (dwc->dr_mode != USB_DR_MODE_OTG)
9816     return;
9817     @@ -168,6 +169,11 @@ static void __dwc3_set_mode(struct work_struct *work)
9818     otg_set_vbus(dwc->usb2_phy->otg, true);
9819     phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
9820     phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
9821     + if (dwc->dis_split_quirk) {
9822     + reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
9823     + reg |= DWC3_GUCTL3_SPLITDISABLE;
9824     + dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
9825     + }
9826     }
9827     break;
9828     case DWC3_GCTL_PRTCAP_DEVICE:
9829     @@ -1323,6 +1329,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
9830     dwc->dis_metastability_quirk = device_property_read_bool(dev,
9831     "snps,dis_metastability_quirk");
9832    
9833     + dwc->dis_split_quirk = device_property_read_bool(dev,
9834     + "snps,dis-split-quirk");
9835     +
9836     dwc->lpm_nyet_threshold = lpm_nyet_threshold;
9837     dwc->tx_de_emphasis = tx_de_emphasis;
9838    
9839     @@ -1835,10 +1844,26 @@ static int dwc3_resume(struct device *dev)
9840    
9841     return 0;
9842     }
9843     +
9844     +static void dwc3_complete(struct device *dev)
9845     +{
9846     + struct dwc3 *dwc = dev_get_drvdata(dev);
9847     + u32 reg;
9848     +
9849     + if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
9850     + dwc->dis_split_quirk) {
9851     + reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
9852     + reg |= DWC3_GUCTL3_SPLITDISABLE;
9853     + dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
9854     + }
9855     +}
9856     +#else
9857     +#define dwc3_complete NULL
9858     #endif /* CONFIG_PM_SLEEP */
9859    
9860     static const struct dev_pm_ops dwc3_dev_pm_ops = {
9861     SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
9862     + .complete = dwc3_complete,
9863     SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
9864     dwc3_runtime_idle)
9865     };
9866     diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
9867     index ce4acbf7fef90..4dfbffa944de1 100644
9868     --- a/drivers/usb/dwc3/core.h
9869     +++ b/drivers/usb/dwc3/core.h
9870     @@ -136,6 +136,7 @@
9871     #define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10))
9872    
9873     #define DWC3_GHWPARAMS8 0xc600
9874     +#define DWC3_GUCTL3 0xc60c
9875     #define DWC3_GFLADJ 0xc630
9876    
9877     /* Device Registers */
9878     @@ -375,6 +376,9 @@
9879     /* Global User Control Register 2 */
9880     #define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
9881    
9882     +/* Global User Control Register 3 */
9883     +#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
9884     +
9885     /* Device Configuration Register */
9886     #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
9887     #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
9888     @@ -1038,6 +1042,7 @@ struct dwc3_scratchpad_array {
9889     * 2 - No de-emphasis
9890     * 3 - Reserved
9891     * @dis_metastability_quirk: set to disable metastability quirk.
9892     + * @dis_split_quirk: set to disable split boundary.
9893     * @imod_interval: set the interrupt moderation interval in 250ns
9894     * increments or 0 to disable.
9895     */
9896     @@ -1229,6 +1234,8 @@ struct dwc3 {
9897    
9898     unsigned dis_metastability_quirk:1;
9899    
9900     + unsigned dis_split_quirk:1;
9901     +
9902     u16 imod_interval;
9903     };
9904    
9905     diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
9906     index bdac3e7d7b184..d055e00f81808 100644
9907     --- a/drivers/usb/dwc3/dwc3-of-simple.c
9908     +++ b/drivers/usb/dwc3/dwc3-of-simple.c
9909     @@ -183,6 +183,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
9910     { .compatible = "amlogic,meson-axg-dwc3" },
9911     { .compatible = "amlogic,meson-gxl-dwc3" },
9912     { .compatible = "allwinner,sun50i-h6-dwc3" },
9913     + { .compatible = "hisilicon,hi3670-dwc3" },
9914     { /* Sentinel */ }
9915     };
9916     MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
9917     diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
9918     index 1f638759a9533..92a7c3a839454 100644
9919     --- a/drivers/usb/gadget/function/f_ncm.c
9920     +++ b/drivers/usb/gadget/function/f_ncm.c
9921     @@ -85,8 +85,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
9922     /* peak (theoretical) bulk transfer rate in bits-per-second */
9923     static inline unsigned ncm_bitrate(struct usb_gadget *g)
9924     {
9925     - if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
9926     - return 13 * 1024 * 8 * 1000 * 8;
9927     + if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
9928     + return 4250000000U;
9929     + else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
9930     + return 3750000000U;
9931     else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
9932     return 13 * 512 * 8 * 1000 * 8;
9933     else
9934     @@ -1534,7 +1536,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
9935     fs_ncm_notify_desc.bEndpointAddress;
9936    
9937     status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
9938     - ncm_ss_function, NULL);
9939     + ncm_ss_function, ncm_ss_function);
9940     if (status)
9941     goto fail;
9942    
9943     diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
9944     index 9c7ed2539ff77..8ed1295d7e350 100644
9945     --- a/drivers/usb/gadget/function/f_printer.c
9946     +++ b/drivers/usb/gadget/function/f_printer.c
9947     @@ -31,6 +31,7 @@
9948     #include <linux/types.h>
9949     #include <linux/ctype.h>
9950     #include <linux/cdev.h>
9951     +#include <linux/kref.h>
9952    
9953     #include <asm/byteorder.h>
9954     #include <linux/io.h>
9955     @@ -64,7 +65,7 @@ struct printer_dev {
9956     struct usb_gadget *gadget;
9957     s8 interface;
9958     struct usb_ep *in_ep, *out_ep;
9959     -
9960     + struct kref kref;
9961     struct list_head rx_reqs; /* List of free RX structs */
9962     struct list_head rx_reqs_active; /* List of Active RX xfers */
9963     struct list_head rx_buffers; /* List of completed xfers */
9964     @@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
9965    
9966     /*-------------------------------------------------------------------------*/
9967    
9968     +static void printer_dev_free(struct kref *kref)
9969     +{
9970     + struct printer_dev *dev = container_of(kref, struct printer_dev, kref);
9971     +
9972     + kfree(dev);
9973     +}
9974     +
9975     static struct usb_request *
9976     printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags)
9977     {
9978     @@ -348,6 +356,7 @@ printer_open(struct inode *inode, struct file *fd)
9979    
9980     spin_unlock_irqrestore(&dev->lock, flags);
9981    
9982     + kref_get(&dev->kref);
9983     DBG(dev, "printer_open returned %x\n", ret);
9984     return ret;
9985     }
9986     @@ -365,6 +374,7 @@ printer_close(struct inode *inode, struct file *fd)
9987     dev->printer_status &= ~PRINTER_SELECTED;
9988     spin_unlock_irqrestore(&dev->lock, flags);
9989    
9990     + kref_put(&dev->kref, printer_dev_free);
9991     DBG(dev, "printer_close\n");
9992    
9993     return 0;
9994     @@ -1350,7 +1360,8 @@ static void gprinter_free(struct usb_function *f)
9995     struct f_printer_opts *opts;
9996    
9997     opts = container_of(f->fi, struct f_printer_opts, func_inst);
9998     - kfree(dev);
9999     +
10000     + kref_put(&dev->kref, printer_dev_free);
10001     mutex_lock(&opts->lock);
10002     --opts->refcnt;
10003     mutex_unlock(&opts->lock);
10004     @@ -1419,6 +1430,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
10005     return ERR_PTR(-ENOMEM);
10006     }
10007    
10008     + kref_init(&dev->kref);
10009     ++opts->refcnt;
10010     dev->minor = opts->minor;
10011     dev->pnp_string = opts->pnp_string;
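
The f_printer change above keeps the device structure alive with a kref so that an open character device node can outlive gprinter_free(). As a rough userspace analogue of that pattern (plain C11 atomics standing in for the kernel kref API, all names invented):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct printer_like {
        atomic_int refcount;
        /* ... the rest of the device state would live here ... */
};

static void printer_like_free(struct printer_like *p)
{
        printf("last reference gone, freeing device state\n");
        free(p);
}

static void get_ref(struct printer_like *p)
{
        atomic_fetch_add(&p->refcount, 1);
}

static void put_ref(struct printer_like *p)
{
        if (atomic_fetch_sub(&p->refcount, 1) == 1)
                printer_like_free(p);
}

int main(void)
{
        struct printer_like *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        atomic_init(&p->refcount, 1);   /* kref_init() analogue */

        get_ref(p);     /* open(): kref_get() */
        put_ref(p);     /* gprinter_free(): kref_put(), state stays alive */
        put_ref(p);     /* close(): kref_put(), release runs here */
        return 0;
}

Whichever of close() or the function teardown drops the last reference is the one that runs the release callback.
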
10012     diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
10013     index fbe96ef1ac7a4..891e9f7f40d59 100644
10014     --- a/drivers/usb/gadget/function/u_ether.c
10015     +++ b/drivers/usb/gadget/function/u_ether.c
10016     @@ -93,7 +93,7 @@ struct eth_dev {
10017     static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
10018     {
10019     if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
10020     - gadget->speed == USB_SPEED_SUPER))
10021     + gadget->speed >= USB_SPEED_SUPER))
10022     return qmult * DEFAULT_QLEN;
10023     else
10024     return DEFAULT_QLEN;
10025     diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
10026     index 4de91653a2c7b..5eb62240c7f87 100644
10027     --- a/drivers/usb/host/ohci-hcd.c
10028     +++ b/drivers/usb/host/ohci-hcd.c
10029     @@ -673,20 +673,24 @@ retry:
10030    
10031     /* handle root hub init quirks ... */
10032     val = roothub_a (ohci);
10033     - val &= ~(RH_A_PSM | RH_A_OCPM);
10034     + /* Configure for per-port over-current protection by default */
10035     + val &= ~RH_A_NOCP;
10036     + val |= RH_A_OCPM;
10037     if (ohci->flags & OHCI_QUIRK_SUPERIO) {
10038     - /* NSC 87560 and maybe others */
10039     + /* NSC 87560 and maybe others.
10040     + * Ganged power switching, no over-current protection.
10041     + */
10042     val |= RH_A_NOCP;
10043     - val &= ~(RH_A_POTPGT | RH_A_NPS);
10044     - ohci_writel (ohci, val, &ohci->regs->roothub.a);
10045     + val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM);
10046     } else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
10047     (ohci->flags & OHCI_QUIRK_HUB_POWER)) {
10048     /* hub power always on; required for AMD-756 and some
10049     - * Mac platforms. ganged overcurrent reporting, if any.
10050     + * Mac platforms.
10051     */
10052     val |= RH_A_NPS;
10053     - ohci_writel (ohci, val, &ohci->regs->roothub.a);
10054     }
10055     + ohci_writel(ohci, val, &ohci->regs->roothub.a);
10056     +
10057     ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
10058     ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
10059     &ohci->regs->roothub.b);
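
The ohci-hcd hunk above makes per-port over-current protection the default root-hub policy (set OCPM, clear NOCP) and consolidates the register update into a single write after the quirk branches. A small read-modify-write sketch of that, with the bit positions assumed from the OHCI HcRhDescriptorA layout and an invented starting value:

#include <stdio.h>
#include <stdint.h>

#define RH_A_OCPM (1u << 11)    /* per-port over-current protection mode */
#define RH_A_NOCP (1u << 12)    /* no over-current protection */

int main(void)
{
        uint32_t val = RH_A_NOCP;       /* pretend value read from roothub.a */

        /* default policy: per-port over-current reporting */
        val &= ~RH_A_NOCP;
        val |= RH_A_OCPM;

        /* quirk branches would adjust val here; the write then happens once */
        printf("roothub.a would be written as 0x%08x\n", val);
        return 0;
}
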
10060     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
10061     index bad154f446f8d..0d10ede581cbd 100644
10062     --- a/drivers/usb/host/xhci.c
10063     +++ b/drivers/usb/host/xhci.c
10064     @@ -1915,8 +1915,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
10065     ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
10066     trace_xhci_add_endpoint(ep_ctx);
10067    
10068     - xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
10069     -
10070     xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
10071     (unsigned int) ep->desc.bEndpointAddress,
10072     udev->slot_id,
10073     @@ -2949,6 +2947,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
10074     xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
10075     virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
10076     virt_dev->eps[i].new_ring = NULL;
10077     + xhci_debugfs_create_endpoint(xhci, virt_dev, i);
10078     }
10079     command_cleanup:
10080     kfree(command->completion);
10081     diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
10082     index 927b608461c82..bf32997c557ff 100644
10083     --- a/drivers/vfio/pci/vfio_pci_config.c
10084     +++ b/drivers/vfio/pci/vfio_pci_config.c
10085     @@ -406,7 +406,7 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
10086     * PF SR-IOV capability, there's therefore no need to trigger
10087     * faults based on the virtual value.
10088     */
10089     - return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
10090     + return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY);
10091     }
10092    
10093     /*
10094     @@ -518,8 +518,8 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
10095    
10096     count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
10097    
10098     - /* Mask in virtual memory enable for SR-IOV devices */
10099     - if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) {
10100     + /* Mask in virtual memory enable */
10101     + if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
10102     u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
10103     u32 tmp_val = le32_to_cpu(*val);
10104    
10105     @@ -587,9 +587,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
10106     * shows it disabled (phys_mem/io, then the device has
10107     * undergone some kind of backdoor reset and needs to be
10108     * restored before we allow it to enable the bars.
10109     - * SR-IOV devices will trigger this, but we catch them later
10110     + * SR-IOV devices will trigger this - for mem enable let's
10111     + * catch this now and for io enable it will be caught later
10112     */
10113     - if ((new_mem && virt_mem && !phys_mem) ||
10114     + if ((new_mem && virt_mem && !phys_mem &&
10115     + !pdev->no_command_memory) ||
10116     (new_io && virt_io && !phys_io) ||
10117     vfio_need_bar_restore(vdev))
10118     vfio_bar_restore(vdev);
10119     @@ -1732,12 +1734,14 @@ int vfio_config_init(struct vfio_pci_device *vdev)
10120     vconfig[PCI_INTERRUPT_PIN]);
10121    
10122     vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
10123     -
10124     + }
10125     + if (pdev->no_command_memory) {
10126     /*
10127     - * VFs do no implement the memory enable bit of the COMMAND
10128     - * register therefore we'll not have it set in our initial
10129     - * copy of config space after pci_enable_device(). For
10130     - * consistency with PFs, set the virtual enable bit here.
10131     + * VFs and devices that set pdev->no_command_memory do not
10132     + * implement the memory enable bit of the COMMAND register
10133     + * therefore we'll not have it set in our initial copy of
10134     + * config space after pci_enable_device(). For consistency
10135     + * with PFs, set the virtual enable bit here.
10136     */
10137     *(__le16 *)&vconfig[PCI_COMMAND] |=
10138     cpu_to_le16(PCI_COMMAND_MEMORY);
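
The vfio-pci hunks above generalize the SR-IOV handling: for devices flagged no_command_memory the memory-enable bit is emulated, so config reads splice the virtual bit into the physical value. A hedged userspace sketch of that bit merge (PCI_COMMAND_MEMORY is the standard bit 1; the sample values are made up):

#include <stdio.h>
#include <stdint.h>

#define PCI_COMMAND_MEMORY 0x2          /* memory space enable, bit 1 */

/* take everything from the physical value except the unimplemented
 * memory-enable bit, which comes from the virtual copy instead */
static uint16_t read_command(uint16_t phys, uint16_t virt, int no_cmd_mem)
{
        uint16_t val = phys;

        if (no_cmd_mem) {
                val &= ~PCI_COMMAND_MEMORY;
                val |= virt & PCI_COMMAND_MEMORY;
        }
        return val;
}

int main(void)
{
        uint16_t phys = 0x0004;                 /* bus mastering only */
        uint16_t virt = PCI_COMMAND_MEMORY;     /* virtual enable set */

        printf("guest sees command = 0x%04x\n", read_command(phys, virt, 1));
        return 0;
}
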
10139     diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
10140     index 1d9fb25929459..869dce5f134dd 100644
10141     --- a/drivers/vfio/pci/vfio_pci_intrs.c
10142     +++ b/drivers/vfio/pci/vfio_pci_intrs.c
10143     @@ -352,11 +352,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
10144     vdev->ctx[vector].producer.token = trigger;
10145     vdev->ctx[vector].producer.irq = irq;
10146     ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
10147     - if (unlikely(ret))
10148     + if (unlikely(ret)) {
10149     dev_info(&pdev->dev,
10150     "irq bypass producer (token %p) registration fails: %d\n",
10151     vdev->ctx[vector].producer.token, ret);
10152    
10153     + vdev->ctx[vector].producer.token = NULL;
10154     + }
10155     vdev->ctx[vector].trigger = trigger;
10156    
10157     return 0;
10158     diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
10159     index c6220f57fdf3e..3b31e83a92155 100644
10160     --- a/drivers/vfio/vfio_iommu_type1.c
10161     +++ b/drivers/vfio/vfio_iommu_type1.c
10162     @@ -631,7 +631,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
10163    
10164     ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
10165     if (ret) {
10166     - vfio_unpin_page_external(dma, iova, do_accounting);
10167     + if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
10168     + vfio_lock_acct(dma, -1, true);
10169     goto pin_unwind;
10170     }
10171     }
10172     diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
10173     index 2355f00f57732..1f6301375fd33 100644
10174     --- a/drivers/video/backlight/sky81452-backlight.c
10175     +++ b/drivers/video/backlight/sky81452-backlight.c
10176     @@ -196,6 +196,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
10177     num_entry);
10178     if (ret < 0) {
10179     dev_err(dev, "led-sources node is invalid.\n");
10180     + of_node_put(np);
10181     return ERR_PTR(-EINVAL);
10182     }
10183    
10184     diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
10185     index 4ca07866f2f66..5dda824d0da3f 100644
10186     --- a/drivers/video/fbdev/aty/radeon_base.c
10187     +++ b/drivers/video/fbdev/aty/radeon_base.c
10188     @@ -2323,7 +2323,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
10189    
10190     ret = radeon_kick_out_firmware_fb(pdev);
10191     if (ret)
10192     - return ret;
10193     + goto err_release_fb;
10194    
10195     /* request the mem regions */
10196     ret = pci_request_region(pdev, 0, "radeonfb framebuffer");
10197     diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
10198     index 97abcd497c7e0..bf76dadbed87f 100644
10199     --- a/drivers/video/fbdev/core/fbmem.c
10200     +++ b/drivers/video/fbdev/core/fbmem.c
10201     @@ -1001,6 +1001,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
10202     return 0;
10203     }
10204    
10205     + /* bitfill_aligned() assumes that it's at least 8x8 */
10206     + if (var->xres < 8 || var->yres < 8)
10207     + return -EINVAL;
10208     +
10209     ret = info->fbops->fb_check_var(var, info);
10210    
10211     if (ret)
10212     diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
10213     index dfe3eb769638b..fde27feae5d0c 100644
10214     --- a/drivers/video/fbdev/sis/init.c
10215     +++ b/drivers/video/fbdev/sis/init.c
10216     @@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
10217    
10218     i = 0;
10219    
10220     + if (SiS_Pr->ChipType == SIS_730)
10221     + queuedata = &FQBQData730[0];
10222     + else
10223     + queuedata = &FQBQData[0];
10224     +
10225     if(ModeNo > 0x13) {
10226    
10227     /* Get VCLK */
10228     @@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
10229     /* Get half colordepth */
10230     colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)];
10231    
10232     - if(SiS_Pr->ChipType == SIS_730) {
10233     - queuedata = &FQBQData730[0];
10234     - } else {
10235     - queuedata = &FQBQData[0];
10236     - }
10237     -
10238     do {
10239     templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth;
10240    
10241     diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
10242     index 4b83109202b1c..3c4d20618de4c 100644
10243     --- a/drivers/video/fbdev/vga16fb.c
10244     +++ b/drivers/video/fbdev/vga16fb.c
10245     @@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info)
10246     }
10247    
10248     static void vga16fb_clock_chip(struct vga16fb_par *par,
10249     - unsigned int pixclock,
10250     + unsigned int *pixclock,
10251     const struct fb_info *info,
10252     int mul, int div)
10253     {
10254     @@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
10255     { 0 /* bad */, 0x00, 0x00}};
10256     int err;
10257    
10258     - pixclock = (pixclock * mul) / div;
10259     + *pixclock = (*pixclock * mul) / div;
10260     best = vgaclocks;
10261     - err = pixclock - best->pixclock;
10262     + err = *pixclock - best->pixclock;
10263     if (err < 0) err = -err;
10264     for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) {
10265     int tmp;
10266    
10267     - tmp = pixclock - ptr->pixclock;
10268     + tmp = *pixclock - ptr->pixclock;
10269     if (tmp < 0) tmp = -tmp;
10270     if (tmp < err) {
10271     err = tmp;
10272     @@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
10273     }
10274     par->misc |= best->misc;
10275     par->clkdiv = best->seq_clock_mode;
10276     - pixclock = (best->pixclock * div) / mul;
10277     + *pixclock = (best->pixclock * div) / mul;
10278     }
10279    
10280     #define FAIL(X) return -EINVAL
10281     @@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,
10282    
10283     if (mode & MODE_8BPP)
10284     /* pixel clock == vga clock / 2 */
10285     - vga16fb_clock_chip(par, var->pixclock, info, 1, 2);
10286     + vga16fb_clock_chip(par, &var->pixclock, info, 1, 2);
10287     else
10288     /* pixel clock == vga clock */
10289     - vga16fb_clock_chip(par, var->pixclock, info, 1, 1);
10290     + vga16fb_clock_chip(par, &var->pixclock, info, 1, 1);
10291    
10292     var->red.offset = var->green.offset = var->blue.offset =
10293     var->transp.offset = 0;
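
The vga16fb change above only works because pixclock is now passed by pointer: the rounded clock has to be written back into var->pixclock, which a by-value parameter silently discards. A tiny sketch of that difference, with an invented rounding step:

#include <stdio.h>

/* writing through the pointer is what lets the caller see the rounding */
static void round_clock(unsigned int *pixclock, unsigned int step)
{
        *pixclock -= *pixclock % step;
}

int main(void)
{
        unsigned int pixclock = 39722;  /* hypothetical value in picoseconds */

        round_clock(&pixclock, 1000);
        printf("rounded pixclock: %u\n", pixclock);     /* prints 39000 */
        return 0;
}
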
10294     diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
10295     index 93d5bebf9572a..fb292f9cf29df 100644
10296     --- a/drivers/virt/fsl_hypervisor.c
10297     +++ b/drivers/virt/fsl_hypervisor.c
10298     @@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
10299    
10300     unsigned int i;
10301     long ret = 0;
10302     - int num_pinned; /* return value from get_user_pages() */
10303     + int num_pinned = 0; /* return value from get_user_pages_fast() */
10304     phys_addr_t remote_paddr; /* The next address in the remote buffer */
10305     uint32_t count; /* The number of bytes left to copy */
10306    
10307     @@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
10308     return -EINVAL;
10309    
10310     /*
10311     - * The array of pages returned by get_user_pages() covers only
10312     + * The array of pages returned by get_user_pages_fast() covers only
10313     * page-aligned memory. Since the user buffer is probably not
10314     * page-aligned, we need to handle the discrepancy.
10315     *
10316     @@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
10317    
10318     /*
10319     * 'pages' is an array of struct page pointers that's initialized by
10320     - * get_user_pages().
10321     + * get_user_pages_fast().
10322     */
10323     pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
10324     if (!pages) {
10325     @@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
10326     if (!sg_list_unaligned) {
10327     pr_debug("fsl-hv: could not allocate S/G list\n");
10328     ret = -ENOMEM;
10329     - goto exit;
10330     + goto free_pages;
10331     }
10332     sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
10333    
10334     @@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
10335     num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
10336    
10337     if (num_pinned != num_pages) {
10338     - /* get_user_pages() failed */
10339     pr_debug("fsl-hv: could not lock source buffer\n");
10340     ret = (num_pinned < 0) ? num_pinned : -EFAULT;
10341     goto exit;
10342     @@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
10343     virt_to_phys(sg_list), num_pages);
10344    
10345     exit:
10346     - if (pages) {
10347     - for (i = 0; i < num_pages; i++)
10348     - if (pages[i])
10349     - put_page(pages[i]);
10350     + if (pages && (num_pinned > 0)) {
10351     + for (i = 0; i < num_pinned; i++)
10352     + put_page(pages[i]);
10353     }
10354    
10355     kfree(sg_list_unaligned);
10356     +free_pages:
10357     kfree(pages);
10358    
10359     if (!ret)
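
The fsl_hypervisor cleanup above hinges on two details: release only the num_pinned pages that get_user_pages_fast() actually returned, and give the page-array allocation its own unwind label so an earlier failure skips the put loop entirely. A userspace sketch of the same unwind shape, with pretend_pin() standing in for the pinning call and all values invented:

#include <stdio.h>
#include <stdlib.h>

/* pretend_pin() stands in for get_user_pages_fast(): it may handle fewer
 * entries than requested and reports how many it actually pinned */
static int pretend_pin(int *slots, int want)
{
        int done = want > 3 ? 3 : want;         /* simulate a partial result */

        for (int i = 0; i < done; i++)
                slots[i] = 1;
        return done;
}

int main(void)
{
        int num_pages = 8, num_pinned = 0, ret = 0;
        int *pages = calloc(num_pages, sizeof(*pages));

        if (!pages)
                return 1;                       /* nothing pinned yet */

        num_pinned = pretend_pin(pages, num_pages);
        if (num_pinned != num_pages) {
                ret = -1;
                goto exit;                      /* skip the copy step */
        }

        printf("all %d pages pinned, copy would run here\n", num_pinned);

exit:
        /* release only what was actually pinned, then free the array */
        for (int i = 0; i < num_pinned; i++)
                pages[i] = 0;                   /* put_page() analogue */
        free(pages);
        return ret ? 1 : 0;
}
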
10360     diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
10361     index 87eaf357ae01f..adf015aa4126f 100644
10362     --- a/drivers/watchdog/sp5100_tco.h
10363     +++ b/drivers/watchdog/sp5100_tco.h
10364     @@ -70,7 +70,7 @@
10365     #define EFCH_PM_DECODEEN_WDT_TMREN BIT(7)
10366    
10367    
10368     -#define EFCH_PM_DECODEEN3 0x00
10369     +#define EFCH_PM_DECODEEN3 0x03
10370     #define EFCH_PM_DECODEEN_SECOND_RES GENMASK(1, 0)
10371     #define EFCH_PM_WATCHDOG_DISABLE ((u8)GENMASK(3, 2))
10372    
10373     diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
10374     index 3729f99fd8eca..8494846ccdc5f 100644
10375     --- a/drivers/watchdog/watchdog_dev.c
10376     +++ b/drivers/watchdog/watchdog_dev.c
10377     @@ -971,8 +971,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
10378     wd_data->wdd = wdd;
10379     wdd->wd_data = wd_data;
10380    
10381     - if (IS_ERR_OR_NULL(watchdog_kworker))
10382     + if (IS_ERR_OR_NULL(watchdog_kworker)) {
10383     + kfree(wd_data);
10384     return -ENODEV;
10385     + }
10386    
10387     device_initialize(&wd_data->dev);
10388     wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
10389     @@ -998,7 +1000,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
10390     pr_err("%s: a legacy watchdog module is probably present.\n",
10391     wdd->info->identity);
10392     old_wd_data = NULL;
10393     - kfree(wd_data);
10394     + put_device(&wd_data->dev);
10395     return err;
10396     }
10397     }
10398     diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
10399     index 689162e2e1755..3150c19cdc2fb 100644
10400     --- a/fs/cifs/asn1.c
10401     +++ b/fs/cifs/asn1.c
10402     @@ -530,8 +530,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
10403     return 0;
10404     } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
10405     || (tag != ASN1_EOC)) {
10406     - cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
10407     - cls, con, tag, end, *end);
10408     + cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
10409     + cls, con, tag, end);
10410     return 0;
10411     }
10412    
10413     @@ -541,8 +541,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
10414     return 0;
10415     } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
10416     || (tag != ASN1_SEQ)) {
10417     - cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
10418     - cls, con, tag, end, *end);
10419     + cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n",
10420     + cls, con, tag, end);
10421     return 0;
10422     }
10423    
10424     @@ -552,8 +552,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
10425     return 0;
10426     } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
10427     || (tag != ASN1_EOC)) {
10428     - cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
10429     - cls, con, tag, end, *end);
10430     + cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
10431     + cls, con, tag, end);
10432     return 0;
10433     }
10434    
10435     @@ -564,8 +564,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
10436     return 0;
10437     } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
10438     || (tag != ASN1_SEQ)) {
10439     - cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
10440     - cls, con, tag, end, *end);
10441     + cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n",
10442     + cls, con, tag, sequence_end);
10443     return 0;
10444     }
10445    
10446     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
10447     index 9a89e5f7c4da3..776029a57e717 100644
10448     --- a/fs/cifs/smb2ops.c
10449     +++ b/fs/cifs/smb2ops.c
10450     @@ -3707,7 +3707,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
10451     if (rc) {
10452     cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
10453     enc ? "en" : "de");
10454     - return 0;
10455     + return rc;
10456     }
10457    
10458     rc = smb3_crypto_aead_allocate(server);
10459     @@ -3886,7 +3886,8 @@ smb3_is_transform_hdr(void *buf)
10460     static int
10461     decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
10462     unsigned int buf_data_size, struct page **pages,
10463     - unsigned int npages, unsigned int page_data_size)
10464     + unsigned int npages, unsigned int page_data_size,
10465     + bool is_offloaded)
10466     {
10467     struct kvec iov[2];
10468     struct smb_rqst rqst = {NULL};
10469     @@ -3912,7 +3913,8 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
10470    
10471     memmove(buf, iov[1].iov_base, buf_data_size);
10472    
10473     - server->total_read = buf_data_size + page_data_size;
10474     + if (!is_offloaded)
10475     + server->total_read = buf_data_size + page_data_size;
10476    
10477     return rc;
10478     }
10479     @@ -4126,7 +4128,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
10480     struct mid_q_entry *mid;
10481    
10482     rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
10483     - dw->ppages, dw->npages, dw->len);
10484     + dw->ppages, dw->npages, dw->len, true);
10485     if (rc) {
10486     cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
10487     goto free_pages;
10488     @@ -4232,7 +4234,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
10489    
10490     non_offloaded_decrypt:
10491     rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
10492     - pages, npages, len);
10493     + pages, npages, len, false);
10494     if (rc)
10495     goto free_pages;
10496    
10497     @@ -4288,7 +4290,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
10498     server->total_read += length;
10499    
10500     buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
10501     - length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
10502     + length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
10503     if (length)
10504     return length;
10505    
10506     diff --git a/fs/d_path.c b/fs/d_path.c
10507     index 0f1fc1743302f..a69e2cd36e6e3 100644
10508     --- a/fs/d_path.c
10509     +++ b/fs/d_path.c
10510     @@ -102,6 +102,8 @@ restart:
10511    
10512     if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
10513     struct mount *parent = READ_ONCE(mnt->mnt_parent);
10514     + struct mnt_namespace *mnt_ns;
10515     +
10516     /* Escaped? */
10517     if (dentry != vfsmnt->mnt_root) {
10518     bptr = *buffer;
10519     @@ -116,7 +118,9 @@ restart:
10520     vfsmnt = &mnt->mnt;
10521     continue;
10522     }
10523     - if (is_mounted(vfsmnt) && !is_anon_ns(mnt->mnt_ns))
10524     + mnt_ns = READ_ONCE(mnt->mnt_ns);
10525     + /* open-coded is_mounted() to use local mnt_ns */
10526     + if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns))
10527     error = 1; // absolute root
10528     else
10529     error = 2; // detached or not attached yet
10530     diff --git a/fs/dlm/config.c b/fs/dlm/config.c
10531     index 3b21082e1b550..3b1012a3c4396 100644
10532     --- a/fs/dlm/config.c
10533     +++ b/fs/dlm/config.c
10534     @@ -216,6 +216,7 @@ struct dlm_space {
10535     struct list_head members;
10536     struct mutex members_lock;
10537     int members_count;
10538     + struct dlm_nodes *nds;
10539     };
10540    
10541     struct dlm_comms {
10542     @@ -424,6 +425,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
10543     INIT_LIST_HEAD(&sp->members);
10544     mutex_init(&sp->members_lock);
10545     sp->members_count = 0;
10546     + sp->nds = nds;
10547     return &sp->group;
10548    
10549     fail:
10550     @@ -445,6 +447,7 @@ static void drop_space(struct config_group *g, struct config_item *i)
10551     static void release_space(struct config_item *i)
10552     {
10553     struct dlm_space *sp = config_item_to_space(i);
10554     + kfree(sp->nds);
10555     kfree(sp);
10556     }
10557    
10558     diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
10559     index dbccf46f17709..37347ba868b70 100644
10560     --- a/fs/ext4/fsmap.c
10561     +++ b/fs/ext4/fsmap.c
10562     @@ -108,6 +108,9 @@ static int ext4_getfsmap_helper(struct super_block *sb,
10563    
10564     /* Are we just counting mappings? */
10565     if (info->gfi_head->fmh_count == 0) {
10566     + if (info->gfi_head->fmh_entries == UINT_MAX)
10567     + return EXT4_QUERY_RANGE_ABORT;
10568     +
10569     if (rec_fsblk > info->gfi_next_fsblk)
10570     info->gfi_head->fmh_entries++;
10571    
10572     diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
10573     index 170934430d7d7..029e693e201cf 100644
10574     --- a/fs/f2fs/sysfs.c
10575     +++ b/fs/f2fs/sysfs.c
10576     @@ -788,4 +788,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
10577     }
10578     kobject_del(&sbi->s_kobj);
10579     kobject_put(&sbi->s_kobj);
10580     + wait_for_completion(&sbi->s_kobj_unregister);
10581     }
10582     diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
10583     index a30ea7ecb790a..80867a1a94f26 100644
10584     --- a/fs/iomap/buffered-io.c
10585     +++ b/fs/iomap/buffered-io.c
10586     @@ -559,6 +559,7 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
10587    
10588     if (PageUptodate(page))
10589     return 0;
10590     + ClearPageError(page);
10591    
10592     do {
10593     iomap_adjust_read_range(inode, iop, &block_start,
10594     diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
10595     index 7b5f76efef02d..8c98fd92bf665 100644
10596     --- a/fs/iomap/direct-io.c
10597     +++ b/fs/iomap/direct-io.c
10598     @@ -377,6 +377,16 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
10599     return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
10600     case IOMAP_INLINE:
10601     return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
10602     + case IOMAP_DELALLOC:
10603     + /*
10604     + * DIO is not serialised against mmap() access at all, and so
10605     + * if the page_mkwrite occurs between the writeback and the
10606     + * iomap_apply() call in the DIO path, then it will see the
10607     + * DELALLOC block that the page-mkwrite allocated.
10608     + */
10609     + pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
10610     + dio->iocb->ki_filp, current->comm);
10611     + return -EIO;
10612     default:
10613     WARN_ON_ONCE(1);
10614     return -EIO;
10615     diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
10616     index d4359a1df3d5e..84933a0af49b6 100644
10617     --- a/fs/ntfs/inode.c
10618     +++ b/fs/ntfs/inode.c
10619     @@ -1809,6 +1809,12 @@ int ntfs_read_inode_mount(struct inode *vi)
10620     brelse(bh);
10621     }
10622    
10623     + if (le32_to_cpu(m->bytes_allocated) != vol->mft_record_size) {
10624     + ntfs_error(sb, "Incorrect mft record size %u in superblock, should be %u.",
10625     + le32_to_cpu(m->bytes_allocated), vol->mft_record_size);
10626     + goto err_out;
10627     + }
10628     +
10629     /* Apply the mst fixups. */
10630     if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) {
10631     /* FIXME: Try to use the $MFTMirr now. */
10632     diff --git a/fs/proc/base.c b/fs/proc/base.c
10633     index 529d0c6ec6f9c..b690074e65ffa 100644
10634     --- a/fs/proc/base.c
10635     +++ b/fs/proc/base.c
10636     @@ -1036,7 +1036,6 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
10637    
10638     static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
10639     {
10640     - static DEFINE_MUTEX(oom_adj_mutex);
10641     struct mm_struct *mm = NULL;
10642     struct task_struct *task;
10643     int err = 0;
10644     @@ -1076,7 +1075,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
10645     struct task_struct *p = find_lock_task_mm(task);
10646    
10647     if (p) {
10648     - if (atomic_read(&p->mm->mm_users) > 1) {
10649     + if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) {
10650     mm = p->mm;
10651     mmgrab(mm);
10652     }
10653     diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
10654     index 53429c29c7842..276c27fb99280 100644
10655     --- a/fs/quota/quota_v2.c
10656     +++ b/fs/quota/quota_v2.c
10657     @@ -284,6 +284,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
10658     d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
10659     d->dqb_btime = cpu_to_le64(m->dqb_btime);
10660     d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
10661     + d->dqb_pad = 0;
10662     if (qtree_entry_unused(info, dp))
10663     d->dqb_itime = cpu_to_le64(1);
10664     }
10665     diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
10666     index 4146954549560..355523f4a4bf3 100644
10667     --- a/fs/ramfs/file-nommu.c
10668     +++ b/fs/ramfs/file-nommu.c
10669     @@ -224,7 +224,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
10670     if (!pages)
10671     goto out_free;
10672    
10673     - nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages);
10674     + nr = find_get_pages_contig(inode->i_mapping, pgoff, lpages, pages);
10675     if (nr != lpages)
10676     goto out_free_pages; /* leave if some pages were missing */
10677    
10678     diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
10679     index 70387650436cf..ac35ddf0dd603 100644
10680     --- a/fs/reiserfs/inode.c
10681     +++ b/fs/reiserfs/inode.c
10682     @@ -2161,7 +2161,8 @@ out_end_trans:
10683     out_inserted_sd:
10684     clear_nlink(inode);
10685     th->t_trans_id = 0; /* so the caller can't use this handle later */
10686     - unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
10687     + if (inode->i_state & I_NEW)
10688     + unlock_new_inode(inode);
10689     iput(inode);
10690     return err;
10691     }
10692     diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
10693     index a6bce5b1fb1dc..1b9c7a387dc71 100644
10694     --- a/fs/reiserfs/super.c
10695     +++ b/fs/reiserfs/super.c
10696     @@ -1258,6 +1258,10 @@ static int reiserfs_parse_options(struct super_block *s,
10697     "turned on.");
10698     return 0;
10699     }
10700     + if (qf_names[qtype] !=
10701     + REISERFS_SB(s)->s_qf_names[qtype])
10702     + kfree(qf_names[qtype]);
10703     + qf_names[qtype] = NULL;
10704     if (*arg) { /* Some filename specified? */
10705     if (REISERFS_SB(s)->s_qf_names[qtype]
10706     && strcmp(REISERFS_SB(s)->s_qf_names[qtype],
10707     @@ -1287,10 +1291,6 @@ static int reiserfs_parse_options(struct super_block *s,
10708     else
10709     *mount_options |= 1 << REISERFS_GRPQUOTA;
10710     } else {
10711     - if (qf_names[qtype] !=
10712     - REISERFS_SB(s)->s_qf_names[qtype])
10713     - kfree(qf_names[qtype]);
10714     - qf_names[qtype] = NULL;
10715     if (qtype == USRQUOTA)
10716     *mount_options &= ~(1 << REISERFS_USRQUOTA);
10717     else
10718     diff --git a/fs/udf/inode.c b/fs/udf/inode.c
10719     index ea80036d7897b..97a192eb9949c 100644
10720     --- a/fs/udf/inode.c
10721     +++ b/fs/udf/inode.c
10722     @@ -139,21 +139,24 @@ void udf_evict_inode(struct inode *inode)
10723     struct udf_inode_info *iinfo = UDF_I(inode);
10724     int want_delete = 0;
10725    
10726     - if (!inode->i_nlink && !is_bad_inode(inode)) {
10727     - want_delete = 1;
10728     - udf_setsize(inode, 0);
10729     - udf_update_inode(inode, IS_SYNC(inode));
10730     + if (!is_bad_inode(inode)) {
10731     + if (!inode->i_nlink) {
10732     + want_delete = 1;
10733     + udf_setsize(inode, 0);
10734     + udf_update_inode(inode, IS_SYNC(inode));
10735     + }
10736     + if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
10737     + inode->i_size != iinfo->i_lenExtents) {
10738     + udf_warn(inode->i_sb,
10739     + "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
10740     + inode->i_ino, inode->i_mode,
10741     + (unsigned long long)inode->i_size,
10742     + (unsigned long long)iinfo->i_lenExtents);
10743     + }
10744     }
10745     truncate_inode_pages_final(&inode->i_data);
10746     invalidate_inode_buffers(inode);
10747     clear_inode(inode);
10748     - if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
10749     - inode->i_size != iinfo->i_lenExtents) {
10750     - udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
10751     - inode->i_ino, inode->i_mode,
10752     - (unsigned long long)inode->i_size,
10753     - (unsigned long long)iinfo->i_lenExtents);
10754     - }
10755     kfree(iinfo->i_ext.i_data);
10756     iinfo->i_ext.i_data = NULL;
10757     udf_clear_extent_cache(inode);
10758     diff --git a/fs/udf/super.c b/fs/udf/super.c
10759     index 4baa1ca91e9be..a0cd766b41cdb 100644
10760     --- a/fs/udf/super.c
10761     +++ b/fs/udf/super.c
10762     @@ -1352,6 +1352,12 @@ static int udf_load_sparable_map(struct super_block *sb,
10763     (int)spm->numSparingTables);
10764     return -EIO;
10765     }
10766     + if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
10767     + udf_err(sb, "error loading logical volume descriptor: "
10768     + "Too big sparing table size (%u)\n",
10769     + le32_to_cpu(spm->sizeSparingTable));
10770     + return -EIO;
10771     + }
10772    
10773     for (i = 0; i < spm->numSparingTables; i++) {
10774     loc = le32_to_cpu(spm->locSparingTable[i]);
10775     diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
10776     index 8ea1efc97b41d..42085e70c01ac 100644
10777     --- a/fs/xfs/libxfs/xfs_rtbitmap.c
10778     +++ b/fs/xfs/libxfs/xfs_rtbitmap.c
10779     @@ -1018,7 +1018,6 @@ xfs_rtalloc_query_range(
10780     struct xfs_mount *mp = tp->t_mountp;
10781     xfs_rtblock_t rtstart;
10782     xfs_rtblock_t rtend;
10783     - xfs_rtblock_t rem;
10784     int is_free;
10785     int error = 0;
10786    
10787     @@ -1027,13 +1026,12 @@ xfs_rtalloc_query_range(
10788     if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
10789     low_rec->ar_startext == high_rec->ar_startext)
10790     return 0;
10791     - if (high_rec->ar_startext > mp->m_sb.sb_rextents)
10792     - high_rec->ar_startext = mp->m_sb.sb_rextents;
10793     + high_rec->ar_startext = min(high_rec->ar_startext,
10794     + mp->m_sb.sb_rextents - 1);
10795    
10796     /* Iterate the bitmap, looking for discrepancies. */
10797     rtstart = low_rec->ar_startext;
10798     - rem = high_rec->ar_startext - rtstart;
10799     - while (rem) {
10800     + while (rtstart <= high_rec->ar_startext) {
10801     /* Is the first block free? */
10802     error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend,
10803     &is_free);
10804     @@ -1042,7 +1040,7 @@ xfs_rtalloc_query_range(
10805    
10806     /* How long does the extent go for? */
10807     error = xfs_rtfind_forw(mp, tp, rtstart,
10808     - high_rec->ar_startext - 1, &rtend);
10809     + high_rec->ar_startext, &rtend);
10810     if (error)
10811     break;
10812    
10813     @@ -1055,7 +1053,6 @@ xfs_rtalloc_query_range(
10814     break;
10815     }
10816    
10817     - rem -= rtend - rtstart + 1;
10818     rtstart = rtend + 1;
10819     }
10820    
10821     diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
10822     index c13754e119be1..01c0933a4d10d 100644
10823     --- a/fs/xfs/xfs_fsmap.c
10824     +++ b/fs/xfs/xfs_fsmap.c
10825     @@ -26,7 +26,7 @@
10826     #include "xfs_rtalloc.h"
10827    
10828     /* Convert an xfs_fsmap to an fsmap. */
10829     -void
10830     +static void
10831     xfs_fsmap_from_internal(
10832     struct fsmap *dest,
10833     struct xfs_fsmap *src)
10834     @@ -154,8 +154,7 @@ xfs_fsmap_owner_from_rmap(
10835     /* getfsmap query state */
10836     struct xfs_getfsmap_info {
10837     struct xfs_fsmap_head *head;
10838     - xfs_fsmap_format_t formatter; /* formatting fn */
10839     - void *format_arg; /* format buffer */
10840     + struct fsmap *fsmap_recs; /* mapping records */
10841     struct xfs_buf *agf_bp; /* AGF, for refcount queries */
10842     xfs_daddr_t next_daddr; /* next daddr we expect */
10843     u64 missing_owner; /* owner of holes */
10844     @@ -223,6 +222,20 @@ xfs_getfsmap_is_shared(
10845     return 0;
10846     }
10847    
10848     +static inline void
10849     +xfs_getfsmap_format(
10850     + struct xfs_mount *mp,
10851     + struct xfs_fsmap *xfm,
10852     + struct xfs_getfsmap_info *info)
10853     +{
10854     + struct fsmap *rec;
10855     +
10856     + trace_xfs_getfsmap_mapping(mp, xfm);
10857     +
10858     + rec = &info->fsmap_recs[info->head->fmh_entries++];
10859     + xfs_fsmap_from_internal(rec, xfm);
10860     +}
10861     +
10862     /*
10863     * Format a reverse mapping for getfsmap, having translated rm_startblock
10864     * into the appropriate daddr units.
10865     @@ -255,6 +268,9 @@ xfs_getfsmap_helper(
10866    
10867     /* Are we just counting mappings? */
10868     if (info->head->fmh_count == 0) {
10869     + if (info->head->fmh_entries == UINT_MAX)
10870     + return -ECANCELED;
10871     +
10872     if (rec_daddr > info->next_daddr)
10873     info->head->fmh_entries++;
10874    
10875     @@ -284,10 +300,7 @@ xfs_getfsmap_helper(
10876     fmr.fmr_offset = 0;
10877     fmr.fmr_length = rec_daddr - info->next_daddr;
10878     fmr.fmr_flags = FMR_OF_SPECIAL_OWNER;
10879     - error = info->formatter(&fmr, info->format_arg);
10880     - if (error)
10881     - return error;
10882     - info->head->fmh_entries++;
10883     + xfs_getfsmap_format(mp, &fmr, info);
10884     }
10885    
10886     if (info->last)
10887     @@ -319,11 +332,8 @@ xfs_getfsmap_helper(
10888     if (shared)
10889     fmr.fmr_flags |= FMR_OF_SHARED;
10890     }
10891     - error = info->formatter(&fmr, info->format_arg);
10892     - if (error)
10893     - return error;
10894     - info->head->fmh_entries++;
10895    
10896     + xfs_getfsmap_format(mp, &fmr, info);
10897     out:
10898     rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
10899     if (info->next_daddr < rec_daddr)
10900     @@ -791,11 +801,11 @@ xfs_getfsmap_check_keys(
10901     #endif /* CONFIG_XFS_RT */
10902    
10903     /*
10904     - * Get filesystem's extents as described in head, and format for
10905     - * output. Calls formatter to fill the user's buffer until all
10906     - * extents are mapped, until the passed-in head->fmh_count slots have
10907     - * been filled, or until the formatter short-circuits the loop, if it
10908     - * is tracking filled-in extents on its own.
10909     + * Get filesystem's extents as described in head, and format for output. Fills
10910     + * in the supplied records array until there are no more reverse mappings to
10911     + * return or head.fmh_entries == head.fmh_count. In the second case, this
10912     + * function returns -ECANCELED to indicate that more records would have been
10913     + * returned.
10914     *
10915     * Key to Confusion
10916     * ----------------
10917     @@ -815,8 +825,7 @@ int
10918     xfs_getfsmap(
10919     struct xfs_mount *mp,
10920     struct xfs_fsmap_head *head,
10921     - xfs_fsmap_format_t formatter,
10922     - void *arg)
10923     + struct fsmap *fsmap_recs)
10924     {
10925     struct xfs_trans *tp = NULL;
10926     struct xfs_fsmap dkeys[2]; /* per-dev keys */
10927     @@ -891,8 +900,7 @@ xfs_getfsmap(
10928    
10929     info.next_daddr = head->fmh_keys[0].fmr_physical +
10930     head->fmh_keys[0].fmr_length;
10931     - info.formatter = formatter;
10932     - info.format_arg = arg;
10933     + info.fsmap_recs = fsmap_recs;
10934     info.head = head;
10935    
10936     /*
10937     diff --git a/fs/xfs/xfs_fsmap.h b/fs/xfs/xfs_fsmap.h
10938     index c6c57739b8626..a0775788e7b13 100644
10939     --- a/fs/xfs/xfs_fsmap.h
10940     +++ b/fs/xfs/xfs_fsmap.h
10941     @@ -27,13 +27,9 @@ struct xfs_fsmap_head {
10942     struct xfs_fsmap fmh_keys[2]; /* low and high keys */
10943     };
10944    
10945     -void xfs_fsmap_from_internal(struct fsmap *dest, struct xfs_fsmap *src);
10946     void xfs_fsmap_to_internal(struct xfs_fsmap *dest, struct fsmap *src);
10947    
10948     -/* fsmap to userspace formatter - copy to user & advance pointer */
10949     -typedef int (*xfs_fsmap_format_t)(struct xfs_fsmap *, void *);
10950     -
10951     int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head,
10952     - xfs_fsmap_format_t formatter, void *arg);
10953     + struct fsmap *out_recs);
10954    
10955     #endif /* __XFS_FSMAP_H__ */
10956     diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
10957     index 60c4526312771..bf0435dbec436 100644
10958     --- a/fs/xfs/xfs_ioctl.c
10959     +++ b/fs/xfs/xfs_ioctl.c
10960     @@ -1832,39 +1832,17 @@ out_free_buf:
10961     return error;
10962     }
10963    
10964     -struct getfsmap_info {
10965     - struct xfs_mount *mp;
10966     - struct fsmap_head __user *data;
10967     - unsigned int idx;
10968     - __u32 last_flags;
10969     -};
10970     -
10971     -STATIC int
10972     -xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
10973     -{
10974     - struct getfsmap_info *info = priv;
10975     - struct fsmap fm;
10976     -
10977     - trace_xfs_getfsmap_mapping(info->mp, xfm);
10978     -
10979     - info->last_flags = xfm->fmr_flags;
10980     - xfs_fsmap_from_internal(&fm, xfm);
10981     - if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
10982     - sizeof(struct fsmap)))
10983     - return -EFAULT;
10984     -
10985     - return 0;
10986     -}
10987     -
10988     STATIC int
10989     xfs_ioc_getfsmap(
10990     struct xfs_inode *ip,
10991     struct fsmap_head __user *arg)
10992     {
10993     - struct getfsmap_info info = { NULL };
10994     struct xfs_fsmap_head xhead = {0};
10995     struct fsmap_head head;
10996     - bool aborted = false;
10997     + struct fsmap *recs;
10998     + unsigned int count;
10999     + __u32 last_flags = 0;
11000     + bool done = false;
11001     int error;
11002    
11003     if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
11004     @@ -1876,38 +1854,112 @@ xfs_ioc_getfsmap(
11005     sizeof(head.fmh_keys[1].fmr_reserved)))
11006     return -EINVAL;
11007    
11008     + /*
11009     + * Use an internal memory buffer so that we don't have to copy fsmap
11010     + * data to userspace while holding locks. Start by trying to allocate
11011     + * up to 128k for the buffer, but fall back to a single page if needed.
11012     + */
11013     + count = min_t(unsigned int, head.fmh_count,
11014     + 131072 / sizeof(struct fsmap));
11015     + recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
11016     + if (!recs) {
11017     + count = min_t(unsigned int, head.fmh_count,
11018     + PAGE_SIZE / sizeof(struct fsmap));
11019     + recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
11020     + if (!recs)
11021     + return -ENOMEM;
11022     + }
11023     +
11024     xhead.fmh_iflags = head.fmh_iflags;
11025     - xhead.fmh_count = head.fmh_count;
11026     xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
11027     xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
11028    
11029     trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
11030     trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
11031    
11032     - info.mp = ip->i_mount;
11033     - info.data = arg;
11034     - error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
11035     - if (error == -ECANCELED) {
11036     - error = 0;
11037     - aborted = true;
11038     - } else if (error)
11039     - return error;
11040     + head.fmh_entries = 0;
11041     + do {
11042     + struct fsmap __user *user_recs;
11043     + struct fsmap *last_rec;
11044     +
11045     + user_recs = &arg->fmh_recs[head.fmh_entries];
11046     + xhead.fmh_entries = 0;
11047     + xhead.fmh_count = min_t(unsigned int, count,
11048     + head.fmh_count - head.fmh_entries);
11049     +
11050     + /* Run query, record how many entries we got. */
11051     + error = xfs_getfsmap(ip->i_mount, &xhead, recs);
11052     + switch (error) {
11053     + case 0:
11054     + /*
11055     + * There are no more records in the result set. Copy
11056     + * whatever we got to userspace and break out.
11057     + */
11058     + done = true;
11059     + break;
11060     + case -ECANCELED:
11061     + /*
11062     + * The internal memory buffer is full. Copy whatever
11063     + * records we got to userspace and go again if we have
11064     + * not yet filled the userspace buffer.
11065     + */
11066     + error = 0;
11067     + break;
11068     + default:
11069     + goto out_free;
11070     + }
11071     + head.fmh_entries += xhead.fmh_entries;
11072     + head.fmh_oflags = xhead.fmh_oflags;
11073    
11074     - /* If we didn't abort, set the "last" flag in the last fmx */
11075     - if (!aborted && info.idx) {
11076     - info.last_flags |= FMR_OF_LAST;
11077     - if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
11078     - &info.last_flags, sizeof(info.last_flags)))
11079     - return -EFAULT;
11080     + /*
11081     + * If the caller wanted a record count or there aren't any
11082     + * new records to return, we're done.
11083     + */
11084     + if (head.fmh_count == 0 || xhead.fmh_entries == 0)
11085     + break;
11086     +
11087     + /* Copy all the records we got out to userspace. */
11088     + if (copy_to_user(user_recs, recs,
11089     + xhead.fmh_entries * sizeof(struct fsmap))) {
11090     + error = -EFAULT;
11091     + goto out_free;
11092     + }
11093     +
11094     + /* Remember the last record flags we copied to userspace. */
11095     + last_rec = &recs[xhead.fmh_entries - 1];
11096     + last_flags = last_rec->fmr_flags;
11097     +
11098     + /* Set up the low key for the next iteration. */
11099     + xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
11100     + trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
11101     + } while (!done && head.fmh_entries < head.fmh_count);
11102     +
11103     + /*
11104     + * If there are no more records in the query result set and we're not
11105     + * in counting mode, mark the last record returned with the LAST flag.
11106     + */
11107     + if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
11108     + struct fsmap __user *user_rec;
11109     +
11110     + last_flags |= FMR_OF_LAST;
11111     + user_rec = &arg->fmh_recs[head.fmh_entries - 1];
11112     +
11113     + if (copy_to_user(&user_rec->fmr_flags, &last_flags,
11114     + sizeof(last_flags))) {
11115     + error = -EFAULT;
11116     + goto out_free;
11117     + }
11118     }
11119    
11120     /* copy back header */
11121     - head.fmh_entries = xhead.fmh_entries;
11122     - head.fmh_oflags = xhead.fmh_oflags;
11123     - if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
11124     - return -EFAULT;
11125     + if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
11126     + error = -EFAULT;
11127     + goto out_free;
11128     + }
11129    
11130     - return 0;
11131     +out_free:
11132     + kmem_free(recs);
11133     + return error;
11134     }
11135    
11136     STATIC int
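
The rewritten xfs_ioc_getfsmap() above queries into a kernel bounce buffer and copies records out between query passes, so no copy_to_user() happens while filesystem locks are held, and each pass is clamped to the space left in the caller's buffer. A compact userspace sketch of that chunked loop (fill_chunk() stands in for the query; record layout and sizes are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rec { unsigned long long start, len; };

/* fill_chunk() stands in for the query: it emits at most 'cap' records
 * starting at *pos and reports how many it produced */
static unsigned int fill_chunk(struct rec *buf, unsigned int cap,
                               unsigned long long *pos,
                               unsigned long long end)
{
        unsigned int n = 0;

        while (n < cap && *pos < end) {
                buf[n].start = (*pos)++;
                buf[n].len = 1;
                n++;
        }
        return n;
}

int main(void)
{
        unsigned int want = 10, got = 0, cap = 4;
        unsigned long long pos = 0, end = 10;
        struct rec *bounce = calloc(cap, sizeof(*bounce));
        struct rec *out = calloc(want, sizeof(*out));   /* "userspace" buffer */

        if (!bounce || !out)
                return 1;

        /* query in chunks; flush each chunk before running the next query */
        while (got < want) {
                unsigned int room = want - got;
                unsigned int n = fill_chunk(bounce, room < cap ? room : cap,
                                            &pos, end);

                if (!n)
                        break;
                memcpy(&out[got], bounce, n * sizeof(*bounce));
                got += n;
        }

        printf("returned %u records\n", got);
        free(bounce);
        free(out);
        return 0;
}
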
11137     diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
11138     index 4a48a8c75b4f7..b583669370825 100644
11139     --- a/fs/xfs/xfs_rtalloc.c
11140     +++ b/fs/xfs/xfs_rtalloc.c
11141     @@ -247,6 +247,9 @@ xfs_rtallocate_extent_block(
11142     end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
11143     i <= end;
11144     i++) {
11145     + /* Make sure we don't scan off the end of the rt volume. */
11146     + maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i;
11147     +
11148     /*
11149     * See if there's a free extent of maxlen starting at i.
11150     * If it's not so then next will contain the first non-free.
11151     @@ -442,6 +445,14 @@ xfs_rtallocate_extent_near(
11152     */
11153     if (bno >= mp->m_sb.sb_rextents)
11154     bno = mp->m_sb.sb_rextents - 1;
11155     +
11156     + /* Make sure we don't run off the end of the rt volume. */
11157     + maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno;
11158     + if (maxlen < minlen) {
11159     + *rtblock = NULLRTBLOCK;
11160     + return 0;
11161     + }
11162     +
11163     /*
11164     * Try the exact allocation first.
11165     */
11166     diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
11167     index 26a6d58ca78cc..81c7ea83e8079 100644
11168     --- a/include/linux/bpf_verifier.h
11169     +++ b/include/linux/bpf_verifier.h
11170     @@ -342,6 +342,7 @@ struct bpf_subprog_info {
11171     u32 start; /* insn idx of function entry point */
11172     u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
11173     u16 stack_depth; /* max. stack depth used by this function */
11174     + bool has_tail_call;
11175     };
11176    
11177     /* single container for all structs
11178     diff --git a/include/linux/oom.h b/include/linux/oom.h
11179     index c696c265f0193..b9df34326772c 100644
11180     --- a/include/linux/oom.h
11181     +++ b/include/linux/oom.h
11182     @@ -55,6 +55,7 @@ struct oom_control {
11183     };
11184    
11185     extern struct mutex oom_lock;
11186     +extern struct mutex oom_adj_mutex;
11187    
11188     static inline void set_current_oom_origin(void)
11189     {
11190     diff --git a/include/linux/overflow.h b/include/linux/overflow.h
11191     index 659045046468f..50c93ca0c3d6f 100644
11192     --- a/include/linux/overflow.h
11193     +++ b/include/linux/overflow.h
11194     @@ -3,6 +3,7 @@
11195     #define __LINUX_OVERFLOW_H
11196    
11197     #include <linux/compiler.h>
11198     +#include <linux/limits.h>
11199    
11200     /*
11201     * In the fallback code below, we need to compute the minimum and
11202     diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
11203     index 8679ccd722e89..3468794f83d23 100644
11204     --- a/include/linux/page_owner.h
11205     +++ b/include/linux/page_owner.h
11206     @@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops;
11207     extern void __reset_page_owner(struct page *page, unsigned int order);
11208     extern void __set_page_owner(struct page *page,
11209     unsigned int order, gfp_t gfp_mask);
11210     -extern void __split_page_owner(struct page *page, unsigned int order);
11211     +extern void __split_page_owner(struct page *page, unsigned int nr);
11212     extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
11213     extern void __set_page_owner_migrate_reason(struct page *page, int reason);
11214     extern void __dump_page_owner(struct page *page);
11215     @@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page,
11216     __set_page_owner(page, order, gfp_mask);
11217     }
11218    
11219     -static inline void split_page_owner(struct page *page, unsigned int order)
11220     +static inline void split_page_owner(struct page *page, unsigned int nr)
11221     {
11222     if (static_branch_unlikely(&page_owner_inited))
11223     - __split_page_owner(page, order);
11224     + __split_page_owner(page, nr);
11225     }
11226     static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
11227     {
11228     diff --git a/include/linux/pci.h b/include/linux/pci.h
11229     index e92bd9b32f369..6a6a819c5b49b 100644
11230     --- a/include/linux/pci.h
11231     +++ b/include/linux/pci.h
11232     @@ -423,6 +423,7 @@ struct pci_dev {
11233     unsigned int is_probed:1; /* Device probing in progress */
11234     unsigned int link_active_reporting:1;/* Device capable of reporting link active */
11235     unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
11236     + unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
11237     pci_dev_flags_t dev_flags;
11238     atomic_t enable_cnt; /* pci_enable_device has been called */
11239    
11240     diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
11241     index f3eaf9ec00a1b..70078be166e3c 100644
11242     --- a/include/linux/platform_data/dma-dw.h
11243     +++ b/include/linux/platform_data/dma-dw.h
11244     @@ -21,6 +21,7 @@
11245     * @dst_id: dst request line
11246     * @m_master: memory master for transfers on allocated channel
11247     * @p_master: peripheral master for transfers on allocated channel
11248     + * @channels: mask of the channels permitted for allocation (zero value means any)
11249     * @hs_polarity:set active low polarity of handshake interface
11250     */
11251     struct dw_dma_slave {
11252     @@ -29,6 +30,7 @@ struct dw_dma_slave {
11253     u8 dst_id;
11254     u8 m_master;
11255     u8 p_master;
11256     + u8 channels;
11257     bool hs_polarity;
11258     };
11259    
11260     diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
11261     index ecdc6542070f1..dfd82eab29025 100644
11262     --- a/include/linux/sched/coredump.h
11263     +++ b/include/linux/sched/coredump.h
11264     @@ -72,6 +72,7 @@ static inline int get_dumpable(struct mm_struct *mm)
11265     #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
11266     #define MMF_OOM_VICTIM 25 /* mm is the oom victim */
11267     #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
11268     +#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
11269     #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
11270    
11271     #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
11272     diff --git a/include/net/ip.h b/include/net/ip.h
11273     index 4b15cc1c224c6..0278d63c15274 100644
11274     --- a/include/net/ip.h
11275     +++ b/include/net/ip.h
11276     @@ -439,12 +439,18 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
11277     bool forwarding)
11278     {
11279     struct net *net = dev_net(dst->dev);
11280     + unsigned int mtu;
11281    
11282     if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
11283     ip_mtu_locked(dst) ||
11284     !forwarding)
11285     return dst_mtu(dst);
11286    
11287     + /* 'forwarding = true' case should always honour route mtu */
11288     + mtu = dst_metric_raw(dst, RTAX_MTU);
11289     + if (mtu)
11290     + return mtu;
11291     +
11292     return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
11293     }
11294    
11295     diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
11296     index 0d3920896d502..716db4a0fed89 100644
11297     --- a/include/net/netfilter/nf_log.h
11298     +++ b/include/net/netfilter/nf_log.h
11299     @@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
11300     unsigned int logflags);
11301     void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
11302     struct sock *sk);
11303     +void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb);
11304     void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
11305     unsigned int hooknum, const struct sk_buff *skb,
11306     const struct net_device *in,
11307     diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
11308     index a91b2af64ec47..8e94279af47df 100644
11309     --- a/include/rdma/ib_umem.h
11310     +++ b/include/rdma/ib_umem.h
11311     @@ -95,10 +95,11 @@ static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offs
11312     size_t length) {
11313     return -EINVAL;
11314     }
11315     -static inline int ib_umem_find_best_pgsz(struct ib_umem *umem,
11316     - unsigned long pgsz_bitmap,
11317     - unsigned long virt) {
11318     - return -EINVAL;
11319     +static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
11320     + unsigned long pgsz_bitmap,
11321     + unsigned long virt)
11322     +{
11323     + return 0;
11324     }
11325    
11326     #endif /* CONFIG_INFINIBAND_USER_MEM */
11327     diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
11328     index 731ac09ed2313..5b567b43e1b16 100644
11329     --- a/include/scsi/scsi_common.h
11330     +++ b/include/scsi/scsi_common.h
11331     @@ -25,6 +25,13 @@ scsi_command_size(const unsigned char *cmnd)
11332     scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
11333     }
11334    
11335     +static inline unsigned char
11336     +scsi_command_control(const unsigned char *cmnd)
11337     +{
11338     + return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
11339     + cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1];
11340     +}
11341     +
11342     /* Returns a human-readable name for the device */
11343     extern const char *scsi_device_type(unsigned type);
11344    
11345     diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
11346     index 65c056ce91128..8341e2c489824 100644
11347     --- a/include/sound/hda_codec.h
11348     +++ b/include/sound/hda_codec.h
11349     @@ -254,6 +254,7 @@ struct hda_codec {
11350     unsigned int force_pin_prefix:1; /* Add location prefix */
11351     unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
11352     unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
11353     + unsigned int forced_resume:1; /* forced resume for jack */
11354    
11355     #ifdef CONFIG_PM
11356     unsigned long power_on_acct;
11357     diff --git a/include/trace/events/target.h b/include/trace/events/target.h
11358     index 914a872dd3435..e87a3716b0ac9 100644
11359     --- a/include/trace/events/target.h
11360     +++ b/include/trace/events/target.h
11361     @@ -140,6 +140,7 @@ TRACE_EVENT(target_sequencer_start,
11362     __field( unsigned int, opcode )
11363     __field( unsigned int, data_length )
11364     __field( unsigned int, task_attribute )
11365     + __field( unsigned char, control )
11366     __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
11367     __string( initiator, cmd->se_sess->se_node_acl->initiatorname )
11368     ),
11369     @@ -149,6 +150,7 @@ TRACE_EVENT(target_sequencer_start,
11370     __entry->opcode = cmd->t_task_cdb[0];
11371     __entry->data_length = cmd->data_length;
11372     __entry->task_attribute = cmd->sam_task_attr;
11373     + __entry->control = scsi_command_control(cmd->t_task_cdb);
11374     memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
11375     __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
11376     ),
11377     @@ -158,9 +160,7 @@ TRACE_EVENT(target_sequencer_start,
11378     show_opcode_name(__entry->opcode),
11379     __entry->data_length, __print_hex(__entry->cdb, 16),
11380     show_task_attribute_name(__entry->task_attribute),
11381     - scsi_command_size(__entry->cdb) <= 16 ?
11382     - __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
11383     - __entry->cdb[1]
11384     + __entry->control
11385     )
11386     );
11387    
11388     @@ -175,6 +175,7 @@ TRACE_EVENT(target_cmd_complete,
11389     __field( unsigned int, opcode )
11390     __field( unsigned int, data_length )
11391     __field( unsigned int, task_attribute )
11392     + __field( unsigned char, control )
11393     __field( unsigned char, scsi_status )
11394     __field( unsigned char, sense_length )
11395     __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
11396     @@ -187,6 +188,7 @@ TRACE_EVENT(target_cmd_complete,
11397     __entry->opcode = cmd->t_task_cdb[0];
11398     __entry->data_length = cmd->data_length;
11399     __entry->task_attribute = cmd->sam_task_attr;
11400     + __entry->control = scsi_command_control(cmd->t_task_cdb);
11401     __entry->scsi_status = cmd->scsi_status;
11402     __entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ?
11403     min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
11404     @@ -203,9 +205,7 @@ TRACE_EVENT(target_cmd_complete,
11405     show_opcode_name(__entry->opcode),
11406     __entry->data_length, __print_hex(__entry->cdb, 16),
11407     show_task_attribute_name(__entry->task_attribute),
11408     - scsi_command_size(__entry->cdb) <= 16 ?
11409     - __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
11410     - __entry->cdb[1]
11411     + __entry->control
11412     )
11413     );
11414    
11415     diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
11416     index bb7b271397a66..ceccd980ffcfe 100644
11417     --- a/include/uapi/linux/perf_event.h
11418     +++ b/include/uapi/linux/perf_event.h
11419     @@ -1131,7 +1131,7 @@ union perf_mem_data_src {
11420    
11421     #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
11422     /* 1 free */
11423     -#define PERF_MEM_SNOOPX_SHIFT 37
11424     +#define PERF_MEM_SNOOPX_SHIFT 38
11425    
11426     /* locked instruction */
11427     #define PERF_MEM_LOCK_NA 0x01 /* not available */
11428     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
11429     index ae27dd77a73cb..507474f79195f 100644
11430     --- a/kernel/bpf/verifier.c
11431     +++ b/kernel/bpf/verifier.c
11432     @@ -1160,6 +1160,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
11433     for (i = 0; i < insn_cnt; i++) {
11434     u8 code = insn[i].code;
11435    
11436     + if (code == (BPF_JMP | BPF_CALL) &&
11437     + insn[i].imm == BPF_FUNC_tail_call &&
11438     + insn[i].src_reg != BPF_PSEUDO_CALL)
11439     + subprog[cur_subprog].has_tail_call = true;
11440     if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
11441     goto next;
11442     if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
11443     @@ -2612,6 +2616,31 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
11444     int ret_prog[MAX_CALL_FRAMES];
11445    
11446     process_func:
11447     + /* protect against potential stack overflow that might happen when
11448     + * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
11449     + * depth for such case down to 256 so that the worst case scenario
11450     + * would result in 8k stack size (32 which is tailcall limit * 256 =
11451     + * 8k).
11452     + *
11453     + * To get the idea what might happen, see an example:
11454     + * func1 -> sub rsp, 128
11455     + * subfunc1 -> sub rsp, 256
11456     + * tailcall1 -> add rsp, 256
11457     + * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
11458     + * subfunc2 -> sub rsp, 64
11459     + * subfunc22 -> sub rsp, 128
11460     + * tailcall2 -> add rsp, 128
11461     + * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
11462     + *
11463     + * tailcall will unwind the current stack frame but it will not get rid
11464     + * of caller's stack as shown on the example above.
11465     + */
11466     + if (idx && subprog[idx].has_tail_call && depth >= 256) {
11467     + verbose(env,
11468     + "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
11469     + depth);
11470     + return -EACCES;
11471     + }
11472     /* round up to 32-bytes, since this is granularity
11473     * of interpreter stack size
11474     */
11475     diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
11476     index 3a5184eb6977d..46821793637a1 100644
11477     --- a/kernel/debug/kdb/kdb_io.c
11478     +++ b/kernel/debug/kdb/kdb_io.c
11479     @@ -679,12 +679,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
11480     size_avail = sizeof(kdb_buffer) - len;
11481     goto kdb_print_out;
11482     }
11483     - if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
11484     + if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) {
11485     /*
11486     * This was a interactive search (using '/' at more
11487     - * prompt) and it has completed. Clear the flag.
11488     + * prompt) and it has completed. Replace the \0 with
11489     + * its original value to ensure multi-line strings
11490     + * are handled properly, and return to normal mode.
11491     */
11492     + *cphold = replaced_byte;
11493     kdb_grepping_flag = 0;
11494     + }
11495     /*
11496     * at this point the string is a full line and
11497     * should be printed, up to the null.
11498     diff --git a/kernel/fork.c b/kernel/fork.c
11499     index 594272569a80f..e3d5963d8c6f5 100644
11500     --- a/kernel/fork.c
11501     +++ b/kernel/fork.c
11502     @@ -1750,6 +1750,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk)
11503     free_task(tsk);
11504     }
11505    
11506     +static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
11507     +{
11508     + /* Skip if kernel thread */
11509     + if (!tsk->mm)
11510     + return;
11511     +
11512     + /* Skip if spawning a thread or using vfork */
11513     + if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
11514     + return;
11515     +
11516     + /* We need to synchronize with __set_oom_adj */
11517     + mutex_lock(&oom_adj_mutex);
11518     + set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
11519     + /* Update the values in case they were changed after copy_signal */
11520     + tsk->signal->oom_score_adj = current->signal->oom_score_adj;
11521     + tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
11522     + mutex_unlock(&oom_adj_mutex);
11523     +}
11524     +
11525     /*
11526     * This creates a new process as a copy of the old one,
11527     * but does not actually start it yet.
11528     @@ -2222,6 +2241,8 @@ static __latent_entropy struct task_struct *copy_process(
11529     trace_task_newtask(p, clone_flags);
11530     uprobe_copy_process(p, clone_flags);
11531    
11532     + copy_oom_score_adj(clone_flags, p);
11533     +
11534     return p;
11535    
11536     bad_fork_cancel_cgroup:
11537     diff --git a/kernel/module.c b/kernel/module.c
11538     index 819c5d3b4c295..45513909b01d5 100644
11539     --- a/kernel/module.c
11540     +++ b/kernel/module.c
11541     @@ -88,8 +88,9 @@ EXPORT_SYMBOL_GPL(module_mutex);
11542     static LIST_HEAD(modules);
11543    
11544     /* Work queue for freeing init sections in success case */
11545     -static struct work_struct init_free_wq;
11546     -static struct llist_head init_free_list;
11547     +static void do_free_init(struct work_struct *w);
11548     +static DECLARE_WORK(init_free_wq, do_free_init);
11549     +static LLIST_HEAD(init_free_list);
11550    
11551     #ifdef CONFIG_MODULES_TREE_LOOKUP
11552    
11553     @@ -3563,14 +3564,6 @@ static void do_free_init(struct work_struct *w)
11554     }
11555     }
11556    
11557     -static int __init modules_wq_init(void)
11558     -{
11559     - INIT_WORK(&init_free_wq, do_free_init);
11560     - init_llist_head(&init_free_list);
11561     - return 0;
11562     -}
11563     -module_init(modules_wq_init);
11564     -
11565     /*
11566     * This is where the real work happens.
11567     *
11568     diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
11569     index b6c5895ced36b..69c4cd472def3 100644
11570     --- a/kernel/power/hibernate.c
11571     +++ b/kernel/power/hibernate.c
11572     @@ -839,17 +839,6 @@ static int software_resume(void)
11573    
11574     /* Check if the device is there */
11575     swsusp_resume_device = name_to_dev_t(resume_file);
11576     -
11577     - /*
11578     - * name_to_dev_t is ineffective to verify parition if resume_file is in
11579     - * integer format. (e.g. major:minor)
11580     - */
11581     - if (isdigit(resume_file[0]) && resume_wait) {
11582     - int partno;
11583     - while (!get_gendisk(swsusp_resume_device, &partno))
11584     - msleep(10);
11585     - }
11586     -
11587     if (!swsusp_resume_device) {
11588     /*
11589     * Some device discovery might still be in progress; we need
11590     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
11591     index 79ce22de44095..4511532b08b84 100644
11592     --- a/kernel/sched/core.c
11593     +++ b/kernel/sched/core.c
11594     @@ -36,7 +36,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
11595    
11596     DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
11597    
11598     -#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
11599     +#ifdef CONFIG_SCHED_DEBUG
11600     /*
11601     * Debugging: various feature bits
11602     *
11603     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
11604     index b02a83ff40687..dddaf61378f62 100644
11605     --- a/kernel/sched/fair.c
11606     +++ b/kernel/sched/fair.c
11607     @@ -5936,7 +5936,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
11608     /*
11609     * Scan the local SMT mask for idle CPUs.
11610     */
11611     -static int select_idle_smt(struct task_struct *p, int target)
11612     +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
11613     {
11614     int cpu, si_cpu = -1;
11615    
11616     @@ -5944,7 +5944,8 @@ static int select_idle_smt(struct task_struct *p, int target)
11617     return -1;
11618    
11619     for_each_cpu(cpu, cpu_smt_mask(target)) {
11620     - if (!cpumask_test_cpu(cpu, p->cpus_ptr))
11621     + if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
11622     + !cpumask_test_cpu(cpu, sched_domain_span(sd)))
11623     continue;
11624     if (available_idle_cpu(cpu))
11625     return cpu;
11626     @@ -5962,7 +5963,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s
11627     return -1;
11628     }
11629    
11630     -static inline int select_idle_smt(struct task_struct *p, int target)
11631     +static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
11632     {
11633     return -1;
11634     }
11635     @@ -6072,7 +6073,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
11636     if ((unsigned)i < nr_cpumask_bits)
11637     return i;
11638    
11639     - i = select_idle_smt(p, target);
11640     + i = select_idle_smt(p, sd, target);
11641     if ((unsigned)i < nr_cpumask_bits)
11642     return i;
11643    
11644     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
11645     index 9f2a9e34a78d5..3e7590813844f 100644
11646     --- a/kernel/sched/sched.h
11647     +++ b/kernel/sched/sched.h
11648     @@ -1568,7 +1568,7 @@ enum {
11649    
11650     #undef SCHED_FEAT
11651    
11652     -#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
11653     +#ifdef CONFIG_SCHED_DEBUG
11654    
11655     /*
11656     * To support run-time toggling of sched features, all the translation units
11657     @@ -1576,6 +1576,7 @@ enum {
11658     */
11659     extern const_debug unsigned int sysctl_sched_features;
11660    
11661     +#ifdef CONFIG_JUMP_LABEL
11662     #define SCHED_FEAT(name, enabled) \
11663     static __always_inline bool static_branch_##name(struct static_key *key) \
11664     { \
11665     @@ -1588,7 +1589,13 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
11666     extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
11667     #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
11668    
11669     -#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
11670     +#else /* !CONFIG_JUMP_LABEL */
11671     +
11672     +#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
11673     +
11674     +#endif /* CONFIG_JUMP_LABEL */
11675     +
11676     +#else /* !SCHED_DEBUG */
11677    
11678     /*
11679     * Each translation unit has its own copy of sysctl_sched_features to allow
11680     @@ -1604,7 +1611,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
11681    
11682     #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
11683    
11684     -#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
11685     +#endif /* SCHED_DEBUG */
11686    
11687     extern struct static_key_false sched_numa_balancing;
11688     extern struct static_key_false sched_schedstats;
11689     diff --git a/lib/crc32.c b/lib/crc32.c
11690     index 4a20455d1f61e..bf60ef26a45c2 100644
11691     --- a/lib/crc32.c
11692     +++ b/lib/crc32.c
11693     @@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
11694     return crc;
11695     }
11696    
11697     -#if CRC_LE_BITS == 1
11698     +#if CRC_BE_BITS == 1
11699     u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
11700     {
11701     return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
11702     diff --git a/lib/idr.c b/lib/idr.c
11703     index c2cf2c52bbde5..4d2eef0259d2c 100644
11704     --- a/lib/idr.c
11705     +++ b/lib/idr.c
11706     @@ -470,6 +470,7 @@ alloc:
11707     goto retry;
11708     nospc:
11709     xas_unlock_irqrestore(&xas, flags);
11710     + kfree(alloc);
11711     return -ENOSPC;
11712     }
11713     EXPORT_SYMBOL(ida_alloc_range);
11714     diff --git a/mm/filemap.c b/mm/filemap.c
11715     index 51b2cb5aa5030..db542b4948838 100644
11716     --- a/mm/filemap.c
11717     +++ b/mm/filemap.c
11718     @@ -847,10 +847,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
11719     }
11720     EXPORT_SYMBOL_GPL(replace_page_cache_page);
11721    
11722     -static int __add_to_page_cache_locked(struct page *page,
11723     - struct address_space *mapping,
11724     - pgoff_t offset, gfp_t gfp_mask,
11725     - void **shadowp)
11726     +noinline int __add_to_page_cache_locked(struct page *page,
11727     + struct address_space *mapping,
11728     + pgoff_t offset, gfp_t gfp_mask,
11729     + void **shadowp)
11730     {
11731     XA_STATE(xas, &mapping->i_pages, offset);
11732     int huge = PageHuge(page);
11733     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
11734     index 873de55d93fb2..9295d9d70681e 100644
11735     --- a/mm/huge_memory.c
11736     +++ b/mm/huge_memory.c
11737     @@ -2569,7 +2569,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
11738    
11739     ClearPageCompound(head);
11740    
11741     - split_page_owner(head, HPAGE_PMD_ORDER);
11742     + split_page_owner(head, HPAGE_PMD_NR);
11743    
11744     /* See comment in __split_huge_page_tail() */
11745     if (PageAnon(head)) {
11746     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
11747     index ca1632850fb76..edf98f8588eeb 100644
11748     --- a/mm/memcontrol.c
11749     +++ b/mm/memcontrol.c
11750     @@ -5398,7 +5398,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
11751     struct page *page = NULL;
11752     swp_entry_t ent = pte_to_swp_entry(ptent);
11753    
11754     - if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
11755     + if (!(mc.flags & MOVE_ANON))
11756     return NULL;
11757    
11758     /*
11759     @@ -5417,6 +5417,9 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
11760     return page;
11761     }
11762    
11763     + if (non_swap_entry(ent))
11764     + return NULL;
11765     +
11766     /*
11767     * Because lookup_swap_cache() updates some statistics counter,
11768     * we call find_get_page() with swapper_space directly.
11769     diff --git a/mm/oom_kill.c b/mm/oom_kill.c
11770     index d58c481b3df83..212e718743018 100644
11771     --- a/mm/oom_kill.c
11772     +++ b/mm/oom_kill.c
11773     @@ -63,6 +63,8 @@ int sysctl_oom_dump_tasks = 1;
11774     * and mark_oom_victim
11775     */
11776     DEFINE_MUTEX(oom_lock);
11777     +/* Serializes oom_score_adj and oom_score_adj_min updates */
11778     +DEFINE_MUTEX(oom_adj_mutex);
11779    
11780     static inline bool is_memcg_oom(struct oom_control *oc)
11781     {
11782     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
11783     index aff0bb4629bdf..c20e664866c33 100644
11784     --- a/mm/page_alloc.c
11785     +++ b/mm/page_alloc.c
11786     @@ -3130,7 +3130,7 @@ void split_page(struct page *page, unsigned int order)
11787    
11788     for (i = 1; i < (1 << order); i++)
11789     set_page_refcounted(page + i);
11790     - split_page_owner(page, order);
11791     + split_page_owner(page, 1 << order);
11792     }
11793     EXPORT_SYMBOL_GPL(split_page);
11794    
11795     @@ -3385,7 +3385,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
11796    
11797     #endif /* CONFIG_FAIL_PAGE_ALLOC */
11798    
11799     -static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
11800     +noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
11801     {
11802     return __should_fail_alloc_page(gfp_mask, order);
11803     }
11804     diff --git a/mm/page_owner.c b/mm/page_owner.c
11805     index 18ecde9f45b24..83d08943bcdee 100644
11806     --- a/mm/page_owner.c
11807     +++ b/mm/page_owner.c
11808     @@ -204,7 +204,7 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
11809     page_owner->last_migrate_reason = reason;
11810     }
11811    
11812     -void __split_page_owner(struct page *page, unsigned int order)
11813     +void __split_page_owner(struct page *page, unsigned int nr)
11814     {
11815     int i;
11816     struct page_ext *page_ext = lookup_page_ext(page);
11817     @@ -213,7 +213,7 @@ void __split_page_owner(struct page *page, unsigned int order)
11818     if (unlikely(!page_ext))
11819     return;
11820    
11821     - for (i = 0; i < (1 << order); i++) {
11822     + for (i = 0; i < nr; i++) {
11823     page_owner = get_page_owner(page_ext);
11824     page_owner->order = 0;
11825     page_ext = page_ext_next(page_ext);
11826     diff --git a/mm/swapfile.c b/mm/swapfile.c
11827     index cf62bdb7b3045..ff83ffe7a9108 100644
11828     --- a/mm/swapfile.c
11829     +++ b/mm/swapfile.c
11830     @@ -3284,7 +3284,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
11831     error = inode_drain_writes(inode);
11832     if (error) {
11833     inode->i_flags &= ~S_SWAPFILE;
11834     - goto bad_swap_unlock_inode;
11835     + goto free_swap_address_space;
11836     }
11837    
11838     mutex_lock(&swapon_mutex);
11839     @@ -3309,6 +3309,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
11840    
11841     error = 0;
11842     goto out;
11843     +free_swap_address_space:
11844     + exit_swap_address_space(p->type);
11845     bad_swap_unlock_inode:
11846     inode_unlock(inode);
11847     bad_swap:
11848     diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
11849     index 40e96a610e2b4..8648c5211ebe6 100644
11850     --- a/net/bluetooth/l2cap_sock.c
11851     +++ b/net/bluetooth/l2cap_sock.c
11852     @@ -1344,8 +1344,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
11853    
11854     parent = bt_sk(sk)->parent;
11855    
11856     - sock_set_flag(sk, SOCK_ZAPPED);
11857     -
11858     switch (chan->state) {
11859     case BT_OPEN:
11860     case BT_BOUND:
11861     @@ -1372,8 +1370,11 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
11862    
11863     break;
11864     }
11865     -
11866     release_sock(sk);
11867     +
11868     + /* Only zap after cleanup to avoid use after free race */
11869     + sock_set_flag(sk, SOCK_ZAPPED);
11870     +
11871     }
11872    
11873     static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
11874     diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
11875     index 12a4f4d936810..3fda71a8579d1 100644
11876     --- a/net/bridge/netfilter/ebt_dnat.c
11877     +++ b/net/bridge/netfilter/ebt_dnat.c
11878     @@ -21,7 +21,7 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
11879     {
11880     const struct ebt_nat_info *info = par->targinfo;
11881    
11882     - if (skb_ensure_writable(skb, ETH_ALEN))
11883     + if (skb_ensure_writable(skb, 0))
11884     return EBT_DROP;
11885    
11886     ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
11887     diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
11888     index 0cad62a4052b9..307790562b492 100644
11889     --- a/net/bridge/netfilter/ebt_redirect.c
11890     +++ b/net/bridge/netfilter/ebt_redirect.c
11891     @@ -21,7 +21,7 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
11892     {
11893     const struct ebt_redirect_info *info = par->targinfo;
11894    
11895     - if (skb_ensure_writable(skb, ETH_ALEN))
11896     + if (skb_ensure_writable(skb, 0))
11897     return EBT_DROP;
11898    
11899     if (xt_hooknum(par) != NF_BR_BROUTING)
11900     diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
11901     index 27443bf229a3b..7dfbcdfc30e5d 100644
11902     --- a/net/bridge/netfilter/ebt_snat.c
11903     +++ b/net/bridge/netfilter/ebt_snat.c
11904     @@ -22,7 +22,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
11905     {
11906     const struct ebt_nat_info *info = par->targinfo;
11907    
11908     - if (skb_ensure_writable(skb, ETH_ALEN * 2))
11909     + if (skb_ensure_writable(skb, 0))
11910     return EBT_DROP;
11911    
11912     ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
11913     diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
11914     index a8dd956b5e8e1..916fdf2464bc2 100644
11915     --- a/net/can/j1939/transport.c
11916     +++ b/net/can/j1939/transport.c
11917     @@ -580,6 +580,7 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
11918     skb->dev = priv->ndev;
11919     can_skb_reserve(skb);
11920     can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
11921     + can_skb_prv(skb)->skbcnt = 0;
11922     /* reserve CAN header */
11923     skb_reserve(skb, offsetof(struct can_frame, data));
11924    
11925     @@ -1487,6 +1488,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
11926     skb->dev = priv->ndev;
11927     can_skb_reserve(skb);
11928     can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
11929     + can_skb_prv(skb)->skbcnt = 0;
11930     skcb = j1939_skb_to_cb(skb);
11931     memcpy(skcb, rel_skcb, sizeof(*skcb));
11932    
11933     diff --git a/net/core/filter.c b/net/core/filter.c
11934     index c441f9961e917..b040b7bf28582 100644
11935     --- a/net/core/filter.c
11936     +++ b/net/core/filter.c
11937     @@ -4270,7 +4270,8 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
11938     cmpxchg(&sk->sk_pacing_status,
11939     SK_PACING_NONE,
11940     SK_PACING_NEEDED);
11941     - sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
11942     + sk->sk_max_pacing_rate = (val == ~0U) ?
11943     + ~0UL : (unsigned int)val;
11944     sk->sk_pacing_rate = min(sk->sk_pacing_rate,
11945     sk->sk_max_pacing_rate);
11946     break;
11947     diff --git a/net/core/sock.c b/net/core/sock.c
11948     index 919f1a1739e90..1b765e62658f0 100644
11949     --- a/net/core/sock.c
11950     +++ b/net/core/sock.c
11951     @@ -928,8 +928,6 @@ set_rcvbuf:
11952     break;
11953    
11954     case SO_TIMESTAMPING_NEW:
11955     - sock_set_flag(sk, SOCK_TSTAMP_NEW);
11956     - /* fall through */
11957     case SO_TIMESTAMPING_OLD:
11958     if (val & ~SOF_TIMESTAMPING_MASK) {
11959     ret = -EINVAL;
11960     @@ -958,16 +956,14 @@ set_rcvbuf:
11961     }
11962    
11963     sk->sk_tsflags = val;
11964     + sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
11965     +
11966     if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
11967     sock_enable_timestamp(sk,
11968     SOCK_TIMESTAMPING_RX_SOFTWARE);
11969     - else {
11970     - if (optname == SO_TIMESTAMPING_NEW)
11971     - sock_reset_flag(sk, SOCK_TSTAMP_NEW);
11972     -
11973     + else
11974     sock_disable_timestamp(sk,
11975     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
11976     - }
11977     break;
11978    
11979     case SO_RCVLOWAT:
11980     @@ -1110,7 +1106,7 @@ set_rcvbuf:
11981    
11982     case SO_MAX_PACING_RATE:
11983     {
11984     - unsigned long ulval = (val == ~0U) ? ~0UL : val;
11985     + unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
11986    
11987     if (sizeof(ulval) != sizeof(val) &&
11988     optlen >= sizeof(ulval) &&
11989     diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
11990     index 73f46cb5e51da..d00533aea1f05 100644
11991     --- a/net/ipv4/icmp.c
11992     +++ b/net/ipv4/icmp.c
11993     @@ -239,7 +239,7 @@ static struct {
11994     /**
11995     * icmp_global_allow - Are we allowed to send one more ICMP message ?
11996     *
11997     - * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
11998     + * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
11999     * Returns false if we reached the limit and can not send another packet.
12000     * Note: called with BH disabled
12001     */
12002     @@ -267,7 +267,10 @@ bool icmp_global_allow(void)
12003     }
12004     credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
12005     if (credit) {
12006     - credit--;
12007     + /* We want to use a credit of one in average, but need to randomize
12008     + * it for security reasons.
12009     + */
12010     + credit = max_t(int, credit - prandom_u32_max(3), 0);
12011     rc = true;
12012     }
12013     WRITE_ONCE(icmp_global.credit, credit);
12014     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
12015     index 85ba1453ba5ca..fedad3a3e61b8 100644
12016     --- a/net/ipv4/ip_gre.c
12017     +++ b/net/ipv4/ip_gre.c
12018     @@ -603,9 +603,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
12019     }
12020    
12021     if (dev->header_ops) {
12022     - /* Need space for new headers */
12023     - if (skb_cow_head(skb, dev->needed_headroom -
12024     - (tunnel->hlen + sizeof(struct iphdr))))
12025     + if (skb_cow_head(skb, 0))
12026     goto free_skb;
12027    
12028     tnl_params = (const struct iphdr *)skb->data;
12029     @@ -723,7 +721,11 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
12030     len = tunnel->tun_hlen - len;
12031     tunnel->hlen = tunnel->hlen + len;
12032    
12033     - dev->needed_headroom = dev->needed_headroom + len;
12034     + if (dev->header_ops)
12035     + dev->hard_header_len += len;
12036     + else
12037     + dev->needed_headroom += len;
12038     +
12039     if (set_mtu)
12040     dev->mtu = max_t(int, dev->mtu - len, 68);
12041    
12042     @@ -926,6 +928,7 @@ static void __gre_tunnel_init(struct net_device *dev)
12043     tunnel->parms.iph.protocol = IPPROTO_GRE;
12044    
12045     tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
12046     + dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
12047    
12048     dev->features |= GRE_FEATURES;
12049     dev->hw_features |= GRE_FEATURES;
12050     @@ -969,10 +972,14 @@ static int ipgre_tunnel_init(struct net_device *dev)
12051     return -EINVAL;
12052     dev->flags = IFF_BROADCAST;
12053     dev->header_ops = &ipgre_header_ops;
12054     + dev->hard_header_len = tunnel->hlen + sizeof(*iph);
12055     + dev->needed_headroom = 0;
12056     }
12057     #endif
12058     } else if (!tunnel->collect_md) {
12059     dev->header_ops = &ipgre_header_ops;
12060     + dev->hard_header_len = tunnel->hlen + sizeof(*iph);
12061     + dev->needed_headroom = 0;
12062     }
12063    
12064     return ip_tunnel_init(dev);
12065     diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
12066     index 7a83f881efa9e..136030ad2e546 100644
12067     --- a/net/ipv4/netfilter/nf_log_arp.c
12068     +++ b/net/ipv4/netfilter/nf_log_arp.c
12069     @@ -43,16 +43,31 @@ static void dump_arp_packet(struct nf_log_buf *m,
12070     const struct nf_loginfo *info,
12071     const struct sk_buff *skb, unsigned int nhoff)
12072     {
12073     - const struct arphdr *ah;
12074     - struct arphdr _arph;
12075     const struct arppayload *ap;
12076     struct arppayload _arpp;
12077     + const struct arphdr *ah;
12078     + unsigned int logflags;
12079     + struct arphdr _arph;
12080    
12081     ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
12082     if (ah == NULL) {
12083     nf_log_buf_add(m, "TRUNCATED");
12084     return;
12085     }
12086     +
12087     + if (info->type == NF_LOG_TYPE_LOG)
12088     + logflags = info->u.log.logflags;
12089     + else
12090     + logflags = NF_LOG_DEFAULT_MASK;
12091     +
12092     + if (logflags & NF_LOG_MACDECODE) {
12093     + nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
12094     + eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
12095     + nf_log_dump_vlan(m, skb);
12096     + nf_log_buf_add(m, "MACPROTO=%04x ",
12097     + ntohs(eth_hdr(skb)->h_proto));
12098     + }
12099     +
12100     nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
12101     ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
12102    
12103     diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
12104     index 4b2d49cc9f1a1..cb288ffbcfde2 100644
12105     --- a/net/ipv4/netfilter/nf_log_ipv4.c
12106     +++ b/net/ipv4/netfilter/nf_log_ipv4.c
12107     @@ -284,8 +284,10 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m,
12108    
12109     switch (dev->type) {
12110     case ARPHRD_ETHER:
12111     - nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
12112     - eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
12113     + nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
12114     + eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
12115     + nf_log_dump_vlan(m, skb);
12116     + nf_log_buf_add(m, "MACPROTO=%04x ",
12117     ntohs(eth_hdr(skb)->h_proto));
12118     return;
12119     default:
12120     diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
12121     index afa2c5049845f..ea32b113089d3 100644
12122     --- a/net/ipv4/nexthop.c
12123     +++ b/net/ipv4/nexthop.c
12124     @@ -763,7 +763,7 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
12125     remove_nh_grp_entry(net, nhge, nlinfo);
12126    
12127     /* make sure all see the newly published array before releasing rtnl */
12128     - synchronize_rcu();
12129     + synchronize_net();
12130     }
12131    
12132     static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
12133     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
12134     index 7a5f64cf1fdd2..a293d4968d1eb 100644
12135     --- a/net/ipv4/route.c
12136     +++ b/net/ipv4/route.c
12137     @@ -2728,10 +2728,12 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
12138     if (IS_ERR(rt))
12139     return rt;
12140    
12141     - if (flp4->flowi4_proto)
12142     + if (flp4->flowi4_proto) {
12143     + flp4->flowi4_oif = rt->dst.dev->ifindex;
12144     rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
12145     flowi4_to_flowi(flp4),
12146     sk, 0);
12147     + }
12148    
12149     return rt;
12150     }
12151     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
12152     index ab5358281000e..62f8ba4048180 100644
12153     --- a/net/ipv4/tcp_input.c
12154     +++ b/net/ipv4/tcp_input.c
12155     @@ -5696,6 +5696,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
12156     tcp_data_snd_check(sk);
12157     if (!inet_csk_ack_scheduled(sk))
12158     goto no_ack;
12159     + } else {
12160     + tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
12161     }
12162    
12163     __tcp_ack_snd_check(sk, 0);
12164     diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
12165     index 9ca6c32065ec6..0646fce31b67a 100644
12166     --- a/net/ipv6/ip6_fib.c
12167     +++ b/net/ipv6/ip6_fib.c
12168     @@ -2519,8 +2519,10 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
12169     iter->skip = *pos;
12170    
12171     if (iter->tbl) {
12172     + loff_t p = 0;
12173     +
12174     ipv6_route_seq_setup_walk(iter, net);
12175     - return ipv6_route_seq_next(seq, NULL, pos);
12176     + return ipv6_route_seq_next(seq, NULL, &p);
12177     } else {
12178     return NULL;
12179     }
12180     diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
12181     index 22b80db6d8826..5b40258d3a5e9 100644
12182     --- a/net/ipv6/netfilter/nf_log_ipv6.c
12183     +++ b/net/ipv6/netfilter/nf_log_ipv6.c
12184     @@ -297,9 +297,11 @@ static void dump_ipv6_mac_header(struct nf_log_buf *m,
12185    
12186     switch (dev->type) {
12187     case ARPHRD_ETHER:
12188     - nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
12189     - eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
12190     - ntohs(eth_hdr(skb)->h_proto));
12191     + nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
12192     + eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
12193     + nf_log_dump_vlan(m, skb);
12194     + nf_log_buf_add(m, "MACPROTO=%04x ",
12195     + ntohs(eth_hdr(skb)->h_proto));
12196     return;
12197     default:
12198     break;
12199     diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
12200     index a9dda5c228f60..fa293feef935d 100644
12201     --- a/net/mac80211/cfg.c
12202     +++ b/net/mac80211/cfg.c
12203     @@ -698,7 +698,8 @@ void sta_set_rate_info_tx(struct sta_info *sta,
12204     u16 brate;
12205    
12206     sband = ieee80211_get_sband(sta->sdata);
12207     - if (sband) {
12208     + WARN_ON_ONCE(sband && !sband->bitrates);
12209     + if (sband && sband->bitrates) {
12210     brate = sband->bitrates[rate->idx].bitrate;
12211     rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
12212     }
12213     diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
12214     index f5d96107af6de..4f14d8a06915a 100644
12215     --- a/net/mac80211/sta_info.c
12216     +++ b/net/mac80211/sta_info.c
12217     @@ -2083,6 +2083,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
12218     int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);
12219    
12220     sband = local->hw.wiphy->bands[band];
12221     +
12222     + if (WARN_ON_ONCE(!sband->bitrates))
12223     + break;
12224     +
12225     brate = sband->bitrates[rate_idx].bitrate;
12226     if (rinfo->bw == RATE_INFO_BW_5)
12227     shift = 2;
12228     diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
12229     index 3cccc88ef817b..99168af0c28d9 100644
12230     --- a/net/netfilter/ipvs/ip_vs_ctl.c
12231     +++ b/net/netfilter/ipvs/ip_vs_ctl.c
12232     @@ -2465,6 +2465,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
12233     /* Set timeout values for (tcp tcpfin udp) */
12234     ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg);
12235     goto out_unlock;
12236     + } else if (!len) {
12237     + /* No more commands with len == 0 below */
12238     + ret = -EINVAL;
12239     + goto out_unlock;
12240     }
12241    
12242     usvc_compat = (struct ip_vs_service_user *)arg;
12243     @@ -2541,9 +2545,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
12244     break;
12245     case IP_VS_SO_SET_DELDEST:
12246     ret = ip_vs_del_dest(svc, &udest);
12247     - break;
12248     - default:
12249     - ret = -EINVAL;
12250     }
12251    
12252     out_unlock:
12253     diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
12254     index c62a131a60948..cefc39878b1a4 100644
12255     --- a/net/netfilter/ipvs/ip_vs_xmit.c
12256     +++ b/net/netfilter/ipvs/ip_vs_xmit.c
12257     @@ -615,6 +615,8 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
12258     if (ret == NF_ACCEPT) {
12259     nf_reset_ct(skb);
12260     skb_forward_csum(skb);
12261     + if (skb->dev)
12262     + skb->tstamp = 0;
12263     }
12264     return ret;
12265     }
12266     @@ -655,6 +657,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
12267    
12268     if (!local) {
12269     skb_forward_csum(skb);
12270     + if (skb->dev)
12271     + skb->tstamp = 0;
12272     NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
12273     NULL, skb_dst(skb)->dev, dst_output);
12274     } else
12275     @@ -675,6 +679,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
12276     if (!local) {
12277     ip_vs_drop_early_demux_sk(skb);
12278     skb_forward_csum(skb);
12279     + if (skb->dev)
12280     + skb->tstamp = 0;
12281     NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
12282     NULL, skb_dst(skb)->dev, dst_output);
12283     } else
12284     diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
12285     index 1926fd56df56a..848b137151c26 100644
12286     --- a/net/netfilter/nf_conntrack_proto_tcp.c
12287     +++ b/net/netfilter/nf_conntrack_proto_tcp.c
12288     @@ -541,13 +541,20 @@ static bool tcp_in_window(const struct nf_conn *ct,
12289     swin = win << sender->td_scale;
12290     sender->td_maxwin = (swin == 0 ? 1 : swin);
12291     sender->td_maxend = end + sender->td_maxwin;
12292     - /*
12293     - * We haven't seen traffic in the other direction yet
12294     - * but we have to tweak window tracking to pass III
12295     - * and IV until that happens.
12296     - */
12297     - if (receiver->td_maxwin == 0)
12298     + if (receiver->td_maxwin == 0) {
12299     + /* We haven't seen traffic in the other
12300     + * direction yet but we have to tweak window
12301     + * tracking to pass III and IV until that
12302     + * happens.
12303     + */
12304     receiver->td_end = receiver->td_maxend = sack;
12305     + } else if (sack == receiver->td_end + 1) {
12306     + /* Likely a reply to a keepalive.
12307     + * Needed for III.
12308     + */
12309     + receiver->td_end++;
12310     + }
12311     +
12312     }
12313     } else if (((state->state == TCP_CONNTRACK_SYN_SENT
12314     && dir == IP_CT_DIR_ORIGINAL)
12315     diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
12316     index f108a76925dd8..ec6e7d6860163 100644
12317     --- a/net/netfilter/nf_dup_netdev.c
12318     +++ b/net/netfilter/nf_dup_netdev.c
12319     @@ -19,6 +19,7 @@ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
12320     skb_push(skb, skb->mac_len);
12321    
12322     skb->dev = dev;
12323     + skb->tstamp = 0;
12324     dev_queue_xmit(skb);
12325     }
12326    
12327     diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
12328     index ae5628ddbe6d7..fd7c5f0f5c25b 100644
12329     --- a/net/netfilter/nf_log_common.c
12330     +++ b/net/netfilter/nf_log_common.c
12331     @@ -171,6 +171,18 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
12332     }
12333     EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
12334    
12335     +void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb)
12336     +{
12337     + u16 vid;
12338     +
12339     + if (!skb_vlan_tag_present(skb))
12340     + return;
12341     +
12342     + vid = skb_vlan_tag_get(skb);
12343     + nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid);
12344     +}
12345     +EXPORT_SYMBOL_GPL(nf_log_dump_vlan);
12346     +
12347     /* bridge and netdev logging families share this code. */
12348     void nf_log_l2packet(struct net *net, u_int8_t pf,
12349     __be16 protocol,
12350     diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
12351     index 3087e23297dbf..b77985986b24e 100644
12352     --- a/net/netfilter/nft_fwd_netdev.c
12353     +++ b/net/netfilter/nft_fwd_netdev.c
12354     @@ -138,6 +138,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
12355     return;
12356    
12357     skb->dev = dev;
12358     + skb->tstamp = 0;
12359     neigh_xmit(neigh_table, dev, addr, skb);
12360     out:
12361     regs->verdict.code = verdict;
12362     diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
12363     index 1b261375722e3..4170acc2dc282 100644
12364     --- a/net/nfc/netlink.c
12365     +++ b/net/nfc/netlink.c
12366     @@ -1225,7 +1225,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
12367     u32 idx;
12368     char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
12369    
12370     - if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
12371     + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME])
12372     return -EINVAL;
12373    
12374     idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
12375     diff --git a/net/sched/act_api.c b/net/sched/act_api.c
12376     index 4a5ef2adb2e57..1dc642b11443c 100644
12377     --- a/net/sched/act_api.c
12378     +++ b/net/sched/act_api.c
12379     @@ -706,13 +706,6 @@ int tcf_action_destroy(struct tc_action *actions[], int bind)
12380     return ret;
12381     }
12382    
12383     -static int tcf_action_destroy_1(struct tc_action *a, int bind)
12384     -{
12385     - struct tc_action *actions[] = { a, NULL };
12386     -
12387     - return tcf_action_destroy(actions, bind);
12388     -}
12389     -
12390     static int tcf_action_put(struct tc_action *p)
12391     {
12392     return __tcf_action_put(p, false);
12393     @@ -932,13 +925,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
12394     if (err < 0)
12395     goto err_mod;
12396    
12397     - if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
12398     - !rcu_access_pointer(a->goto_chain)) {
12399     - tcf_action_destroy_1(a, bind);
12400     - NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
12401     - return ERR_PTR(-EINVAL);
12402     - }
12403     -
12404     if (!name && tb[TCA_ACT_COOKIE])
12405     tcf_set_action_cookie(&a->act_cookie, cookie);
12406    
12407     diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
12408     index bdaa04a9a7fa4..a5a2bf01eb9bc 100644
12409     --- a/net/sched/act_tunnel_key.c
12410     +++ b/net/sched/act_tunnel_key.c
12411     @@ -315,7 +315,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
12412    
12413     metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
12414     0, flags,
12415     - key_id, 0);
12416     + key_id, opts_len);
12417     } else {
12418     NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
12419     ret = -EINVAL;
12420     diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
12421     index 0c5fcb8ed404d..aeea67f908415 100644
12422     --- a/net/smc/smc_core.c
12423     +++ b/net/smc/smc_core.c
12424     @@ -795,7 +795,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
12425     return buf_desc;
12426     }
12427    
12428     -#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
12429     +#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
12430    
12431     static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
12432     bool is_dmb, int bufsize)
12433     diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
12434     index 3645cd241d3ea..cf4d6d7e72822 100644
12435     --- a/net/sunrpc/auth_gss/svcauth_gss.c
12436     +++ b/net/sunrpc/auth_gss/svcauth_gss.c
12437     @@ -1095,9 +1095,9 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
12438     struct gssp_in_token *in_token)
12439     {
12440     struct kvec *argv = &rqstp->rq_arg.head[0];
12441     - unsigned int page_base, length;
12442     - int pages, i, res;
12443     - size_t inlen;
12444     + unsigned int length, pgto_offs, pgfrom_offs;
12445     + int pages, i, res, pgto, pgfrom;
12446     + size_t inlen, to_offs, from_offs;
12447    
12448     res = gss_read_common_verf(gc, argv, authp, in_handle);
12449     if (res)
12450     @@ -1125,17 +1125,24 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
12451     memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
12452     inlen -= length;
12453    
12454     - i = 1;
12455     - page_base = rqstp->rq_arg.page_base;
12456     + to_offs = length;
12457     + from_offs = rqstp->rq_arg.page_base;
12458     while (inlen) {
12459     - length = min_t(unsigned int, inlen, PAGE_SIZE);
12460     - memcpy(page_address(in_token->pages[i]),
12461     - page_address(rqstp->rq_arg.pages[i]) + page_base,
12462     + pgto = to_offs >> PAGE_SHIFT;
12463     + pgfrom = from_offs >> PAGE_SHIFT;
12464     + pgto_offs = to_offs & ~PAGE_MASK;
12465     + pgfrom_offs = from_offs & ~PAGE_MASK;
12466     +
12467     + length = min_t(unsigned int, inlen,
12468     + min_t(unsigned int, PAGE_SIZE - pgto_offs,
12469     + PAGE_SIZE - pgfrom_offs));
12470     + memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
12471     + page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
12472     length);
12473    
12474     + to_offs += length;
12475     + from_offs += length;
12476     inlen -= length;
12477     - page_base = 0;
12478     - i++;
12479     }
12480     return 0;
12481     }
12482     diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
12483     index 217106c66a13c..25e8922c10b28 100644
12484     --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
12485     +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
12486     @@ -609,10 +609,11 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
12487     while (remaining) {
12488     len = min_t(u32, PAGE_SIZE - pageoff, remaining);
12489    
12490     - memcpy(dst, page_address(*ppages), len);
12491     + memcpy(dst, page_address(*ppages) + pageoff, len);
12492     remaining -= len;
12493     dst += len;
12494     pageoff = 0;
12495     + ppages++;
12496     }
12497     }
12498    
12499     diff --git a/net/tipc/msg.c b/net/tipc/msg.c
12500     index ee4b2261e7957..b0ed3c944b2d1 100644
12501     --- a/net/tipc/msg.c
12502     +++ b/net/tipc/msg.c
12503     @@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
12504     if (fragid == FIRST_FRAGMENT) {
12505     if (unlikely(head))
12506     goto err;
12507     - frag = skb_unshare(frag, GFP_ATOMIC);
12508     + if (skb_cloned(frag))
12509     + frag = skb_copy(frag, GFP_ATOMIC);
12510     if (unlikely(!frag))
12511     goto err;
12512     head = *headbuf = frag;
12513     diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
12514     index 25fca390cdcf5..933a3187d3bf2 100644
12515     --- a/net/tls/tls_device.c
12516     +++ b/net/tls/tls_device.c
12517     @@ -405,14 +405,14 @@ static int tls_push_data(struct sock *sk,
12518     struct tls_context *tls_ctx = tls_get_ctx(sk);
12519     struct tls_prot_info *prot = &tls_ctx->prot_info;
12520     struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
12521     - int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
12522     struct tls_record_info *record = ctx->open_record;
12523     int tls_push_record_flags;
12524     struct page_frag *pfrag;
12525     size_t orig_size = size;
12526     u32 max_open_record_len;
12527     - int copy, rc = 0;
12528     + bool more = false;
12529     bool done = false;
12530     + int copy, rc = 0;
12531     long timeo;
12532    
12533     if (flags &
12534     @@ -480,9 +480,8 @@ handle_error:
12535     if (!size) {
12536     last_record:
12537     tls_push_record_flags = flags;
12538     - if (more) {
12539     - tls_ctx->pending_open_record_frags =
12540     - !!record->num_frags;
12541     + if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
12542     + more = true;
12543     break;
12544     }
12545    
12546     @@ -514,6 +513,8 @@ last_record:
12547     }
12548     } while (!done);
12549    
12550     + tls_ctx->pending_open_record_frags = more;
12551     +
12552     if (orig_size - size > 0)
12553     rc = orig_size - size;
12554    
12555     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
12556     index 7bc4f37655237..672b70730e898 100644
12557     --- a/net/wireless/nl80211.c
12558     +++ b/net/wireless/nl80211.c
12559     @@ -2227,7 +2227,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
12560     * case we'll continue with more data in the next round,
12561     * but break unconditionally so unsplit data stops here.
12562     */
12563     - state->split_start++;
12564     + if (state->split)
12565     + state->split_start++;
12566     + else
12567     + state->split_start = 0;
12568     break;
12569     case 9:
12570     if (rdev->wiphy.extended_capabilities &&
12571     @@ -4496,16 +4499,14 @@ static int nl80211_parse_he_obss_pd(struct nlattr *attrs,
12572     if (err)
12573     return err;
12574    
12575     - if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] ||
12576     - !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
12577     - return -EINVAL;
12578     -
12579     - he_obss_pd->min_offset =
12580     - nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
12581     - he_obss_pd->max_offset =
12582     - nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
12583     + if (tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET])
12584     + he_obss_pd->min_offset =
12585     + nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
12586     + if (tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
12587     + he_obss_pd->max_offset =
12588     + nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
12589    
12590     - if (he_obss_pd->min_offset >= he_obss_pd->max_offset)
12591     + if (he_obss_pd->min_offset > he_obss_pd->max_offset)
12592     return -EINVAL;
12593    
12594     he_obss_pd->enable = true;
12595     diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
12596     index a11bf6c5b53b4..cd3f16a6f5caf 100644
12597     --- a/samples/mic/mpssd/mpssd.c
12598     +++ b/samples/mic/mpssd/mpssd.c
12599     @@ -403,9 +403,9 @@ mic_virtio_copy(struct mic_info *mic, int fd,
12600    
12601     static inline unsigned _vring_size(unsigned int num, unsigned long align)
12602     {
12603     - return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
12604     + return _ALIGN_UP(((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
12605     + align - 1) & ~(align - 1))
12606     - + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
12607     + + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num, 4);
12608     }
12609    
12610     /*
12611     diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
12612     index d5ad7b2539c75..d86825261b515 100644
12613     --- a/security/integrity/ima/ima_crypto.c
12614     +++ b/security/integrity/ima/ima_crypto.c
12615     @@ -688,6 +688,8 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
12616     /* now accumulate with current aggregate */
12617     rc = crypto_shash_update(shash, d.digest,
12618     crypto_shash_digestsize(tfm));
12619     + if (rc != 0)
12620     + return rc;
12621     }
12622     if (!rc)
12623     crypto_shash_final(shash, digest);
12624     diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
12625     index c8b9c0b315d8f..250a92b187265 100644
12626     --- a/sound/core/seq/oss/seq_oss.c
12627     +++ b/sound/core/seq/oss/seq_oss.c
12628     @@ -174,9 +174,12 @@ odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
12629     if (snd_BUG_ON(!dp))
12630     return -ENXIO;
12631    
12632     - mutex_lock(&register_mutex);
12633     + if (cmd != SNDCTL_SEQ_SYNC &&
12634     + mutex_lock_interruptible(&register_mutex))
12635     + return -ERESTARTSYS;
12636     rc = snd_seq_oss_ioctl(dp, cmd, arg);
12637     - mutex_unlock(&register_mutex);
12638     + if (cmd != SNDCTL_SEQ_SYNC)
12639     + mutex_unlock(&register_mutex);
12640     return rc;
12641     }
12642    
12643     diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c
12644     index 45b740f44c459..c362eb38ab906 100644
12645     --- a/sound/firewire/bebob/bebob_hwdep.c
12646     +++ b/sound/firewire/bebob/bebob_hwdep.c
12647     @@ -36,12 +36,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
12648     }
12649    
12650     memset(&event, 0, sizeof(event));
12651     + count = min_t(long, count, sizeof(event.lock_status));
12652     if (bebob->dev_lock_changed) {
12653     event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
12654     event.lock_status.status = (bebob->dev_lock_count > 0);
12655     bebob->dev_lock_changed = false;
12656     -
12657     - count = min_t(long, count, sizeof(event.lock_status));
12658     }
12659    
12660     spin_unlock_irq(&bebob->lock);
12661     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
12662     index 590ea262f2e20..9a1968932b783 100644
12663     --- a/sound/pci/hda/hda_intel.c
12664     +++ b/sound/pci/hda/hda_intel.c
12665     @@ -1001,12 +1001,14 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
12666     azx_init_pci(chip);
12667     hda_intel_init_chip(chip, true);
12668    
12669     - if (status && from_rt) {
12670     - list_for_each_codec(codec, &chip->bus)
12671     - if (!codec->relaxed_resume &&
12672     - (status & (1 << codec->addr)))
12673     - schedule_delayed_work(&codec->jackpoll_work,
12674     - codec->jackpoll_interval);
12675     + if (from_rt) {
12676     + list_for_each_codec(codec, &chip->bus) {
12677     + if (codec->relaxed_resume)
12678     + continue;
12679     +
12680     + if (codec->forced_resume || (status & (1 << codec->addr)))
12681     + pm_request_resume(hda_codec_dev(codec));
12682     + }
12683     }
12684    
12685     /* power down again for link-controlled chips */
12686     diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
12687     index 6aa39339db0ab..459aff6c10bc5 100644
12688     --- a/sound/pci/hda/patch_ca0132.c
12689     +++ b/sound/pci/hda/patch_ca0132.c
12690     @@ -1065,6 +1065,7 @@ enum {
12691     QUIRK_R3DI,
12692     QUIRK_R3D,
12693     QUIRK_AE5,
12694     + QUIRK_AE7,
12695     };
12696    
12697     #ifdef CONFIG_PCI
12698     @@ -1184,6 +1185,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
12699     SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
12700     SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
12701     SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
12702     + SND_PCI_QUIRK(0x1102, 0x0081, "Sound Blaster AE-7", QUIRK_AE7),
12703     {}
12704     };
12705    
12706     @@ -4674,6 +4676,15 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
12707     ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
12708     tmp = FLOAT_THREE;
12709     break;
12710     + case QUIRK_AE7:
12711     + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
12712     + tmp = FLOAT_THREE;
12713     + chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
12714     + SR_96_000);
12715     + chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
12716     + SR_96_000);
12717     + dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
12718     + break;
12719     default:
12720     tmp = FLOAT_ONE;
12721     break;
12722     @@ -4719,6 +4730,14 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
12723     case QUIRK_AE5:
12724     ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
12725     break;
12726     + case QUIRK_AE7:
12727     + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f);
12728     + chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
12729     + SR_96_000);
12730     + chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
12731     + SR_96_000);
12732     + dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
12733     + break;
12734     default:
12735     break;
12736     }
12737     @@ -4728,7 +4747,10 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
12738     if (ca0132_quirk(spec) == QUIRK_R3DI)
12739     chipio_set_conn_rate(codec, 0x0F, SR_96_000);
12740    
12741     - tmp = FLOAT_ZERO;
12742     + if (ca0132_quirk(spec) == QUIRK_AE7)
12743     + tmp = FLOAT_THREE;
12744     + else
12745     + tmp = FLOAT_ZERO;
12746     dspio_set_uint_param(codec, 0x80, 0x00, tmp);
12747    
12748     switch (ca0132_quirk(spec)) {
12749     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
12750     index 51798632d334c..df4771b9eff24 100644
12751     --- a/sound/pci/hda/patch_hdmi.c
12752     +++ b/sound/pci/hda/patch_hdmi.c
12753     @@ -2001,22 +2001,25 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
12754     int pinctl;
12755     int err = 0;
12756    
12757     + mutex_lock(&spec->pcm_lock);
12758     if (hinfo->nid) {
12759     pcm_idx = hinfo_to_pcm_index(codec, hinfo);
12760     - if (snd_BUG_ON(pcm_idx < 0))
12761     - return -EINVAL;
12762     + if (snd_BUG_ON(pcm_idx < 0)) {
12763     + err = -EINVAL;
12764     + goto unlock;
12765     + }
12766     cvt_idx = cvt_nid_to_cvt_index(codec, hinfo->nid);
12767     - if (snd_BUG_ON(cvt_idx < 0))
12768     - return -EINVAL;
12769     + if (snd_BUG_ON(cvt_idx < 0)) {
12770     + err = -EINVAL;
12771     + goto unlock;
12772     + }
12773     per_cvt = get_cvt(spec, cvt_idx);
12774     -
12775     snd_BUG_ON(!per_cvt->assigned);
12776     per_cvt->assigned = 0;
12777     hinfo->nid = 0;
12778    
12779     azx_stream(get_azx_dev(substream))->stripe = 0;
12780    
12781     - mutex_lock(&spec->pcm_lock);
12782     snd_hda_spdif_ctls_unassign(codec, pcm_idx);
12783     clear_bit(pcm_idx, &spec->pcm_in_use);
12784     pin_idx = hinfo_to_pin_index(codec, hinfo);
12785     @@ -2044,10 +2047,11 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
12786     per_pin->setup = false;
12787     per_pin->channels = 0;
12788     mutex_unlock(&per_pin->lock);
12789     - unlock:
12790     - mutex_unlock(&spec->pcm_lock);
12791     }
12792    
12793     +unlock:
12794     + mutex_unlock(&spec->pcm_lock);
12795     +
12796     return err;
12797     }
12798    
12799     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
12800     index 4dfd714f718b8..7a24e9f0d2fe7 100644
12801     --- a/sound/pci/hda/patch_realtek.c
12802     +++ b/sound/pci/hda/patch_realtek.c
12803     @@ -1141,6 +1141,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
12804     codec->single_adc_amp = 1;
12805     /* FIXME: do we need this for all Realtek codec models? */
12806     codec->spdif_status_reset = 1;
12807     + codec->forced_resume = 1;
12808     codec->patch_ops = alc_patch_ops;
12809    
12810     err = alc_codec_rename_from_preset(codec);
12811     @@ -1920,6 +1921,8 @@ enum {
12812     ALC1220_FIXUP_CLEVO_P950,
12813     ALC1220_FIXUP_CLEVO_PB51ED,
12814     ALC1220_FIXUP_CLEVO_PB51ED_PINS,
12815     + ALC887_FIXUP_ASUS_AUDIO,
12816     + ALC887_FIXUP_ASUS_HMIC,
12817     };
12818    
12819     static void alc889_fixup_coef(struct hda_codec *codec,
12820     @@ -2132,6 +2135,31 @@ static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
12821     alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
12822     }
12823    
12824     +static void alc887_asus_hp_automute_hook(struct hda_codec *codec,
12825     + struct hda_jack_callback *jack)
12826     +{
12827     + struct alc_spec *spec = codec->spec;
12828     + unsigned int vref;
12829     +
12830     + snd_hda_gen_hp_automute(codec, jack);
12831     +
12832     + if (spec->gen.hp_jack_present)
12833     + vref = AC_PINCTL_VREF_80;
12834     + else
12835     + vref = AC_PINCTL_VREF_HIZ;
12836     + snd_hda_set_pin_ctl(codec, 0x19, PIN_HP | vref);
12837     +}
12838     +
12839     +static void alc887_fixup_asus_jack(struct hda_codec *codec,
12840     + const struct hda_fixup *fix, int action)
12841     +{
12842     + struct alc_spec *spec = codec->spec;
12843     + if (action != HDA_FIXUP_ACT_PROBE)
12844     + return;
12845     + snd_hda_set_pin_ctl_cache(codec, 0x1b, PIN_HP);
12846     + spec->gen.hp_automute_hook = alc887_asus_hp_automute_hook;
12847     +}
12848     +
12849     static const struct hda_fixup alc882_fixups[] = {
12850     [ALC882_FIXUP_ABIT_AW9D_MAX] = {
12851     .type = HDA_FIXUP_PINS,
12852     @@ -2389,6 +2417,20 @@ static const struct hda_fixup alc882_fixups[] = {
12853     .chained = true,
12854     .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
12855     },
12856     + [ALC887_FIXUP_ASUS_AUDIO] = {
12857     + .type = HDA_FIXUP_PINS,
12858     + .v.pins = (const struct hda_pintbl[]) {
12859     + { 0x15, 0x02a14150 }, /* use as headset mic, without its own jack detect */
12860     + { 0x19, 0x22219420 },
12861     + {}
12862     + },
12863     + },
12864     + [ALC887_FIXUP_ASUS_HMIC] = {
12865     + .type = HDA_FIXUP_FUNC,
12866     + .v.func = alc887_fixup_asus_jack,
12867     + .chained = true,
12868     + .chain_id = ALC887_FIXUP_ASUS_AUDIO,
12869     + },
12870     };
12871    
12872     static const struct snd_pci_quirk alc882_fixup_tbl[] = {
12873     @@ -2422,6 +2464,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
12874     SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
12875     SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
12876     SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
12877     + SND_PCI_QUIRK(0x1043, 0x2390, "Asus D700SA", ALC887_FIXUP_ASUS_HMIC),
12878     SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
12879     SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
12880     SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
12881     @@ -6215,6 +6258,7 @@ enum {
12882     ALC269_FIXUP_LEMOTE_A190X,
12883     ALC256_FIXUP_INTEL_NUC8_RUGGED,
12884     ALC255_FIXUP_XIAOMI_HEADSET_MIC,
12885     + ALC274_FIXUP_HP_MIC,
12886     };
12887    
12888     static const struct hda_fixup alc269_fixups[] = {
12889     @@ -7594,6 +7638,14 @@ static const struct hda_fixup alc269_fixups[] = {
12890     .chained = true,
12891     .chain_id = ALC289_FIXUP_ASUS_GA401
12892     },
12893     + [ALC274_FIXUP_HP_MIC] = {
12894     + .type = HDA_FIXUP_VERBS,
12895     + .v.verbs = (const struct hda_verb[]) {
12896     + { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
12897     + { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
12898     + { }
12899     + },
12900     + },
12901     };
12902    
12903     static const struct snd_pci_quirk alc269_fixup_tbl[] = {
12904     @@ -7745,6 +7797,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
12905     SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
12906     SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
12907     SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
12908     + SND_PCI_QUIRK(0x103c, 0x874e, "HP", ALC274_FIXUP_HP_MIC),
12909     + SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
12910     SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
12911     SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
12912     SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
12913     @@ -8070,6 +8124,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
12914     {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
12915     {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
12916     {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
12917     + {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
12918     {}
12919     };
12920     #define ALC225_STANDARD_PINS \
12921     @@ -9633,6 +9688,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
12922     SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
12923     SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
12924     SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
12925     + SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
12926     SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
12927     SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
12928     SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
12929     diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
12930     index 68165de1c8dea..7a1ffbaf48be5 100644
12931     --- a/sound/soc/codecs/tlv320aic32x4.c
12932     +++ b/sound/soc/codecs/tlv320aic32x4.c
12933     @@ -662,7 +662,7 @@ static int aic32x4_set_processing_blocks(struct snd_soc_component *component,
12934     }
12935    
12936     static int aic32x4_setup_clocks(struct snd_soc_component *component,
12937     - unsigned int sample_rate)
12938     + unsigned int sample_rate, unsigned int channels)
12939     {
12940     u8 aosr;
12941     u16 dosr;
12942     @@ -750,7 +750,9 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
12943     dosr);
12944    
12945     clk_set_rate(clocks[5].clk,
12946     - sample_rate * 32);
12947     + sample_rate * 32 *
12948     + channels);
12949     +
12950     return 0;
12951     }
12952     }
12953     @@ -772,7 +774,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
12954     u8 iface1_reg = 0;
12955     u8 dacsetup_reg = 0;
12956    
12957     - aic32x4_setup_clocks(component, params_rate(params));
12958     + aic32x4_setup_clocks(component, params_rate(params),
12959     + params_channels(params));
12960    
12961     switch (params_width(params)) {
12962     case 16:
12963     diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
12964     index 914b75c23d1bf..027259695551c 100644
12965     --- a/sound/soc/fsl/fsl_sai.c
12966     +++ b/sound/soc/fsl/fsl_sai.c
12967     @@ -694,7 +694,7 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
12968     return 0;
12969     }
12970    
12971     -static struct snd_soc_dai_driver fsl_sai_dai = {
12972     +static struct snd_soc_dai_driver fsl_sai_dai_template = {
12973     .probe = fsl_sai_dai_probe,
12974     .playback = {
12975     .stream_name = "CPU-Playback",
12976     @@ -965,12 +965,15 @@ static int fsl_sai_probe(struct platform_device *pdev)
12977     return ret;
12978     }
12979    
12980     + memcpy(&sai->cpu_dai_drv, &fsl_sai_dai_template,
12981     + sizeof(fsl_sai_dai_template));
12982     +
12983     /* Sync Tx with Rx as default by following old DT binding */
12984     sai->synchronous[RX] = true;
12985     sai->synchronous[TX] = false;
12986     - fsl_sai_dai.symmetric_rates = 1;
12987     - fsl_sai_dai.symmetric_channels = 1;
12988     - fsl_sai_dai.symmetric_samplebits = 1;
12989     + sai->cpu_dai_drv.symmetric_rates = 1;
12990     + sai->cpu_dai_drv.symmetric_channels = 1;
12991     + sai->cpu_dai_drv.symmetric_samplebits = 1;
12992    
12993     if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) &&
12994     of_find_property(np, "fsl,sai-asynchronous", NULL)) {
12995     @@ -987,9 +990,9 @@ static int fsl_sai_probe(struct platform_device *pdev)
12996     /* Discard all settings for asynchronous mode */
12997     sai->synchronous[RX] = false;
12998     sai->synchronous[TX] = false;
12999     - fsl_sai_dai.symmetric_rates = 0;
13000     - fsl_sai_dai.symmetric_channels = 0;
13001     - fsl_sai_dai.symmetric_samplebits = 0;
13002     + sai->cpu_dai_drv.symmetric_rates = 0;
13003     + sai->cpu_dai_drv.symmetric_channels = 0;
13004     + sai->cpu_dai_drv.symmetric_samplebits = 0;
13005     }
13006    
13007     if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
13008     @@ -1018,7 +1021,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
13009     pm_runtime_enable(&pdev->dev);
13010    
13011     ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
13012     - &fsl_sai_dai, 1);
13013     + &sai->cpu_dai_drv, 1);
13014     if (ret)
13015     goto err_pm_disable;
13016    
13017     diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
13018     index 6aba7d28f5f34..677ecfc1ec68f 100644
13019     --- a/sound/soc/fsl/fsl_sai.h
13020     +++ b/sound/soc/fsl/fsl_sai.h
13021     @@ -180,6 +180,7 @@ struct fsl_sai {
13022     unsigned int bclk_ratio;
13023    
13024     const struct fsl_sai_soc_data *soc_data;
13025     + struct snd_soc_dai_driver cpu_dai_drv;
13026     struct snd_dmaengine_dai_dma_data dma_params_rx;
13027     struct snd_dmaengine_dai_dma_data dma_params_tx;
13028     };
13029     diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c
13030     index 15a27a2cd0cae..fad1eb6253d53 100644
13031     --- a/sound/soc/fsl/imx-es8328.c
13032     +++ b/sound/soc/fsl/imx-es8328.c
13033     @@ -145,13 +145,13 @@ static int imx_es8328_probe(struct platform_device *pdev)
13034     data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
13035     if (!data) {
13036     ret = -ENOMEM;
13037     - goto fail;
13038     + goto put_device;
13039     }
13040    
13041     comp = devm_kzalloc(dev, 3 * sizeof(*comp), GFP_KERNEL);
13042     if (!comp) {
13043     ret = -ENOMEM;
13044     - goto fail;
13045     + goto put_device;
13046     }
13047    
13048     data->dev = dev;
13049     @@ -182,12 +182,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
13050     ret = snd_soc_of_parse_card_name(&data->card, "model");
13051     if (ret) {
13052     dev_err(dev, "Unable to parse card name\n");
13053     - goto fail;
13054     + goto put_device;
13055     }
13056     ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
13057     if (ret) {
13058     dev_err(dev, "Unable to parse routing: %d\n", ret);
13059     - goto fail;
13060     + goto put_device;
13061     }
13062     data->card.num_links = 1;
13063     data->card.owner = THIS_MODULE;
13064     @@ -196,10 +196,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
13065     ret = snd_soc_register_card(&data->card);
13066     if (ret) {
13067     dev_err(dev, "Unable to register: %d\n", ret);
13068     - goto fail;
13069     + goto put_device;
13070     }
13071    
13072     platform_set_drvdata(pdev, data);
13073     +put_device:
13074     + put_device(&ssi_pdev->dev);
13075     fail:
13076     of_node_put(ssi_np);
13077     of_node_put(codec_np);
13078     diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
13079     index dbce7e92baf3c..c5d6952a4a33f 100644
13080     --- a/sound/soc/qcom/lpass-cpu.c
13081     +++ b/sound/soc/qcom/lpass-cpu.c
13082     @@ -174,21 +174,6 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
13083     return 0;
13084     }
13085    
13086     -static int lpass_cpu_daiops_hw_free(struct snd_pcm_substream *substream,
13087     - struct snd_soc_dai *dai)
13088     -{
13089     - struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
13090     - int ret;
13091     -
13092     - ret = regmap_write(drvdata->lpaif_map,
13093     - LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id),
13094     - 0);
13095     - if (ret)
13096     - dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
13097     -
13098     - return ret;
13099     -}
13100     -
13101     static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
13102     struct snd_soc_dai *dai)
13103     {
13104     @@ -269,7 +254,6 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
13105     .startup = lpass_cpu_daiops_startup,
13106     .shutdown = lpass_cpu_daiops_shutdown,
13107     .hw_params = lpass_cpu_daiops_hw_params,
13108     - .hw_free = lpass_cpu_daiops_hw_free,
13109     .prepare = lpass_cpu_daiops_prepare,
13110     .trigger = lpass_cpu_daiops_trigger,
13111     };
13112     diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
13113     index 4c745baa39f73..9acaef81dd74c 100644
13114     --- a/sound/soc/qcom/lpass-platform.c
13115     +++ b/sound/soc/qcom/lpass-platform.c
13116     @@ -61,7 +61,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
13117     int ret, dma_ch, dir = substream->stream;
13118     struct lpass_pcm_data *data;
13119    
13120     - data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
13121     + data = kzalloc(sizeof(*data), GFP_KERNEL);
13122     if (!data)
13123     return -ENOMEM;
13124    
13125     @@ -119,6 +119,7 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
13126     if (v->free_dma_channel)
13127     v->free_dma_channel(drvdata, data->dma_ch);
13128    
13129     + kfree(data);
13130     return 0;
13131     }
13132    
13133     diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
13134     index ac2feddc75fdd..ea183922c4ef1 100644
13135     --- a/tools/perf/builtin-stat.c
13136     +++ b/tools/perf/builtin-stat.c
13137     @@ -1671,8 +1671,10 @@ static void setup_system_wide(int forks)
13138     struct evsel *counter;
13139    
13140     evlist__for_each_entry(evsel_list, counter) {
13141     - if (!counter->core.system_wide)
13142     + if (!counter->core.system_wide &&
13143     + strcmp(counter->name, "duration_time")) {
13144     return;
13145     + }
13146     }
13147    
13148     if (evsel_list->core.nr_entries)
13149     diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
13150     index c5cce3a60476b..8aeaeba48a41f 100644
13151     --- a/tools/perf/util/intel-pt.c
13152     +++ b/tools/perf/util/intel-pt.c
13153     @@ -974,6 +974,8 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
13154    
13155     if (queue->tid == -1 || pt->have_sched_switch) {
13156     ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
13157     + if (ptq->tid == -1)
13158     + ptq->pid = -1;
13159     thread__zput(ptq->thread);
13160     }
13161    
13162     @@ -2488,10 +2490,8 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
13163     tid = sample->tid;
13164     }
13165    
13166     - if (tid == -1) {
13167     - pr_err("context_switch event has no tid\n");
13168     - return -EINVAL;
13169     - }
13170     + if (tid == -1)
13171     + intel_pt_log("context_switch event has no tid\n");
13172    
13173     intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
13174     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
13175     diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
13176     index 8995092d541ec..3b796dd5e5772 100644
13177     --- a/tools/testing/radix-tree/idr-test.c
13178     +++ b/tools/testing/radix-tree/idr-test.c
13179     @@ -523,8 +523,27 @@ static void *ida_random_fn(void *arg)
13180     return NULL;
13181     }
13182    
13183     +static void *ida_leak_fn(void *arg)
13184     +{
13185     + struct ida *ida = arg;
13186     + time_t s = time(NULL);
13187     + int i, ret;
13188     +
13189     + rcu_register_thread();
13190     +
13191     + do for (i = 0; i < 1000; i++) {
13192     + ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
13193     + if (ret >= 0)
13194     + ida_free(ida, 128);
13195     + } while (time(NULL) < s + 2);
13196     +
13197     + rcu_unregister_thread();
13198     + return NULL;
13199     +}
13200     +
13201     void ida_thread_tests(void)
13202     {
13203     + DEFINE_IDA(ida);
13204     pthread_t threads[20];
13205     int i;
13206    
13207     @@ -536,6 +555,16 @@ void ida_thread_tests(void)
13208    
13209     while (i--)
13210     pthread_join(threads[i], NULL);
13211     +
13212     + for (i = 0; i < ARRAY_SIZE(threads); i++)
13213     + if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
13214     + perror("creating ida thread");
13215     + exit(1);
13216     + }
13217     +
13218     + while (i--)
13219     + pthread_join(threads[i], NULL);
13220     + assert(ida_is_empty(&ida));
13221     }
13222    
13223     void ida_tests(void)
13224     diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
13225     index d22e438198cf7..9af8822ece477 100644
13226     --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
13227     +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
13228     @@ -18,11 +18,11 @@
13229     #define MAX_ULONG_STR_LEN 7
13230     #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
13231    
13232     +const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
13233     static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
13234     {
13235     - volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
13236     unsigned char i;
13237     - char name[64];
13238     + char name[sizeof(tcp_mem_name)];
13239     int ret;
13240    
13241     memset(name, 0, sizeof(name));
13242     diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
13243     index cb201cbe11e77..55251046c9b73 100644
13244     --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
13245     +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
13246     @@ -18,11 +18,11 @@
13247     #define MAX_ULONG_STR_LEN 7
13248     #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
13249    
13250     +const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
13251     static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
13252     {
13253     - volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
13254     unsigned char i;
13255     - char name[64];
13256     + char name[sizeof(tcp_mem_name)];
13257     int ret;
13258    
13259     memset(name, 0, sizeof(name));
13260     diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
13261     index f3eb8aacec0e7..a2b0e4eb1fe4c 100644
13262     --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
13263     +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
13264     @@ -34,12 +34,12 @@ echo 'wakeup_latency u64 lat pid_t pid' >> synthetic_events
13265     echo 'hist:keys=pid:ts1=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger
13266     echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts1:onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,next_pid) if next_comm=="ping"' > events/sched/sched_switch/trigger
13267    
13268     -echo 'waking+wakeup_latency u64 lat; pid_t pid' >> synthetic_events
13269     -echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking+wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
13270     -echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking+wakeup_latency/trigger
13271     +echo 'waking_plus_wakeup_latency u64 lat; pid_t pid' >> synthetic_events
13272     +echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking_plus_wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
13273     +echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking_plus_wakeup_latency/trigger
13274    
13275     ping $LOCALHOST -c 3
13276     -if ! grep -q "pid:" events/synthetic/waking+wakeup_latency/hist; then
13277     +if ! grep -q "pid:" events/synthetic/waking_plus_wakeup_latency/hist; then
13278     fail "Failed to create combined histogram"
13279     fi
13280    
13281     diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
13282     index b8503a8119b07..81fcc25a54db6 100644
13283     --- a/tools/testing/selftests/net/config
13284     +++ b/tools/testing/selftests/net/config
13285     @@ -29,3 +29,4 @@ CONFIG_NET_SCH_FQ=m
13286     CONFIG_NET_SCH_ETF=m
13287     CONFIG_TEST_BLACKHOLE_DEV=m
13288     CONFIG_KALLSYMS=y
13289     +CONFIG_NET_FOU=m
13290     diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
13291     index a0b5f57d6bd31..0727e2012b685 100755
13292     --- a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
13293     +++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
13294     @@ -215,10 +215,16 @@ switch_create()
13295    
13296     bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
13297     bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
13298     +
13299     + sysctl_set net.ipv4.conf.all.rp_filter 0
13300     + sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
13301     + sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
13302     }
13303    
13304     switch_destroy()
13305     {
13306     + sysctl_restore net.ipv4.conf.all.rp_filter
13307     +
13308     bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
13309     bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
13310    
13311     @@ -359,6 +365,10 @@ ns_switch_create()
13312    
13313     bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
13314     bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
13315     +
13316     + sysctl_set net.ipv4.conf.all.rp_filter 0
13317     + sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
13318     + sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
13319     }
13320     export -f ns_switch_create
13321    
13322     diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
13323     index 1209031bc794d..5d97fa347d75a 100755
13324     --- a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
13325     +++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
13326     @@ -237,10 +237,16 @@ switch_create()
13327    
13328     bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
13329     bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
13330     +
13331     + sysctl_set net.ipv4.conf.all.rp_filter 0
13332     + sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
13333     + sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
13334     }
13335    
13336     switch_destroy()
13337     {
13338     + sysctl_restore net.ipv4.conf.all.rp_filter
13339     +
13340     bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
13341     bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
13342    
13343     @@ -402,6 +408,10 @@ ns_switch_create()
13344    
13345     bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
13346     bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
13347     +
13348     + sysctl_set net.ipv4.conf.all.rp_filter 0
13349     + sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
13350     + sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
13351     }
13352     export -f ns_switch_create
13353    
13354     diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
13355     index bdbf4b3125b6a..28ea3753da207 100755
13356     --- a/tools/testing/selftests/net/rtnetlink.sh
13357     +++ b/tools/testing/selftests/net/rtnetlink.sh
13358     @@ -521,6 +521,11 @@ kci_test_encap_fou()
13359     return $ksft_skip
13360     fi
13361    
13362     + if ! /sbin/modprobe -q -n fou; then
13363     + echo "SKIP: module fou is not found"
13364     + return $ksft_skip
13365     + fi
13366     + /sbin/modprobe -q fou
13367     ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null
13368     if [ $? -ne 0 ];then
13369     echo "FAIL: can't add fou port 7777, skipping test"
13370     diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
13371     index f988d2f42e8f2..cf001a2c69420 100755
13372     --- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
13373     +++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
13374     @@ -1,17 +1,19 @@
13375     #!/bin/sh
13376     # SPDX-License-Identifier: GPL-2.0-only
13377    
13378     +KSELFTESTS_SKIP=4
13379     +
13380     . ./eeh-functions.sh
13381    
13382     if ! eeh_supported ; then
13383     echo "EEH not supported on this system, skipping"
13384     - exit 0;
13385     + exit $KSELFTESTS_SKIP;
13386     fi
13387    
13388     if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \
13389     [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then
13390     echo "debugfs EEH testing files are missing. Is debugfs mounted?"
13391     - exit 1;
13392     + exit $KSELFTESTS_SKIP;
13393     fi
13394    
13395     pre_lspci=`mktemp`
13396     @@ -79,4 +81,5 @@ echo "$failed devices failed to recover ($dev_count tested)"
13397     lspci | diff -u $pre_lspci -
13398     rm -f $pre_lspci
13399    
13400     -exit $failed
13401     +test "$failed" == 0
13402     +exit $?