Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0158-5.4.59-all-fixes.patch



Revision 3613
Tue Aug 25 10:59:01 2020 UTC by niro
File size: 298941 bytes
-linux-5.4.59
1 diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
2 index 680451695422..c3767d4d01a6 100644
3 --- a/Documentation/ABI/testing/sysfs-bus-iio
4 +++ b/Documentation/ABI/testing/sysfs-bus-iio
5 @@ -1566,7 +1566,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw
6 KernelVersion: 4.3
7 Contact: linux-iio@vger.kernel.org
8 Description:
9 - Raw (unscaled no offset etc.) percentage reading of a substance.
10 + Raw (unscaled no offset etc.) reading of a substance. Units
11 + after application of scale and offset are percents.
12
13 What: /sys/bus/iio/devices/iio:deviceX/in_resistance_raw
14 What: /sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw
15 diff --git a/Makefile b/Makefile
16 index 29948bc4a0d2..cc72b8472f24 100644
17 --- a/Makefile
18 +++ b/Makefile
19 @@ -1,7 +1,7 @@
20 # SPDX-License-Identifier: GPL-2.0
21 VERSION = 5
22 PATCHLEVEL = 4
23 -SUBLEVEL = 58
24 +SUBLEVEL = 59
25 EXTRAVERSION =
26 NAME = Kleptomaniac Octopus
27
28 diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
29 index 42f3313e6988..9f507393c375 100644
30 --- a/arch/arm/boot/dts/r8a7793-gose.dts
31 +++ b/arch/arm/boot/dts/r8a7793-gose.dts
32 @@ -339,7 +339,7 @@
33 reg = <0x20>;
34 remote = <&vin1>;
35
36 - port {
37 + ports {
38 #address-cells = <1>;
39 #size-cells = <0>;
40
41 @@ -399,7 +399,7 @@
42 interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
43 default-input = <0>;
44
45 - port {
46 + ports {
47 #address-cells = <1>;
48 #size-cells = <0>;
49
50 diff --git a/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi b/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi
51 index 22466afd38a3..235994a4a2eb 100644
52 --- a/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi
53 +++ b/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi
54 @@ -16,15 +16,27 @@
55 regulator-type = "voltage";
56 regulator-boot-on;
57 regulator-always-on;
58 - regulator-min-microvolt = <1100000>;
59 - regulator-max-microvolt = <1300000>;
60 + regulator-min-microvolt = <1108475>;
61 + regulator-max-microvolt = <1308475>;
62 regulator-ramp-delay = <50>; /* 4ms */
63 gpios = <&r_pio 0 1 GPIO_ACTIVE_HIGH>; /* PL1 */
64 gpios-states = <0x1>;
65 - states = <1100000 0>, <1300000 1>;
66 + states = <1108475 0>, <1308475 1>;
67 };
68 };
69
70 &cpu0 {
71 cpu-supply = <&reg_vdd_cpux>;
72 };
73 +
74 +&cpu1 {
75 + cpu-supply = <&reg_vdd_cpux>;
76 +};
77 +
78 +&cpu2 {
79 + cpu-supply = <&reg_vdd_cpux>;
80 +};
81 +
82 +&cpu3 {
83 + cpu-supply = <&reg_vdd_cpux>;
84 +};
85 diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
86 index 71778bb0475b..a082f6e4f0f4 100644
87 --- a/arch/arm/kernel/stacktrace.c
88 +++ b/arch/arm/kernel/stacktrace.c
89 @@ -22,6 +22,19 @@
90 * A simple function epilogue looks like this:
91 * ldm sp, {fp, sp, pc}
92 *
93 + * When compiled with clang, pc and sp are not pushed. A simple function
94 + * prologue looks like this when built with clang:
95 + *
96 + * stmdb {..., fp, lr}
97 + * add fp, sp, #x
98 + * sub sp, sp, #y
99 + *
100 + * A simple function epilogue looks like this when built with clang:
101 + *
102 + * sub sp, fp, #x
103 + * ldm {..., fp, pc}
104 + *
105 + *
106 * Note that with framepointer enabled, even the leaf functions have the same
107 * prologue and epilogue, therefore we can ignore the LR value in this case.
108 */
109 @@ -34,6 +47,16 @@ int notrace unwind_frame(struct stackframe *frame)
110 low = frame->sp;
111 high = ALIGN(low, THREAD_SIZE);
112
113 +#ifdef CONFIG_CC_IS_CLANG
114 + /* check current frame pointer is within bounds */
115 + if (fp < low + 4 || fp > high - 4)
116 + return -EINVAL;
117 +
118 + frame->sp = frame->fp;
119 + frame->fp = *(unsigned long *)(fp);
120 + frame->pc = frame->lr;
121 + frame->lr = *(unsigned long *)(fp + 4);
122 +#else
123 /* check current frame pointer is within bounds */
124 if (fp < low + 12 || fp > high - 4)
125 return -EINVAL;
126 @@ -42,6 +65,7 @@ int notrace unwind_frame(struct stackframe *frame)
127 frame->fp = *(unsigned long *)(fp - 12);
128 frame->sp = *(unsigned long *)(fp - 8);
129 frame->pc = *(unsigned long *)(fp - 4);
130 +#endif
131
132 return 0;
133 }
134 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
135 index 52665f30d236..6bc3000deb86 100644
136 --- a/arch/arm/mach-at91/pm.c
137 +++ b/arch/arm/mach-at91/pm.c
138 @@ -592,13 +592,13 @@ static void __init at91_pm_sram_init(void)
139 sram_pool = gen_pool_get(&pdev->dev, NULL);
140 if (!sram_pool) {
141 pr_warn("%s: sram pool unavailable!\n", __func__);
142 - return;
143 + goto out_put_device;
144 }
145
146 sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
147 if (!sram_base) {
148 pr_warn("%s: unable to alloc sram!\n", __func__);
149 - return;
150 + goto out_put_device;
151 }
152
153 sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
154 @@ -606,12 +606,17 @@ static void __init at91_pm_sram_init(void)
155 at91_pm_suspend_in_sram_sz, false);
156 if (!at91_suspend_sram_fn) {
157 pr_warn("SRAM: Could not map\n");
158 - return;
159 + goto out_put_device;
160 }
161
162 /* Copy the pm suspend handler to SRAM */
163 at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
164 &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
165 + return;
166 +
167 +out_put_device:
168 + put_device(&pdev->dev);
169 + return;
170 }
171
172 static bool __init at91_is_pm_mode_active(int pm_mode)
173 diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
174 index 9a681b421ae1..cd861c57d5ad 100644
175 --- a/arch/arm/mach-exynos/mcpm-exynos.c
176 +++ b/arch/arm/mach-exynos/mcpm-exynos.c
177 @@ -26,6 +26,7 @@
178 #define EXYNOS5420_USE_L2_COMMON_UP_STATE BIT(30)
179
180 static void __iomem *ns_sram_base_addr __ro_after_init;
181 +static bool secure_firmware __ro_after_init;
182
183 /*
184 * The common v7_exit_coherency_flush API could not be used because of the
185 @@ -58,15 +59,16 @@ static void __iomem *ns_sram_base_addr __ro_after_init;
186 static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
187 {
188 unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
189 + bool state;
190
191 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
192 if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
193 cluster >= EXYNOS5420_NR_CLUSTERS)
194 return -EINVAL;
195
196 - if (!exynos_cpu_power_state(cpunr)) {
197 - exynos_cpu_power_up(cpunr);
198 -
199 + state = exynos_cpu_power_state(cpunr);
200 + exynos_cpu_power_up(cpunr);
201 + if (!state && secure_firmware) {
202 /*
203 * This assumes the cluster number of the big cores(Cortex A15)
204 * is 0 and the Little cores(Cortex A7) is 1.
205 @@ -258,6 +260,8 @@ static int __init exynos_mcpm_init(void)
206 return -ENOMEM;
207 }
208
209 + secure_firmware = exynos_secure_firmware_available();
210 +
211 /*
212 * To increase the stability of KFC reset we need to program
213 * the PMU SPARE3 register
214 diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c
215 index 6ed887cf8dc9..365c0428b21b 100644
216 --- a/arch/arm/mach-socfpga/pm.c
217 +++ b/arch/arm/mach-socfpga/pm.c
218 @@ -49,14 +49,14 @@ static int socfpga_setup_ocram_self_refresh(void)
219 if (!ocram_pool) {
220 pr_warn("%s: ocram pool unavailable!\n", __func__);
221 ret = -ENODEV;
222 - goto put_node;
223 + goto put_device;
224 }
225
226 ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz);
227 if (!ocram_base) {
228 pr_warn("%s: unable to alloc ocram!\n", __func__);
229 ret = -ENOMEM;
230 - goto put_node;
231 + goto put_device;
232 }
233
234 ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
235 @@ -67,7 +67,7 @@ static int socfpga_setup_ocram_self_refresh(void)
236 if (!suspend_ocram_base) {
237 pr_warn("%s: __arm_ioremap_exec failed!\n", __func__);
238 ret = -ENOMEM;
239 - goto put_node;
240 + goto put_device;
241 }
242
243 /* Copy the code that puts DDR in self refresh to ocram */
244 @@ -81,6 +81,8 @@ static int socfpga_setup_ocram_self_refresh(void)
245 if (!socfpga_sdram_self_refresh_in_ocram)
246 ret = -EFAULT;
247
248 +put_device:
249 + put_device(&pdev->dev);
250 put_node:
251 of_node_put(np);
252
253 diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
254 index 080e0f56e108..61ee7b6a3159 100644
255 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
256 +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
257 @@ -157,6 +157,7 @@
258 regulator-min-microvolt = <700000>;
259 regulator-max-microvolt = <1150000>;
260 regulator-enable-ramp-delay = <125>;
261 + regulator-always-on;
262 };
263
264 ldo8_reg: LDO8 {
265 diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
266 index e035cf195b19..8c4bfbaf3a80 100644
267 --- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
268 +++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
269 @@ -530,6 +530,17 @@
270 status = "ok";
271 compatible = "adi,adv7533";
272 reg = <0x39>;
273 + adi,dsi-lanes = <4>;
274 + ports {
275 + #address-cells = <1>;
276 + #size-cells = <0>;
277 + port@0 {
278 + reg = <0>;
279 + };
280 + port@1 {
281 + reg = <1>;
282 + };
283 + };
284 };
285 };
286
287 diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
288 index c14205cd6bf5..3e47150c05ec 100644
289 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
290 +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
291 @@ -516,7 +516,7 @@
292 reg = <0x39>;
293 interrupt-parent = <&gpio1>;
294 interrupts = <1 2>;
295 - pd-gpio = <&gpio0 4 0>;
296 + pd-gpios = <&gpio0 4 0>;
297 adi,dsi-lanes = <4>;
298 #sound-dai-cells = <0>;
299
300 diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
301 index 242aaea68804..1235830ffd0b 100644
302 --- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
303 +++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
304 @@ -508,7 +508,7 @@
305 pins = "gpio63", "gpio64", "gpio65", "gpio66",
306 "gpio67", "gpio68";
307 drive-strength = <8>;
308 - bias-pull-none;
309 + bias-disable;
310 };
311 };
312 cdc_pdm_lines_sus: pdm_lines_off {
313 @@ -537,7 +537,7 @@
314 pins = "gpio113", "gpio114", "gpio115",
315 "gpio116";
316 drive-strength = <8>;
317 - bias-pull-none;
318 + bias-disable;
319 };
320 };
321
322 @@ -565,7 +565,7 @@
323 pinconf {
324 pins = "gpio110";
325 drive-strength = <8>;
326 - bias-pull-none;
327 + bias-disable;
328 };
329 };
330
331 @@ -591,7 +591,7 @@
332 pinconf {
333 pins = "gpio116";
334 drive-strength = <8>;
335 - bias-pull-none;
336 + bias-disable;
337 };
338 };
339 ext_mclk_tlmm_lines_sus: mclk_lines_off {
340 @@ -619,7 +619,7 @@
341 pins = "gpio112", "gpio117", "gpio118",
342 "gpio119";
343 drive-strength = <8>;
344 - bias-pull-none;
345 + bias-disable;
346 };
347 };
348 ext_sec_tlmm_lines_sus: tlmm_lines_off {
349 diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
350 index e17311e09082..216aafd90e7f 100644
351 --- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
352 +++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
353 @@ -156,7 +156,7 @@
354 pinctrl-0 = <&rgmii_pins>;
355 snps,reset-active-low;
356 snps,reset-delays-us = <0 10000 50000>;
357 - snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>;
358 + snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_LOW>;
359 tx_delay = <0x10>;
360 rx_delay = <0x10>;
361 status = "okay";
362 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
363 index 62ea288a1a70..45b86933c6ea 100644
364 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
365 +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
366 @@ -101,7 +101,7 @@
367
368 vcc5v0_host: vcc5v0-host-regulator {
369 compatible = "regulator-fixed";
370 - gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
371 + gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
372 enable-active-low;
373 pinctrl-names = "default";
374 pinctrl-0 = <&vcc5v0_host_en>;
375 @@ -157,7 +157,7 @@
376 phy-mode = "rgmii";
377 pinctrl-names = "default";
378 pinctrl-0 = <&rgmii_pins>;
379 - snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
380 + snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
381 snps,reset-active-low;
382 snps,reset-delays-us = <0 10000 50000>;
383 tx_delay = <0x10>;
384 diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
385 index 9bfa17015768..c432bfafe63e 100644
386 --- a/arch/m68k/mac/iop.c
387 +++ b/arch/m68k/mac/iop.c
388 @@ -183,7 +183,7 @@ static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8
389
390 static __inline__ void iop_stop(volatile struct mac_iop *iop)
391 {
392 - iop->status_ctrl &= ~IOP_RUN;
393 + iop->status_ctrl = IOP_AUTOINC;
394 }
395
396 static __inline__ void iop_start(volatile struct mac_iop *iop)
397 @@ -191,14 +191,9 @@ static __inline__ void iop_start(volatile struct mac_iop *iop)
398 iop->status_ctrl = IOP_RUN | IOP_AUTOINC;
399 }
400
401 -static __inline__ void iop_bypass(volatile struct mac_iop *iop)
402 -{
403 - iop->status_ctrl |= IOP_BYPASS;
404 -}
405 -
406 static __inline__ void iop_interrupt(volatile struct mac_iop *iop)
407 {
408 - iop->status_ctrl |= IOP_IRQ;
409 + iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC;
410 }
411
412 static int iop_alive(volatile struct mac_iop *iop)
413 @@ -244,7 +239,6 @@ void __init iop_preinit(void)
414 } else {
415 iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA;
416 }
417 - iop_base[IOP_NUM_SCC]->status_ctrl = 0x87;
418 iop_scc_present = 1;
419 } else {
420 iop_base[IOP_NUM_SCC] = NULL;
421 @@ -256,7 +250,7 @@ void __init iop_preinit(void)
422 } else {
423 iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA;
424 }
425 - iop_base[IOP_NUM_ISM]->status_ctrl = 0;
426 + iop_stop(iop_base[IOP_NUM_ISM]);
427 iop_ism_present = 1;
428 } else {
429 iop_base[IOP_NUM_ISM] = NULL;
430 @@ -416,7 +410,8 @@ static void iop_handle_send(uint iop_num, uint chan)
431 msg->status = IOP_MSGSTATUS_UNUSED;
432 msg = msg->next;
433 iop_send_queue[iop_num][chan] = msg;
434 - if (msg) iop_do_send(msg);
435 + if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE)
436 + iop_do_send(msg);
437 }
438
439 /*
440 @@ -490,16 +485,12 @@ int iop_send_message(uint iop_num, uint chan, void *privdata,
441
442 if (!(q = iop_send_queue[iop_num][chan])) {
443 iop_send_queue[iop_num][chan] = msg;
444 + iop_do_send(msg);
445 } else {
446 while (q->next) q = q->next;
447 q->next = msg;
448 }
449
450 - if (iop_readb(iop_base[iop_num],
451 - IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) {
452 - iop_do_send(msg);
453 - }
454 -
455 return 0;
456 }
457
458 diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
459 index cc88a08bc1f7..4017398519cf 100644
460 --- a/arch/mips/cavium-octeon/octeon-usb.c
461 +++ b/arch/mips/cavium-octeon/octeon-usb.c
462 @@ -518,6 +518,7 @@ static int __init dwc3_octeon_device_init(void)
463
464 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
465 if (res == NULL) {
466 + put_device(&pdev->dev);
467 dev_err(&pdev->dev, "No memory resources\n");
468 return -ENXIO;
469 }
470 @@ -529,8 +530,10 @@ static int __init dwc3_octeon_device_init(void)
471 * know the difference.
472 */
473 base = devm_ioremap_resource(&pdev->dev, res);
474 - if (IS_ERR(base))
475 + if (IS_ERR(base)) {
476 + put_device(&pdev->dev);
477 return PTR_ERR(base);
478 + }
479
480 mutex_lock(&dwc3_octeon_clocks_mutex);
481 dwc3_octeon_clocks_start(&pdev->dev, (u64)base);
482 diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
483 index 6ce76b18186e..c4b1c6cf2660 100644
484 --- a/arch/mips/pci/pci-xtalk-bridge.c
485 +++ b/arch/mips/pci/pci-xtalk-bridge.c
486 @@ -539,6 +539,7 @@ err_free_resource:
487 pci_free_resource_list(&host->windows);
488 err_remove_domain:
489 irq_domain_remove(domain);
490 + irq_domain_free_fwnode(fn);
491 return err;
492 }
493
494 @@ -546,8 +547,10 @@ static int bridge_remove(struct platform_device *pdev)
495 {
496 struct pci_bus *bus = platform_get_drvdata(pdev);
497 struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
498 + struct fwnode_handle *fn = bc->domain->fwnode;
499
500 irq_domain_remove(bc->domain);
501 + irq_domain_free_fwnode(fn);
502 pci_lock_rescan_remove();
503 pci_stop_root_bus(bus);
504 pci_remove_root_bus(bus);
505 diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
506 index dbaaca84f27f..640d46edf32e 100644
507 --- a/arch/parisc/include/asm/barrier.h
508 +++ b/arch/parisc/include/asm/barrier.h
509 @@ -26,6 +26,67 @@
510 #define __smp_rmb() mb()
511 #define __smp_wmb() mb()
512
513 +#define __smp_store_release(p, v) \
514 +do { \
515 + typeof(p) __p = (p); \
516 + union { typeof(*p) __val; char __c[1]; } __u = \
517 + { .__val = (__force typeof(*p)) (v) }; \
518 + compiletime_assert_atomic_type(*p); \
519 + switch (sizeof(*p)) { \
520 + case 1: \
521 + asm volatile("stb,ma %0,0(%1)" \
522 + : : "r"(*(__u8 *)__u.__c), "r"(__p) \
523 + : "memory"); \
524 + break; \
525 + case 2: \
526 + asm volatile("sth,ma %0,0(%1)" \
527 + : : "r"(*(__u16 *)__u.__c), "r"(__p) \
528 + : "memory"); \
529 + break; \
530 + case 4: \
531 + asm volatile("stw,ma %0,0(%1)" \
532 + : : "r"(*(__u32 *)__u.__c), "r"(__p) \
533 + : "memory"); \
534 + break; \
535 + case 8: \
536 + if (IS_ENABLED(CONFIG_64BIT)) \
537 + asm volatile("std,ma %0,0(%1)" \
538 + : : "r"(*(__u64 *)__u.__c), "r"(__p) \
539 + : "memory"); \
540 + break; \
541 + } \
542 +} while (0)
543 +
544 +#define __smp_load_acquire(p) \
545 +({ \
546 + union { typeof(*p) __val; char __c[1]; } __u; \
547 + typeof(p) __p = (p); \
548 + compiletime_assert_atomic_type(*p); \
549 + switch (sizeof(*p)) { \
550 + case 1: \
551 + asm volatile("ldb,ma 0(%1),%0" \
552 + : "=r"(*(__u8 *)__u.__c) : "r"(__p) \
553 + : "memory"); \
554 + break; \
555 + case 2: \
556 + asm volatile("ldh,ma 0(%1),%0" \
557 + : "=r"(*(__u16 *)__u.__c) : "r"(__p) \
558 + : "memory"); \
559 + break; \
560 + case 4: \
561 + asm volatile("ldw,ma 0(%1),%0" \
562 + : "=r"(*(__u32 *)__u.__c) : "r"(__p) \
563 + : "memory"); \
564 + break; \
565 + case 8: \
566 + if (IS_ENABLED(CONFIG_64BIT)) \
567 + asm volatile("ldd,ma 0(%1),%0" \
568 + : "=r"(*(__u64 *)__u.__c) : "r"(__p) \
569 + : "memory"); \
570 + break; \
571 + } \
572 + __u.__val; \
573 +})
574 #include <asm-generic/barrier.h>
575
576 #endif /* !__ASSEMBLY__ */
577 diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
578 index 197d2247e4db..16aec9ba2580 100644
579 --- a/arch/parisc/include/asm/spinlock.h
580 +++ b/arch/parisc/include/asm/spinlock.h
581 @@ -37,12 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
582 volatile unsigned int *a;
583
584 a = __ldcw_align(x);
585 -#ifdef CONFIG_SMP
586 - (void) __ldcw(a);
587 -#else
588 - mb();
589 -#endif
590 - *a = 1;
591 + /* Release with ordered store. */
592 + __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
593 }
594
595 static inline int arch_spin_trylock(arch_spinlock_t *x)
596 diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
597 index b96d74496977..873bf3434da9 100644
598 --- a/arch/parisc/kernel/entry.S
599 +++ b/arch/parisc/kernel/entry.S
600 @@ -454,7 +454,6 @@
601 nop
602 LDREG 0(\ptp),\pte
603 bb,<,n \pte,_PAGE_PRESENT_BIT,3f
604 - LDCW 0(\tmp),\tmp1
605 b \fault
606 stw \spc,0(\tmp)
607 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
608 @@ -464,23 +463,26 @@
609 3:
610 .endm
611
612 - /* Release pa_tlb_lock lock without reloading lock address. */
613 - .macro tlb_unlock0 spc,tmp,tmp1
614 + /* Release pa_tlb_lock lock without reloading lock address.
615 + Note that the values in the register spc are limited to
616 + NR_SPACE_IDS (262144). Thus, the stw instruction always
617 + stores a nonzero value even when register spc is 64 bits.
618 + We use an ordered store to ensure all prior accesses are
619 + performed prior to releasing the lock. */
620 + .macro tlb_unlock0 spc,tmp
621 #ifdef CONFIG_SMP
622 98: or,COND(=) %r0,\spc,%r0
623 - LDCW 0(\tmp),\tmp1
624 - or,COND(=) %r0,\spc,%r0
625 - stw \spc,0(\tmp)
626 + stw,ma \spc,0(\tmp)
627 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
628 #endif
629 .endm
630
631 /* Release pa_tlb_lock lock. */
632 - .macro tlb_unlock1 spc,tmp,tmp1
633 + .macro tlb_unlock1 spc,tmp
634 #ifdef CONFIG_SMP
635 98: load_pa_tlb_lock \tmp
636 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
637 - tlb_unlock0 \spc,\tmp,\tmp1
638 + tlb_unlock0 \spc,\tmp
639 #endif
640 .endm
641
642 @@ -1163,7 +1165,7 @@ dtlb_miss_20w:
643
644 idtlbt pte,prot
645
646 - tlb_unlock1 spc,t0,t1
647 + tlb_unlock1 spc,t0
648 rfir
649 nop
650
651 @@ -1189,7 +1191,7 @@ nadtlb_miss_20w:
652
653 idtlbt pte,prot
654
655 - tlb_unlock1 spc,t0,t1
656 + tlb_unlock1 spc,t0
657 rfir
658 nop
659
660 @@ -1223,7 +1225,7 @@ dtlb_miss_11:
661
662 mtsp t1, %sr1 /* Restore sr1 */
663
664 - tlb_unlock1 spc,t0,t1
665 + tlb_unlock1 spc,t0
666 rfir
667 nop
668
669 @@ -1256,7 +1258,7 @@ nadtlb_miss_11:
670
671 mtsp t1, %sr1 /* Restore sr1 */
672
673 - tlb_unlock1 spc,t0,t1
674 + tlb_unlock1 spc,t0
675 rfir
676 nop
677
678 @@ -1285,7 +1287,7 @@ dtlb_miss_20:
679
680 idtlbt pte,prot
681
682 - tlb_unlock1 spc,t0,t1
683 + tlb_unlock1 spc,t0
684 rfir
685 nop
686
687 @@ -1313,7 +1315,7 @@ nadtlb_miss_20:
688
689 idtlbt pte,prot
690
691 - tlb_unlock1 spc,t0,t1
692 + tlb_unlock1 spc,t0
693 rfir
694 nop
695
696 @@ -1420,7 +1422,7 @@ itlb_miss_20w:
697
698 iitlbt pte,prot
699
700 - tlb_unlock1 spc,t0,t1
701 + tlb_unlock1 spc,t0
702 rfir
703 nop
704
705 @@ -1444,7 +1446,7 @@ naitlb_miss_20w:
706
707 iitlbt pte,prot
708
709 - tlb_unlock1 spc,t0,t1
710 + tlb_unlock1 spc,t0
711 rfir
712 nop
713
714 @@ -1478,7 +1480,7 @@ itlb_miss_11:
715
716 mtsp t1, %sr1 /* Restore sr1 */
717
718 - tlb_unlock1 spc,t0,t1
719 + tlb_unlock1 spc,t0
720 rfir
721 nop
722
723 @@ -1502,7 +1504,7 @@ naitlb_miss_11:
724
725 mtsp t1, %sr1 /* Restore sr1 */
726
727 - tlb_unlock1 spc,t0,t1
728 + tlb_unlock1 spc,t0
729 rfir
730 nop
731
732 @@ -1532,7 +1534,7 @@ itlb_miss_20:
733
734 iitlbt pte,prot
735
736 - tlb_unlock1 spc,t0,t1
737 + tlb_unlock1 spc,t0
738 rfir
739 nop
740
741 @@ -1552,7 +1554,7 @@ naitlb_miss_20:
742
743 iitlbt pte,prot
744
745 - tlb_unlock1 spc,t0,t1
746 + tlb_unlock1 spc,t0
747 rfir
748 nop
749
750 @@ -1582,7 +1584,7 @@ dbit_trap_20w:
751
752 idtlbt pte,prot
753
754 - tlb_unlock0 spc,t0,t1
755 + tlb_unlock0 spc,t0
756 rfir
757 nop
758 #else
759 @@ -1608,7 +1610,7 @@ dbit_trap_11:
760
761 mtsp t1, %sr1 /* Restore sr1 */
762
763 - tlb_unlock0 spc,t0,t1
764 + tlb_unlock0 spc,t0
765 rfir
766 nop
767
768 @@ -1628,7 +1630,7 @@ dbit_trap_20:
769
770 idtlbt pte,prot
771
772 - tlb_unlock0 spc,t0,t1
773 + tlb_unlock0 spc,t0
774 rfir
775 nop
776 #endif
777 diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
778 index 97ac707c6bff..a37814cb66c7 100644
779 --- a/arch/parisc/kernel/syscall.S
780 +++ b/arch/parisc/kernel/syscall.S
781 @@ -640,11 +640,7 @@ cas_action:
782 sub,<> %r28, %r25, %r0
783 2: stw %r24, 0(%r26)
784 /* Free lock */
785 -#ifdef CONFIG_SMP
786 -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
787 -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
788 -#endif
789 - stw %r20, 0(%sr2,%r20)
790 + stw,ma %r20, 0(%sr2,%r20)
791 #if ENABLE_LWS_DEBUG
792 /* Clear thread register indicator */
793 stw %r0, 4(%sr2,%r20)
794 @@ -658,11 +654,7 @@ cas_action:
795 3:
796 /* Error occurred on load or store */
797 /* Free lock */
798 -#ifdef CONFIG_SMP
799 -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
800 -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
801 -#endif
802 - stw %r20, 0(%sr2,%r20)
803 + stw,ma %r20, 0(%sr2,%r20)
804 #if ENABLE_LWS_DEBUG
805 stw %r0, 4(%sr2,%r20)
806 #endif
807 @@ -863,11 +855,7 @@ cas2_action:
808
809 cas2_end:
810 /* Free lock */
811 -#ifdef CONFIG_SMP
812 -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
813 -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
814 -#endif
815 - stw %r20, 0(%sr2,%r20)
816 + stw,ma %r20, 0(%sr2,%r20)
817 /* Enable interrupts */
818 ssm PSW_SM_I, %r0
819 /* Return to userspace, set no error */
820 @@ -877,11 +865,7 @@ cas2_end:
821 22:
822 /* Error occurred on load or store */
823 /* Free lock */
824 -#ifdef CONFIG_SMP
825 -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
826 -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
827 -#endif
828 - stw %r20, 0(%sr2,%r20)
829 + stw,ma %r20, 0(%sr2,%r20)
830 ssm PSW_SM_I, %r0
831 ldo 1(%r0),%r28
832 b lws_exit
833 diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
834 index dfbd7f22eef5..8c69bd07ada6 100644
835 --- a/arch/powerpc/boot/Makefile
836 +++ b/arch/powerpc/boot/Makefile
837 @@ -119,7 +119,7 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
838 elf_util.c $(zlib-y) devtree.c stdlib.c \
839 oflib.c ofconsole.c cuboot.c
840
841 -src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c
842 +src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c
843 src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c
844 ifndef CONFIG_PPC64_BOOT_WRAPPER
845 src-wlib-y += crtsavres.S
846 diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
847 index 9457863147f9..00179cd6bdd0 100644
848 --- a/arch/powerpc/boot/serial.c
849 +++ b/arch/powerpc/boot/serial.c
850 @@ -128,7 +128,7 @@ int serial_console_init(void)
851 dt_is_compatible(devp, "fsl,cpm2-smc-uart"))
852 rc = cpm_console_init(devp, &serial_cd);
853 #endif
854 -#ifdef CONFIG_PPC_MPC52XX
855 +#ifdef CONFIG_PPC_MPC52xx
856 else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart"))
857 rc = mpc5200_psc_console_init(devp, &serial_cd);
858 #endif
859 diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
860 index 7426d7a90e1e..7aba3c7ea25c 100644
861 --- a/arch/powerpc/include/asm/perf_event.h
862 +++ b/arch/powerpc/include/asm/perf_event.h
863 @@ -12,6 +12,8 @@
864
865 #ifdef CONFIG_PPC_PERF_CTRS
866 #include <asm/perf_event_server.h>
867 +#else
868 +static inline bool is_sier_available(void) { return false; }
869 #endif
870
871 #ifdef CONFIG_FSL_EMB_PERF_EVENT
872 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
873 index ee3ada66deb5..c41220f4aad9 100644
874 --- a/arch/powerpc/include/asm/ptrace.h
875 +++ b/arch/powerpc/include/asm/ptrace.h
876 @@ -203,7 +203,7 @@ do { \
877 #endif /* __powerpc64__ */
878
879 #define arch_has_single_step() (1)
880 -#ifndef CONFIG_BOOK3S_601
881 +#ifndef CONFIG_PPC_BOOK3S_601
882 #define arch_has_block_step() (true)
883 #else
884 #define arch_has_block_step() (false)
885 diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
886 index 3c1887351c71..bd227e0eab07 100644
887 --- a/arch/powerpc/include/asm/rtas.h
888 +++ b/arch/powerpc/include/asm/rtas.h
889 @@ -368,8 +368,6 @@ extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
890 extern void rtas_progress(char *s, unsigned short hex);
891 extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
892 extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
893 -extern int rtas_online_cpus_mask(cpumask_var_t cpus);
894 -extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
895 extern int rtas_ibm_suspend_me(u64 handle);
896
897 struct rtc_time;
898 diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
899 index d2d2c4bd8435..6047402b0a4d 100644
900 --- a/arch/powerpc/include/asm/timex.h
901 +++ b/arch/powerpc/include/asm/timex.h
902 @@ -17,7 +17,7 @@ typedef unsigned long cycles_t;
903
904 static inline cycles_t get_cycles(void)
905 {
906 - if (IS_ENABLED(CONFIG_BOOK3S_601))
907 + if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
908 return 0;
909
910 return mftb();
911 diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
912 index c5fa251b8950..01210593d60c 100644
913 --- a/arch/powerpc/kernel/rtas.c
914 +++ b/arch/powerpc/kernel/rtas.c
915 @@ -842,96 +842,6 @@ static void rtas_percpu_suspend_me(void *info)
916 __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
917 }
918
919 -enum rtas_cpu_state {
920 - DOWN,
921 - UP,
922 -};
923 -
924 -#ifndef CONFIG_SMP
925 -static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
926 - cpumask_var_t cpus)
927 -{
928 - if (!cpumask_empty(cpus)) {
929 - cpumask_clear(cpus);
930 - return -EINVAL;
931 - } else
932 - return 0;
933 -}
934 -#else
935 -/* On return cpumask will be altered to indicate CPUs changed.
936 - * CPUs with states changed will be set in the mask,
937 - * CPUs with status unchanged will be unset in the mask. */
938 -static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
939 - cpumask_var_t cpus)
940 -{
941 - int cpu;
942 - int cpuret = 0;
943 - int ret = 0;
944 -
945 - if (cpumask_empty(cpus))
946 - return 0;
947 -
948 - for_each_cpu(cpu, cpus) {
949 - struct device *dev = get_cpu_device(cpu);
950 -
951 - switch (state) {
952 - case DOWN:
953 - cpuret = device_offline(dev);
954 - break;
955 - case UP:
956 - cpuret = device_online(dev);
957 - break;
958 - }
959 - if (cpuret < 0) {
960 - pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
961 - __func__,
962 - ((state == UP) ? "up" : "down"),
963 - cpu, cpuret);
964 - if (!ret)
965 - ret = cpuret;
966 - if (state == UP) {
967 - /* clear bits for unchanged cpus, return */
968 - cpumask_shift_right(cpus, cpus, cpu);
969 - cpumask_shift_left(cpus, cpus, cpu);
970 - break;
971 - } else {
972 - /* clear bit for unchanged cpu, continue */
973 - cpumask_clear_cpu(cpu, cpus);
974 - }
975 - }
976 - cond_resched();
977 - }
978 -
979 - return ret;
980 -}
981 -#endif
982 -
983 -int rtas_online_cpus_mask(cpumask_var_t cpus)
984 -{
985 - int ret;
986 -
987 - ret = rtas_cpu_state_change_mask(UP, cpus);
988 -
989 - if (ret) {
990 - cpumask_var_t tmp_mask;
991 -
992 - if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
993 - return ret;
994 -
995 - /* Use tmp_mask to preserve cpus mask from first failure */
996 - cpumask_copy(tmp_mask, cpus);
997 - rtas_offline_cpus_mask(tmp_mask);
998 - free_cpumask_var(tmp_mask);
999 - }
1000 -
1001 - return ret;
1002 -}
1003 -
1004 -int rtas_offline_cpus_mask(cpumask_var_t cpus)
1005 -{
1006 - return rtas_cpu_state_change_mask(DOWN, cpus);
1007 -}
1008 -
1009 int rtas_ibm_suspend_me(u64 handle)
1010 {
1011 long state;
1012 @@ -939,8 +849,6 @@ int rtas_ibm_suspend_me(u64 handle)
1013 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
1014 struct rtas_suspend_me_data data;
1015 DECLARE_COMPLETION_ONSTACK(done);
1016 - cpumask_var_t offline_mask;
1017 - int cpuret;
1018
1019 if (!rtas_service_present("ibm,suspend-me"))
1020 return -ENOSYS;
1021 @@ -961,9 +869,6 @@ int rtas_ibm_suspend_me(u64 handle)
1022 return -EIO;
1023 }
1024
1025 - if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
1026 - return -ENOMEM;
1027 -
1028 atomic_set(&data.working, 0);
1029 atomic_set(&data.done, 0);
1030 atomic_set(&data.error, 0);
1031 @@ -972,24 +877,8 @@ int rtas_ibm_suspend_me(u64 handle)
1032
1033 lock_device_hotplug();
1034
1035 - /* All present CPUs must be online */
1036 - cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
1037 - cpuret = rtas_online_cpus_mask(offline_mask);
1038 - if (cpuret) {
1039 - pr_err("%s: Could not bring present CPUs online.\n", __func__);
1040 - atomic_set(&data.error, cpuret);
1041 - goto out;
1042 - }
1043 -
1044 cpu_hotplug_disable();
1045
1046 - /* Check if we raced with a CPU-Offline Operation */
1047 - if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) {
1048 - pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__);
1049 - atomic_set(&data.error, -EAGAIN);
1050 - goto out_hotplug_enable;
1051 - }
1052 -
1053 /* Call function on all CPUs. One of us will make the
1054 * rtas call
1055 */
1056 @@ -1000,18 +889,11 @@ int rtas_ibm_suspend_me(u64 handle)
1057 if (atomic_read(&data.error) != 0)
1058 printk(KERN_ERR "Error doing global join\n");
1059
1060 -out_hotplug_enable:
1061 - cpu_hotplug_enable();
1062
1063 - /* Take down CPUs not online prior to suspend */
1064 - cpuret = rtas_offline_cpus_mask(offline_mask);
1065 - if (cpuret)
1066 - pr_warn("%s: Could not restore CPUs to offline state.\n",
1067 - __func__);
1068 + cpu_hotplug_enable();
1069
1070 -out:
1071 unlock_device_hotplug();
1072 - free_cpumask_var(offline_mask);
1073 +
1074 return atomic_read(&data.error);
1075 }
1076 #else /* CONFIG_PPC_PSERIES */
1077 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
1078 index eae9ddaecbcf..efb1ba40274a 100644
1079 --- a/arch/powerpc/kernel/vdso.c
1080 +++ b/arch/powerpc/kernel/vdso.c
1081 @@ -682,7 +682,7 @@ int vdso_getcpu_init(void)
1082 node = cpu_to_node(cpu);
1083 WARN_ON_ONCE(node > 0xffff);
1084
1085 - val = (cpu & 0xfff) | ((node & 0xffff) << 16);
1086 + val = (cpu & 0xffff) | ((node & 0xffff) << 16);
1087 mtspr(SPRN_SPRG_VDSO_WRITE, val);
1088 get_paca()->sprg_vdso = val;
1089
1090 diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
1091 index 66f307e873dc..432fd9fa8c3f 100644
1092 --- a/arch/powerpc/mm/book3s64/pkeys.c
1093 +++ b/arch/powerpc/mm/book3s64/pkeys.c
1094 @@ -83,13 +83,17 @@ static int pkey_initialize(void)
1095 scan_pkey_feature();
1096
1097 /*
1098 - * Let's assume 32 pkeys on P8 bare metal, if its not defined by device
1099 - * tree. We make this exception since skiboot forgot to expose this
1100 - * property on power8.
1101 + * Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device
1102 + * tree. We make this exception since some version of skiboot forgot to
1103 + * expose this property on power8/9.
1104 */
1105 - if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR) &&
1106 - cpu_has_feature(CPU_FTRS_POWER8))
1107 - pkeys_total = 32;
1108 + if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR)) {
1109 + unsigned long pvr = mfspr(SPRN_PVR);
1110 +
1111 + if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
1112 + PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
1113 + pkeys_total = 32;
1114 + }
1115
1116 /*
1117 * Adjust the upper limit, based on the number of bits supported by
1118 diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
1119 index 0a24a5a185f0..f789693f61f4 100644
1120 --- a/arch/powerpc/platforms/pseries/suspend.c
1121 +++ b/arch/powerpc/platforms/pseries/suspend.c
1122 @@ -132,15 +132,11 @@ static ssize_t store_hibernate(struct device *dev,
1123 struct device_attribute *attr,
1124 const char *buf, size_t count)
1125 {
1126 - cpumask_var_t offline_mask;
1127 int rc;
1128
1129 if (!capable(CAP_SYS_ADMIN))
1130 return -EPERM;
1131
1132 - if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
1133 - return -ENOMEM;
1134 -
1135 stream_id = simple_strtoul(buf, NULL, 16);
1136
1137 do {
1138 @@ -150,32 +146,16 @@ static ssize_t store_hibernate(struct device *dev,
1139 } while (rc == -EAGAIN);
1140
1141 if (!rc) {
1142 - /* All present CPUs must be online */
1143 - cpumask_andnot(offline_mask, cpu_present_mask,
1144 - cpu_online_mask);
1145 - rc = rtas_online_cpus_mask(offline_mask);
1146 - if (rc) {
1147 - pr_err("%s: Could not bring present CPUs online.\n",
1148 - __func__);
1149 - goto out;
1150 - }
1151 -
1152 stop_topology_update();
1153 rc = pm_suspend(PM_SUSPEND_MEM);
1154 start_topology_update();
1155 -
1156 - /* Take down CPUs not online prior to suspend */
1157 - if (!rtas_offline_cpus_mask(offline_mask))
1158 - pr_warn("%s: Could not restore CPUs to offline "
1159 - "state.\n", __func__);
1160 }
1161
1162 stream_id = 0;
1163
1164 if (!rc)
1165 rc = count;
1166 -out:
1167 - free_cpumask_var(offline_mask);
1168 +
1169 return rc;
1170 }
1171
1172 diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
1173 index 364e3a89c096..4fa7a562c6fc 100644
1174 --- a/arch/s390/mm/gmap.c
1175 +++ b/arch/s390/mm/gmap.c
1176 @@ -2485,23 +2485,36 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
1177 }
1178 EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
1179
1180 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1181 +static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
1182 + unsigned long end, struct mm_walk *walk)
1183 +{
1184 + struct vm_area_struct *vma = walk->vma;
1185 +
1186 + split_huge_pmd(vma, pmd, addr);
1187 + return 0;
1188 +}
1189 +
1190 +static const struct mm_walk_ops thp_split_walk_ops = {
1191 + .pmd_entry = thp_split_walk_pmd_entry,
1192 +};
1193 +
1194 static inline void thp_split_mm(struct mm_struct *mm)
1195 {
1196 -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1197 struct vm_area_struct *vma;
1198 - unsigned long addr;
1199
1200 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
1201 - for (addr = vma->vm_start;
1202 - addr < vma->vm_end;
1203 - addr += PAGE_SIZE)
1204 - follow_page(vma, addr, FOLL_SPLIT);
1205 vma->vm_flags &= ~VM_HUGEPAGE;
1206 vma->vm_flags |= VM_NOHUGEPAGE;
1207 + walk_page_vma(vma, &thp_split_walk_ops, NULL);
1208 }
1209 mm->def_flags |= VM_NOHUGEPAGE;
1210 -#endif
1211 }
1212 +#else
1213 +static inline void thp_split_mm(struct mm_struct *mm)
1214 +{
1215 +}
1216 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1217
1218 /*
1219 * Remove all empty zero pages from the mapping for lazy refaulting
1220 diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
1221 index 5f6a5af9c489..77043a82da51 100644
1222 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
1223 +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
1224 @@ -127,10 +127,6 @@ ddq_add_8:
1225
1226 /* generate a unique variable for ddq_add_x */
1227
1228 -.macro setddq n
1229 - var_ddq_add = ddq_add_\n
1230 -.endm
1231 -
1232 /* generate a unique variable for xmm register */
1233 .macro setxdata n
1234 var_xdata = %xmm\n
1235 @@ -140,9 +136,7 @@ ddq_add_8:
1236
1237 .macro club name, id
1238 .altmacro
1239 - .if \name == DDQ_DATA
1240 - setddq %\id
1241 - .elseif \name == XDATA
1242 + .if \name == XDATA
1243 setxdata %\id
1244 .endif
1245 .noaltmacro
1246 @@ -165,9 +159,8 @@ ddq_add_8:
1247
1248 .set i, 1
1249 .rept (by - 1)
1250 - club DDQ_DATA, i
1251 club XDATA, i
1252 - vpaddq var_ddq_add(%rip), xcounter, var_xdata
1253 + vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata
1254 vptest ddq_low_msk(%rip), var_xdata
1255 jnz 1f
1256 vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata
1257 @@ -180,8 +173,7 @@ ddq_add_8:
1258 vmovdqa 1*16(p_keys), xkeyA
1259
1260 vpxor xkey0, xdata0, xdata0
1261 - club DDQ_DATA, by
1262 - vpaddq var_ddq_add(%rip), xcounter, xcounter
1263 + vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter
1264 vptest ddq_low_msk(%rip), xcounter
1265 jnz 1f
1266 vpaddq ddq_high_add_1(%rip), xcounter, xcounter
1267 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
1268 index e40bdf024ba7..9afeb58c910e 100644
1269 --- a/arch/x86/crypto/aesni-intel_asm.S
1270 +++ b/arch/x86/crypto/aesni-intel_asm.S
1271 @@ -266,7 +266,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
1272 PSHUFB_XMM %xmm2, %xmm0
1273 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
1274
1275 - PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
1276 + PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7
1277 movdqu HashKey(%arg2), %xmm13
1278
1279 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
1280 @@ -978,7 +978,7 @@ _initial_blocks_done\@:
1281 * arg1, %arg3, %arg4 are used as pointers only, not modified
1282 * %r11 is the data offset value
1283 */
1284 -.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
1285 +.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
1286 TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1287
1288 movdqa \XMM1, \XMM5
1289 @@ -1186,7 +1186,7 @@ aes_loop_par_enc_done\@:
1290 * arg1, %arg3, %arg4 are used as pointers only, not modified
1291 * %r11 is the data offset value
1292 */
1293 -.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
1294 +.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
1295 TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1296
1297 movdqa \XMM1, \XMM5
1298 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
1299 index 16699101fd2f..ea6d9da9b094 100644
1300 --- a/arch/x86/kernel/apic/io_apic.c
1301 +++ b/arch/x86/kernel/apic/io_apic.c
1302 @@ -2348,8 +2348,13 @@ static int mp_irqdomain_create(int ioapic)
1303
1304 static void ioapic_destroy_irqdomain(int idx)
1305 {
1306 + struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg;
1307 + struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode;
1308 +
1309 if (ioapics[idx].irqdomain) {
1310 irq_domain_remove(ioapics[idx].irqdomain);
1311 + if (!cfg->dev)
1312 + irq_domain_free_fwnode(fn);
1313 ioapics[idx].irqdomain = NULL;
1314 }
1315 }
1316 diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
1317 index 1f30117b24ba..eb2d41c1816d 100644
1318 --- a/arch/x86/kernel/cpu/mce/inject.c
1319 +++ b/arch/x86/kernel/cpu/mce/inject.c
1320 @@ -511,7 +511,7 @@ static void do_inject(void)
1321 */
1322 if (inj_type == DFR_INT_INJ) {
1323 i_mce.status |= MCI_STATUS_DEFERRED;
1324 - i_mce.status |= (i_mce.status & ~MCI_STATUS_UC);
1325 + i_mce.status &= ~MCI_STATUS_UC;
1326 }
1327
1328 /*
1329 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
1330 index af64519b2695..da3cc3a10d63 100644
1331 --- a/arch/x86/kernel/process_64.c
1332 +++ b/arch/x86/kernel/process_64.c
1333 @@ -316,7 +316,7 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
1334 */
1335 mutex_lock(&task->mm->context.lock);
1336 ldt = task->mm->context.ldt;
1337 - if (unlikely(idx >= ldt->nr_entries))
1338 + if (unlikely(!ldt || idx >= ldt->nr_entries))
1339 base = 0;
1340 else
1341 base = get_desc_base(ldt->entries + idx);
1342 diff --git a/block/blk-iocost.c b/block/blk-iocost.c
1343 index 4d2bda812d9b..dcc6685d5bec 100644
1344 --- a/block/blk-iocost.c
1345 +++ b/block/blk-iocost.c
1346 @@ -1377,7 +1377,7 @@ static void ioc_timer_fn(struct timer_list *timer)
1347 * should have woken up in the last period and expire idle iocgs.
1348 */
1349 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
1350 - if (!waitqueue_active(&iocg->waitq) && iocg->abs_vdebt &&
1351 + if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
1352 !iocg_is_idle(iocg))
1353 continue;
1354
1355 diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
1356 index 728d752f7adc..85f799c9c25c 100644
1357 --- a/drivers/acpi/acpica/exprep.c
1358 +++ b/drivers/acpi/acpica/exprep.c
1359 @@ -473,10 +473,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
1360 (u8)access_byte_width;
1361 }
1362 }
1363 - /* An additional reference for the container */
1364 -
1365 - acpi_ut_add_reference(obj_desc->field.region_obj);
1366 -
1367 ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
1368 "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
1369 obj_desc->field.start_field_bit_offset,
1370 diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
1371 index c365faf4e6cd..4c0d4e434196 100644
1372 --- a/drivers/acpi/acpica/utdelete.c
1373 +++ b/drivers/acpi/acpica/utdelete.c
1374 @@ -568,11 +568,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
1375 next_object = object->buffer_field.buffer_obj;
1376 break;
1377
1378 - case ACPI_TYPE_LOCAL_REGION_FIELD:
1379 -
1380 - next_object = object->field.region_obj;
1381 - break;
1382 -
1383 case ACPI_TYPE_LOCAL_BANK_FIELD:
1384
1385 next_object = object->bank_field.bank_obj;
1386 @@ -613,6 +608,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
1387 }
1388 break;
1389
1390 + case ACPI_TYPE_LOCAL_REGION_FIELD:
1391 case ACPI_TYPE_REGION:
1392 default:
1393
1394 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
1395 index 565e35e69f24..bddbbf5b3dda 100644
1396 --- a/drivers/block/loop.c
1397 +++ b/drivers/block/loop.c
1398 @@ -2325,6 +2325,8 @@ static void __exit loop_exit(void)
1399
1400 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
1401
1402 + mutex_lock(&loop_ctl_mutex);
1403 +
1404 idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
1405 idr_destroy(&loop_index_idr);
1406
1407 @@ -2332,6 +2334,8 @@ static void __exit loop_exit(void)
1408 unregister_blkdev(LOOP_MAJOR, "loop");
1409
1410 misc_deregister(&loop_misc);
1411 +
1412 + mutex_unlock(&loop_ctl_mutex);
1413 }
1414
1415 module_init(loop_init);
1416 diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
1417 index 0f3a020703ab..4c7978cb1786 100644
1418 --- a/drivers/bluetooth/btmrvl_sdio.c
1419 +++ b/drivers/bluetooth/btmrvl_sdio.c
1420 @@ -328,7 +328,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
1421
1422 static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
1423 .helper = NULL,
1424 - .firmware = "mrvl/sd8977_uapsta.bin",
1425 + .firmware = "mrvl/sdsd8977_combo_v2.bin",
1426 .reg = &btmrvl_reg_8977,
1427 .support_pscan_win_report = true,
1428 .sd_blksz_fw_dl = 256,
1429 @@ -346,7 +346,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
1430
1431 static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
1432 .helper = NULL,
1433 - .firmware = "mrvl/sd8997_uapsta.bin",
1434 + .firmware = "mrvl/sdsd8997_combo_v4.bin",
1435 .reg = &btmrvl_reg_8997,
1436 .support_pscan_win_report = true,
1437 .sd_blksz_fw_dl = 256,
1438 @@ -1831,6 +1831,6 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
1439 MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
1440 MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
1441 MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
1442 -MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin");
1443 +MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin");
1444 MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin");
1445 -MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
1446 +MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin");
1447 diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
1448 index 813338288453..b7de7cb8cca9 100644
1449 --- a/drivers/bluetooth/btmtksdio.c
1450 +++ b/drivers/bluetooth/btmtksdio.c
1451 @@ -684,7 +684,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
1452 const u8 *fw_ptr;
1453 size_t fw_size;
1454 int err, dlen;
1455 - u8 flag;
1456 + u8 flag, param;
1457
1458 err = request_firmware(&fw, fwname, &hdev->dev);
1459 if (err < 0) {
1460 @@ -692,6 +692,20 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
1461 return err;
1462 }
1463
1464 + /* Power on data RAM the firmware relies on. */
1465 + param = 1;
1466 + wmt_params.op = MTK_WMT_FUNC_CTRL;
1467 + wmt_params.flag = 3;
1468 + wmt_params.dlen = sizeof(param);
1469 + wmt_params.data = &param;
1470 + wmt_params.status = NULL;
1471 +
1472 + err = mtk_hci_wmt_sync(hdev, &wmt_params);
1473 + if (err < 0) {
1474 + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
1475 + return err;
1476 + }
1477 +
1478 fw_ptr = fw->data;
1479 fw_size = fw->size;
1480
1481 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1482 index 9c3b063e1a1f..f3f0529564da 100644
1483 --- a/drivers/bluetooth/btusb.c
1484 +++ b/drivers/bluetooth/btusb.c
1485 @@ -2792,7 +2792,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
1486 const u8 *fw_ptr;
1487 size_t fw_size;
1488 int err, dlen;
1489 - u8 flag;
1490 + u8 flag, param;
1491
1492 err = request_firmware(&fw, fwname, &hdev->dev);
1493 if (err < 0) {
1494 @@ -2800,6 +2800,20 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
1495 return err;
1496 }
1497
1498 + /* Power on data RAM the firmware relies on. */
1499 + param = 1;
1500 + wmt_params.op = BTMTK_WMT_FUNC_CTRL;
1501 + wmt_params.flag = 3;
1502 + wmt_params.dlen = sizeof(param);
1503 + wmt_params.data = &param;
1504 + wmt_params.status = NULL;
1505 +
1506 + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
1507 + if (err < 0) {
1508 + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
1509 + return err;
1510 + }
1511 +
1512 fw_ptr = fw->data;
1513 fw_size = fw->size;
1514
1515 diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
1516 index dacf297baf59..5df0651b6cd5 100644
1517 --- a/drivers/bluetooth/hci_h5.c
1518 +++ b/drivers/bluetooth/hci_h5.c
1519 @@ -790,7 +790,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
1520 if (!h5)
1521 return -ENOMEM;
1522
1523 - set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
1524 + set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);
1525
1526 h5->hu = &h5->serdev_hu;
1527 h5->serdev_hu.serdev = serdev;
1528 diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
1529 index 4652896d4990..ad2f26cb2622 100644
1530 --- a/drivers/bluetooth/hci_serdev.c
1531 +++ b/drivers/bluetooth/hci_serdev.c
1532 @@ -357,7 +357,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
1533 struct hci_dev *hdev = hu->hdev;
1534
1535 clear_bit(HCI_UART_PROTO_READY, &hu->flags);
1536 - hci_unregister_dev(hdev);
1537 + if (test_bit(HCI_UART_REGISTERED, &hu->flags))
1538 + hci_unregister_dev(hdev);
1539 hci_free_dev(hdev);
1540
1541 cancel_work_sync(&hu->write_work);
1542 diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
1543 index f8bc052cd853..770a780dfa54 100644
1544 --- a/drivers/bus/ti-sysc.c
1545 +++ b/drivers/bus/ti-sysc.c
1546 @@ -1371,6 +1371,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1547 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1548 SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
1549 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1550 + SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
1551 + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1552 + SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
1553 + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1554 SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
1555 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1556 SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
1557 @@ -1440,8 +1444,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1558 SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
1559 SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
1560 SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
1561 - SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0),
1562 - SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0),
1563 SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
1564 SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
1565 #endif
1566 diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
1567 index b161bdf60000..0941d38b2d32 100644
1568 --- a/drivers/char/agp/intel-gtt.c
1569 +++ b/drivers/char/agp/intel-gtt.c
1570 @@ -304,8 +304,10 @@ static int intel_gtt_setup_scratch_page(void)
1571 if (intel_private.needs_dmar) {
1572 dma_addr = pci_map_page(intel_private.pcidev, page, 0,
1573 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1574 - if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
1575 + if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
1576 + __free_page(page);
1577 return -EINVAL;
1578 + }
1579
1580 intel_private.scratch_page_dma = dma_addr;
1581 } else
1582 diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
1583 index 58073836b555..1838039b0333 100644
1584 --- a/drivers/char/tpm/tpm-chip.c
1585 +++ b/drivers/char/tpm/tpm-chip.c
1586 @@ -386,13 +386,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
1587 chip->cdev.owner = THIS_MODULE;
1588 chip->cdevs.owner = THIS_MODULE;
1589
1590 - chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1591 - if (!chip->work_space.context_buf) {
1592 - rc = -ENOMEM;
1593 - goto out;
1594 - }
1595 - chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1596 - if (!chip->work_space.session_buf) {
1597 + rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
1598 + if (rc) {
1599 rc = -ENOMEM;
1600 goto out;
1601 }
1602 diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
1603 index 218cb496222a..37f010421a36 100644
1604 --- a/drivers/char/tpm/tpm.h
1605 +++ b/drivers/char/tpm/tpm.h
1606 @@ -177,6 +177,9 @@ struct tpm_header {
1607
1608 #define TPM_TAG_RQU_COMMAND 193
1609
1610 +/* TPM2 specific constants. */
1611 +#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */
1612 +
1613 struct stclear_flags_t {
1614 __be16 tag;
1615 u8 deactivated;
1616 @@ -456,7 +459,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
1617 unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
1618 int tpm2_probe(struct tpm_chip *chip);
1619 int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
1620 -int tpm2_init_space(struct tpm_space *space);
1621 +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size);
1622 void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
1623 void tpm2_flush_space(struct tpm_chip *chip);
1624 int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
1625 diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
1626 index 982d341d8837..784b8b3cb903 100644
1627 --- a/drivers/char/tpm/tpm2-space.c
1628 +++ b/drivers/char/tpm/tpm2-space.c
1629 @@ -38,18 +38,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
1630 }
1631 }
1632
1633 -int tpm2_init_space(struct tpm_space *space)
1634 +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
1635 {
1636 - space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1637 + space->context_buf = kzalloc(buf_size, GFP_KERNEL);
1638 if (!space->context_buf)
1639 return -ENOMEM;
1640
1641 - space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1642 + space->session_buf = kzalloc(buf_size, GFP_KERNEL);
1643 if (space->session_buf == NULL) {
1644 kfree(space->context_buf);
1645 + /* Prevent caller getting a dangling pointer. */
1646 + space->context_buf = NULL;
1647 return -ENOMEM;
1648 }
1649
1650 + space->buf_size = buf_size;
1651 return 0;
1652 }
1653
1654 @@ -311,8 +314,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
1655 sizeof(space->context_tbl));
1656 memcpy(&chip->work_space.session_tbl, &space->session_tbl,
1657 sizeof(space->session_tbl));
1658 - memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE);
1659 - memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE);
1660 + memcpy(chip->work_space.context_buf, space->context_buf,
1661 + space->buf_size);
1662 + memcpy(chip->work_space.session_buf, space->session_buf,
1663 + space->buf_size);
1664
1665 rc = tpm2_load_space(chip);
1666 if (rc) {
1667 @@ -492,7 +497,7 @@ static int tpm2_save_space(struct tpm_chip *chip)
1668 continue;
1669
1670 rc = tpm2_save_context(chip, space->context_tbl[i],
1671 - space->context_buf, PAGE_SIZE,
1672 + space->context_buf, space->buf_size,
1673 &offset);
1674 if (rc == -ENOENT) {
1675 space->context_tbl[i] = 0;
1676 @@ -509,9 +514,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
1677 continue;
1678
1679 rc = tpm2_save_context(chip, space->session_tbl[i],
1680 - space->session_buf, PAGE_SIZE,
1681 + space->session_buf, space->buf_size,
1682 &offset);
1683 -
1684 if (rc == -ENOENT) {
1685 /* handle error saving session, just forget it */
1686 space->session_tbl[i] = 0;
1687 @@ -557,8 +561,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
1688 sizeof(space->context_tbl));
1689 memcpy(&space->session_tbl, &chip->work_space.session_tbl,
1690 sizeof(space->session_tbl));
1691 - memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE);
1692 - memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE);
1693 + memcpy(space->context_buf, chip->work_space.context_buf,
1694 + space->buf_size);
1695 + memcpy(space->session_buf, chip->work_space.session_buf,
1696 + space->buf_size);
1697
1698 return 0;
1699 out:
1700 diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
1701 index 7a0a7051a06f..eef0fb06ea83 100644
1702 --- a/drivers/char/tpm/tpmrm-dev.c
1703 +++ b/drivers/char/tpm/tpmrm-dev.c
1704 @@ -21,7 +21,7 @@ static int tpmrm_open(struct inode *inode, struct file *file)
1705 if (priv == NULL)
1706 return -ENOMEM;
1707
1708 - rc = tpm2_init_space(&priv->space);
1709 + rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE);
1710 if (rc) {
1711 kfree(priv);
1712 return -ENOMEM;
1713 diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c
1714 index 98e884957db8..911a29bd744e 100644
1715 --- a/drivers/clk/bcm/clk-bcm63xx-gate.c
1716 +++ b/drivers/clk/bcm/clk-bcm63xx-gate.c
1717 @@ -155,6 +155,7 @@ static int clk_bcm63xx_probe(struct platform_device *pdev)
1718
1719 for (entry = table; entry->name; entry++)
1720 maxbit = max_t(u8, maxbit, entry->bit);
1721 + maxbit++;
1722
1723 hw = devm_kzalloc(&pdev->dev, struct_size(hw, data.hws, maxbit),
1724 GFP_KERNEL);
1725 diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
1726 index 886f7c5df51a..e3cdb4a282fe 100644
1727 --- a/drivers/clk/clk-scmi.c
1728 +++ b/drivers/clk/clk-scmi.c
1729 @@ -103,6 +103,8 @@ static const struct clk_ops scmi_clk_ops = {
1730 static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
1731 {
1732 int ret;
1733 + unsigned long min_rate, max_rate;
1734 +
1735 struct clk_init_data init = {
1736 .flags = CLK_GET_RATE_NOCACHE,
1737 .num_parents = 0,
1738 @@ -112,9 +114,23 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
1739
1740 sclk->hw.init = &init;
1741 ret = devm_clk_hw_register(dev, &sclk->hw);
1742 - if (!ret)
1743 - clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate,
1744 - sclk->info->range.max_rate);
1745 + if (ret)
1746 + return ret;
1747 +
1748 + if (sclk->info->rate_discrete) {
1749 + int num_rates = sclk->info->list.num_rates;
1750 +
1751 + if (num_rates <= 0)
1752 + return -EINVAL;
1753 +
1754 + min_rate = sclk->info->list.rates[0];
1755 + max_rate = sclk->info->list.rates[num_rates - 1];
1756 + } else {
1757 + min_rate = sclk->info->range.min_rate;
1758 + max_rate = sclk->info->range.max_rate;
1759 + }
1760 +
1761 + clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
1762 return ret;
1763 }
1764
1765 diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
1766 index 96a36f6ff667..d7586e26acd8 100644
1767 --- a/drivers/clk/qcom/clk-rpmh.c
1768 +++ b/drivers/clk/qcom/clk-rpmh.c
1769 @@ -143,12 +143,22 @@ static inline bool has_state_changed(struct clk_rpmh *c, u32 state)
1770 != (c->aggr_state & BIT(state));
1771 }
1772
1773 +static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state,
1774 + struct tcs_cmd *cmd, bool wait)
1775 +{
1776 + if (wait)
1777 + return rpmh_write(c->dev, state, cmd, 1);
1778 +
1779 + return rpmh_write_async(c->dev, state, cmd, 1);
1780 +}
1781 +
1782 static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
1783 {
1784 struct tcs_cmd cmd = { 0 };
1785 u32 cmd_state, on_val;
1786 enum rpmh_state state = RPMH_SLEEP_STATE;
1787 int ret;
1788 + bool wait;
1789
1790 cmd.addr = c->res_addr;
1791 cmd_state = c->aggr_state;
1792 @@ -159,7 +169,8 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
1793 if (cmd_state & BIT(state))
1794 cmd.data = on_val;
1795
1796 - ret = rpmh_write_async(c->dev, state, &cmd, 1);
1797 + wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE;
1798 + ret = clk_rpmh_send(c, state, &cmd, wait);
1799 if (ret) {
1800 dev_err(c->dev, "set %s state of %s failed: (%d)\n",
1801 !state ? "sleep" :
1802 @@ -267,7 +278,7 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
1803 cmd.addr = c->res_addr;
1804 cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
1805
1806 - ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
1807 + ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable);
1808 if (ret) {
1809 dev_err(c->dev, "set active state of %s failed: (%d)\n",
1810 c->res_name, ret);
1811 diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
1812 index a905796f7f85..25f11e9ec358 100644
1813 --- a/drivers/cpufreq/Kconfig.arm
1814 +++ b/drivers/cpufreq/Kconfig.arm
1815 @@ -41,6 +41,7 @@ config ARM_ARMADA_37XX_CPUFREQ
1816 config ARM_ARMADA_8K_CPUFREQ
1817 tristate "Armada 8K CPUFreq driver"
1818 depends on ARCH_MVEBU && CPUFREQ_DT
1819 + select ARMADA_AP_CPU_CLK
1820 help
1821 This enables the CPUFreq driver support for Marvell
1822 Armada8k SOCs.
1823 diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
1824 index aa0f06dec959..df1c941260d1 100644
1825 --- a/drivers/cpufreq/armada-37xx-cpufreq.c
1826 +++ b/drivers/cpufreq/armada-37xx-cpufreq.c
1827 @@ -456,6 +456,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
1828 /* Now that everything is setup, enable the DVFS at hardware level */
1829 armada37xx_cpufreq_enable_dvfs(nb_pm_base);
1830
1831 + memset(&pdata, 0, sizeof(pdata));
1832 pdata.suspend = armada37xx_cpufreq_suspend;
1833 pdata.resume = armada37xx_cpufreq_resume;
1834
1835 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1836 index fa988bd1e606..194a6587a1de 100644
1837 --- a/drivers/cpufreq/cpufreq.c
1838 +++ b/drivers/cpufreq/cpufreq.c
1839 @@ -616,6 +616,24 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
1840 return NULL;
1841 }
1842
1843 +static struct cpufreq_governor *get_governor(const char *str_governor)
1844 +{
1845 + struct cpufreq_governor *t;
1846 +
1847 + mutex_lock(&cpufreq_governor_mutex);
1848 + t = find_governor(str_governor);
1849 + if (!t)
1850 + goto unlock;
1851 +
1852 + if (!try_module_get(t->owner))
1853 + t = NULL;
1854 +
1855 +unlock:
1856 + mutex_unlock(&cpufreq_governor_mutex);
1857 +
1858 + return t;
1859 +}
1860 +
1861 static unsigned int cpufreq_parse_policy(char *str_governor)
1862 {
1863 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
1864 @@ -635,28 +653,14 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
1865 {
1866 struct cpufreq_governor *t;
1867
1868 - mutex_lock(&cpufreq_governor_mutex);
1869 + t = get_governor(str_governor);
1870 + if (t)
1871 + return t;
1872
1873 - t = find_governor(str_governor);
1874 - if (!t) {
1875 - int ret;
1876 -
1877 - mutex_unlock(&cpufreq_governor_mutex);
1878 -
1879 - ret = request_module("cpufreq_%s", str_governor);
1880 - if (ret)
1881 - return NULL;
1882 -
1883 - mutex_lock(&cpufreq_governor_mutex);
1884 -
1885 - t = find_governor(str_governor);
1886 - }
1887 - if (t && !try_module_get(t->owner))
1888 - t = NULL;
1889 -
1890 - mutex_unlock(&cpufreq_governor_mutex);
1891 + if (request_module("cpufreq_%s", str_governor))
1892 + return NULL;
1893
1894 - return t;
1895 + return get_governor(str_governor);
1896 }
1897
1898 /**
1899 @@ -810,12 +814,14 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
1900 goto out;
1901 }
1902
1903 + mutex_lock(&cpufreq_governor_mutex);
1904 for_each_governor(t) {
1905 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
1906 - (CPUFREQ_NAME_LEN + 2)))
1907 - goto out;
1908 + break;
1909 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1910 }
1911 + mutex_unlock(&cpufreq_governor_mutex);
1912 out:
1913 i += sprintf(&buf[i], "\n");
1914 return i;
1915 @@ -1053,15 +1059,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
1916 struct cpufreq_governor *def_gov = cpufreq_default_governor();
1917 struct cpufreq_governor *gov = NULL;
1918 unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1919 + int ret;
1920
1921 if (has_target()) {
1922 /* Update policy governor to the one used before hotplug. */
1923 - gov = find_governor(policy->last_governor);
1924 + gov = get_governor(policy->last_governor);
1925 if (gov) {
1926 pr_debug("Restoring governor %s for cpu %d\n",
1927 policy->governor->name, policy->cpu);
1928 } else if (def_gov) {
1929 gov = def_gov;
1930 + __module_get(gov->owner);
1931 } else {
1932 return -ENODATA;
1933 }
1934 @@ -1084,7 +1092,11 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
1935 return -ENODATA;
1936 }
1937
1938 - return cpufreq_set_policy(policy, gov, pol);
1939 + ret = cpufreq_set_policy(policy, gov, pol);
1940 + if (gov)
1941 + module_put(gov->owner);
1942 +
1943 + return ret;
1944 }
1945
1946 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1947 diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
1948 index 596ce28b957d..2410b23aa609 100644
1949 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c
1950 +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
1951 @@ -200,6 +200,7 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
1952 int status;
1953
1954 memset(req_info, 0, sizeof(struct cpt_request_info));
1955 + req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
1956 memset(fctx, 0, sizeof(struct fc_context));
1957 create_input_list(req, enc, enc_iv_len);
1958 create_output_list(req, enc_iv_len);
1959 diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
1960 index 7a24019356b5..e343249c8d05 100644
1961 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
1962 +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
1963 @@ -133,7 +133,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
1964
1965 /* Setup gather (input) components */
1966 g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
1967 - info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL);
1968 + info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
1969 if (!info->gather_components) {
1970 ret = -ENOMEM;
1971 goto scatter_gather_clean;
1972 @@ -150,7 +150,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
1973
1974 /* Setup scatter (output) components */
1975 s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
1976 - info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL);
1977 + info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
1978 if (!info->scatter_components) {
1979 ret = -ENOMEM;
1980 goto scatter_gather_clean;
1981 @@ -167,7 +167,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
1982
1983 /* Create and initialize DPTR */
1984 info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
1985 - info->in_buffer = kzalloc(info->dlen, GFP_KERNEL);
1986 + info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
1987 if (!info->in_buffer) {
1988 ret = -ENOMEM;
1989 goto scatter_gather_clean;
1990 @@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
1991 }
1992
1993 /* Create and initialize RPTR */
1994 - info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL);
1995 + info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
1996 if (!info->out_buffer) {
1997 ret = -ENOMEM;
1998 goto scatter_gather_clean;
1999 @@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
2000 struct cpt_vq_command vq_cmd;
2001 union cpt_inst_s cptinst;
2002
2003 - info = kzalloc(sizeof(*info), GFP_KERNEL);
2004 + info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
2005 if (unlikely(!info)) {
2006 dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
2007 return -ENOMEM;
2008 @@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
2009 * Get buffer for union cpt_res_s response
2010 * structure and its physical address
2011 */
2012 - info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
2013 + info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
2014 if (unlikely(!info->completion_addr)) {
2015 dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
2016 ret = -ENOMEM;
2017 diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h
2018 index 3514b082eca7..1e8dd9ebcc17 100644
2019 --- a/drivers/crypto/cavium/cpt/request_manager.h
2020 +++ b/drivers/crypto/cavium/cpt/request_manager.h
2021 @@ -62,6 +62,8 @@ struct cpt_request_info {
2022 union ctrl_info ctrl; /* User control information */
2023 struct cptvf_request req; /* Request Information (Core specific) */
2024
2025 + bool may_sleep;
2026 +
2027 struct buf_ptr in[MAX_BUF_CNT];
2028 struct buf_ptr out[MAX_BUF_CNT];
2029
2030 diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
2031 index 3f68262d9ab4..87a34d91fdf7 100644
2032 --- a/drivers/crypto/ccp/ccp-dev.h
2033 +++ b/drivers/crypto/ccp/ccp-dev.h
2034 @@ -469,6 +469,7 @@ struct ccp_sg_workarea {
2035 unsigned int sg_used;
2036
2037 struct scatterlist *dma_sg;
2038 + struct scatterlist *dma_sg_head;
2039 struct device *dma_dev;
2040 unsigned int dma_count;
2041 enum dma_data_direction dma_dir;
2042 diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
2043 index 422193690fd4..64112c736810 100644
2044 --- a/drivers/crypto/ccp/ccp-ops.c
2045 +++ b/drivers/crypto/ccp/ccp-ops.c
2046 @@ -63,7 +63,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp)
2047 static void ccp_sg_free(struct ccp_sg_workarea *wa)
2048 {
2049 if (wa->dma_count)
2050 - dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
2051 + dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);
2052
2053 wa->dma_count = 0;
2054 }
2055 @@ -92,6 +92,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
2056 return 0;
2057
2058 wa->dma_sg = sg;
2059 + wa->dma_sg_head = sg;
2060 wa->dma_dev = dev;
2061 wa->dma_dir = dma_dir;
2062 wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
2063 @@ -104,14 +105,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
2064 static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
2065 {
2066 unsigned int nbytes = min_t(u64, len, wa->bytes_left);
2067 + unsigned int sg_combined_len = 0;
2068
2069 if (!wa->sg)
2070 return;
2071
2072 wa->sg_used += nbytes;
2073 wa->bytes_left -= nbytes;
2074 - if (wa->sg_used == wa->sg->length) {
2075 - wa->sg = sg_next(wa->sg);
2076 + if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
2077 + /* Advance to the next DMA scatterlist entry */
2078 + wa->dma_sg = sg_next(wa->dma_sg);
2079 +
2080 + /* In the case that the DMA mapped scatterlist has entries
2081 + * that have been merged, the non-DMA mapped scatterlist
2082 + * must be advanced multiple times for each merged entry.
2083 + * This ensures that the current non-DMA mapped entry
2084 + * corresponds to the current DMA mapped entry.
2085 + */
2086 + do {
2087 + sg_combined_len += wa->sg->length;
2088 + wa->sg = sg_next(wa->sg);
2089 + } while (wa->sg_used > sg_combined_len);
2090 +
2091 wa->sg_used = 0;
2092 }
2093 }
2094 @@ -299,7 +314,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
2095 /* Update the structures and generate the count */
2096 buf_count = 0;
2097 while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
2098 - nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
2099 + nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
2100 dm_wa->length - buf_count);
2101 nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
2102
2103 @@ -331,11 +346,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
2104 * and destination. The resulting len values will always be <= UINT_MAX
2105 * because the dma length is an unsigned int.
2106 */
2107 - sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
2108 + sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
2109 sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
2110
2111 if (dst) {
2112 - sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
2113 + sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
2114 sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
2115 op_len = min(sg_src_len, sg_dst_len);
2116 } else {
2117 @@ -365,7 +380,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
2118 /* Enough data in the sg element, but we need to
2119 * adjust for any previously copied data
2120 */
2121 - op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
2122 + op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
2123 op->src.u.dma.offset = src->sg_wa.sg_used;
2124 op->src.u.dma.length = op_len & ~(block_size - 1);
2125
2126 @@ -386,7 +401,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
2127 /* Enough room in the sg element, but we need to
2128 * adjust for any previously used area
2129 */
2130 - op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
2131 + op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
2132 op->dst.u.dma.offset = dst->sg_wa.sg_used;
2133 op->dst.u.dma.length = op->src.u.dma.length;
2134 }
2135 @@ -2028,7 +2043,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2136 dst.sg_wa.sg_used = 0;
2137 for (i = 1; i <= src.sg_wa.dma_count; i++) {
2138 if (!dst.sg_wa.sg ||
2139 - (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
2140 + (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
2141 ret = -EINVAL;
2142 goto e_dst;
2143 }
2144 @@ -2054,8 +2069,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2145 goto e_dst;
2146 }
2147
2148 - dst.sg_wa.sg_used += src.sg_wa.sg->length;
2149 - if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
2150 + dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
2151 + if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
2152 dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
2153 dst.sg_wa.sg_used = 0;
2154 }
2155 diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
2156 index cd9c60268bf8..9bf0cce578f0 100644
2157 --- a/drivers/crypto/ccree/cc_cipher.c
2158 +++ b/drivers/crypto/ccree/cc_cipher.c
2159 @@ -163,7 +163,6 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
2160 skcipher_alg.base);
2161 struct device *dev = drvdata_to_dev(cc_alg->drvdata);
2162 unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
2163 - int rc = 0;
2164
2165 dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
2166 crypto_tfm_alg_name(tfm));
2167 @@ -175,10 +174,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
2168 ctx_p->flow_mode = cc_alg->flow_mode;
2169 ctx_p->drvdata = cc_alg->drvdata;
2170
2171 + if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
2172 + /* Alloc hash tfm for essiv */
2173 + ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
2174 + if (IS_ERR(ctx_p->shash_tfm)) {
2175 + dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
2176 + return PTR_ERR(ctx_p->shash_tfm);
2177 + }
2178 + }
2179 +
2180 /* Allocate key buffer, cache line aligned */
2181 ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
2182 if (!ctx_p->user.key)
2183 - return -ENOMEM;
2184 + goto free_shash;
2185
2186 dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
2187 ctx_p->user.key);
2188 @@ -190,21 +198,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
2189 if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
2190 dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
2191 max_key_buf_size, ctx_p->user.key);
2192 - return -ENOMEM;
2193 + goto free_key;
2194 }
2195 dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
2196 max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
2197
2198 - if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
2199 - /* Alloc hash tfm for essiv */
2200 - ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
2201 - if (IS_ERR(ctx_p->shash_tfm)) {
2202 - dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
2203 - return PTR_ERR(ctx_p->shash_tfm);
2204 - }
2205 - }
2206 + return 0;
2207
2208 - return rc;
2209 +free_key:
2210 + kfree(ctx_p->user.key);
2211 +free_shash:
2212 + crypto_free_shash(ctx_p->shash_tfm);
2213 +
2214 + return -ENOMEM;
2215 }
2216
2217 static void cc_cipher_exit(struct crypto_tfm *tfm)
2218 diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
2219 index c27e7160d2df..4ad4ffd90cee 100644
2220 --- a/drivers/crypto/hisilicon/sec/sec_algs.c
2221 +++ b/drivers/crypto/hisilicon/sec/sec_algs.c
2222 @@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
2223 dma_addr_t *psec_sgl,
2224 struct scatterlist *sgl,
2225 int count,
2226 - struct sec_dev_info *info)
2227 + struct sec_dev_info *info,
2228 + gfp_t gfp)
2229 {
2230 struct sec_hw_sgl *sgl_current = NULL;
2231 struct sec_hw_sgl *sgl_next;
2232 @@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
2233 sge_index = i % SEC_MAX_SGE_NUM;
2234 if (sge_index == 0) {
2235 sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
2236 - GFP_KERNEL, &sgl_next_dma);
2237 + gfp, &sgl_next_dma);
2238 if (!sgl_next) {
2239 ret = -ENOMEM;
2240 goto err_free_hw_sgls;
2241 @@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
2242 }
2243
2244 static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
2245 - int *steps)
2246 + int *steps, gfp_t gfp)
2247 {
2248 size_t *sizes;
2249 int i;
2250
2251 /* Split into suitable sized blocks */
2252 *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
2253 - sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
2254 + sizes = kcalloc(*steps, sizeof(*sizes), gfp);
2255 if (!sizes)
2256 return -ENOMEM;
2257
2258 @@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
2259 int steps, struct scatterlist ***splits,
2260 int **splits_nents,
2261 int sgl_len_in,
2262 - struct device *dev)
2263 + struct device *dev, gfp_t gfp)
2264 {
2265 int ret, count;
2266
2267 @@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
2268 if (!count)
2269 return -EINVAL;
2270
2271 - *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
2272 + *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
2273 if (!*splits) {
2274 ret = -ENOMEM;
2275 goto err_unmap_sg;
2276 }
2277 - *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
2278 + *splits_nents = kcalloc(steps, sizeof(int), gfp);
2279 if (!*splits_nents) {
2280 ret = -ENOMEM;
2281 goto err_free_splits;
2282 @@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
2283
2284 /* output the scatter list before and after this */
2285 ret = sg_split(sgl, count, 0, steps, split_sizes,
2286 - *splits, *splits_nents, GFP_KERNEL);
2287 + *splits, *splits_nents, gfp);
2288 if (ret) {
2289 ret = -ENOMEM;
2290 goto err_free_splits_nents;
2291 @@ -630,13 +631,13 @@ static struct sec_request_el
2292 int el_size, bool different_dest,
2293 struct scatterlist *sgl_in, int n_ents_in,
2294 struct scatterlist *sgl_out, int n_ents_out,
2295 - struct sec_dev_info *info)
2296 + struct sec_dev_info *info, gfp_t gfp)
2297 {
2298 struct sec_request_el *el;
2299 struct sec_bd_info *req;
2300 int ret;
2301
2302 - el = kzalloc(sizeof(*el), GFP_KERNEL);
2303 + el = kzalloc(sizeof(*el), gfp);
2304 if (!el)
2305 return ERR_PTR(-ENOMEM);
2306 el->el_length = el_size;
2307 @@ -668,7 +669,7 @@ static struct sec_request_el
2308 el->sgl_in = sgl_in;
2309
2310 ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
2311 - n_ents_in, info);
2312 + n_ents_in, info, gfp);
2313 if (ret)
2314 goto err_free_el;
2315
2316 @@ -679,7 +680,7 @@ static struct sec_request_el
2317 el->sgl_out = sgl_out;
2318 ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
2319 el->sgl_out,
2320 - n_ents_out, info);
2321 + n_ents_out, info, gfp);
2322 if (ret)
2323 goto err_free_hw_sgl_in;
2324
2325 @@ -720,6 +721,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
2326 int *splits_out_nents = NULL;
2327 struct sec_request_el *el, *temp;
2328 bool split = skreq->src != skreq->dst;
2329 + gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
2330
2331 mutex_init(&sec_req->lock);
2332 sec_req->req_base = &skreq->base;
2333 @@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
2334 sec_req->len_in = sg_nents(skreq->src);
2335
2336 ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
2337 - &steps);
2338 + &steps, gfp);
2339 if (ret)
2340 return ret;
2341 sec_req->num_elements = steps;
2342 ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
2343 &splits_in_nents, sec_req->len_in,
2344 - info->dev);
2345 + info->dev, gfp);
2346 if (ret)
2347 goto err_free_split_sizes;
2348
2349 @@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
2350 sec_req->len_out = sg_nents(skreq->dst);
2351 ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
2352 &splits_out, &splits_out_nents,
2353 - sec_req->len_out, info->dev);
2354 + sec_req->len_out, info->dev, gfp);
2355 if (ret)
2356 goto err_unmap_in_sg;
2357 }
2358 @@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
2359 splits_in[i], splits_in_nents[i],
2360 split ? splits_out[i] : NULL,
2361 split ? splits_out_nents[i] : 0,
2362 - info);
2363 + info, gfp);
2364 if (IS_ERR(el)) {
2365 ret = PTR_ERR(el);
2366 goto err_free_elements;
2367 diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
2368 index 6bd8f6a2a24f..aeb03081415c 100644
2369 --- a/drivers/crypto/qat/qat_common/qat_uclo.c
2370 +++ b/drivers/crypto/qat/qat_common/qat_uclo.c
2371 @@ -332,13 +332,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
2372 }
2373 return 0;
2374 out_err:
2375 + /* Do not free the list head unless we allocated it. */
2376 + tail_old = tail_old->next;
2377 + if (flag) {
2378 + kfree(*init_tab_base);
2379 + *init_tab_base = NULL;
2380 + }
2381 +
2382 while (tail_old) {
2383 mem_init = tail_old->next;
2384 kfree(tail_old);
2385 tail_old = mem_init;
2386 }
2387 - if (flag)
2388 - kfree(*init_tab_base);
2389 return -ENOMEM;
2390 }
2391
2392 diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
2393 index 0e7ea3591b78..5e7593753799 100644
2394 --- a/drivers/edac/edac_device_sysfs.c
2395 +++ b/drivers/edac/edac_device_sysfs.c
2396 @@ -275,6 +275,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
2397
2398 /* Error exit stack */
2399 err_kobj_reg:
2400 + kobject_put(&edac_dev->kobj);
2401 module_put(edac_dev->owner);
2402
2403 err_out:
2404 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
2405 index 72c9eb9fdffb..53042af7262e 100644
2406 --- a/drivers/edac/edac_pci_sysfs.c
2407 +++ b/drivers/edac/edac_pci_sysfs.c
2408 @@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void)
2409
2410 /* Error unwind statck */
2411 kobject_init_and_add_fail:
2412 - kfree(edac_pci_top_main_kobj);
2413 + kobject_put(edac_pci_top_main_kobj);
2414
2415 kzalloc_fail:
2416 module_put(THIS_MODULE);
2417 diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
2418 index 87f737e01473..041f8152272b 100644
2419 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
2420 +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
2421 @@ -85,7 +85,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
2422 for (i = 0; i < num_domains; i++, scmi_pd++) {
2423 u32 state;
2424
2425 - domains[i] = &scmi_pd->genpd;
2426 + if (handle->power_ops->state_get(handle, i, &state)) {
2427 + dev_warn(dev, "failed to get state for domain %d\n", i);
2428 + continue;
2429 + }
2430
2431 scmi_pd->domain = i;
2432 scmi_pd->handle = handle;
2433 @@ -94,13 +97,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
2434 scmi_pd->genpd.power_off = scmi_pd_power_off;
2435 scmi_pd->genpd.power_on = scmi_pd_power_on;
2436
2437 - if (handle->power_ops->state_get(handle, i, &state)) {
2438 - dev_warn(dev, "failed to get state for domain %d\n", i);
2439 - continue;
2440 - }
2441 -
2442 pm_genpd_init(&scmi_pd->genpd, NULL,
2443 state == SCMI_POWER_STATE_GENERIC_OFF);
2444 +
2445 + domains[i] = &scmi_pd->genpd;
2446 }
2447
2448 scmi_pd_data->domains = domains;
2449 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
2450 index 23085b352cf2..c212d5fc665c 100644
2451 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
2452 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
2453 @@ -404,7 +404,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
2454 ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
2455 }
2456 amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
2457 - amdgpu_irq_get(adev, irq_src, irq_type);
2458 +
2459 + if (irq_src)
2460 + amdgpu_irq_get(adev, irq_src, irq_type);
2461
2462 ring->fence_drv.irq_src = irq_src;
2463 ring->fence_drv.irq_type = irq_type;
2464 @@ -539,8 +541,9 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
2465 /* no need to trigger GPU reset as we are unloading */
2466 amdgpu_fence_driver_force_completion(ring);
2467 }
2468 - amdgpu_irq_put(adev, ring->fence_drv.irq_src,
2469 - ring->fence_drv.irq_type);
2470 + if (ring->fence_drv.irq_src)
2471 + amdgpu_irq_put(adev, ring->fence_drv.irq_src,
2472 + ring->fence_drv.irq_type);
2473 drm_sched_fini(&ring->sched);
2474 del_timer_sync(&ring->fence_drv.fallback_timer);
2475 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
2476 @@ -576,8 +579,9 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
2477 }
2478
2479 /* disable the interrupt */
2480 - amdgpu_irq_put(adev, ring->fence_drv.irq_src,
2481 - ring->fence_drv.irq_type);
2482 + if (ring->fence_drv.irq_src)
2483 + amdgpu_irq_put(adev, ring->fence_drv.irq_src,
2484 + ring->fence_drv.irq_type);
2485 }
2486 }
2487
2488 @@ -603,8 +607,9 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
2489 continue;
2490
2491 /* enable the interrupt */
2492 - amdgpu_irq_get(adev, ring->fence_drv.irq_src,
2493 - ring->fence_drv.irq_type);
2494 + if (ring->fence_drv.irq_src)
2495 + amdgpu_irq_get(adev, ring->fence_drv.irq_src,
2496 + ring->fence_drv.irq_type);
2497 }
2498 }
2499
2500 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
2501 index 785322cd4c6c..7241d4c20778 100644
2502 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
2503 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
2504 @@ -530,6 +530,8 @@ bool dm_pp_get_static_clocks(
2505 &pp_clk_info);
2506 else if (adev->smu.funcs)
2507 ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
2508 + else
2509 + return false;
2510 if (ret)
2511 return false;
2512
2513 diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
2514 index 0922d9cd858a..c4d8c52c6b9c 100644
2515 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
2516 +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
2517 @@ -171,7 +171,8 @@ static int smu_v11_0_init_microcode(struct smu_context *smu)
2518 chip_name = "navi12";
2519 break;
2520 default:
2521 - BUG();
2522 + dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
2523 + return -EINVAL;
2524 }
2525
2526 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
2527 diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
2528 index 3c70a53813bf..0b2bb485d9be 100644
2529 --- a/drivers/gpu/drm/arm/malidp_planes.c
2530 +++ b/drivers/gpu/drm/arm/malidp_planes.c
2531 @@ -928,7 +928,7 @@ int malidp_de_planes_init(struct drm_device *drm)
2532 const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
2533 struct malidp_plane *plane = NULL;
2534 enum drm_plane_type plane_type;
2535 - unsigned long crtcs = 1 << drm->mode_config.num_crtc;
2536 + unsigned long crtcs = BIT(drm->mode_config.num_crtc);
2537 unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
2538 DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
2539 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
2540 diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
2541 index bd3165ee5354..04431dbac4a4 100644
2542 --- a/drivers/gpu/drm/bridge/sil-sii8620.c
2543 +++ b/drivers/gpu/drm/bridge/sil-sii8620.c
2544 @@ -177,7 +177,7 @@ static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
2545
2546 static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
2547 {
2548 - u8 ret;
2549 + u8 ret = 0;
2550
2551 sii8620_read_buf(ctx, addr, &ret, 1);
2552 return ret;
2553 diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
2554 index 0a580957c8cf..f1de4bb6558c 100644
2555 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
2556 +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
2557 @@ -647,6 +647,12 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
2558 buf[i]);
2559 }
2560
2561 + /* Clear old status bits before start so we don't get confused */
2562 + regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG,
2563 + AUX_IRQ_STATUS_NAT_I2C_FAIL |
2564 + AUX_IRQ_STATUS_AUX_RPLY_TOUT |
2565 + AUX_IRQ_STATUS_AUX_SHORT);
2566 +
2567 regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND);
2568
2569 ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val,
2570 diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
2571 index eab0f2687cd6..00debd02c322 100644
2572 --- a/drivers/gpu/drm/drm_debugfs.c
2573 +++ b/drivers/gpu/drm/drm_debugfs.c
2574 @@ -337,13 +337,13 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf,
2575
2576 buf[len] = '\0';
2577
2578 - if (!strcmp(buf, "on"))
2579 + if (sysfs_streq(buf, "on"))
2580 connector->force = DRM_FORCE_ON;
2581 - else if (!strcmp(buf, "digital"))
2582 + else if (sysfs_streq(buf, "digital"))
2583 connector->force = DRM_FORCE_ON_DIGITAL;
2584 - else if (!strcmp(buf, "off"))
2585 + else if (sysfs_streq(buf, "off"))
2586 connector->force = DRM_FORCE_OFF;
2587 - else if (!strcmp(buf, "unspecified"))
2588 + else if (sysfs_streq(buf, "unspecified"))
2589 connector->force = DRM_FORCE_UNSPECIFIED;
2590 else
2591 return -EINVAL;
2592 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
2593 index 46ad14470d06..1fdc85a71cec 100644
2594 --- a/drivers/gpu/drm/drm_gem.c
2595 +++ b/drivers/gpu/drm/drm_gem.c
2596 @@ -710,6 +710,8 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
2597 if (!objs)
2598 return -ENOMEM;
2599
2600 + *objs_out = objs;
2601 +
2602 handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
2603 if (!handles) {
2604 ret = -ENOMEM;
2605 @@ -723,8 +725,6 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
2606 }
2607
2608 ret = objects_lookup(filp, handles, count, objs);
2609 - *objs_out = objs;
2610 -
2611 out:
2612 kvfree(handles);
2613 return ret;
2614 diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
2615 index bd2498bbd74a..b99f96dcc6f1 100644
2616 --- a/drivers/gpu/drm/drm_mipi_dsi.c
2617 +++ b/drivers/gpu/drm/drm_mipi_dsi.c
2618 @@ -1029,11 +1029,11 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
2619 */
2620 int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
2621 {
2622 - u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
2623 - scanline & 0xff };
2624 + u8 payload[2] = { scanline >> 8, scanline & 0xff };
2625 ssize_t err;
2626
2627 - err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
2628 + err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_SCANLINE, payload,
2629 + sizeof(payload));
2630 if (err < 0)
2631 return err;
2632
2633 diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
2634 index d47d1a8e0219..85de8551ce86 100644
2635 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
2636 +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
2637 @@ -713,7 +713,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
2638 ret = pm_runtime_get_sync(gpu->dev);
2639 if (ret < 0) {
2640 dev_err(gpu->dev, "Failed to enable GPU power domain\n");
2641 - return ret;
2642 + goto pm_put;
2643 }
2644
2645 etnaviv_hw_identify(gpu);
2646 @@ -802,6 +802,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
2647
2648 fail:
2649 pm_runtime_mark_last_busy(gpu->dev);
2650 +pm_put:
2651 pm_runtime_put_autosuspend(gpu->dev);
2652
2653 return ret;
2654 @@ -842,7 +843,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
2655
2656 ret = pm_runtime_get_sync(gpu->dev);
2657 if (ret < 0)
2658 - return ret;
2659 + goto pm_put;
2660
2661 dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
2662 dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
2663 @@ -965,6 +966,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
2664 ret = 0;
2665
2666 pm_runtime_mark_last_busy(gpu->dev);
2667 +pm_put:
2668 pm_runtime_put_autosuspend(gpu->dev);
2669
2670 return ret;
2671 @@ -978,7 +980,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
2672 dev_err(gpu->dev, "recover hung GPU!\n");
2673
2674 if (pm_runtime_get_sync(gpu->dev) < 0)
2675 - return;
2676 + goto pm_put;
2677
2678 mutex_lock(&gpu->lock);
2679
2680 @@ -997,6 +999,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
2681
2682 mutex_unlock(&gpu->lock);
2683 pm_runtime_mark_last_busy(gpu->dev);
2684 +pm_put:
2685 pm_runtime_put_autosuspend(gpu->dev);
2686 }
2687
2688 @@ -1269,8 +1272,10 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
2689
2690 if (!submit->runtime_resumed) {
2691 ret = pm_runtime_get_sync(gpu->dev);
2692 - if (ret < 0)
2693 + if (ret < 0) {
2694 + pm_runtime_put_noidle(gpu->dev);
2695 return NULL;
2696 + }
2697 submit->runtime_resumed = true;
2698 }
2699
2700 @@ -1287,6 +1292,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
2701 ret = event_alloc(gpu, nr_events, event);
2702 if (ret) {
2703 DRM_ERROR("no free events\n");
2704 + pm_runtime_put_noidle(gpu->dev);
2705 return NULL;
2706 }
2707
2708 @@ -1457,7 +1463,7 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
2709 if (gpu->clk_bus) {
2710 ret = clk_prepare_enable(gpu->clk_bus);
2711 if (ret)
2712 - return ret;
2713 + goto disable_clk_reg;
2714 }
2715
2716 if (gpu->clk_core) {
2717 @@ -1480,6 +1486,9 @@ disable_clk_core:
2718 disable_clk_bus:
2719 if (gpu->clk_bus)
2720 clk_disable_unprepare(gpu->clk_bus);
2721 +disable_clk_reg:
2722 + if (gpu->clk_reg)
2723 + clk_disable_unprepare(gpu->clk_reg);
2724
2725 return ret;
2726 }
2727 diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
2728 index f22cfbf9353e..2e12a4a3bfa1 100644
2729 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
2730 +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
2731 @@ -212,9 +212,8 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
2732 if (!pdev->dev.of_node)
2733 return -ENODEV;
2734
2735 - hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
2736 - if (!hdmi)
2737 - return -ENOMEM;
2738 + hdmi = dev_get_drvdata(dev);
2739 + memset(hdmi, 0, sizeof(*hdmi));
2740
2741 match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node);
2742 plat_data = match->data;
2743 @@ -239,8 +238,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
2744 drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
2745 DRM_MODE_ENCODER_TMDS, NULL);
2746
2747 - platform_set_drvdata(pdev, hdmi);
2748 -
2749 hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
2750
2751 /*
2752 @@ -270,6 +267,14 @@ static const struct component_ops dw_hdmi_imx_ops = {
2753
2754 static int dw_hdmi_imx_probe(struct platform_device *pdev)
2755 {
2756 + struct imx_hdmi *hdmi;
2757 +
2758 + hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
2759 + if (!hdmi)
2760 + return -ENOMEM;
2761 +
2762 + platform_set_drvdata(pdev, hdmi);
2763 +
2764 return component_add(&pdev->dev, &dw_hdmi_imx_ops);
2765 }
2766
2767 diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
2768 index da87c70e413b..881c36d0f16b 100644
2769 --- a/drivers/gpu/drm/imx/imx-drm-core.c
2770 +++ b/drivers/gpu/drm/imx/imx-drm-core.c
2771 @@ -281,9 +281,10 @@ static void imx_drm_unbind(struct device *dev)
2772
2773 drm_kms_helper_poll_fini(drm);
2774
2775 + component_unbind_all(drm->dev, drm);
2776 +
2777 drm_mode_config_cleanup(drm);
2778
2779 - component_unbind_all(drm->dev, drm);
2780 dev_set_drvdata(dev, NULL);
2781
2782 drm_dev_put(drm);
2783 diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
2784 index 695f307f36b2..9af5a08d5490 100644
2785 --- a/drivers/gpu/drm/imx/imx-ldb.c
2786 +++ b/drivers/gpu/drm/imx/imx-ldb.c
2787 @@ -593,9 +593,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
2788 int ret;
2789 int i;
2790
2791 - imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL);
2792 - if (!imx_ldb)
2793 - return -ENOMEM;
2794 + imx_ldb = dev_get_drvdata(dev);
2795 + memset(imx_ldb, 0, sizeof(*imx_ldb));
2796
2797 imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
2798 if (IS_ERR(imx_ldb->regmap)) {
2799 @@ -703,8 +702,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
2800 }
2801 }
2802
2803 - dev_set_drvdata(dev, imx_ldb);
2804 -
2805 return 0;
2806
2807 free_child:
2808 @@ -736,6 +733,14 @@ static const struct component_ops imx_ldb_ops = {
2809
2810 static int imx_ldb_probe(struct platform_device *pdev)
2811 {
2812 + struct imx_ldb *imx_ldb;
2813 +
2814 + imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
2815 + if (!imx_ldb)
2816 + return -ENOMEM;
2817 +
2818 + platform_set_drvdata(pdev, imx_ldb);
2819 +
2820 return component_add(&pdev->dev, &imx_ldb_ops);
2821 }
2822
2823 diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
2824 index 5bbfaa2cd0f4..f91c3eb7697b 100644
2825 --- a/drivers/gpu/drm/imx/imx-tve.c
2826 +++ b/drivers/gpu/drm/imx/imx-tve.c
2827 @@ -494,6 +494,13 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
2828 return 0;
2829 }
2830
2831 +static void imx_tve_disable_regulator(void *data)
2832 +{
2833 + struct imx_tve *tve = data;
2834 +
2835 + regulator_disable(tve->dac_reg);
2836 +}
2837 +
2838 static bool imx_tve_readable_reg(struct device *dev, unsigned int reg)
2839 {
2840 return (reg % 4 == 0) && (reg <= 0xdc);
2841 @@ -546,9 +553,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
2842 int irq;
2843 int ret;
2844
2845 - tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
2846 - if (!tve)
2847 - return -ENOMEM;
2848 + tve = dev_get_drvdata(dev);
2849 + memset(tve, 0, sizeof(*tve));
2850
2851 tve->dev = dev;
2852 spin_lock_init(&tve->lock);
2853 @@ -618,6 +624,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
2854 ret = regulator_enable(tve->dac_reg);
2855 if (ret)
2856 return ret;
2857 + ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator, tve);
2858 + if (ret)
2859 + return ret;
2860 }
2861
2862 tve->clk = devm_clk_get(dev, "tve");
2863 @@ -659,27 +668,23 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
2864 if (ret)
2865 return ret;
2866
2867 - dev_set_drvdata(dev, tve);
2868 -
2869 return 0;
2870 }
2871
2872 -static void imx_tve_unbind(struct device *dev, struct device *master,
2873 - void *data)
2874 -{
2875 - struct imx_tve *tve = dev_get_drvdata(dev);
2876 -
2877 - if (!IS_ERR(tve->dac_reg))
2878 - regulator_disable(tve->dac_reg);
2879 -}
2880 -
2881 static const struct component_ops imx_tve_ops = {
2882 .bind = imx_tve_bind,
2883 - .unbind = imx_tve_unbind,
2884 };
2885
2886 static int imx_tve_probe(struct platform_device *pdev)
2887 {
2888 + struct imx_tve *tve;
2889 +
2890 + tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL);
2891 + if (!tve)
2892 + return -ENOMEM;
2893 +
2894 + platform_set_drvdata(pdev, tve);
2895 +
2896 return component_add(&pdev->dev, &imx_tve_ops);
2897 }
2898
2899 diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
2900 index 63c0284f8b3c..2256c9789fc2 100644
2901 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c
2902 +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
2903 @@ -438,21 +438,13 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
2904 struct ipu_client_platformdata *pdata = dev->platform_data;
2905 struct drm_device *drm = data;
2906 struct ipu_crtc *ipu_crtc;
2907 - int ret;
2908
2909 - ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
2910 - if (!ipu_crtc)
2911 - return -ENOMEM;
2912 + ipu_crtc = dev_get_drvdata(dev);
2913 + memset(ipu_crtc, 0, sizeof(*ipu_crtc));
2914
2915 ipu_crtc->dev = dev;
2916
2917 - ret = ipu_crtc_init(ipu_crtc, pdata, drm);
2918 - if (ret)
2919 - return ret;
2920 -
2921 - dev_set_drvdata(dev, ipu_crtc);
2922 -
2923 - return 0;
2924 + return ipu_crtc_init(ipu_crtc, pdata, drm);
2925 }
2926
2927 static void ipu_drm_unbind(struct device *dev, struct device *master,
2928 @@ -474,6 +466,7 @@ static const struct component_ops ipu_crtc_ops = {
2929 static int ipu_drm_probe(struct platform_device *pdev)
2930 {
2931 struct device *dev = &pdev->dev;
2932 + struct ipu_crtc *ipu_crtc;
2933 int ret;
2934
2935 if (!dev->platform_data)
2936 @@ -483,6 +476,12 @@ static int ipu_drm_probe(struct platform_device *pdev)
2937 if (ret)
2938 return ret;
2939
2940 + ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
2941 + if (!ipu_crtc)
2942 + return -ENOMEM;
2943 +
2944 + dev_set_drvdata(dev, ipu_crtc);
2945 +
2946 return component_add(dev, &ipu_crtc_ops);
2947 }
2948
2949 diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
2950 index e7ce17503ae1..be55548f352a 100644
2951 --- a/drivers/gpu/drm/imx/parallel-display.c
2952 +++ b/drivers/gpu/drm/imx/parallel-display.c
2953 @@ -204,9 +204,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
2954 u32 bus_format = 0;
2955 const char *fmt;
2956
2957 - imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
2958 - if (!imxpd)
2959 - return -ENOMEM;
2960 + imxpd = dev_get_drvdata(dev);
2961 + memset(imxpd, 0, sizeof(*imxpd));
2962
2963 edidp = of_get_property(np, "edid", &imxpd->edid_len);
2964 if (edidp)
2965 @@ -236,8 +235,6 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
2966 if (ret)
2967 return ret;
2968
2969 - dev_set_drvdata(dev, imxpd);
2970 -
2971 return 0;
2972 }
2973
2974 @@ -259,6 +256,14 @@ static const struct component_ops imx_pd_ops = {
2975
2976 static int imx_pd_probe(struct platform_device *pdev)
2977 {
2978 + struct imx_parallel_display *imxpd;
2979 +
2980 + imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
2981 + if (!imxpd)
2982 + return -ENOMEM;
2983 +
2984 + platform_set_drvdata(pdev, imxpd);
2985 +
2986 return component_add(&pdev->dev, &imx_pd_ops);
2987 }
2988
2989 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2990 index e62b286947a7..9ea748667fab 100644
2991 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2992 +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2993 @@ -713,10 +713,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
2994 /* Turn on the resources */
2995 pm_runtime_get_sync(gmu->dev);
2996
2997 + /*
2998 + * "enable" the GX power domain which won't actually do anything but it
2999 + * will make sure that the refcounting is correct in case we need to
3000 + * bring down the GX after a GMU failure
3001 + */
3002 + if (!IS_ERR_OR_NULL(gmu->gxpd))
3003 + pm_runtime_get_sync(gmu->gxpd);
3004 +
3005 /* Use a known rate to bring up the GMU */
3006 clk_set_rate(gmu->core_clk, 200000000);
3007 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
3008 if (ret) {
3009 + pm_runtime_put(gmu->gxpd);
3010 pm_runtime_put(gmu->dev);
3011 return ret;
3012 }
3013 @@ -752,19 +761,12 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
3014 /* Set the GPU to the highest power frequency */
3015 __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
3016
3017 - /*
3018 - * "enable" the GX power domain which won't actually do anything but it
3019 - * will make sure that the refcounting is correct in case we need to
3020 - * bring down the GX after a GMU failure
3021 - */
3022 - if (!IS_ERR_OR_NULL(gmu->gxpd))
3023 - pm_runtime_get(gmu->gxpd);
3024 -
3025 out:
3026 /* On failure, shut down the GMU to leave it in a good state */
3027 if (ret) {
3028 disable_irq(gmu->gmu_irq);
3029 a6xx_rpmh_stop(gmu);
3030 + pm_runtime_put(gmu->gxpd);
3031 pm_runtime_put(gmu->dev);
3032 }
3033
3034 diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
3035 index ce59adff06aa..36c85c05b7cf 100644
3036 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
3037 +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
3038 @@ -381,7 +381,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
3039 spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
3040
3041 if (!fevent) {
3042 - DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
3043 + DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
3044 return;
3045 }
3046
3047 diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
3048 index 5a6a79fbc9d6..d92a0ffe2a76 100644
3049 --- a/drivers/gpu/drm/msm/msm_gem.c
3050 +++ b/drivers/gpu/drm/msm/msm_gem.c
3051 @@ -977,10 +977,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
3052
3053 static int msm_gem_new_impl(struct drm_device *dev,
3054 uint32_t size, uint32_t flags,
3055 - struct drm_gem_object **obj,
3056 - bool struct_mutex_locked)
3057 + struct drm_gem_object **obj)
3058 {
3059 - struct msm_drm_private *priv = dev->dev_private;
3060 struct msm_gem_object *msm_obj;
3061
3062 switch (flags & MSM_BO_CACHE_MASK) {
3063 @@ -1006,15 +1004,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
3064 INIT_LIST_HEAD(&msm_obj->submit_entry);
3065 INIT_LIST_HEAD(&msm_obj->vmas);
3066
3067 - if (struct_mutex_locked) {
3068 - WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3069 - list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
3070 - } else {
3071 - mutex_lock(&dev->struct_mutex);
3072 - list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
3073 - mutex_unlock(&dev->struct_mutex);
3074 - }
3075 -
3076 *obj = &msm_obj->base;
3077
3078 return 0;
3079 @@ -1024,6 +1013,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
3080 uint32_t size, uint32_t flags, bool struct_mutex_locked)
3081 {
3082 struct msm_drm_private *priv = dev->dev_private;
3083 + struct msm_gem_object *msm_obj;
3084 struct drm_gem_object *obj = NULL;
3085 bool use_vram = false;
3086 int ret;
3087 @@ -1044,14 +1034,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
3088 if (size == 0)
3089 return ERR_PTR(-EINVAL);
3090
3091 - ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
3092 + ret = msm_gem_new_impl(dev, size, flags, &obj);
3093 if (ret)
3094 goto fail;
3095
3096 + msm_obj = to_msm_bo(obj);
3097 +
3098 if (use_vram) {
3099 struct msm_gem_vma *vma;
3100 struct page **pages;
3101 - struct msm_gem_object *msm_obj = to_msm_bo(obj);
3102
3103 mutex_lock(&msm_obj->lock);
3104
3105 @@ -1086,6 +1077,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
3106 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
3107 }
3108
3109 + if (struct_mutex_locked) {
3110 + WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3111 + list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
3112 + } else {
3113 + mutex_lock(&dev->struct_mutex);
3114 + list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
3115 + mutex_unlock(&dev->struct_mutex);
3116 + }
3117 +
3118 return obj;
3119
3120 fail:
3121 @@ -1108,6 +1108,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
3122 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
3123 struct dma_buf *dmabuf, struct sg_table *sgt)
3124 {
3125 + struct msm_drm_private *priv = dev->dev_private;
3126 struct msm_gem_object *msm_obj;
3127 struct drm_gem_object *obj;
3128 uint32_t size;
3129 @@ -1121,7 +1122,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
3130
3131 size = PAGE_ALIGN(dmabuf->size);
3132
3133 - ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
3134 + ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
3135 if (ret)
3136 goto fail;
3137
3138 @@ -1146,6 +1147,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
3139 }
3140
3141 mutex_unlock(&msm_obj->lock);
3142 +
3143 + mutex_lock(&dev->struct_mutex);
3144 + list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
3145 + mutex_unlock(&dev->struct_mutex);
3146 +
3147 return obj;
3148
3149 fail:
3150 diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
3151 index c9692df2b76c..46578108a430 100644
3152 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c
3153 +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
3154 @@ -83,18 +83,20 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
3155 {
3156 u32 mode = 0x00;
3157
3158 - if (asyc->dither.mode == DITHERING_MODE_AUTO) {
3159 - if (asyh->base.depth > asyh->or.bpc * 3)
3160 - mode = DITHERING_MODE_DYNAMIC2X2;
3161 - } else {
3162 - mode = asyc->dither.mode;
3163 - }
3164 + if (asyc->dither.mode) {
3165 + if (asyc->dither.mode == DITHERING_MODE_AUTO) {
3166 + if (asyh->base.depth > asyh->or.bpc * 3)
3167 + mode = DITHERING_MODE_DYNAMIC2X2;
3168 + } else {
3169 + mode = asyc->dither.mode;
3170 + }
3171
3172 - if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
3173 - if (asyh->or.bpc >= 8)
3174 - mode |= DITHERING_DEPTH_8BPC;
3175 - } else {
3176 - mode |= asyc->dither.depth;
3177 + if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
3178 + if (asyh->or.bpc >= 8)
3179 + mode |= DITHERING_DEPTH_8BPC;
3180 + } else {
3181 + mode |= asyc->dither.depth;
3182 + }
3183 }
3184
3185 asyh->dither.enable = mode;
3186 diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
3187 index 7dfbbbc1beea..5c314f135dd1 100644
3188 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
3189 +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
3190 @@ -54,8 +54,10 @@ nouveau_debugfs_strap_peek(struct seq_file *m, void *data)
3191 int ret;
3192
3193 ret = pm_runtime_get_sync(drm->dev->dev);
3194 - if (ret < 0 && ret != -EACCES)
3195 + if (ret < 0 && ret != -EACCES) {
3196 + pm_runtime_put_autosuspend(drm->dev->dev);
3197 return ret;
3198 + }
3199
3200 seq_printf(m, "0x%08x\n",
3201 nvif_rd32(&drm->client.device.object, 0x101000));
3202 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
3203 index b1beed40e746..5347e5bdee8c 100644
3204 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
3205 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
3206 @@ -1052,8 +1052,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
3207
3208 /* need to bring up power immediately if opening device */
3209 ret = pm_runtime_get_sync(dev->dev);
3210 - if (ret < 0 && ret != -EACCES)
3211 + if (ret < 0 && ret != -EACCES) {
3212 + pm_runtime_put_autosuspend(dev->dev);
3213 return ret;
3214 + }
3215
3216 get_task_comm(tmpname, current);
3217 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
3218 @@ -1135,8 +1137,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3219 long ret;
3220
3221 ret = pm_runtime_get_sync(dev->dev);
3222 - if (ret < 0 && ret != -EACCES)
3223 + if (ret < 0 && ret != -EACCES) {
3224 + pm_runtime_put_autosuspend(dev->dev);
3225 return ret;
3226 + }
3227
3228 switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
3229 case DRM_NOUVEAU_NVIF:
3230 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
3231 index 1324c19f4e5c..fbfe25422774 100644
3232 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
3233 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
3234 @@ -45,8 +45,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
3235 int ret;
3236
3237 ret = pm_runtime_get_sync(dev);
3238 - if (WARN_ON(ret < 0 && ret != -EACCES))
3239 + if (WARN_ON(ret < 0 && ret != -EACCES)) {
3240 + pm_runtime_put_autosuspend(dev);
3241 return;
3242 + }
3243
3244 if (gem->import_attach)
3245 drm_prime_gem_destroy(gem, nvbo->bo.sg);
3246 diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
3247 index feaac908efed..34403b810dba 100644
3248 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
3249 +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
3250 @@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
3251 else
3252 nvbe->ttm.ttm.func = &nv50_sgdma_backend;
3253
3254 - if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
3255 - /*
3256 - * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
3257 - * and thus our nouveau_sgdma_destroy() hook, so we don't need
3258 - * to free nvbe here.
3259 - */
3260 + if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
3261 + kfree(nvbe);
3262 return NULL;
3263 + }
3264 return &nvbe->ttm.ttm;
3265 }
3266 diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
3267 index 8abb31f83ffc..6d9656323a3f 100644
3268 --- a/drivers/gpu/drm/panel/panel-simple.c
3269 +++ b/drivers/gpu/drm/panel/panel-simple.c
3270 @@ -1935,7 +1935,7 @@ static const struct drm_display_mode lg_lb070wv8_mode = {
3271 static const struct panel_desc lg_lb070wv8 = {
3272 .modes = &lg_lb070wv8_mode,
3273 .num_modes = 1,
3274 - .bpc = 16,
3275 + .bpc = 8,
3276 .size = {
3277 .width = 151,
3278 .height = 91,
3279 diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
3280 index f9685cce1652..1e62e7bbf1b1 100644
3281 --- a/drivers/gpu/drm/radeon/ci_dpm.c
3282 +++ b/drivers/gpu/drm/radeon/ci_dpm.c
3283 @@ -4366,7 +4366,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev,
3284 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3285 }
3286 j++;
3287 - if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3288 + if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3289 return -EINVAL;
3290
3291 if (!pi->mem_gddr5) {
3292 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
3293 index 0826efd9b5f5..f9f74150d0d7 100644
3294 --- a/drivers/gpu/drm/radeon/radeon_display.c
3295 +++ b/drivers/gpu/drm/radeon/radeon_display.c
3296 @@ -631,8 +631,10 @@ radeon_crtc_set_config(struct drm_mode_set *set,
3297 dev = set->crtc->dev;
3298
3299 ret = pm_runtime_get_sync(dev->dev);
3300 - if (ret < 0)
3301 + if (ret < 0) {
3302 + pm_runtime_put_autosuspend(dev->dev);
3303 return ret;
3304 + }
3305
3306 ret = drm_crtc_helper_set_config(set, ctx);
3307
3308 diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
3309 index 6128792ab883..c2573096d43c 100644
3310 --- a/drivers/gpu/drm/radeon/radeon_drv.c
3311 +++ b/drivers/gpu/drm/radeon/radeon_drv.c
3312 @@ -174,12 +174,7 @@ int radeon_no_wb;
3313 int radeon_modeset = -1;
3314 int radeon_dynclks = -1;
3315 int radeon_r4xx_atom = 0;
3316 -#ifdef __powerpc__
3317 -/* Default to PCI on PowerPC (fdo #95017) */
3318 int radeon_agpmode = -1;
3319 -#else
3320 -int radeon_agpmode = 0;
3321 -#endif
3322 int radeon_vram_limit = 0;
3323 int radeon_gart_size = -1; /* auto */
3324 int radeon_benchmarking = 0;
3325 @@ -555,8 +550,10 @@ long radeon_drm_ioctl(struct file *filp,
3326 long ret;
3327 dev = file_priv->minor->dev;
3328 ret = pm_runtime_get_sync(dev->dev);
3329 - if (ret < 0)
3330 + if (ret < 0) {
3331 + pm_runtime_put_autosuspend(dev->dev);
3332 return ret;
3333 + }
3334
3335 ret = drm_ioctl(filp, cmd, arg);
3336
3337 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
3338 index 2bb0187c5bc7..709c4ef5e7d5 100644
3339 --- a/drivers/gpu/drm/radeon/radeon_kms.c
3340 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
3341 @@ -638,8 +638,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
3342 file_priv->driver_priv = NULL;
3343
3344 r = pm_runtime_get_sync(dev->dev);
3345 - if (r < 0)
3346 + if (r < 0) {
3347 + pm_runtime_put_autosuspend(dev->dev);
3348 return r;
3349 + }
3350
3351 /* new gpu have virtual address space support */
3352 if (rdev->family >= CHIP_CAYMAN) {
3353 diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
3354 index 3ab4fbf8eb0d..51571f7246ab 100644
3355 --- a/drivers/gpu/drm/stm/ltdc.c
3356 +++ b/drivers/gpu/drm/stm/ltdc.c
3357 @@ -424,9 +424,12 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
3358 struct drm_crtc_state *old_state)
3359 {
3360 struct ltdc_device *ldev = crtc_to_ltdc(crtc);
3361 + struct drm_device *ddev = crtc->dev;
3362
3363 DRM_DEBUG_DRIVER("\n");
3364
3365 + pm_runtime_get_sync(ddev->dev);
3366 +
3367 /* Sets the background color value */
3368 reg_write(ldev->regs, LTDC_BCCR, BCCR_BCBLACK);
3369
3370 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
3371 index 5584e656b857..8c4fd1aa4c2d 100644
3372 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
3373 +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
3374 @@ -143,12 +143,16 @@ static int panel_connector_get_modes(struct drm_connector *connector)
3375 int i;
3376
3377 for (i = 0; i < timings->num_timings; i++) {
3378 - struct drm_display_mode *mode = drm_mode_create(dev);
3379 + struct drm_display_mode *mode;
3380 struct videomode vm;
3381
3382 if (videomode_from_timings(timings, &vm, i))
3383 break;
3384
3385 + mode = drm_mode_create(dev);
3386 + if (!mode)
3387 + break;
3388 +
3389 drm_display_mode_from_videomode(&vm, mode);
3390
3391 mode->type = DRM_MODE_TYPE_DRIVER;
3392 diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
3393 index e0e9b4f69db6..c770ec7e9e8b 100644
3394 --- a/drivers/gpu/drm/ttm/ttm_tt.c
3395 +++ b/drivers/gpu/drm/ttm/ttm_tt.c
3396 @@ -241,7 +241,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
3397 ttm_tt_init_fields(ttm, bo, page_flags);
3398
3399 if (ttm_tt_alloc_page_directory(ttm)) {
3400 - ttm_tt_destroy(ttm);
3401 pr_err("Failed allocating page table\n");
3402 return -ENOMEM;
3403 }
3404 @@ -265,7 +264,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
3405
3406 INIT_LIST_HEAD(&ttm_dma->pages_list);
3407 if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
3408 - ttm_tt_destroy(ttm);
3409 pr_err("Failed allocating page table\n");
3410 return -ENOMEM;
3411 }
3412 @@ -287,7 +285,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
3413 else
3414 ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
3415 if (ret) {
3416 - ttm_tt_destroy(ttm);
3417 pr_err("Failed allocating page table\n");
3418 return -ENOMEM;
3419 }
3420 diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
3421 index c0392672a842..1b4997bda1c7 100644
3422 --- a/drivers/gpu/host1x/debug.c
3423 +++ b/drivers/gpu/host1x/debug.c
3424 @@ -16,6 +16,8 @@
3425 #include "debug.h"
3426 #include "channel.h"
3427
3428 +static DEFINE_MUTEX(debug_lock);
3429 +
3430 unsigned int host1x_debug_trace_cmdbuf;
3431
3432 static pid_t host1x_debug_force_timeout_pid;
3433 @@ -52,12 +54,14 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
3434 struct output *o = data;
3435
3436 mutex_lock(&ch->cdma.lock);
3437 + mutex_lock(&debug_lock);
3438
3439 if (show_fifo)
3440 host1x_hw_show_channel_fifo(m, ch, o);
3441
3442 host1x_hw_show_channel_cdma(m, ch, o);
3443
3444 + mutex_unlock(&debug_lock);
3445 mutex_unlock(&ch->cdma.lock);
3446
3447 return 0;
3448 diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
3449 index ee2a025e54cf..b3dae9ec1a38 100644
3450 --- a/drivers/gpu/ipu-v3/ipu-common.c
3451 +++ b/drivers/gpu/ipu-v3/ipu-common.c
3452 @@ -124,6 +124,8 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
3453 case V4L2_PIX_FMT_RGBX32:
3454 case V4L2_PIX_FMT_ARGB32:
3455 case V4L2_PIX_FMT_XRGB32:
3456 + case V4L2_PIX_FMT_RGB32:
3457 + case V4L2_PIX_FMT_BGR32:
3458 return IPUV3_COLORSPACE_RGB;
3459 default:
3460 return IPUV3_COLORSPACE_UNKNOWN;
3461 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
3462 index dea9cc65bf80..e8641ce677e4 100644
3463 --- a/drivers/hid/hid-input.c
3464 +++ b/drivers/hid/hid-input.c
3465 @@ -350,13 +350,13 @@ static int hidinput_query_battery_capacity(struct hid_device *dev)
3466 u8 *buf;
3467 int ret;
3468
3469 - buf = kmalloc(2, GFP_KERNEL);
3470 + buf = kmalloc(4, GFP_KERNEL);
3471 if (!buf)
3472 return -ENOMEM;
3473
3474 - ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2,
3475 + ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 4,
3476 dev->battery_report_type, HID_REQ_GET_REPORT);
3477 - if (ret != 2) {
3478 + if (ret < 2) {
3479 kfree(buf);
3480 return -ENODATA;
3481 }
3482 diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
3483 index 36cce2bfb744..6375504ba8b0 100644
3484 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
3485 +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
3486 @@ -639,15 +639,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
3487
3488 spin_lock_irqsave(&drvdata->spinlock, flags);
3489
3490 - /* There is no point in reading a TMC in HW FIFO mode */
3491 - mode = readl_relaxed(drvdata->base + TMC_MODE);
3492 - if (mode != TMC_MODE_CIRCULAR_BUFFER) {
3493 - spin_unlock_irqrestore(&drvdata->spinlock, flags);
3494 - return -EINVAL;
3495 - }
3496 -
3497 /* Re-enable the TMC if need be */
3498 if (drvdata->mode == CS_MODE_SYSFS) {
3499 + /* There is no point in reading a TMC in HW FIFO mode */
3500 + mode = readl_relaxed(drvdata->base + TMC_MODE);
3501 + if (mode != TMC_MODE_CIRCULAR_BUFFER) {
3502 + spin_unlock_irqrestore(&drvdata->spinlock, flags);
3503 + return -EINVAL;
3504 + }
3505 /*
3506 * The trace run will continue with the same allocated trace
3507 * buffer. As such zero-out the buffer so that we don't end
3508 diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
3509 index 10ae6c6eab0a..59dc9f3cfb37 100644
3510 --- a/drivers/infiniband/core/device.c
3511 +++ b/drivers/infiniband/core/device.c
3512 @@ -1330,6 +1330,10 @@ out:
3513 return ret;
3514 }
3515
3516 +static void prevent_dealloc_device(struct ib_device *ib_dev)
3517 +{
3518 +}
3519 +
3520 /**
3521 * ib_register_device - Register an IB device with IB core
3522 * @device:Device to register
3523 @@ -1397,11 +1401,11 @@ int ib_register_device(struct ib_device *device, const char *name)
3524 * possibility for a parallel unregistration along with this
3525 * error flow. Since we have a refcount here we know any
3526 * parallel flow is stopped in disable_device and will see the
3527 - * NULL pointers, causing the responsibility to
3528 + * special dealloc_driver pointer, causing the responsibility to
3529 * ib_dealloc_device() to revert back to this thread.
3530 */
3531 dealloc_fn = device->ops.dealloc_driver;
3532 - device->ops.dealloc_driver = NULL;
3533 + device->ops.dealloc_driver = prevent_dealloc_device;
3534 ib_device_put(device);
3535 __ib_unregister_device(device);
3536 device->ops.dealloc_driver = dealloc_fn;
3537 @@ -1449,7 +1453,8 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
3538 * Drivers using the new flow may not call ib_dealloc_device except
3539 * in error unwind prior to registration success.
3540 */
3541 - if (ib_dev->ops.dealloc_driver) {
3542 + if (ib_dev->ops.dealloc_driver &&
3543 + ib_dev->ops.dealloc_driver != prevent_dealloc_device) {
3544 WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
3545 ib_dealloc_device(ib_dev);
3546 }
3547 diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
3548 index 244ebf285fc3..e4905d9fecb0 100644
3549 --- a/drivers/infiniband/core/nldev.c
3550 +++ b/drivers/infiniband/core/nldev.c
3551 @@ -702,9 +702,6 @@ static int fill_stat_counter_qps(struct sk_buff *msg,
3552 continue;
3553
3554 qp = container_of(res, struct ib_qp, res);
3555 - if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
3556 - continue;
3557 -
3558 if (!qp->counter || (qp->counter->id != counter->id))
3559 continue;
3560
3561 diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
3562 index 6c4093d0a91d..d4815f29cfd2 100644
3563 --- a/drivers/infiniband/core/verbs.c
3564 +++ b/drivers/infiniband/core/verbs.c
3565 @@ -1648,7 +1648,7 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
3566 if (!(rdma_protocol_ib(qp->device,
3567 attr->alt_ah_attr.port_num) &&
3568 rdma_protocol_ib(qp->device, port))) {
3569 - ret = EINVAL;
3570 + ret = -EINVAL;
3571 goto out;
3572 }
3573 }
3574 diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
3575 index 8e927f6c1520..ed56df319d2d 100644
3576 --- a/drivers/infiniband/hw/qedr/qedr.h
3577 +++ b/drivers/infiniband/hw/qedr/qedr.h
3578 @@ -349,10 +349,10 @@ struct qedr_srq_hwq_info {
3579 u32 wqe_prod;
3580 u32 sge_prod;
3581 u32 wr_prod_cnt;
3582 - u32 wr_cons_cnt;
3583 + atomic_t wr_cons_cnt;
3584 u32 num_elems;
3585
3586 - u32 *virt_prod_pair_addr;
3587 + struct rdma_srq_producers *virt_prod_pair_addr;
3588 dma_addr_t phy_prod_pair_addr;
3589 };
3590
3591 diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
3592 index 8b4240c1cc76..16a994fd7d0a 100644
3593 --- a/drivers/infiniband/hw/qedr/verbs.c
3594 +++ b/drivers/infiniband/hw/qedr/verbs.c
3595 @@ -3460,7 +3460,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3596 * count and consumer count and subtract it from max
3597 * work request supported so that we get elements left.
3598 */
3599 - used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
3600 + used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
3601
3602 return hw_srq->max_wr - used;
3603 }
3604 @@ -3475,7 +3475,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3605 unsigned long flags;
3606 int status = 0;
3607 u32 num_sge;
3608 - u32 offset;
3609
3610 spin_lock_irqsave(&srq->lock, flags);
3611
3612 @@ -3488,7 +3487,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3613 if (!qedr_srq_elem_left(hw_srq) ||
3614 wr->num_sge > srq->hw_srq.max_sges) {
3615 DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
3616 - hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
3617 + hw_srq->wr_prod_cnt,
3618 + atomic_read(&hw_srq->wr_cons_cnt),
3619 wr->num_sge, srq->hw_srq.max_sges);
3620 status = -ENOMEM;
3621 *bad_wr = wr;
3622 @@ -3522,22 +3522,20 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3623 hw_srq->sge_prod++;
3624 }
3625
3626 - /* Flush WQE and SGE information before
3627 + /* Update WQE and SGE information before
3628 * updating producer.
3629 */
3630 - wmb();
3631 + dma_wmb();
3632
3633 /* SRQ producer is 8 bytes. Need to update SGE producer index
3634 * in first 4 bytes and need to update WQE producer in
3635 * next 4 bytes.
3636 */
3637 - *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
3638 - offset = offsetof(struct rdma_srq_producers, wqe_prod);
3639 - *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
3640 - hw_srq->wqe_prod;
3641 + srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
3642 + /* Make sure sge producer is updated first */
3643 + dma_wmb();
3644 + srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;
3645
3646 - /* Flush producer after updating it. */
3647 - wmb();
3648 wr = wr->next;
3649 }
3650
3651 @@ -3956,7 +3954,7 @@ static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
3652 } else {
3653 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3654 }
3655 - srq->hw_srq.wr_cons_cnt++;
3656 + atomic_inc(&srq->hw_srq.wr_cons_cnt);
3657
3658 return 1;
3659 }
3660 diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
3661 index 831ad578a7b2..46e111c218fd 100644
3662 --- a/drivers/infiniband/sw/rxe/rxe_recv.c
3663 +++ b/drivers/infiniband/sw/rxe/rxe_recv.c
3664 @@ -330,10 +330,14 @@ err1:
3665
3666 static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
3667 {
3668 + struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
3669 const struct ib_gid_attr *gid_attr;
3670 union ib_gid dgid;
3671 union ib_gid *pdgid;
3672
3673 + if (pkt->mask & RXE_LOOPBACK_MASK)
3674 + return 0;
3675 +
3676 if (skb->protocol == htons(ETH_P_IP)) {
3677 ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
3678 (struct in6_addr *)&dgid);
3679 @@ -366,7 +370,7 @@ void rxe_rcv(struct sk_buff *skb)
3680 if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
3681 goto drop;
3682
3683 - if (unlikely(rxe_match_dgid(rxe, skb) < 0)) {
3684 + if (rxe_match_dgid(rxe, skb) < 0) {
3685 pr_warn_ratelimited("failed matching dgid\n");
3686 goto drop;
3687 }
3688 diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
3689 index 623129f27f5a..71358b0b8910 100644
3690 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
3691 +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
3692 @@ -679,6 +679,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
3693 unsigned int mask;
3694 unsigned int length = 0;
3695 int i;
3696 + struct ib_send_wr *next;
3697
3698 while (wr) {
3699 mask = wr_opcode_mask(wr->opcode, qp);
3700 @@ -695,6 +696,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
3701 break;
3702 }
3703
3704 + next = wr->next;
3705 +
3706 length = 0;
3707 for (i = 0; i < wr->num_sge; i++)
3708 length += wr->sg_list[i].length;
3709 @@ -705,7 +708,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
3710 *bad_wr = wr;
3711 break;
3712 }
3713 - wr = wr->next;
3714 + wr = next;
3715 }
3716
3717 rxe_run_task(&qp->req.task, 1);
3718 diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
3719 index 982d796b686b..6bfb283e6f28 100644
3720 --- a/drivers/iommu/intel_irq_remapping.c
3721 +++ b/drivers/iommu/intel_irq_remapping.c
3722 @@ -628,13 +628,21 @@ out_free_table:
3723
3724 static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
3725 {
3726 + struct fwnode_handle *fn;
3727 +
3728 if (iommu && iommu->ir_table) {
3729 if (iommu->ir_msi_domain) {
3730 + fn = iommu->ir_msi_domain->fwnode;
3731 +
3732 irq_domain_remove(iommu->ir_msi_domain);
3733 + irq_domain_free_fwnode(fn);
3734 iommu->ir_msi_domain = NULL;
3735 }
3736 if (iommu->ir_domain) {
3737 + fn = iommu->ir_domain->fwnode;
3738 +
3739 irq_domain_remove(iommu->ir_domain);
3740 + irq_domain_free_fwnode(fn);
3741 iommu->ir_domain = NULL;
3742 }
3743 free_pages((unsigned long)iommu->ir_table->base,
3744 diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
3745 index 73eae5966a40..6ff98b87e5c0 100644
3746 --- a/drivers/irqchip/irq-mtk-sysirq.c
3747 +++ b/drivers/irqchip/irq-mtk-sysirq.c
3748 @@ -15,7 +15,7 @@
3749 #include <linux/spinlock.h>
3750
3751 struct mtk_sysirq_chip_data {
3752 - spinlock_t lock;
3753 + raw_spinlock_t lock;
3754 u32 nr_intpol_bases;
3755 void __iomem **intpol_bases;
3756 u32 *intpol_words;
3757 @@ -37,7 +37,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
3758 reg_index = chip_data->which_word[hwirq];
3759 offset = hwirq & 0x1f;
3760
3761 - spin_lock_irqsave(&chip_data->lock, flags);
3762 + raw_spin_lock_irqsave(&chip_data->lock, flags);
3763 value = readl_relaxed(base + reg_index * 4);
3764 if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
3765 if (type == IRQ_TYPE_LEVEL_LOW)
3766 @@ -53,7 +53,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
3767
3768 data = data->parent_data;
3769 ret = data->chip->irq_set_type(data, type);
3770 - spin_unlock_irqrestore(&chip_data->lock, flags);
3771 + raw_spin_unlock_irqrestore(&chip_data->lock, flags);
3772 return ret;
3773 }
3774
3775 @@ -212,7 +212,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
3776 ret = -ENOMEM;
3777 goto out_free_which_word;
3778 }
3779 - spin_lock_init(&chip_data->lock);
3780 + raw_spin_lock_init(&chip_data->lock);
3781
3782 return 0;
3783
3784 diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
3785 index fa7488863bd0..0a35499c4672 100644
3786 --- a/drivers/irqchip/irq-ti-sci-inta.c
3787 +++ b/drivers/irqchip/irq-ti-sci-inta.c
3788 @@ -571,7 +571,7 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
3789 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3790 inta->base = devm_ioremap_resource(dev, res);
3791 if (IS_ERR(inta->base))
3792 - return -ENODEV;
3793 + return PTR_ERR(inta->base);
3794
3795 domain = irq_domain_add_linear(dev_of_node(dev),
3796 ti_sci_get_num_resources(inta->vint),
3797 diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
3798 index d3e83c33783e..0a4823d9797a 100644
3799 --- a/drivers/leds/led-class.c
3800 +++ b/drivers/leds/led-class.c
3801 @@ -172,6 +172,7 @@ void led_classdev_suspend(struct led_classdev *led_cdev)
3802 {
3803 led_cdev->flags |= LED_SUSPENDED;
3804 led_set_brightness_nopm(led_cdev, 0);
3805 + flush_work(&led_cdev->set_brightness_work);
3806 }
3807 EXPORT_SYMBOL_GPL(led_classdev_suspend);
3808
3809 diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
3810 index a5abb499574b..129f475aebf2 100644
3811 --- a/drivers/leds/leds-lm355x.c
3812 +++ b/drivers/leds/leds-lm355x.c
3813 @@ -165,18 +165,19 @@ static int lm355x_chip_init(struct lm355x_chip_data *chip)
3814 /* input and output pins configuration */
3815 switch (chip->type) {
3816 case CHIP_LM3554:
3817 - reg_val = pdata->pin_tx2 | pdata->ntc_pin;
3818 + reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin;
3819 ret = regmap_update_bits(chip->regmap, 0xE0, 0x28, reg_val);
3820 if (ret < 0)
3821 goto out;
3822 - reg_val = pdata->pass_mode;
3823 + reg_val = (u32)pdata->pass_mode;
3824 ret = regmap_update_bits(chip->regmap, 0xA0, 0x04, reg_val);
3825 if (ret < 0)
3826 goto out;
3827 break;
3828
3829 case CHIP_LM3556:
3830 - reg_val = pdata->pin_tx2 | pdata->ntc_pin | pdata->pass_mode;
3831 + reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin |
3832 + (u32)pdata->pass_mode;
3833 ret = regmap_update_bits(chip->regmap, 0x0A, 0xC4, reg_val);
3834 if (ret < 0)
3835 goto out;
3836 diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
3837 index ac824d7b2dcf..6aa903529570 100644
3838 --- a/drivers/macintosh/via-macii.c
3839 +++ b/drivers/macintosh/via-macii.c
3840 @@ -270,15 +270,12 @@ static int macii_autopoll(int devs)
3841 unsigned long flags;
3842 int err = 0;
3843
3844 + local_irq_save(flags);
3845 +
3846 /* bit 1 == device 1, and so on. */
3847 autopoll_devs = devs & 0xFFFE;
3848
3849 - if (!autopoll_devs)
3850 - return 0;
3851 -
3852 - local_irq_save(flags);
3853 -
3854 - if (current_req == NULL) {
3855 + if (autopoll_devs && !current_req) {
3856 /* Send a Talk Reg 0. The controller will repeatedly transmit
3857 * this as long as it is idle.
3858 */
3859 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
3860 index 68901745eb20..168d64707859 100644
3861 --- a/drivers/md/bcache/super.c
3862 +++ b/drivers/md/bcache/super.c
3863 @@ -2091,7 +2091,14 @@ found:
3864 sysfs_create_link(&c->kobj, &ca->kobj, buf))
3865 goto err;
3866
3867 - if (ca->sb.seq > c->sb.seq) {
3868 + /*
3869 + * A special case is both ca->sb.seq and c->sb.seq are 0,
3870 + * such condition happens on a new created cache device whose
3871 + * super block is never flushed yet. In this case c->sb.version
3872 + * and other members should be updated too, otherwise we will
3873 + * have a mistaken super block version in cache set.
3874 + */
3875 + if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
3876 c->sb.version = ca->sb.version;
3877 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
3878 c->sb.flags = ca->sb.flags;
3879 diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
3880 index 813a99ffa86f..73fd50e77975 100644
3881 --- a/drivers/md/md-cluster.c
3882 +++ b/drivers/md/md-cluster.c
3883 @@ -1518,6 +1518,7 @@ static void unlock_all_bitmaps(struct mddev *mddev)
3884 }
3885 }
3886 kfree(cinfo->other_bitmap_lockres);
3887 + cinfo->other_bitmap_lockres = NULL;
3888 }
3889 }
3890
3891 diff --git a/drivers/md/md.c b/drivers/md/md.c
3892 index 5a378a453a2d..acef01e519d0 100644
3893 --- a/drivers/md/md.c
3894 +++ b/drivers/md/md.c
3895 @@ -376,17 +376,18 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
3896 struct mddev *mddev = q->queuedata;
3897 unsigned int sectors;
3898
3899 - if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
3900 + if (mddev == NULL || mddev->pers == NULL) {
3901 bio_io_error(bio);
3902 return BLK_QC_T_NONE;
3903 }
3904
3905 - blk_queue_split(q, &bio);
3906 -
3907 - if (mddev == NULL || mddev->pers == NULL) {
3908 + if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
3909 bio_io_error(bio);
3910 return BLK_QC_T_NONE;
3911 }
3912 +
3913 + blk_queue_split(q, &bio);
3914 +
3915 if (mddev->ro == 1 && unlikely(rw == WRITE)) {
3916 if (bio_sectors(bio) != 0)
3917 bio->bi_status = BLK_STS_IOERR;
3918 diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c
3919 index 97144734eb05..3f1ca40b9b98 100644
3920 --- a/drivers/media/firewire/firedtv-fw.c
3921 +++ b/drivers/media/firewire/firedtv-fw.c
3922 @@ -272,6 +272,8 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
3923
3924 name_len = fw_csr_string(unit->directory, CSR_MODEL,
3925 name, sizeof(name));
3926 + if (name_len < 0)
3927 + return name_len;
3928 for (i = ARRAY_SIZE(model_names); --i; )
3929 if (strlen(model_names[i]) <= name_len &&
3930 strncmp(name, model_names[i], name_len) == 0)
3931 diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c
3932 index e3fca436c75b..c0782fd96c59 100644
3933 --- a/drivers/media/mc/mc-request.c
3934 +++ b/drivers/media/mc/mc-request.c
3935 @@ -296,9 +296,18 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
3936 if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
3937 return -ENOMEM;
3938
3939 + if (mdev->ops->req_alloc)
3940 + req = mdev->ops->req_alloc(mdev);
3941 + else
3942 + req = kzalloc(sizeof(*req), GFP_KERNEL);
3943 + if (!req)
3944 + return -ENOMEM;
3945 +
3946 fd = get_unused_fd_flags(O_CLOEXEC);
3947 - if (fd < 0)
3948 - return fd;
3949 + if (fd < 0) {
3950 + ret = fd;
3951 + goto err_free_req;
3952 + }
3953
3954 filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
3955 if (IS_ERR(filp)) {
3956 @@ -306,15 +315,6 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
3957 goto err_put_fd;
3958 }
3959
3960 - if (mdev->ops->req_alloc)
3961 - req = mdev->ops->req_alloc(mdev);
3962 - else
3963 - req = kzalloc(sizeof(*req), GFP_KERNEL);
3964 - if (!req) {
3965 - ret = -ENOMEM;
3966 - goto err_fput;
3967 - }
3968 -
3969 filp->private_data = req;
3970 req->mdev = mdev;
3971 req->state = MEDIA_REQUEST_STATE_IDLE;
3972 @@ -336,12 +336,15 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
3973
3974 return 0;
3975
3976 -err_fput:
3977 - fput(filp);
3978 -
3979 err_put_fd:
3980 put_unused_fd(fd);
3981
3982 +err_free_req:
3983 + if (mdev->ops->req_free)
3984 + mdev->ops->req_free(req);
3985 + else
3986 + kfree(req);
3987 +
3988 return ret;
3989 }
3990
3991 diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
3992 index 4a3b3810fd89..31390ce2dbf2 100644
3993 --- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
3994 +++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
3995 @@ -278,11 +278,7 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
3996 platform_set_drvdata(pdev, cros_ec_cec);
3997 cros_ec_cec->cros_ec = cros_ec;
3998
3999 - ret = device_init_wakeup(&pdev->dev, 1);
4000 - if (ret) {
4001 - dev_err(&pdev->dev, "failed to initialize wakeup\n");
4002 - return ret;
4003 - }
4004 + device_init_wakeup(&pdev->dev, 1);
4005
4006 cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec,
4007 DRV_NAME,
4008 diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
4009 index 9aaf3b8060d5..9c31d950cddf 100644
4010 --- a/drivers/media/platform/exynos4-is/media-dev.c
4011 +++ b/drivers/media/platform/exynos4-is/media-dev.c
4012 @@ -1270,6 +1270,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd)
4013
4014 pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
4015 PINCTRL_STATE_IDLE);
4016 + if (IS_ERR(pctl->state_idle))
4017 + return PTR_ERR(pctl->state_idle);
4018 +
4019 return 0;
4020 }
4021
4022 diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
4023 index 803baf97f06e..6de8b3d99fb9 100644
4024 --- a/drivers/media/platform/marvell-ccic/mcam-core.c
4025 +++ b/drivers/media/platform/marvell-ccic/mcam-core.c
4026 @@ -1940,6 +1940,7 @@ int mccic_register(struct mcam_camera *cam)
4027 out:
4028 v4l2_async_notifier_unregister(&cam->notifier);
4029 v4l2_device_unregister(&cam->v4l2_dev);
4030 + v4l2_async_notifier_cleanup(&cam->notifier);
4031 return ret;
4032 }
4033 EXPORT_SYMBOL_GPL(mccic_register);
4034 @@ -1961,6 +1962,7 @@ void mccic_shutdown(struct mcam_camera *cam)
4035 v4l2_ctrl_handler_free(&cam->ctrl_handler);
4036 v4l2_async_notifier_unregister(&cam->notifier);
4037 v4l2_device_unregister(&cam->v4l2_dev);
4038 + v4l2_async_notifier_cleanup(&cam->notifier);
4039 }
4040 EXPORT_SYMBOL_GPL(mccic_shutdown);
4041
4042 diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
4043 index 97d660606d98..b8c2b8bba826 100644
4044 --- a/drivers/media/platform/omap3isp/isppreview.c
4045 +++ b/drivers/media/platform/omap3isp/isppreview.c
4046 @@ -2287,7 +2287,7 @@ static int preview_init_entities(struct isp_prev_device *prev)
4047 me->ops = &preview_media_ops;
4048 ret = media_entity_pads_init(me, PREV_PADS_NUM, pads);
4049 if (ret < 0)
4050 - return ret;
4051 + goto error_handler_free;
4052
4053 preview_init_formats(sd, NULL);
4054
4055 @@ -2320,6 +2320,8 @@ error_video_out:
4056 omap3isp_video_cleanup(&prev->video_in);
4057 error_video_in:
4058 media_entity_cleanup(&prev->subdev.entity);
4059 +error_handler_free:
4060 + v4l2_ctrl_handler_free(&prev->ctrls);
4061 return ret;
4062 }
4063
4064 diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
4065 index 1a3e5f965ae4..2d7a5c1c84af 100644
4066 --- a/drivers/media/usb/dvb-usb/Kconfig
4067 +++ b/drivers/media/usb/dvb-usb/Kconfig
4068 @@ -150,6 +150,7 @@ config DVB_USB_CXUSB
4069 config DVB_USB_CXUSB_ANALOG
4070 bool "Analog support for the Conexant USB2.0 hybrid reference design"
4071 depends on DVB_USB_CXUSB && VIDEO_V4L2
4072 + depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_USB_CXUSB
4073 select VIDEO_CX25840
4074 select VIDEOBUF2_VMALLOC
4075 help
4076 diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
4077 index f0263d1a1fdf..d97a243ad30c 100644
4078 --- a/drivers/misc/cxl/sysfs.c
4079 +++ b/drivers/misc/cxl/sysfs.c
4080 @@ -624,7 +624,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
4081 rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
4082 &afu->dev.kobj, "cr%i", cr->cr);
4083 if (rc)
4084 - goto err;
4085 + goto err1;
4086
4087 rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
4088 if (rc)
4089 diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
4090 index 5f2e9696ee4d..0c2489446bd7 100644
4091 --- a/drivers/mmc/host/sdhci-cadence.c
4092 +++ b/drivers/mmc/host/sdhci-cadence.c
4093 @@ -194,57 +194,6 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
4094 return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp);
4095 }
4096
4097 -static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
4098 - unsigned int timing)
4099 -{
4100 - struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
4101 - u32 mode;
4102 -
4103 - switch (timing) {
4104 - case MMC_TIMING_MMC_HS:
4105 - mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
4106 - break;
4107 - case MMC_TIMING_MMC_DDR52:
4108 - mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
4109 - break;
4110 - case MMC_TIMING_MMC_HS200:
4111 - mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
4112 - break;
4113 - case MMC_TIMING_MMC_HS400:
4114 - if (priv->enhanced_strobe)
4115 - mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
4116 - else
4117 - mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
4118 - break;
4119 - default:
4120 - mode = SDHCI_CDNS_HRS06_MODE_SD;
4121 - break;
4122 - }
4123 -
4124 - sdhci_cdns_set_emmc_mode(priv, mode);
4125 -
4126 - /* For SD, fall back to the default handler */
4127 - if (mode == SDHCI_CDNS_HRS06_MODE_SD)
4128 - sdhci_set_uhs_signaling(host, timing);
4129 -}
4130 -
4131 -static const struct sdhci_ops sdhci_cdns_ops = {
4132 - .set_clock = sdhci_set_clock,
4133 - .get_timeout_clock = sdhci_cdns_get_timeout_clock,
4134 - .set_bus_width = sdhci_set_bus_width,
4135 - .reset = sdhci_reset,
4136 - .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
4137 -};
4138 -
4139 -static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = {
4140 - .ops = &sdhci_cdns_ops,
4141 - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
4142 -};
4143 -
4144 -static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
4145 - .ops = &sdhci_cdns_ops,
4146 -};
4147 -
4148 static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
4149 {
4150 struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
4151 @@ -278,23 +227,24 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
4152 return 0;
4153 }
4154
4155 -static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode)
4156 +/*
4157 + * In SD mode, software must not use the hardware tuning and instead perform
4158 + * an almost identical procedure to eMMC.
4159 + */
4160 +static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
4161 {
4162 - struct sdhci_host *host = mmc_priv(mmc);
4163 int cur_streak = 0;
4164 int max_streak = 0;
4165 int end_of_streak = 0;
4166 int i;
4167
4168 /*
4169 - * This handler only implements the eMMC tuning that is specific to
4170 - * this controller. Fall back to the standard method for SD timing.
4171 + * Do not execute tuning for UHS_SDR50 or UHS_DDR50.
4172 + * The delay is set by probe, based on the DT properties.
4173 */
4174 - if (host->timing != MMC_TIMING_MMC_HS200)
4175 - return sdhci_execute_tuning(mmc, opcode);
4176 -
4177 - if (WARN_ON(opcode != MMC_SEND_TUNING_BLOCK_HS200))
4178 - return -EINVAL;
4179 + if (host->timing != MMC_TIMING_MMC_HS200 &&
4180 + host->timing != MMC_TIMING_UHS_SDR104)
4181 + return 0;
4182
4183 for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) {
4184 if (sdhci_cdns_set_tune_val(host, i) ||
4185 @@ -317,6 +267,58 @@ static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode)
4186 return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
4187 }
4188
4189 +static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
4190 + unsigned int timing)
4191 +{
4192 + struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
4193 + u32 mode;
4194 +
4195 + switch (timing) {
4196 + case MMC_TIMING_MMC_HS:
4197 + mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
4198 + break;
4199 + case MMC_TIMING_MMC_DDR52:
4200 + mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
4201 + break;
4202 + case MMC_TIMING_MMC_HS200:
4203 + mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
4204 + break;
4205 + case MMC_TIMING_MMC_HS400:
4206 + if (priv->enhanced_strobe)
4207 + mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
4208 + else
4209 + mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
4210 + break;
4211 + default:
4212 + mode = SDHCI_CDNS_HRS06_MODE_SD;
4213 + break;
4214 + }
4215 +
4216 + sdhci_cdns_set_emmc_mode(priv, mode);
4217 +
4218 + /* For SD, fall back to the default handler */
4219 + if (mode == SDHCI_CDNS_HRS06_MODE_SD)
4220 + sdhci_set_uhs_signaling(host, timing);
4221 +}
4222 +
4223 +static const struct sdhci_ops sdhci_cdns_ops = {
4224 + .set_clock = sdhci_set_clock,
4225 + .get_timeout_clock = sdhci_cdns_get_timeout_clock,
4226 + .set_bus_width = sdhci_set_bus_width,
4227 + .reset = sdhci_reset,
4228 + .platform_execute_tuning = sdhci_cdns_execute_tuning,
4229 + .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
4230 +};
4231 +
4232 +static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = {
4233 + .ops = &sdhci_cdns_ops,
4234 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
4235 +};
4236 +
4237 +static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
4238 + .ops = &sdhci_cdns_ops,
4239 +};
4240 +
4241 static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
4242 struct mmc_ios *ios)
4243 {
4244 @@ -377,7 +379,6 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
4245 priv->hrs_addr = host->ioaddr;
4246 priv->enhanced_strobe = false;
4247 host->ioaddr += SDHCI_CDNS_SRS_BASE;
4248 - host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
4249 host->mmc_host_ops.hs400_enhanced_strobe =
4250 sdhci_cdns_hs400_enhanced_strobe;
4251 sdhci_enable_v4_mode(host);
4252 diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
4253 index fa8105087d68..41a2394313dd 100644
4254 --- a/drivers/mmc/host/sdhci-pci-o2micro.c
4255 +++ b/drivers/mmc/host/sdhci-pci-o2micro.c
4256 @@ -561,6 +561,12 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
4257 slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
4258 }
4259
4260 + if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD1) {
4261 + slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
4262 + host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
4263 + host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
4264 + }
4265 +
4266 host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning;
4267
4268 if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2)
4269 diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
4270 index 7bb9a7e8e1e7..c1c53b02b35f 100644
4271 --- a/drivers/mtd/nand/raw/qcom_nandc.c
4272 +++ b/drivers/mtd/nand/raw/qcom_nandc.c
4273 @@ -459,11 +459,13 @@ struct qcom_nand_host {
4274 * among different NAND controllers.
4275 * @ecc_modes - ecc mode for NAND
4276 * @is_bam - whether NAND controller is using BAM
4277 + * @is_qpic - whether NAND CTRL is part of qpic IP
4278 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
4279 */
4280 struct qcom_nandc_props {
4281 u32 ecc_modes;
4282 bool is_bam;
4283 + bool is_qpic;
4284 u32 dev_cmd_reg_start;
4285 };
4286
4287 @@ -2751,7 +2753,8 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
4288 u32 nand_ctrl;
4289
4290 /* kill onenand */
4291 - nandc_write(nandc, SFLASHC_BURST_CFG, 0);
4292 + if (!nandc->props->is_qpic)
4293 + nandc_write(nandc, SFLASHC_BURST_CFG, 0);
4294 nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
4295 NAND_DEV_CMD_VLD_VAL);
4296
4297 @@ -3007,12 +3010,14 @@ static const struct qcom_nandc_props ipq806x_nandc_props = {
4298 static const struct qcom_nandc_props ipq4019_nandc_props = {
4299 .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
4300 .is_bam = true,
4301 + .is_qpic = true,
4302 .dev_cmd_reg_start = 0x0,
4303 };
4304
4305 static const struct qcom_nandc_props ipq8074_nandc_props = {
4306 .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
4307 .is_bam = true,
4308 + .is_qpic = true,
4309 .dev_cmd_reg_start = 0x7000,
4310 };
4311
4312 diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
4313 index 6787d560e9e3..92e4d140df6f 100644
4314 --- a/drivers/net/dsa/mv88e6xxx/chip.c
4315 +++ b/drivers/net/dsa/mv88e6xxx/chip.c
4316 @@ -3063,7 +3063,6 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
4317 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
4318 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
4319 .port_set_ether_type = mv88e6351_port_set_ether_type,
4320 - .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
4321 .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
4322 .port_pause_limit = mv88e6097_port_pause_limit,
4323 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
4324 diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
4325 index ac88caca5ad4..1368816abaed 100644
4326 --- a/drivers/net/dsa/rtl8366.c
4327 +++ b/drivers/net/dsa/rtl8366.c
4328 @@ -43,18 +43,26 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
4329 int ret;
4330 int i;
4331
4332 + dev_dbg(smi->dev,
4333 + "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
4334 + vid, member, untag);
4335 +
4336 /* Update the 4K table */
4337 ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
4338 if (ret)
4339 return ret;
4340
4341 - vlan4k.member = member;
4342 - vlan4k.untag = untag;
4343 + vlan4k.member |= member;
4344 + vlan4k.untag |= untag;
4345 vlan4k.fid = fid;
4346 ret = smi->ops->set_vlan_4k(smi, &vlan4k);
4347 if (ret)
4348 return ret;
4349
4350 + dev_dbg(smi->dev,
4351 + "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
4352 + vid, vlan4k.member, vlan4k.untag);
4353 +
4354 /* Try to find an existing MC entry for this VID */
4355 for (i = 0; i < smi->num_vlan_mc; i++) {
4356 struct rtl8366_vlan_mc vlanmc;
4357 @@ -65,11 +73,16 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
4358
4359 if (vid == vlanmc.vid) {
4360 /* update the MC entry */
4361 - vlanmc.member = member;
4362 - vlanmc.untag = untag;
4363 + vlanmc.member |= member;
4364 + vlanmc.untag |= untag;
4365 vlanmc.fid = fid;
4366
4367 ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
4368 +
4369 + dev_dbg(smi->dev,
4370 + "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
4371 + vid, vlanmc.member, vlanmc.untag);
4372 +
4373 break;
4374 }
4375 }
4376 @@ -384,7 +397,7 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
4377 if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
4378 dev_err(smi->dev, "port is DSA or CPU port\n");
4379
4380 - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
4381 + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4382 int pvid_val = 0;
4383
4384 dev_info(smi->dev, "add VLAN %04x\n", vid);
4385 @@ -407,13 +420,13 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
4386 if (ret < 0)
4387 return;
4388 }
4389 - }
4390
4391 - ret = rtl8366_set_vlan(smi, port, member, untag, 0);
4392 - if (ret)
4393 - dev_err(smi->dev,
4394 - "failed to set up VLAN %04x",
4395 - vid);
4396 + ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
4397 + if (ret)
4398 + dev_err(smi->dev,
4399 + "failed to set up VLAN %04x",
4400 + vid);
4401 + }
4402 }
4403 EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
4404
4405 diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
4406 index 359a4d387185..9a0db70c1143 100644
4407 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
4408 +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
4409 @@ -776,7 +776,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
4410 int err = 0;
4411
4412 if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
4413 - err = EBADRQC;
4414 + err = -EBADRQC;
4415 goto err_exit;
4416 }
4417 for (self->aq_nic_cfg->mc_list_count = 0U;
4418 diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
4419 index 43d11c38b38a..4cddd628d41b 100644
4420 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
4421 +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
4422 @@ -1167,7 +1167,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
4423 oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
4424 CN23XX_PCIE_SRIOV_FDL_MASK);
4425 } else {
4426 - ret = EINVAL;
4427 + ret = -EINVAL;
4428
4429 /* Under some virtual environments, extended PCI regs are
4430 * inaccessible, in which case the above read will have failed.
4431 diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
4432 index f414f5651dbd..5c45c0c6dd23 100644
4433 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
4434 +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
4435 @@ -2185,6 +2185,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4436 nic->max_queues *= 2;
4437 nic->ptp_clock = ptp_clock;
4438
4439 + /* Initialize mutex that serializes usage of VF's mailbox */
4440 + mutex_init(&nic->rx_mode_mtx);
4441 +
4442 /* MAP VF's configuration registers */
4443 nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
4444 if (!nic->reg_base) {
4445 @@ -2261,7 +2264,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4446
4447 INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
4448 spin_lock_init(&nic->rx_mode_wq_lock);
4449 - mutex_init(&nic->rx_mode_mtx);
4450
4451 err = register_netdev(netdev);
4452 if (err) {
4453 diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
4454 index 4c2fa13a7dd7..c8e434c8ab98 100644
4455 --- a/drivers/net/ethernet/freescale/fman/fman.c
4456 +++ b/drivers/net/ethernet/freescale/fman/fman.c
4457 @@ -1396,8 +1396,7 @@ static void enable_time_stamp(struct fman *fman)
4458 {
4459 struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
4460 u16 fm_clk_freq = fman->state->fm_clk_freq;
4461 - u32 tmp, intgr, ts_freq;
4462 - u64 frac;
4463 + u32 tmp, intgr, ts_freq, frac;
4464
4465 ts_freq = (u32)(1 << fman->state->count1_micro_bit);
4466 /* configure timestamp so that bit 8 will count 1 microsecond
4467 diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
4468 index 1ca543ac8f2c..d2de9ea80c43 100644
4469 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
4470 +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
4471 @@ -1205,7 +1205,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
4472 list_for_each(pos,
4473 &dtsec->multicast_addr_hash->lsts[bucket]) {
4474 hash_entry = ETH_HASH_ENTRY_OBJ(pos);
4475 - if (hash_entry->addr == addr) {
4476 + if (hash_entry && hash_entry->addr == addr) {
4477 list_del_init(&hash_entry->node);
4478 kfree(hash_entry);
4479 break;
4480 @@ -1218,7 +1218,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
4481 list_for_each(pos,
4482 &dtsec->unicast_addr_hash->lsts[bucket]) {
4483 hash_entry = ETH_HASH_ENTRY_OBJ(pos);
4484 - if (hash_entry->addr == addr) {
4485 + if (hash_entry && hash_entry->addr == addr) {
4486 list_del_init(&hash_entry->node);
4487 kfree(hash_entry);
4488 break;
4489 diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
4490 index dd6d0526f6c1..19f327efdaff 100644
4491 --- a/drivers/net/ethernet/freescale/fman/fman_mac.h
4492 +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
4493 @@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size)
4494 struct eth_hash_t *hash;
4495
4496 /* Allocate address hash table */
4497 - hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL);
4498 + hash = kmalloc(sizeof(*hash), GFP_KERNEL);
4499 if (!hash)
4500 return NULL;
4501
4502 diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
4503 index e1901874c19f..9088b4f4b4b8 100644
4504 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c
4505 +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
4506 @@ -856,7 +856,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
4507
4508 tmp = ioread32be(&regs->command_config);
4509 tmp &= ~CMD_CFG_PFC_MODE;
4510 - priority = 0;
4511
4512 iowrite32be(tmp, &regs->command_config);
4513
4514 @@ -986,7 +985,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
4515
4516 list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
4517 hash_entry = ETH_HASH_ENTRY_OBJ(pos);
4518 - if (hash_entry->addr == addr) {
4519 + if (hash_entry && hash_entry->addr == addr) {
4520 list_del_init(&hash_entry->node);
4521 kfree(hash_entry);
4522 break;
4523 diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
4524 index ee82ee1384eb..47f6fee1f396 100644
4525 --- a/drivers/net/ethernet/freescale/fman/fman_port.c
4526 +++ b/drivers/net/ethernet/freescale/fman/fman_port.c
4527 @@ -1756,6 +1756,7 @@ static int fman_port_probe(struct platform_device *of_dev)
4528 struct fman_port *port;
4529 struct fman *fman;
4530 struct device_node *fm_node, *port_node;
4531 + struct platform_device *fm_pdev;
4532 struct resource res;
4533 struct resource *dev_res;
4534 u32 val;
4535 @@ -1780,8 +1781,14 @@ static int fman_port_probe(struct platform_device *of_dev)
4536 goto return_err;
4537 }
4538
4539 - fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
4540 + fm_pdev = of_find_device_by_node(fm_node);
4541 of_node_put(fm_node);
4542 + if (!fm_pdev) {
4543 + err = -EINVAL;
4544 + goto return_err;
4545 + }
4546 +
4547 + fman = dev_get_drvdata(&fm_pdev->dev);
4548 if (!fman) {
4549 err = -EINVAL;
4550 goto return_err;
4551 diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
4552 index f75b9c11b2d2..ac5a281e0ec3 100644
4553 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
4554 +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
4555 @@ -630,7 +630,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
4556
4557 list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
4558 hash_entry = ETH_HASH_ENTRY_OBJ(pos);
4559 - if (hash_entry->addr == addr) {
4560 + if (hash_entry && hash_entry->addr == addr) {
4561 list_del_init(&hash_entry->node);
4562 kfree(hash_entry);
4563 break;
4564 diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
4565 index bacc5fb7eba2..34124c213d27 100644
4566 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c
4567 +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
4568 @@ -1863,8 +1863,10 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
4569
4570 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
4571 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
4572 - if (!adapter->rss_key || !adapter->rss_lut)
4573 + if (!adapter->rss_key || !adapter->rss_lut) {
4574 + err = -ENOMEM;
4575 goto err_mem;
4576 + }
4577 if (RSS_AQ(adapter))
4578 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
4579 else
4580 @@ -1946,7 +1948,10 @@ static void iavf_watchdog_task(struct work_struct *work)
4581 iavf_send_api_ver(adapter);
4582 }
4583 } else {
4584 - if (!iavf_process_aq_command(adapter) &&
4585 + /* An error will be returned if no commands were
4586 + * processed; use this opportunity to update stats
4587 + */
4588 + if (iavf_process_aq_command(adapter) &&
4589 adapter->state == __IAVF_RUNNING)
4590 iavf_request_stats(adapter);
4591 }
4592 diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
4593 index cbd53b586c36..6cfe8eb7f47d 100644
4594 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
4595 +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
4596 @@ -1535,10 +1535,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
4597 es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
4598 sizeof(*es->ref_count),
4599 GFP_KERNEL);
4600 + if (!es->ref_count)
4601 + goto err;
4602
4603 es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
4604 sizeof(*es->written), GFP_KERNEL);
4605 - if (!es->ref_count)
4606 + if (!es->written)
4607 goto err;
4608 }
4609 return 0;
4610 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
4611 index 86e6bbb57482..b66e5b6eecd9 100644
4612 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
4613 +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
4614 @@ -809,18 +809,15 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
4615 {
4616 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
4617 struct mlx5_flow_table *iter;
4618 - int i = 0;
4619 int err;
4620
4621 fs_for_each_ft(iter, prio) {
4622 - i++;
4623 err = root->cmds->modify_flow_table(root, iter, ft);
4624 if (err) {
4625 - mlx5_core_warn(dev, "Failed to modify flow table %d\n",
4626 - iter->id);
4627 + mlx5_core_err(dev,
4628 + "Failed to modify flow table id %d, type %d, err %d\n",
4629 + iter->id, iter->type, err);
4630 /* The driver is out of sync with the FW */
4631 - if (i > 1)
4632 - WARN_ON(true);
4633 return err;
4634 }
4635 }
4636 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
4637 index 1e32e2443f73..348f02e336f6 100644
4638 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
4639 +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
4640 @@ -247,29 +247,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
4641
4642 /* The order of the actions are must to be keep, only the following
4643 * order is supported by SW steering:
4644 - * TX: push vlan -> modify header -> encap
4645 + * TX: modify header -> push vlan -> encap
4646 * RX: decap -> pop vlan -> modify header
4647 */
4648 - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
4649 - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
4650 - if (!tmp_action) {
4651 - err = -ENOMEM;
4652 - goto free_actions;
4653 - }
4654 - fs_dr_actions[fs_dr_num_actions++] = tmp_action;
4655 - actions[num_actions++] = tmp_action;
4656 - }
4657 -
4658 - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
4659 - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
4660 - if (!tmp_action) {
4661 - err = -ENOMEM;
4662 - goto free_actions;
4663 - }
4664 - fs_dr_actions[fs_dr_num_actions++] = tmp_action;
4665 - actions[num_actions++] = tmp_action;
4666 - }
4667 -
4668 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
4669 enum mlx5dr_action_reformat_type decap_type =
4670 DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
4671 @@ -322,6 +302,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
4672 actions[num_actions++] =
4673 fte->action.modify_hdr->action.dr_action;
4674
4675 + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
4676 + tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
4677 + if (!tmp_action) {
4678 + err = -ENOMEM;
4679 + goto free_actions;
4680 + }
4681 + fs_dr_actions[fs_dr_num_actions++] = tmp_action;
4682 + actions[num_actions++] = tmp_action;
4683 + }
4684 +
4685 + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
4686 + tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
4687 + if (!tmp_action) {
4688 + err = -ENOMEM;
4689 + goto free_actions;
4690 + }
4691 + fs_dr_actions[fs_dr_num_actions++] = tmp_action;
4692 + actions[num_actions++] = tmp_action;
4693 + }
4694 +
4695 if (delay_encap_set)
4696 actions[num_actions++] =
4697 fte->action.pkt_reformat->action.dr_action;
4698 diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
4699 index c00ec9a02097..e66002251596 100644
4700 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
4701 +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
4702 @@ -666,7 +666,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
4703 eid = le64_to_cpu(comp->event.eid);
4704
4705 /* Have we run out of new completions to process? */
4706 - if (eid <= lif->last_eid)
4707 + if ((s64)(eid - lif->last_eid) <= 0)
4708 return false;
4709
4710 lif->last_eid = eid;
4711 diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
4712 index 538e70810d3d..a99c7c95de5c 100644
4713 --- a/drivers/net/ethernet/toshiba/spider_net.c
4714 +++ b/drivers/net/ethernet/toshiba/spider_net.c
4715 @@ -283,8 +283,8 @@ spider_net_free_chain(struct spider_net_card *card,
4716 descr = descr->next;
4717 } while (descr != chain->ring);
4718
4719 - dma_free_coherent(&card->pdev->dev, chain->num_desc,
4720 - chain->hwring, chain->dma_addr);
4721 + dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
4722 + chain->hwring, chain->dma_addr);
4723 }
4724
4725 /**
4726 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
4727 index dba52a5c378a..110924d62744 100644
4728 --- a/drivers/net/phy/phy_device.c
4729 +++ b/drivers/net/phy/phy_device.c
4730 @@ -615,7 +615,9 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
4731 if (c45_ids)
4732 dev->c45_ids = *c45_ids;
4733 dev->irq = bus->irq[addr];
4734 +
4735 dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr);
4736 + device_initialize(&mdiodev->dev);
4737
4738 dev->state = PHY_DOWN;
4739
4740 @@ -649,10 +651,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
4741 ret = phy_request_driver_module(dev, phy_id);
4742 }
4743
4744 - if (!ret) {
4745 - device_initialize(&mdiodev->dev);
4746 - } else {
4747 - kfree(dev);
4748 + if (ret) {
4749 + put_device(&mdiodev->dev);
4750 dev = ERR_PTR(ret);
4751 }
4752
4753 diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
4754 index 216acf37ca7c..a06e6ab453f5 100644
4755 --- a/drivers/net/vmxnet3/vmxnet3_drv.c
4756 +++ b/drivers/net/vmxnet3/vmxnet3_drv.c
4757 @@ -861,7 +861,8 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
4758
4759 switch (protocol) {
4760 case IPPROTO_TCP:
4761 - ctx->l4_hdr_size = tcp_hdrlen(skb);
4762 + ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
4763 + tcp_hdrlen(skb);
4764 break;
4765 case IPPROTO_UDP:
4766 ctx->l4_hdr_size = sizeof(struct udphdr);
4767 diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
4768 index 134e4dd916c1..996eb9c55b39 100644
4769 --- a/drivers/net/wan/lapbether.c
4770 +++ b/drivers/net/wan/lapbether.c
4771 @@ -157,6 +157,12 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
4772 if (!netif_running(dev))
4773 goto drop;
4774
4775 + /* There should be a pseudo header of 1 byte added by upper layers.
4776 + * Check to make sure it is there before reading it.
4777 + */
4778 + if (skb->len < 1)
4779 + goto drop;
4780 +
4781 switch (skb->data[0]) {
4782 case X25_IFACE_DATA:
4783 break;
4784 @@ -305,6 +311,7 @@ static void lapbeth_setup(struct net_device *dev)
4785 dev->netdev_ops = &lapbeth_netdev_ops;
4786 dev->needs_free_netdev = true;
4787 dev->type = ARPHRD_X25;
4788 + dev->hard_header_len = 0;
4789 dev->mtu = 1000;
4790 dev->addr_len = 0;
4791 }
4792 @@ -331,7 +338,8 @@ static int lapbeth_new_device(struct net_device *dev)
4793 * then this driver prepends a length field of 2 bytes,
4794 * then the underlying Ethernet device prepends its own header.
4795 */
4796 - ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
4797 + ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
4798 + + dev->needed_headroom;
4799
4800 lapbeth = netdev_priv(ndev);
4801 lapbeth->axdev = ndev;
4802 diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
4803 index 735482877a1f..c38e1963ebc0 100644
4804 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c
4805 +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
4806 @@ -1540,7 +1540,9 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
4807 err_unmap_msdu:
4808 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
4809 err_free_msdu_id:
4810 + spin_lock_bh(&htt->tx_lock);
4811 ath10k_htt_tx_free_msdu_id(htt, msdu_id);
4812 + spin_unlock_bh(&htt->tx_lock);
4813 err:
4814 return res;
4815 }
4816 @@ -1747,7 +1749,9 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
4817 err_unmap_msdu:
4818 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
4819 err_free_msdu_id:
4820 + spin_lock_bh(&htt->tx_lock);
4821 ath10k_htt_tx_free_msdu_id(htt, msdu_id);
4822 + spin_unlock_bh(&htt->tx_lock);
4823 err:
4824 return res;
4825 }
4826 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
4827 index 37c512036e0e..ce18433aaefb 100644
4828 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
4829 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
4830 @@ -19,7 +19,7 @@
4831 #define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008
4832
4833 #define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
4834 -#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002
4835 +#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0004
4836
4837 #define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */
4838 #define BRCMF_STA_WME 0x00000002 /* WMM association */
4839 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
4840 index 2bd892df83cc..eadc64454839 100644
4841 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
4842 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
4843 @@ -643,6 +643,7 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
4844 static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
4845 int ifidx)
4846 {
4847 + struct brcmf_fws_hanger_item *hi;
4848 bool (*matchfn)(struct sk_buff *, void *) = NULL;
4849 struct sk_buff *skb;
4850 int prec;
4851 @@ -654,6 +655,9 @@ static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
4852 skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
4853 while (skb) {
4854 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
4855 + hi = &fws->hanger.items[hslot];
4856 + WARN_ON(skb != hi->pkt);
4857 + hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
4858 brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
4859 true);
4860 brcmu_pkt_buf_free_skb(skb);
4861 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
4862 index d43247a95ce5..38e6809f16c7 100644
4863 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
4864 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
4865 @@ -3685,7 +3685,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
4866 if (bus->idlecount > bus->idletime) {
4867 brcmf_dbg(SDIO, "idle\n");
4868 sdio_claim_host(bus->sdiodev->func1);
4869 - brcmf_sdio_wd_timer(bus, false);
4870 +#ifdef DEBUG
4871 + if (!BRCMF_FWCON_ON() ||
4872 + bus->console_interval == 0)
4873 +#endif
4874 + brcmf_sdio_wd_timer(bus, false);
4875 bus->idlecount = 0;
4876 brcmf_sdio_bus_sleep(bus, true, false);
4877 sdio_release_host(bus->sdiodev->func1);
4878 diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
4879 index 746749f37996..1107b96a8a88 100644
4880 --- a/drivers/net/wireless/intel/iwlegacy/common.c
4881 +++ b/drivers/net/wireless/intel/iwlegacy/common.c
4882 @@ -4286,8 +4286,8 @@ il_apm_init(struct il_priv *il)
4883 * power savings, even without L1.
4884 */
4885 if (il->cfg->set_l0s) {
4886 - pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
4887 - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
4888 + ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
4889 + if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) {
4890 /* L1-ASPM enabled; disable(!) L0S */
4891 il_set_bit(il, CSR_GIO_REG,
4892 CSR_GIO_REG_VAL_L0S_ENABLED);
4893 diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
4894 index f672bdf52cc1..2d9ec225aead 100644
4895 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h
4896 +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
4897 @@ -36,9 +36,9 @@
4898 #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
4899 #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
4900 #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
4901 -#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin"
4902 +#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin"
4903 #define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
4904 -#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
4905 +#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin"
4906
4907 #define BLOCK_MODE 1
4908 #define BYTE_MODE 0
4909 diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
4910 index 20c206da0631..7ae2c34f65db 100644
4911 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
4912 +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
4913 @@ -580,6 +580,11 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
4914 {
4915 struct host_cmd_ds_802_11_key_material *key =
4916 &resp->params.key_material;
4917 + int len;
4918 +
4919 + len = le16_to_cpu(key->key_param_set.key_len);
4920 + if (len > sizeof(key->key_param_set.key))
4921 + return -EINVAL;
4922
4923 if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) {
4924 if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) {
4925 @@ -593,9 +598,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
4926
4927 memset(priv->aes_key.key_param_set.key, 0,
4928 sizeof(key->key_param_set.key));
4929 - priv->aes_key.key_param_set.key_len = key->key_param_set.key_len;
4930 - memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key,
4931 - le16_to_cpu(priv->aes_key.key_param_set.key_len));
4932 + priv->aes_key.key_param_set.key_len = cpu_to_le16(len);
4933 + memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, len);
4934
4935 return 0;
4936 }
4937 @@ -610,9 +614,14 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
4938 struct host_cmd_ds_command *resp)
4939 {
4940 struct host_cmd_ds_802_11_key_material_v2 *key_v2;
4941 - __le16 len;
4942 + int len;
4943
4944 key_v2 = &resp->params.key_material_v2;
4945 +
4946 + len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len);
4947 + if (len > WLAN_KEY_LEN_CCMP)
4948 + return -EINVAL;
4949 +
4950 if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
4951 if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
4952 mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
4953 @@ -628,10 +637,9 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
4954 memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
4955 WLAN_KEY_LEN_CCMP);
4956 priv->aes_key_v2.key_param_set.key_params.aes.key_len =
4957 - key_v2->key_param_set.key_params.aes.key_len;
4958 - len = priv->aes_key_v2.key_param_set.key_params.aes.key_len;
4959 + cpu_to_le16(len);
4960 memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
4961 - key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len));
4962 + key_v2->key_param_set.key_params.aes.key, len);
4963
4964 return 0;
4965 }
4966 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
4967 index 842cd81704db..b6867d93c0e3 100644
4968 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
4969 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
4970 @@ -119,8 +119,10 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
4971 struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
4972 int ret = 0;
4973
4974 - if (seq != rxd->seq)
4975 - return -EAGAIN;
4976 + if (seq != rxd->seq) {
4977 + ret = -EAGAIN;
4978 + goto out;
4979 + }
4980
4981 switch (cmd) {
4982 case -MCU_CMD_PATCH_SEM_CONTROL:
4983 @@ -134,6 +136,7 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
4984 default:
4985 break;
4986 }
4987 +out:
4988 dev_kfree_skb(skb);
4989
4990 return ret;
4991 diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
4992 index 3e95ad198912..853ac1c2ed73 100644
4993 --- a/drivers/net/wireless/realtek/rtw88/coex.c
4994 +++ b/drivers/net/wireless/realtek/rtw88/coex.c
4995 @@ -1923,7 +1923,8 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
4996 if (coex_stat->wl_under_ips)
4997 return;
4998
4999 - if (coex->freeze && !coex_stat->bt_setup_link)
5000 + if (coex->freeze && coex_dm->reason == COEX_RSN_BTINFO &&
5001 + !coex_stat->bt_setup_link)
5002 return;
5003
5004 coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++;
5005 diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
5006 index 35dbdb3c4f1e..8efaee7571f3 100644
5007 --- a/drivers/net/wireless/realtek/rtw88/fw.c
5008 +++ b/drivers/net/wireless/realtek/rtw88/fw.c
5009 @@ -340,7 +340,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
5010 SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
5011 SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
5012 SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
5013 - SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en);
5014 + SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
5015 SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
5016 SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
5017 SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
5018 diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
5019 index 88e2252bf8a2..15c7a6fc37b9 100644
5020 --- a/drivers/net/wireless/realtek/rtw88/main.c
5021 +++ b/drivers/net/wireless/realtek/rtw88/main.c
5022 @@ -553,8 +553,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
5023 stbc_en = VHT_STBC_EN;
5024 if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
5025 ldpc_en = VHT_LDPC_EN;
5026 - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
5027 - is_support_sgi = true;
5028 } else if (sta->ht_cap.ht_supported) {
5029 ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) |
5030 (sta->ht_cap.mcs.rx_mask[0] << 12);
5031 @@ -562,9 +560,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
5032 stbc_en = HT_STBC_EN;
5033 if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
5034 ldpc_en = HT_LDPC_EN;
5035 - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20 ||
5036 - sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
5037 - is_support_sgi = true;
5038 }
5039
5040 if (efuse->hw_cap.nss == 1)
5041 @@ -606,12 +601,18 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
5042 switch (sta->bandwidth) {
5043 case IEEE80211_STA_RX_BW_80:
5044 bw_mode = RTW_CHANNEL_WIDTH_80;
5045 + is_support_sgi = sta->vht_cap.vht_supported &&
5046 + (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
5047 break;
5048 case IEEE80211_STA_RX_BW_40:
5049 bw_mode = RTW_CHANNEL_WIDTH_40;
5050 + is_support_sgi = sta->ht_cap.ht_supported &&
5051 + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
5052 break;
5053 default:
5054 bw_mode = RTW_CHANNEL_WIDTH_20;
5055 + is_support_sgi = sta->ht_cap.ht_supported &&
5056 + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
5057 break;
5058 }
5059
5060 diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
5061 index 850864dbafa1..e6d426edab56 100644
5062 --- a/drivers/net/wireless/ti/wl1251/event.c
5063 +++ b/drivers/net/wireless/ti/wl1251/event.c
5064 @@ -70,7 +70,7 @@ static int wl1251_event_ps_report(struct wl1251 *wl,
5065 break;
5066 }
5067
5068 - return 0;
5069 + return ret;
5070 }
5071
5072 static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
5073 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
5074 index 071b63146d4b..ff5681da8780 100644
5075 --- a/drivers/nvme/host/core.c
5076 +++ b/drivers/nvme/host/core.c
5077 @@ -1074,6 +1074,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
5078 int pos;
5079 int len;
5080
5081 + if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
5082 + return 0;
5083 +
5084 c.identify.opcode = nvme_admin_identify;
5085 c.identify.nsid = cpu_to_le32(nsid);
5086 c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
5087 @@ -1087,18 +1090,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
5088 if (status) {
5089 dev_warn(ctrl->device,
5090 "Identify Descriptors failed (%d)\n", status);
5091 - /*
5092 - * Don't treat non-retryable errors as fatal, as we potentially
5093 - * already have a NGUID or EUI-64. If we failed with DNR set,
5094 - * we want to silently ignore the error as we can still
5095 - * identify the device, but if the status has DNR set, we want
5096 - * to propagate the error back specifically for the disk
5097 - * revalidation flow to make sure we don't abandon the
5098 - * device just because of a temporal retry-able error (such
5099 - * as path of transport errors).
5100 - */
5101 - if (status > 0 && (status & NVME_SC_DNR))
5102 - status = 0;
5103 goto free_data;
5104 }
5105
5106 diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
5107 index 5433aa2f7601..484aad0d0c9c 100644
5108 --- a/drivers/nvme/host/multipath.c
5109 +++ b/drivers/nvme/host/multipath.c
5110 @@ -249,6 +249,12 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
5111 fallback = ns;
5112 }
5113
5114 + /* No optimized path found, re-check the current path */
5115 + if (!nvme_path_is_disabled(old) &&
5116 + old->ana_state == NVME_ANA_OPTIMIZED) {
5117 + found = old;
5118 + goto out;
5119 + }
5120 if (!fallback)
5121 return NULL;
5122 found = fallback;
5123 @@ -269,10 +275,13 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
5124 struct nvme_ns *ns;
5125
5126 ns = srcu_dereference(head->current_path[node], &head->srcu);
5127 - if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns)
5128 - ns = nvme_round_robin_path(head, node, ns);
5129 - if (unlikely(!ns || !nvme_path_is_optimized(ns)))
5130 - ns = __nvme_find_path(head, node);
5131 + if (unlikely(!ns))
5132 + return __nvme_find_path(head, node);
5133 +
5134 + if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
5135 + return nvme_round_robin_path(head, node, ns);
5136 + if (unlikely(!nvme_path_is_optimized(ns)))
5137 + return __nvme_find_path(head, node);
5138 return ns;
5139 }
5140
5141 diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
5142 index ed02260862cb..056953bd8bd8 100644
5143 --- a/drivers/nvme/host/nvme.h
5144 +++ b/drivers/nvme/host/nvme.h
5145 @@ -115,6 +115,13 @@ enum nvme_quirks {
5146 * Prevent tag overlap between queues
5147 */
5148 NVME_QUIRK_SHARED_TAGS = (1 << 13),
5149 +
5150 + /*
5151 + * The controller doesn't handle the Identify Namespace
5152 + * Identification Descriptor list subcommand despite claiming
5153 + * NVMe 1.3 compliance.
5154 + */
5155 + NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
5156 };
5157
5158 /*
5159 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
5160 index ee7669f23cff..100da11ce98c 100644
5161 --- a/drivers/nvme/host/pci.c
5162 +++ b/drivers/nvme/host/pci.c
5163 @@ -3117,6 +3117,8 @@ static const struct pci_device_id nvme_id_table[] = {
5164 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
5165 .driver_data = NVME_QUIRK_IDENTIFY_CNS |
5166 NVME_QUIRK_DISABLE_WRITE_ZEROES, },
5167 + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
5168 + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
5169 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
5170 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
5171 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
5172 diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
5173 index cd0d49978190..d0336545e1fe 100644
5174 --- a/drivers/nvme/host/rdma.c
5175 +++ b/drivers/nvme/host/rdma.c
5176 @@ -890,15 +890,20 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
5177 ret = PTR_ERR(ctrl->ctrl.connect_q);
5178 goto out_free_tag_set;
5179 }
5180 - } else {
5181 - blk_mq_update_nr_hw_queues(&ctrl->tag_set,
5182 - ctrl->ctrl.queue_count - 1);
5183 }
5184
5185 ret = nvme_rdma_start_io_queues(ctrl);
5186 if (ret)
5187 goto out_cleanup_connect_q;
5188
5189 + if (!new) {
5190 + nvme_start_queues(&ctrl->ctrl);
5191 + nvme_wait_freeze(&ctrl->ctrl);
5192 + blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
5193 + ctrl->ctrl.queue_count - 1);
5194 + nvme_unfreeze(&ctrl->ctrl);
5195 + }
5196 +
5197 return 0;
5198
5199 out_cleanup_connect_q:
5200 @@ -931,6 +936,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
5201 bool remove)
5202 {
5203 if (ctrl->ctrl.queue_count > 1) {
5204 + nvme_start_freeze(&ctrl->ctrl);
5205 nvme_stop_queues(&ctrl->ctrl);
5206 nvme_rdma_stop_io_queues(ctrl);
5207 if (ctrl->ctrl.tagset) {
5208 diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
5209 index 53e113a18a54..0166ff0e4738 100644
5210 --- a/drivers/nvme/host/tcp.c
5211 +++ b/drivers/nvme/host/tcp.c
5212 @@ -1684,15 +1684,20 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
5213 ret = PTR_ERR(ctrl->connect_q);
5214 goto out_free_tag_set;
5215 }
5216 - } else {
5217 - blk_mq_update_nr_hw_queues(ctrl->tagset,
5218 - ctrl->queue_count - 1);
5219 }
5220
5221 ret = nvme_tcp_start_io_queues(ctrl);
5222 if (ret)
5223 goto out_cleanup_connect_q;
5224
5225 + if (!new) {
5226 + nvme_start_queues(ctrl);
5227 + nvme_wait_freeze(ctrl);
5228 + blk_mq_update_nr_hw_queues(ctrl->tagset,
5229 + ctrl->queue_count - 1);
5230 + nvme_unfreeze(ctrl);
5231 + }
5232 +
5233 return 0;
5234
5235 out_cleanup_connect_q:
5236 @@ -1797,6 +1802,7 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
5237 {
5238 if (ctrl->queue_count <= 1)
5239 return;
5240 + nvme_start_freeze(ctrl);
5241 nvme_stop_queues(ctrl);
5242 nvme_tcp_stop_io_queues(ctrl);
5243 if (ctrl->tagset) {
5244 diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
5245 index de8e4e347249..e410033b6df0 100644
5246 --- a/drivers/parisc/sba_iommu.c
5247 +++ b/drivers/parisc/sba_iommu.c
5248 @@ -1270,7 +1270,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
5249 ** (one that doesn't overlap memory or LMMIO space) in the
5250 ** IBASE and IMASK registers.
5251 */
5252 - ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
5253 + ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
5254 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
5255
5256 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
5257 diff --git a/drivers/pci/access.c b/drivers/pci/access.c
5258 index 2fccb5762c76..0914ddeae17f 100644
5259 --- a/drivers/pci/access.c
5260 +++ b/drivers/pci/access.c
5261 @@ -204,17 +204,13 @@ EXPORT_SYMBOL(pci_bus_set_ops);
5262 static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
5263
5264 static noinline void pci_wait_cfg(struct pci_dev *dev)
5265 + __must_hold(&pci_lock)
5266 {
5267 - DECLARE_WAITQUEUE(wait, current);
5268 -
5269 - __add_wait_queue(&pci_cfg_wait, &wait);
5270 do {
5271 - set_current_state(TASK_UNINTERRUPTIBLE);
5272 raw_spin_unlock_irq(&pci_lock);
5273 - schedule();
5274 + wait_event(pci_cfg_wait, !dev->block_cfg_access);
5275 raw_spin_lock_irq(&pci_lock);
5276 } while (dev->block_cfg_access);
5277 - __remove_wait_queue(&pci_cfg_wait, &wait);
5278 }
5279
5280 /* Returns 0 on success, negative values indicate error. */
5281 diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c
5282 index 97e251090b4f..0dfc778f40a7 100644
5283 --- a/drivers/pci/controller/pcie-cadence-host.c
5284 +++ b/drivers/pci/controller/pcie-cadence-host.c
5285 @@ -102,6 +102,7 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
5286 {
5287 struct cdns_pcie *pcie = &rc->pcie;
5288 u32 value, ctrl;
5289 + u32 id;
5290
5291 /*
5292 * Set the root complex BAR configuration register:
5293 @@ -121,8 +122,12 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
5294 cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
5295
5296 /* Set root port configuration space */
5297 - if (rc->vendor_id != 0xffff)
5298 - cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
5299 + if (rc->vendor_id != 0xffff) {
5300 + id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
5301 + CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
5302 + cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
5303 + }
5304 +
5305 if (rc->device_id != 0xffff)
5306 cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
5307
5308 diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
5309 index 7c24c0aedad4..9966dcf1d112 100644
5310 --- a/drivers/pci/controller/vmd.c
5311 +++ b/drivers/pci/controller/vmd.c
5312 @@ -694,6 +694,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
5313 if (!vmd->bus) {
5314 pci_free_resource_list(&resources);
5315 irq_domain_remove(vmd->irq_domain);
5316 + irq_domain_free_fwnode(fn);
5317 return -ENODEV;
5318 }
5319
5320 @@ -808,6 +809,7 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd)
5321 static void vmd_remove(struct pci_dev *dev)
5322 {
5323 struct vmd_dev *vmd = pci_get_drvdata(dev);
5324 + struct fwnode_handle *fn = vmd->irq_domain->fwnode;
5325
5326 sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
5327 pci_stop_root_bus(vmd->bus);
5328 @@ -816,6 +818,7 @@ static void vmd_remove(struct pci_dev *dev)
5329 vmd_teardown_dma_ops(vmd);
5330 vmd_detach_resources(vmd);
5331 irq_domain_remove(vmd->irq_domain);
5332 + irq_domain_free_fwnode(fn);
5333 }
5334
5335 #ifdef CONFIG_PM_SLEEP
5336 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
5337 index 4a0ec34062d6..7624c71011c6 100644
5338 --- a/drivers/pci/pcie/aspm.c
5339 +++ b/drivers/pci/pcie/aspm.c
5340 @@ -1157,6 +1157,7 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
5341 cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
5342 else
5343 cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
5344 + cnt += sprintf(buffer + cnt, "\n");
5345 return cnt;
5346 }
5347
5348 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
5349 index 4ac4b28e0ebb..9bc0f321aaf0 100644
5350 --- a/drivers/pci/quirks.c
5351 +++ b/drivers/pci/quirks.c
5352 @@ -4446,6 +4446,8 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
5353 if (ACPI_FAILURE(status))
5354 return -ENODEV;
5355
5356 + acpi_put_table(header);
5357 +
5358 /* Filter out flags not applicable to multifunction */
5359 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
5360
5361 diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c
5362 index 6960dfd8ad8c..0fe408964334 100644
5363 --- a/drivers/phy/marvell/phy-armada38x-comphy.c
5364 +++ b/drivers/phy/marvell/phy-armada38x-comphy.c
5365 @@ -41,6 +41,7 @@ struct a38x_comphy_lane {
5366
5367 struct a38x_comphy {
5368 void __iomem *base;
5369 + void __iomem *conf;
5370 struct device *dev;
5371 struct a38x_comphy_lane lane[MAX_A38X_COMPHY];
5372 };
5373 @@ -54,6 +55,21 @@ static const u8 gbe_mux[MAX_A38X_COMPHY][MAX_A38X_PORTS] = {
5374 { 0, 0, 3 },
5375 };
5376
5377 +static void a38x_set_conf(struct a38x_comphy_lane *lane, bool enable)
5378 +{
5379 + struct a38x_comphy *priv = lane->priv;
5380 + u32 conf;
5381 +
5382 + if (priv->conf) {
5383 + conf = readl_relaxed(priv->conf);
5384 + if (enable)
5385 + conf |= BIT(lane->port);
5386 + else
5387 + conf &= ~BIT(lane->port);
5388 + writel(conf, priv->conf);
5389 + }
5390 +}
5391 +
5392 static void a38x_comphy_set_reg(struct a38x_comphy_lane *lane,
5393 unsigned int offset, u32 mask, u32 value)
5394 {
5395 @@ -97,6 +113,7 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub)
5396 {
5397 struct a38x_comphy_lane *lane = phy_get_drvdata(phy);
5398 unsigned int gen;
5399 + int ret;
5400
5401 if (mode != PHY_MODE_ETHERNET)
5402 return -EINVAL;
5403 @@ -115,13 +132,20 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub)
5404 return -EINVAL;
5405 }
5406
5407 + a38x_set_conf(lane, false);
5408 +
5409 a38x_comphy_set_speed(lane, gen, gen);
5410
5411 - return a38x_comphy_poll(lane, COMPHY_STAT1,
5412 - COMPHY_STAT1_PLL_RDY_TX |
5413 - COMPHY_STAT1_PLL_RDY_RX,
5414 - COMPHY_STAT1_PLL_RDY_TX |
5415 - COMPHY_STAT1_PLL_RDY_RX);
5416 + ret = a38x_comphy_poll(lane, COMPHY_STAT1,
5417 + COMPHY_STAT1_PLL_RDY_TX |
5418 + COMPHY_STAT1_PLL_RDY_RX,
5419 + COMPHY_STAT1_PLL_RDY_TX |
5420 + COMPHY_STAT1_PLL_RDY_RX);
5421 +
5422 + if (ret == 0)
5423 + a38x_set_conf(lane, true);
5424 +
5425 + return ret;
5426 }
5427
5428 static const struct phy_ops a38x_comphy_ops = {
5429 @@ -174,14 +198,21 @@ static int a38x_comphy_probe(struct platform_device *pdev)
5430 if (!priv)
5431 return -ENOMEM;
5432
5433 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5434 - base = devm_ioremap_resource(&pdev->dev, res);
5435 + base = devm_platform_ioremap_resource(pdev, 0);
5436 if (IS_ERR(base))
5437 return PTR_ERR(base);
5438
5439 priv->dev = &pdev->dev;
5440 priv->base = base;
5441
5442 + /* Optional */
5443 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "conf");
5444 + if (res) {
5445 + priv->conf = devm_ioremap_resource(&pdev->dev, res);
5446 + if (IS_ERR(priv->conf))
5447 + return PTR_ERR(priv->conf);
5448 + }
5449 +
5450 for_each_available_child_of_node(pdev->dev.of_node, child) {
5451 struct phy *phy;
5452 int ret;
5453 diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
5454 index bfb22f868857..5087b7c44d55 100644
5455 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
5456 +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
5457 @@ -111,6 +111,7 @@ struct rcar_gen3_chan {
5458 struct work_struct work;
5459 struct mutex lock; /* protects rphys[...].powered */
5460 enum usb_dr_mode dr_mode;
5461 + int irq;
5462 bool extcon_host;
5463 bool is_otg_channel;
5464 bool uses_otg_pins;
5465 @@ -389,12 +390,38 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
5466 rcar_gen3_device_recognition(ch);
5467 }
5468
5469 +static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
5470 +{
5471 + struct rcar_gen3_chan *ch = _ch;
5472 + void __iomem *usb2_base = ch->base;
5473 + u32 status = readl(usb2_base + USB2_OBINTSTA);
5474 + irqreturn_t ret = IRQ_NONE;
5475 +
5476 + if (status & USB2_OBINT_BITS) {
5477 + dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
5478 + writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
5479 + rcar_gen3_device_recognition(ch);
5480 + ret = IRQ_HANDLED;
5481 + }
5482 +
5483 + return ret;
5484 +}
5485 +
5486 static int rcar_gen3_phy_usb2_init(struct phy *p)
5487 {
5488 struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
5489 struct rcar_gen3_chan *channel = rphy->ch;
5490 void __iomem *usb2_base = channel->base;
5491 u32 val;
5492 + int ret;
5493 +
5494 + if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) {
5495 + INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
5496 + ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq,
5497 + IRQF_SHARED, dev_name(channel->dev), channel);
5498 + if (ret < 0)
5499 + dev_err(channel->dev, "No irq handler (%d)\n", channel->irq);
5500 + }
5501
5502 /* Initialize USB2 part */
5503 val = readl(usb2_base + USB2_INT_ENABLE);
5504 @@ -433,6 +460,9 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
5505 val &= ~USB2_INT_ENABLE_UCOM_INTEN;
5506 writel(val, usb2_base + USB2_INT_ENABLE);
5507
5508 + if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel))
5509 + free_irq(channel->irq, channel);
5510 +
5511 return 0;
5512 }
5513
5514 @@ -503,23 +533,6 @@ static const struct phy_ops rz_g1c_phy_usb2_ops = {
5515 .owner = THIS_MODULE,
5516 };
5517
5518 -static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
5519 -{
5520 - struct rcar_gen3_chan *ch = _ch;
5521 - void __iomem *usb2_base = ch->base;
5522 - u32 status = readl(usb2_base + USB2_OBINTSTA);
5523 - irqreturn_t ret = IRQ_NONE;
5524 -
5525 - if (status & USB2_OBINT_BITS) {
5526 - dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
5527 - writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
5528 - rcar_gen3_device_recognition(ch);
5529 - ret = IRQ_HANDLED;
5530 - }
5531 -
5532 - return ret;
5533 -}
5534 -
5535 static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
5536 {
5537 .compatible = "renesas,usb2-phy-r8a77470",
5538 @@ -598,7 +611,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
5539 struct phy_provider *provider;
5540 struct resource *res;
5541 const struct phy_ops *phy_usb2_ops;
5542 - int irq, ret = 0, i;
5543 + int ret = 0, i;
5544
5545 if (!dev->of_node) {
5546 dev_err(dev, "This driver needs device tree\n");
5547 @@ -614,16 +627,8 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
5548 if (IS_ERR(channel->base))
5549 return PTR_ERR(channel->base);
5550
5551 - /* call request_irq for OTG */
5552 - irq = platform_get_irq_optional(pdev, 0);
5553 - if (irq >= 0) {
5554 - INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
5555 - irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
5556 - IRQF_SHARED, dev_name(dev), channel);
5557 - if (irq < 0)
5558 - dev_err(dev, "No irq handler (%d)\n", irq);
5559 - }
5560 -
5561 + /* get irq number here and request_irq for OTG in phy_init */
5562 + channel->irq = platform_get_irq_optional(pdev, 0);
5563 channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
5564 if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
5565 int ret;
5566 diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
5567 index e510732afb8b..7f6279fb4f8f 100644
5568 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
5569 +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
5570 @@ -714,7 +714,9 @@ static int exynos5_usbdrd_phy_calibrate(struct phy *phy)
5571 struct phy_usb_instance *inst = phy_get_drvdata(phy);
5572 struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
5573
5574 - return exynos5420_usbdrd_phy_calibrate(phy_drd);
5575 + if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI)
5576 + return exynos5420_usbdrd_phy_calibrate(phy_drd);
5577 + return 0;
5578 }
5579
5580 static const struct phy_ops exynos5_usbdrd_phy_ops = {
5581 diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
5582 index 1e0614daee9b..a9d511982780 100644
5583 --- a/drivers/pinctrl/pinctrl-single.c
5584 +++ b/drivers/pinctrl/pinctrl-single.c
5585 @@ -916,7 +916,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
5586
5587 /* If pinconf isn't supported, don't parse properties in below. */
5588 if (!PCS_HAS_PINCONF)
5589 - return 0;
5590 + return -ENOTSUPP;
5591
5592 /* cacluate how much properties are supported in current node */
5593 for (i = 0; i < ARRAY_SIZE(prop2); i++) {
5594 @@ -928,7 +928,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
5595 nconfs++;
5596 }
5597 if (!nconfs)
5598 - return 0;
5599 + return -ENOTSUPP;
5600
5601 func->conf = devm_kcalloc(pcs->dev,
5602 nconfs, sizeof(struct pcs_conf_vals),
5603 @@ -1056,9 +1056,12 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
5604
5605 if (PCS_HAS_PINCONF && function) {
5606 res = pcs_parse_pinconf(pcs, np, function, map);
5607 - if (res)
5608 + if (res == 0)
5609 + *num_maps = 2;
5610 + else if (res == -ENOTSUPP)
5611 + *num_maps = 1;
5612 + else
5613 goto free_pingroups;
5614 - *num_maps = 2;
5615 } else {
5616 *num_maps = 1;
5617 }
5618 diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
5619 index 0d42477946f3..59b78a181723 100644
5620 --- a/drivers/platform/x86/asus-nb-wmi.c
5621 +++ b/drivers/platform/x86/asus-nb-wmi.c
5622 @@ -110,6 +110,16 @@ static struct quirk_entry quirk_asus_forceals = {
5623 .wmi_force_als_set = true,
5624 };
5625
5626 +static struct quirk_entry quirk_asus_ga401i = {
5627 + .wmi_backlight_power = true,
5628 + .wmi_backlight_set_devstate = true,
5629 +};
5630 +
5631 +static struct quirk_entry quirk_asus_ga502i = {
5632 + .wmi_backlight_power = true,
5633 + .wmi_backlight_set_devstate = true,
5634 +};
5635 +
5636 static int dmi_matched(const struct dmi_system_id *dmi)
5637 {
5638 pr_info("Identified laptop model '%s'\n", dmi->ident);
5639 @@ -411,6 +421,78 @@ static const struct dmi_system_id asus_quirks[] = {
5640 },
5641 .driver_data = &quirk_asus_forceals,
5642 },
5643 + {
5644 + .callback = dmi_matched,
5645 + .ident = "ASUSTeK COMPUTER INC. GA401IH",
5646 + .matches = {
5647 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
5648 + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IH"),
5649 + },
5650 + .driver_data = &quirk_asus_ga401i,
5651 + },
5652 + {
5653 + .callback = dmi_matched,
5654 + .ident = "ASUSTeK COMPUTER INC. GA401II",
5655 + .matches = {
5656 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
5657 + DMI_MATCH(DMI_PRODUCT_NAME, "GA401II"),
5658 + },
5659 + .driver_data = &quirk_asus_ga401i,
5660 + },
5661 + {
5662 + .callback = dmi_matched,
5663 + .ident = "ASUSTeK COMPUTER INC. GA401IU",
5664 + .matches = {
5665 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
5666 + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IU"),
5667 + },
5668 + .driver_data = &quirk_asus_ga401i,
5669 + },
5670 + {
5671 + .callback = dmi_matched,
5672 + .ident = "ASUSTeK COMPUTER INC. GA401IV",
5673 + .matches = {
5674 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
5675 + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IV"),
5676 + },
5677 + .driver_data = &quirk_asus_ga401i,
5678 + },
5679 + {
5680 + .callback = dmi_matched,
5681 + .ident = "ASUSTeK COMPUTER INC. GA401IVC",
5682 + .matches = {
5683 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
5684 + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IVC"),
5685 + },
5686 + .driver_data = &quirk_asus_ga401i,
5687 + },
5688 + {
5689 + .callback = dmi_matched,
5690 + .ident = "ASUSTeK COMPUTER INC. GA502II",
5691 + .matches = {
5692 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
5693 + DMI_MATCH(DMI_PRODUCT_NAME, "GA502II"),
5694 + },
5695 + .driver_data = &quirk_asus_ga502i,
5696 + },
5697 + {
5698 + .callback = dmi_matched,
5699 + .ident = "ASUSTeK COMPUTER INC. GA502IU",
5700 + .matches = {
5701 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
5702 + DMI_MATCH(DMI_PRODUCT_NAME, "GA502IU"),
5703 + },
5704 + .driver_data = &quirk_asus_ga502i,
5705 + },
5706 + {
5707 + .callback = dmi_matched,
5708 + .ident = "ASUSTeK COMPUTER INC. GA502IV",
5709 + .matches = {
5710 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
5711 + DMI_MATCH(DMI_PRODUCT_NAME, "GA502IV"),
5712 + },
5713 + .driver_data = &quirk_asus_ga502i,
5714 + },
5715 {},
5716 };
5717
5718 diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
5719 index 7a506c1d0113..ad1399dcb21f 100644
5720 --- a/drivers/platform/x86/intel-hid.c
5721 +++ b/drivers/platform/x86/intel-hid.c
5722 @@ -570,7 +570,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
5723 return AE_OK;
5724
5725 if (acpi_match_device_ids(dev, ids) == 0)
5726 - if (acpi_create_platform_device(dev, NULL))
5727 + if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
5728 dev_info(&dev->dev,
5729 "intel-hid: created platform device\n");
5730
5731 diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
5732 index cb2a80fdd8f4..3393ee95077f 100644
5733 --- a/drivers/platform/x86/intel-vbtn.c
5734 +++ b/drivers/platform/x86/intel-vbtn.c
5735 @@ -286,7 +286,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
5736 return AE_OK;
5737
5738 if (acpi_match_device_ids(dev, ids) == 0)
5739 - if (acpi_create_platform_device(dev, NULL))
5740 + if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
5741 dev_info(&dev->dev,
5742 "intel-vbtn: created platform device\n");
5743
5744 diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c
5745 index 5ca047b3f58f..23e7d6447ae9 100644
5746 --- a/drivers/power/supply/88pm860x_battery.c
5747 +++ b/drivers/power/supply/88pm860x_battery.c
5748 @@ -433,7 +433,7 @@ static void pm860x_init_battery(struct pm860x_battery_info *info)
5749 int ret;
5750 int data;
5751 int bat_remove;
5752 - int soc;
5753 + int soc = 0;
5754
5755 /* measure enable on GPADC1 */
5756 data = MEAS1_GP1;
5757 @@ -496,7 +496,9 @@ static void pm860x_init_battery(struct pm860x_battery_info *info)
5758 }
5759 mutex_unlock(&info->lock);
5760
5761 - calc_soc(info, OCV_MODE_ACTIVE, &soc);
5762 + ret = calc_soc(info, OCV_MODE_ACTIVE, &soc);
5763 + if (ret < 0)
5764 + goto out;
5765
5766 data = pm860x_reg_read(info->i2c, PM8607_POWER_UP_LOG);
5767 bat_remove = data & BAT_WU_LOG;
5768 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
5769 index 0011bdc15afb..a17aebe0aa7a 100644
5770 --- a/drivers/regulator/core.c
5771 +++ b/drivers/regulator/core.c
5772 @@ -4994,7 +4994,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
5773 struct regulator_dev *rdev;
5774 bool dangling_cfg_gpiod = false;
5775 bool dangling_of_gpiod = false;
5776 - bool reg_device_fail = false;
5777 struct device *dev;
5778 int ret, i;
5779
5780 @@ -5123,10 +5122,12 @@ regulator_register(const struct regulator_desc *regulator_desc,
5781 }
5782
5783 /* register with sysfs */
5784 + device_initialize(&rdev->dev);
5785 rdev->dev.class = &regulator_class;
5786 rdev->dev.parent = dev;
5787 dev_set_name(&rdev->dev, "regulator.%lu",
5788 (unsigned long) atomic_inc_return(&regulator_no));
5789 + dev_set_drvdata(&rdev->dev, rdev);
5790
5791 /* set regulator constraints */
5792 if (init_data)
5793 @@ -5177,12 +5178,9 @@ regulator_register(const struct regulator_desc *regulator_desc,
5794 !rdev->desc->fixed_uV)
5795 rdev->is_switch = true;
5796
5797 - dev_set_drvdata(&rdev->dev, rdev);
5798 - ret = device_register(&rdev->dev);
5799 - if (ret != 0) {
5800 - reg_device_fail = true;
5801 + ret = device_add(&rdev->dev);
5802 + if (ret != 0)
5803 goto unset_supplies;
5804 - }
5805
5806 rdev_init_debugfs(rdev);
5807
5808 @@ -5204,17 +5202,15 @@ unset_supplies:
5809 mutex_unlock(&regulator_list_mutex);
5810 wash:
5811 kfree(rdev->coupling_desc.coupled_rdevs);
5812 - kfree(rdev->constraints);
5813 mutex_lock(&regulator_list_mutex);
5814 regulator_ena_gpio_free(rdev);
5815 mutex_unlock(&regulator_list_mutex);
5816 + put_device(&rdev->dev);
5817 + rdev = NULL;
5818 clean:
5819 if (dangling_of_gpiod)
5820 gpiod_put(config->ena_gpiod);
5821 - if (reg_device_fail)
5822 - put_device(&rdev->dev);
5823 - else
5824 - kfree(rdev);
5825 + kfree(rdev);
5826 kfree(config);
5827 rinse:
5828 if (dangling_cfg_gpiod)
5829 diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
5830 index 8d4971645cf1..f7ae03fd36cb 100644
5831 --- a/drivers/s390/block/dasd_diag.c
5832 +++ b/drivers/s390/block/dasd_diag.c
5833 @@ -319,7 +319,7 @@ dasd_diag_check_device(struct dasd_device *device)
5834 struct dasd_diag_characteristics *rdc_data;
5835 struct vtoc_cms_label *label;
5836 struct dasd_block *block;
5837 - struct dasd_diag_bio bio;
5838 + struct dasd_diag_bio *bio;
5839 unsigned int sb, bsize;
5840 blocknum_t end_block;
5841 int rc;
5842 @@ -395,29 +395,36 @@ dasd_diag_check_device(struct dasd_device *device)
5843 rc = -ENOMEM;
5844 goto out;
5845 }
5846 + bio = kzalloc(sizeof(*bio), GFP_KERNEL);
5847 + if (bio == NULL) {
5848 + DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5849 + "No memory to allocate initialization bio");
5850 + rc = -ENOMEM;
5851 + goto out_label;
5852 + }
5853 rc = 0;
5854 end_block = 0;
5855 /* try all sizes - needed for ECKD devices */
5856 for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
5857 mdsk_init_io(device, bsize, 0, &end_block);
5858 - memset(&bio, 0, sizeof (struct dasd_diag_bio));
5859 - bio.type = MDSK_READ_REQ;
5860 - bio.block_number = private->pt_block + 1;
5861 - bio.buffer = label;
5862 + memset(bio, 0, sizeof(*bio));
5863 + bio->type = MDSK_READ_REQ;
5864 + bio->block_number = private->pt_block + 1;
5865 + bio->buffer = label;
5866 memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
5867 private->iob.dev_nr = rdc_data->dev_nr;
5868 private->iob.key = 0;
5869 private->iob.flags = 0; /* do synchronous io */
5870 private->iob.block_count = 1;
5871 private->iob.interrupt_params = 0;
5872 - private->iob.bio_list = &bio;
5873 + private->iob.bio_list = bio;
5874 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
5875 rc = dia250(&private->iob, RW_BIO);
5876 if (rc == 3) {
5877 pr_warn("%s: A 64-bit DIAG call failed\n",
5878 dev_name(&device->cdev->dev));
5879 rc = -EOPNOTSUPP;
5880 - goto out_label;
5881 + goto out_bio;
5882 }
5883 mdsk_term_io(device);
5884 if (rc == 0)
5885 @@ -427,7 +434,7 @@ dasd_diag_check_device(struct dasd_device *device)
5886 pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
5887 dev_name(&device->cdev->dev), rc);
5888 rc = -EIO;
5889 - goto out_label;
5890 + goto out_bio;
5891 }
5892 /* check for label block */
5893 if (memcmp(label->label_id, DASD_DIAG_CMS1,
5894 @@ -457,6 +464,8 @@ dasd_diag_check_device(struct dasd_device *device)
5895 (rc == 4) ? ", read-only device" : "");
5896 rc = 0;
5897 }
5898 +out_bio:
5899 + kfree(bio);
5900 out_label:
5901 free_page((long) label);
5902 out:
5903 diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
5904 index 92bace3b28fd..4ce28aa490cd 100644
5905 --- a/drivers/s390/net/qeth_l2_main.c
5906 +++ b/drivers/s390/net/qeth_l2_main.c
5907 @@ -1199,6 +1199,10 @@ static void qeth_bridge_state_change(struct qeth_card *card,
5908 int extrasize;
5909
5910 QETH_CARD_TEXT(card, 2, "brstchng");
5911 + if (qports->num_entries == 0) {
5912 + QETH_CARD_TEXT(card, 2, "BPempty");
5913 + return;
5914 + }
5915 if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
5916 QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
5917 return;
5918 diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
5919 index a1f3e9ee4e63..14e1d001253c 100644
5920 --- a/drivers/scsi/arm/cumana_2.c
5921 +++ b/drivers/scsi/arm/cumana_2.c
5922 @@ -450,7 +450,7 @@ static int cumanascsi2_probe(struct expansion_card *ec,
5923
5924 if (info->info.scsi.dma != NO_DMA)
5925 free_dma(info->info.scsi.dma);
5926 - free_irq(ec->irq, host);
5927 + free_irq(ec->irq, info);
5928
5929 out_release:
5930 fas216_release(host);
5931 diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
5932 index 134f040d58e2..f441ec8eb93d 100644
5933 --- a/drivers/scsi/arm/eesox.c
5934 +++ b/drivers/scsi/arm/eesox.c
5935 @@ -571,7 +571,7 @@ static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
5936
5937 if (info->info.scsi.dma != NO_DMA)
5938 free_dma(info->info.scsi.dma);
5939 - free_irq(ec->irq, host);
5940 + free_irq(ec->irq, info);
5941
5942 out_remove:
5943 fas216_remove(host);
5944 diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
5945 index c795537a671c..2dc0df005cb3 100644
5946 --- a/drivers/scsi/arm/powertec.c
5947 +++ b/drivers/scsi/arm/powertec.c
5948 @@ -378,7 +378,7 @@ static int powertecscsi_probe(struct expansion_card *ec,
5949
5950 if (info->info.scsi.dma != NO_DMA)
5951 free_dma(info->info.scsi.dma);
5952 - free_irq(ec->irq, host);
5953 + free_irq(ec->irq, info);
5954
5955 out_release:
5956 fas216_release(host);
5957 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
5958 index 0cbe6740e0c9..2c2966a297c7 100644
5959 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
5960 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
5961 @@ -5586,9 +5586,13 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5962 &instance->irq_context[i])) {
5963 dev_err(&instance->pdev->dev,
5964 "Failed to register IRQ for vector %d.\n", i);
5965 - for (j = 0; j < i; j++)
5966 + for (j = 0; j < i; j++) {
5967 + if (j < instance->low_latency_index_start)
5968 + irq_set_affinity_hint(
5969 + pci_irq_vector(pdev, j), NULL);
5970 free_irq(pci_irq_vector(pdev, j),
5971 &instance->irq_context[j]);
5972 + }
5973 /* Retry irq register for IO_APIC*/
5974 instance->msix_vectors = 0;
5975 instance->msix_load_balance = false;
5976 @@ -5626,6 +5630,9 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
5977
5978 if (instance->msix_vectors)
5979 for (i = 0; i < instance->msix_vectors; i++) {
5980 + if (i < instance->low_latency_index_start)
5981 + irq_set_affinity_hint(
5982 + pci_irq_vector(instance->pdev, i), NULL);
5983 free_irq(pci_irq_vector(instance->pdev, i),
5984 &instance->irq_context[i]);
5985 }
5986 diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
5987 index 74fb50644678..4dd50db90677 100644
5988 --- a/drivers/scsi/mesh.c
5989 +++ b/drivers/scsi/mesh.c
5990 @@ -1045,6 +1045,8 @@ static void handle_error(struct mesh_state *ms)
5991 while ((in_8(&mr->bus_status1) & BS1_RST) != 0)
5992 udelay(1);
5993 printk("done\n");
5994 + if (ms->dma_started)
5995 + halt_dma(ms);
5996 handle_reset(ms);
5997 /* request_q is empty, no point in mesh_start() */
5998 return;
5999 @@ -1357,7 +1359,8 @@ static void halt_dma(struct mesh_state *ms)
6000 ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
6001 ms->tgts[ms->conn_tgt].data_goes_out);
6002 }
6003 - scsi_dma_unmap(cmd);
6004 + if (cmd)
6005 + scsi_dma_unmap(cmd);
6006 ms->dma_started = 0;
6007 }
6008
6009 @@ -1712,6 +1715,9 @@ static int mesh_host_reset(struct scsi_cmnd *cmd)
6010
6011 spin_lock_irqsave(ms->host->host_lock, flags);
6012
6013 + if (ms->dma_started)
6014 + halt_dma(ms);
6015 +
6016 /* Reset the controller & dbdma channel */
6017 out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
6018 out_8(&mr->exception, 0xff); /* clear all exception bits */
6019 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
6020 index 32965ec76965..44181a2cbf18 100644
6021 --- a/drivers/scsi/scsi_debug.c
6022 +++ b/drivers/scsi/scsi_debug.c
6023 @@ -5296,6 +5296,12 @@ static int __init scsi_debug_init(void)
6024 pr_err("submit_queues must be 1 or more\n");
6025 return -EINVAL;
6026 }
6027 +
6028 + if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6029 + pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6030 + return -EINVAL;
6031 + }
6032 +
6033 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6034 GFP_KERNEL);
6035 if (sdebug_q_arr == NULL)
6036 diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
6037 index 0ba1f465db12..8924fcd9f5f5 100644
6038 --- a/drivers/soc/qcom/rpmh-rsc.c
6039 +++ b/drivers/soc/qcom/rpmh-rsc.c
6040 @@ -715,6 +715,7 @@ static struct platform_driver rpmh_driver = {
6041 .driver = {
6042 .name = "rpmh",
6043 .of_match_table = rpmh_drv_match,
6044 + .suppress_bind_attrs = true,
6045 },
6046 };
6047
6048 diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
6049 index 9dfe8b04e688..f9bc1705c0d4 100644
6050 --- a/drivers/spi/spi-lantiq-ssc.c
6051 +++ b/drivers/spi/spi-lantiq-ssc.c
6052 @@ -184,6 +184,7 @@ struct lantiq_ssc_spi {
6053 unsigned int tx_fifo_size;
6054 unsigned int rx_fifo_size;
6055 unsigned int base_cs;
6056 + unsigned int fdx_tx_level;
6057 };
6058
6059 static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
6060 @@ -481,6 +482,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi)
6061 u32 data;
6062 unsigned int tx_free = tx_fifo_free(spi);
6063
6064 + spi->fdx_tx_level = 0;
6065 while (spi->tx_todo && tx_free) {
6066 switch (spi->bits_per_word) {
6067 case 2 ... 8:
6068 @@ -509,6 +511,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi)
6069
6070 lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
6071 tx_free--;
6072 + spi->fdx_tx_level++;
6073 }
6074 }
6075
6076 @@ -520,6 +523,13 @@ static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
6077 u32 data;
6078 unsigned int rx_fill = rx_fifo_level(spi);
6079
6080 + /*
6081 + * Wait until all expected data to be shifted in.
6082 + * Otherwise, rx overrun may occur.
6083 + */
6084 + while (rx_fill != spi->fdx_tx_level)
6085 + rx_fill = rx_fifo_level(spi);
6086 +
6087 while (rx_fill) {
6088 data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
6089
6090 @@ -907,7 +917,7 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
6091 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
6092 SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
6093
6094 - spi->wq = alloc_ordered_workqueue(dev_name(dev), 0);
6095 + spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM);
6096 if (!spi->wq) {
6097 err = -ENOMEM;
6098 goto err_clk_put;
6099 diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
6100 index 2cc6d9951b52..008b64f4e031 100644
6101 --- a/drivers/spi/spi-rockchip.c
6102 +++ b/drivers/spi/spi-rockchip.c
6103 @@ -286,7 +286,7 @@ static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
6104 static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
6105 {
6106 u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
6107 - u32 rx_left = rs->rx_left - words;
6108 + u32 rx_left = (rs->rx_left > words) ? rs->rx_left - words : 0;
6109
6110 /* the hardware doesn't allow us to change fifo threshold
6111 * level while spi is enabled, so instead make sure to leave
6112 diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
6113 index ac6bf1fbbfe6..be503a0e6ef7 100644
6114 --- a/drivers/spi/spidev.c
6115 +++ b/drivers/spi/spidev.c
6116 @@ -223,6 +223,11 @@ static int spidev_message(struct spidev_data *spidev,
6117 for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
6118 n;
6119 n--, k_tmp++, u_tmp++) {
6120 + /* Ensure that also following allocations from rx_buf/tx_buf will meet
6121 + * DMA alignment requirements.
6122 + */
6123 + unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);
6124 +
6125 k_tmp->len = u_tmp->len;
6126
6127 total += k_tmp->len;
6128 @@ -238,17 +243,17 @@ static int spidev_message(struct spidev_data *spidev,
6129
6130 if (u_tmp->rx_buf) {
6131 /* this transfer needs space in RX bounce buffer */
6132 - rx_total += k_tmp->len;
6133 + rx_total += len_aligned;
6134 if (rx_total > bufsiz) {
6135 status = -EMSGSIZE;
6136 goto done;
6137 }
6138 k_tmp->rx_buf = rx_buf;
6139 - rx_buf += k_tmp->len;
6140 + rx_buf += len_aligned;
6141 }
6142 if (u_tmp->tx_buf) {
6143 /* this transfer needs space in TX bounce buffer */
6144 - tx_total += k_tmp->len;
6145 + tx_total += len_aligned;
6146 if (tx_total > bufsiz) {
6147 status = -EMSGSIZE;
6148 goto done;
6149 @@ -258,7 +263,7 @@ static int spidev_message(struct spidev_data *spidev,
6150 (uintptr_t) u_tmp->tx_buf,
6151 u_tmp->len))
6152 goto done;
6153 - tx_buf += k_tmp->len;
6154 + tx_buf += len_aligned;
6155 }
6156
6157 k_tmp->cs_change = !!u_tmp->cs_change;
6158 @@ -290,16 +295,16 @@ static int spidev_message(struct spidev_data *spidev,
6159 goto done;
6160
6161 /* copy any rx data out of bounce buffer */
6162 - rx_buf = spidev->rx_buffer;
6163 - for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
6164 + for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
6165 + n;
6166 + n--, k_tmp++, u_tmp++) {
6167 if (u_tmp->rx_buf) {
6168 if (copy_to_user((u8 __user *)
6169 - (uintptr_t) u_tmp->rx_buf, rx_buf,
6170 + (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
6171 u_tmp->len)) {
6172 status = -EFAULT;
6173 goto done;
6174 }
6175 - rx_buf += u_tmp->len;
6176 }
6177 }
6178 status = total;
6179 diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
6180 index 511136dce3a4..ddc09616248a 100644
6181 --- a/drivers/staging/rtl8192u/r8192U_core.c
6182 +++ b/drivers/staging/rtl8192u/r8192U_core.c
6183 @@ -2401,7 +2401,7 @@ static int rtl8192_read_eeprom_info(struct net_device *dev)
6184 ret = eprom_read(dev, (EEPROM_TX_PW_INDEX_CCK >> 1));
6185 if (ret < 0)
6186 return ret;
6187 - priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff) >> 8;
6188 + priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff00) >> 8;
6189 } else
6190 priv->EEPROMTxPowerLevelCCK = 0x10;
6191 RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK);
6192 diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
6193 index af6bf0736b52..eb76cc2cbfd8 100644
6194 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
6195 +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
6196 @@ -3257,6 +3257,7 @@ failed_platform_init:
6197
6198 static int vchiq_remove(struct platform_device *pdev)
6199 {
6200 + platform_device_unregister(bcm2835_audio);
6201 platform_device_unregister(bcm2835_camera);
6202 vchiq_debugfs_deinit();
6203 device_destroy(vchiq_class, vchiq_devid);
6204 diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
6205 index 85776db4bf34..2ce4b19f312a 100644
6206 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
6207 +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
6208 @@ -169,7 +169,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
6209
6210 data = ti_bandgap_get_sensor_data(bgp, id);
6211
6212 - if (!IS_ERR_OR_NULL(data))
6213 + if (IS_ERR_OR_NULL(data))
6214 data = ti_thermal_build_data(bgp, id);
6215
6216 if (!data)
6217 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
6218 index e0b77674869c..c96c50faccf7 100644
6219 --- a/drivers/usb/core/quirks.c
6220 +++ b/drivers/usb/core/quirks.c
6221 @@ -25,17 +25,23 @@ static unsigned int quirk_count;
6222
6223 static char quirks_param[128];
6224
6225 -static int quirks_param_set(const char *val, const struct kernel_param *kp)
6226 +static int quirks_param_set(const char *value, const struct kernel_param *kp)
6227 {
6228 - char *p, *field;
6229 + char *val, *p, *field;
6230 u16 vid, pid;
6231 u32 flags;
6232 size_t i;
6233 int err;
6234
6235 + val = kstrdup(value, GFP_KERNEL);
6236 + if (!val)
6237 + return -ENOMEM;
6238 +
6239 err = param_set_copystring(val, kp);
6240 - if (err)
6241 + if (err) {
6242 + kfree(val);
6243 return err;
6244 + }
6245
6246 mutex_lock(&quirk_mutex);
6247
6248 @@ -60,10 +66,11 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
6249 if (!quirk_list) {
6250 quirk_count = 0;
6251 mutex_unlock(&quirk_mutex);
6252 + kfree(val);
6253 return -ENOMEM;
6254 }
6255
6256 - for (i = 0, p = (char *)val; p && *p;) {
6257 + for (i = 0, p = val; p && *p;) {
6258 /* Each entry consists of VID:PID:flags */
6259 field = strsep(&p, ":");
6260 if (!field)
6261 @@ -144,6 +151,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
6262
6263 unlock:
6264 mutex_unlock(&quirk_mutex);
6265 + kfree(val);
6266
6267 return 0;
6268 }
6269 diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
6270 index 4e14c4f7fed7..34bb6124f1e2 100644
6271 --- a/drivers/usb/dwc2/platform.c
6272 +++ b/drivers/usb/dwc2/platform.c
6273 @@ -514,6 +514,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
6274 if (hsotg->gadget_enabled) {
6275 retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
6276 if (retval) {
6277 + hsotg->gadget.udc = NULL;
6278 dwc2_hsotg_remove(hsotg);
6279 goto error;
6280 }
6281 @@ -522,7 +523,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
6282 return 0;
6283
6284 error:
6285 - dwc2_lowlevel_hw_disable(hsotg);
6286 + if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL)
6287 + dwc2_lowlevel_hw_disable(hsotg);
6288 return retval;
6289 }
6290
6291 diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
6292 index db2d4980cb35..3633df6d7610 100644
6293 --- a/drivers/usb/gadget/function/f_uac2.c
6294 +++ b/drivers/usb/gadget/function/f_uac2.c
6295 @@ -215,10 +215,7 @@ static struct uac2_ac_header_descriptor ac_hdr_desc = {
6296 .bDescriptorSubtype = UAC_MS_HEADER,
6297 .bcdADC = cpu_to_le16(0x200),
6298 .bCategory = UAC2_FUNCTION_IO_BOX,
6299 - .wTotalLength = cpu_to_le16(sizeof in_clk_src_desc
6300 - + sizeof out_clk_src_desc + sizeof usb_out_it_desc
6301 - + sizeof io_in_it_desc + sizeof usb_in_ot_desc
6302 - + sizeof io_out_ot_desc),
6303 + /* .wTotalLength = DYNAMIC */
6304 .bmControls = 0,
6305 };
6306
6307 @@ -501,7 +498,7 @@ static void setup_descriptor(struct f_uac2_opts *opts)
6308 as_in_hdr_desc.bTerminalLink = usb_in_ot_desc.bTerminalID;
6309
6310 iad_desc.bInterfaceCount = 1;
6311 - ac_hdr_desc.wTotalLength = 0;
6312 + ac_hdr_desc.wTotalLength = cpu_to_le16(sizeof(ac_hdr_desc));
6313
6314 if (EPIN_EN(opts)) {
6315 u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength);
6316 diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
6317 index cc4a16e253ac..3d33499db50b 100644
6318 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c
6319 +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
6320 @@ -282,6 +282,7 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit)
6321 * in that case reinit is passed as 1
6322 */
6323 if (reinit) {
6324 + int i;
6325 /* Enable interrupts */
6326 temp = bdc_readl(bdc->regs, BDC_BDCSC);
6327 temp |= BDC_GIE;
6328 @@ -291,6 +292,9 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit)
6329 /* Initialize SRR to 0 */
6330 memset(bdc->srr.sr_bds, 0,
6331 NUM_SR_ENTRIES * sizeof(struct bdc_bd));
6332 + /* clear ep flags to avoid post disconnect stops/deconfigs */
6333 + for (i = 1; i < bdc->num_eps; ++i)
6334 + bdc->bdc_ep_array[i]->flags = 0;
6335 } else {
6336 /* One time initiaization only */
6337 /* Enable status report function pointers */
6338 @@ -601,9 +605,14 @@ static int bdc_remove(struct platform_device *pdev)
6339 static int bdc_suspend(struct device *dev)
6340 {
6341 struct bdc *bdc = dev_get_drvdata(dev);
6342 + int ret;
6343
6344 - clk_disable_unprepare(bdc->clk);
6345 - return 0;
6346 + /* Halt the controller */
6347 + ret = bdc_stop(bdc);
6348 + if (!ret)
6349 + clk_disable_unprepare(bdc->clk);
6350 +
6351 + return ret;
6352 }
6353
6354 static int bdc_resume(struct device *dev)
6355 diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
6356 index d49c6dc1082d..9ddc0b4e92c9 100644
6357 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
6358 +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
6359 @@ -615,7 +615,6 @@ int bdc_ep_enable(struct bdc_ep *ep)
6360 }
6361 bdc_dbg_bd_list(bdc, ep);
6362 /* only for ep0: config ep is called for ep0 from connect event */
6363 - ep->flags |= BDC_EP_ENABLED;
6364 if (ep->ep_num == 1)
6365 return ret;
6366
6367 @@ -759,10 +758,13 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
6368 __func__, ep->name, start_bdi, end_bdi);
6369 dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
6370 ep, (void *)ep->usb_ep.desc);
6371 - /* Stop the ep to see where the HW is ? */
6372 - ret = bdc_stop_ep(bdc, ep->ep_num);
6373 - /* if there is an issue with stopping ep, then no need to go further */
6374 - if (ret)
6375 + /* if still connected, stop the ep to see where the HW is ? */
6376 + if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) {
6377 + ret = bdc_stop_ep(bdc, ep->ep_num);
6378 + /* if there is an issue, then no need to go further */
6379 + if (ret)
6380 + return 0;
6381 + } else
6382 return 0;
6383
6384 /*
6385 @@ -1911,7 +1913,9 @@ static int bdc_gadget_ep_disable(struct usb_ep *_ep)
6386 __func__, ep->name, ep->flags);
6387
6388 if (!(ep->flags & BDC_EP_ENABLED)) {
6389 - dev_warn(bdc->dev, "%s is already disabled\n", ep->name);
6390 + if (bdc->gadget.speed != USB_SPEED_UNKNOWN)
6391 + dev_warn(bdc->dev, "%s is already disabled\n",
6392 + ep->name);
6393 return 0;
6394 }
6395 spin_lock_irqsave(&bdc->lock, flags);
6396 diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
6397 index 51efee21915f..7c616d7641c6 100644
6398 --- a/drivers/usb/gadget/udc/net2280.c
6399 +++ b/drivers/usb/gadget/udc/net2280.c
6400 @@ -3782,8 +3782,10 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6401 return 0;
6402
6403 done:
6404 - if (dev)
6405 + if (dev) {
6406 net2280_remove(pdev);
6407 + kfree(dev);
6408 + }
6409 return retval;
6410 }
6411
6412 diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
6413 index 9dd02160cca9..e3780d4d6514 100644
6414 --- a/drivers/usb/mtu3/mtu3_core.c
6415 +++ b/drivers/usb/mtu3/mtu3_core.c
6416 @@ -131,8 +131,12 @@ static void mtu3_device_disable(struct mtu3 *mtu)
6417 mtu3_setbits(ibase, SSUSB_U2_CTRL(0),
6418 SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN);
6419
6420 - if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
6421 + if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
6422 mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
6423 + if (mtu->is_u3_ip)
6424 + mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
6425 + SSUSB_U3_PORT_DUAL_MODE);
6426 + }
6427
6428 mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
6429 }
6430 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
6431 index f5143eedbc48..a90801ef0055 100644
6432 --- a/drivers/usb/serial/cp210x.c
6433 +++ b/drivers/usb/serial/cp210x.c
6434 @@ -272,6 +272,8 @@ static struct usb_serial_driver cp210x_device = {
6435 .break_ctl = cp210x_break_ctl,
6436 .set_termios = cp210x_set_termios,
6437 .tx_empty = cp210x_tx_empty,
6438 + .throttle = usb_serial_generic_throttle,
6439 + .unthrottle = usb_serial_generic_unthrottle,
6440 .tiocmget = cp210x_tiocmget,
6441 .tiocmset = cp210x_tiocmset,
6442 .attach = cp210x_attach,
6443 @@ -915,6 +917,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
6444 u32 baud;
6445 u16 bits;
6446 u32 ctl_hs;
6447 + u32 flow_repl;
6448
6449 cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud);
6450
6451 @@ -1015,6 +1018,22 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
6452 ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake);
6453 if (ctl_hs & CP210X_SERIAL_CTS_HANDSHAKE) {
6454 dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__);
6455 + /*
6456 + * When the port is closed, the CP210x hardware disables
6457 + * auto-RTS and RTS is deasserted but it leaves auto-CTS when
6458 + * in hardware flow control mode. When re-opening the port, if
6459 + * auto-CTS is enabled on the cp210x, then auto-RTS must be
6460 + * re-enabled in the driver.
6461 + */
6462 + flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace);
6463 + flow_repl &= ~CP210X_SERIAL_RTS_MASK;
6464 + flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL);
6465 + flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl);
6466 + cp210x_write_reg_block(port,
6467 + CP210X_SET_FLOW,
6468 + &flow_ctl,
6469 + sizeof(flow_ctl));
6470 +
6471 cflag |= CRTSCTS;
6472 } else {
6473 dev_dbg(dev, "%s - flow control = NONE\n", __func__);
6474 diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
6475 index b8dfeb4fb2ed..ffbb2a8901b2 100644
6476 --- a/drivers/usb/serial/iuu_phoenix.c
6477 +++ b/drivers/usb/serial/iuu_phoenix.c
6478 @@ -353,10 +353,11 @@ static void iuu_led_activity_on(struct urb *urb)
6479 struct usb_serial_port *port = urb->context;
6480 int result;
6481 char *buf_ptr = port->write_urb->transfer_buffer;
6482 - *buf_ptr++ = IUU_SET_LED;
6483 +
6484 if (xmas) {
6485 - get_random_bytes(buf_ptr, 6);
6486 - *(buf_ptr+7) = 1;
6487 + buf_ptr[0] = IUU_SET_LED;
6488 + get_random_bytes(buf_ptr + 1, 6);
6489 + buf_ptr[7] = 1;
6490 } else {
6491 iuu_rgbf_fill_buffer(buf_ptr, 255, 255, 0, 0, 0, 0, 255);
6492 }
6493 @@ -374,13 +375,14 @@ static void iuu_led_activity_off(struct urb *urb)
6494 struct usb_serial_port *port = urb->context;
6495 int result;
6496 char *buf_ptr = port->write_urb->transfer_buffer;
6497 +
6498 if (xmas) {
6499 iuu_rxcmd(urb);
6500 return;
6501 - } else {
6502 - *buf_ptr++ = IUU_SET_LED;
6503 - iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255);
6504 }
6505 +
6506 + iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255);
6507 +
6508 usb_fill_bulk_urb(port->write_urb, port->serial->dev,
6509 usb_sndbulkpipe(port->serial->dev,
6510 port->bulk_out_endpointAddress),
6511 diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
6512 index 00dddf6e08b0..2d2ee17052e8 100644
6513 --- a/drivers/video/console/newport_con.c
6514 +++ b/drivers/video/console/newport_con.c
6515 @@ -32,6 +32,8 @@
6516 #include <linux/linux_logo.h>
6517 #include <linux/font.h>
6518
6519 +#define NEWPORT_LEN 0x10000
6520 +
6521 #define FONT_DATA ((unsigned char *)font_vga_8x16.data)
6522
6523 /* borrowed from fbcon.c */
6524 @@ -43,6 +45,7 @@
6525 static unsigned char *font_data[MAX_NR_CONSOLES];
6526
6527 static struct newport_regs *npregs;
6528 +static unsigned long newport_addr;
6529
6530 static int logo_active;
6531 static int topscan;
6532 @@ -702,7 +705,6 @@ const struct consw newport_con = {
6533 static int newport_probe(struct gio_device *dev,
6534 const struct gio_device_id *id)
6535 {
6536 - unsigned long newport_addr;
6537 int err;
6538
6539 if (!dev->resource.start)
6540 @@ -712,7 +714,7 @@ static int newport_probe(struct gio_device *dev,
6541 return -EBUSY; /* we only support one Newport as console */
6542
6543 newport_addr = dev->resource.start + 0xF0000;
6544 - if (!request_mem_region(newport_addr, 0x10000, "Newport"))
6545 + if (!request_mem_region(newport_addr, NEWPORT_LEN, "Newport"))
6546 return -ENODEV;
6547
6548 npregs = (struct newport_regs *)/* ioremap cannot fail */
6549 @@ -720,6 +722,11 @@ static int newport_probe(struct gio_device *dev,
6550 console_lock();
6551 err = do_take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1);
6552 console_unlock();
6553 +
6554 + if (err) {
6555 + iounmap((void *)npregs);
6556 + release_mem_region(newport_addr, NEWPORT_LEN);
6557 + }
6558 return err;
6559 }
6560
6561 @@ -727,6 +734,7 @@ static void newport_remove(struct gio_device *dev)
6562 {
6563 give_up_console(&newport_con);
6564 iounmap((void *)npregs);
6565 + release_mem_region(newport_addr, NEWPORT_LEN);
6566 }
6567
6568 static struct gio_device_id newport_ids[] = {
6569 diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
6570 index b770946a0920..76464000933d 100644
6571 --- a/drivers/video/fbdev/neofb.c
6572 +++ b/drivers/video/fbdev/neofb.c
6573 @@ -1820,6 +1820,7 @@ static int neo_scan_monitor(struct fb_info *info)
6574 #else
6575 printk(KERN_ERR
6576 "neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n");
6577 + kfree(info->monspecs.modedb);
6578 return -1;
6579 #endif
6580 default:
6581 diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
6582 index f70c9f79622e..27635926cea3 100644
6583 --- a/drivers/video/fbdev/pxafb.c
6584 +++ b/drivers/video/fbdev/pxafb.c
6585 @@ -2425,8 +2425,8 @@ static int pxafb_remove(struct platform_device *dev)
6586
6587 free_pages_exact(fbi->video_mem, fbi->video_mem_size);
6588
6589 - dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff,
6590 - fbi->dma_buff_phys);
6591 + dma_free_coherent(&dev->dev, fbi->dma_buff_size, fbi->dma_buff,
6592 + fbi->dma_buff_phys);
6593
6594 return 0;
6595 }
6596 diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
6597 index 512789f5f884..d5d22d9c0f56 100644
6598 --- a/drivers/video/fbdev/savage/savagefb_driver.c
6599 +++ b/drivers/video/fbdev/savage/savagefb_driver.c
6600 @@ -2158,6 +2158,8 @@ static int savage_init_fb_info(struct fb_info *info, struct pci_dev *dev,
6601 info->flags |= FBINFO_HWACCEL_COPYAREA |
6602 FBINFO_HWACCEL_FILLRECT |
6603 FBINFO_HWACCEL_IMAGEBLIT;
6604 + else
6605 + kfree(info->pixmap.addr);
6606 }
6607 #endif
6608 return err;
6609 diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
6610 index 207d0add684b..246681414577 100644
6611 --- a/drivers/video/fbdev/sm712fb.c
6612 +++ b/drivers/video/fbdev/sm712fb.c
6613 @@ -1429,6 +1429,8 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
6614 static void smtc_unmap_smem(struct smtcfb_info *sfb)
6615 {
6616 if (sfb && sfb->fb->screen_base) {
6617 + if (sfb->chip_id == 0x720)
6618 + sfb->fb->screen_base -= 0x00200000;
6619 iounmap(sfb->fb->screen_base);
6620 sfb->fb->screen_base = NULL;
6621 }
6622 diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
6623 index bed90d612e48..ebb05517b6aa 100644
6624 --- a/drivers/xen/balloon.c
6625 +++ b/drivers/xen/balloon.c
6626 @@ -570,11 +570,13 @@ static int add_ballooned_pages(int nr_pages)
6627 if (xen_hotplug_unpopulated) {
6628 st = reserve_additional_memory();
6629 if (st != BP_ECANCELED) {
6630 + int rc;
6631 +
6632 mutex_unlock(&balloon_mutex);
6633 - wait_event(balloon_wq,
6634 + rc = wait_event_interruptible(balloon_wq,
6635 !list_empty(&ballooned_pages));
6636 mutex_lock(&balloon_mutex);
6637 - return 0;
6638 + return rc ? -ENOMEM : 0;
6639 }
6640 }
6641
6642 @@ -632,6 +634,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
6643 out_undo:
6644 mutex_unlock(&balloon_mutex);
6645 free_xenballooned_pages(pgno, pages);
6646 + /*
6647 + * NB: free_xenballooned_pages will only subtract pgno pages, but since
6648 + * target_unpopulated is incremented with nr_pages at the start we need
6649 + * to remove the remaining ones also, or accounting will be screwed.
6650 + */
6651 + balloon_stats.target_unpopulated -= nr_pages - pgno;
6652 return ret;
6653 }
6654 EXPORT_SYMBOL(alloc_xenballooned_pages);
6655 diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
6656 index 2c4f324f8626..da799929087d 100644
6657 --- a/drivers/xen/gntdev-dmabuf.c
6658 +++ b/drivers/xen/gntdev-dmabuf.c
6659 @@ -641,6 +641,14 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
6660 goto fail_detach;
6661 }
6662
6663 + /* Check that we have zero offset. */
6664 + if (sgt->sgl->offset) {
6665 + ret = ERR_PTR(-EINVAL);
6666 + pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
6667 + sgt->sgl->offset);
6668 + goto fail_unmap;
6669 + }
6670 +
6671 /* Check number of pages that imported buffer has. */
6672 if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
6673 ret = ERR_PTR(-EINVAL);
6674 diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
6675 index 15a99f9c7253..39def020a074 100644
6676 --- a/fs/9p/v9fs.c
6677 +++ b/fs/9p/v9fs.c
6678 @@ -500,10 +500,9 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
6679 }
6680
6681 #ifdef CONFIG_9P_FSCACHE
6682 - if (v9ses->fscache) {
6683 + if (v9ses->fscache)
6684 v9fs_cache_session_put_cookie(v9ses);
6685 - kfree(v9ses->cachetag);
6686 - }
6687 + kfree(v9ses->cachetag);
6688 #endif
6689 kfree(v9ses->uname);
6690 kfree(v9ses->aname);
6691 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
6692 index 1a089a642422..99dcb3897659 100644
6693 --- a/fs/btrfs/extent_io.c
6694 +++ b/fs/btrfs/extent_io.c
6695 @@ -4481,6 +4481,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
6696
6697 /* once for us */
6698 free_extent_map(em);
6699 +
6700 + cond_resched(); /* Allow large-extent preemption. */
6701 }
6702 }
6703 return try_release_extent_state(tree, page, mask);
6704 diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
6705 index 7889a59a57fa..6f484f0d347e 100644
6706 --- a/fs/btrfs/space-info.c
6707 +++ b/fs/btrfs/space-info.c
6708 @@ -304,8 +304,8 @@ again:
6709 cache->key.objectid, cache->key.offset,
6710 btrfs_block_group_used(&cache->item), cache->pinned,
6711 cache->reserved, cache->ro ? "[readonly]" : "");
6712 - btrfs_dump_free_space(cache, bytes);
6713 spin_unlock(&cache->lock);
6714 + btrfs_dump_free_space(cache, bytes);
6715 }
6716 if (++index < BTRFS_NR_RAID_TYPES)
6717 goto again;
6718 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
6719 index afb8340918b8..c689359ca532 100644
6720 --- a/fs/dlm/lockspace.c
6721 +++ b/fs/dlm/lockspace.c
6722 @@ -632,6 +632,9 @@ static int new_lockspace(const char *name, const char *cluster,
6723 wait_event(ls->ls_recover_lock_wait,
6724 test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
6725
6726 + /* let kobject handle freeing of ls if there's an error */
6727 + do_unreg = 1;
6728 +
6729 ls->ls_kobj.kset = dlm_kset;
6730 error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
6731 "%s", ls->ls_name);
6732 @@ -639,9 +642,6 @@ static int new_lockspace(const char *name, const char *cluster,
6733 goto out_recoverd;
6734 kobject_uevent(&ls->ls_kobj, KOBJ_ADD);
6735
6736 - /* let kobject handle freeing of ls if there's an error */
6737 - do_unreg = 1;
6738 -
6739 /* This uevent triggers dlm_controld in userspace to add us to the
6740 group of nodes that are members of this lockspace (managed by the
6741 cluster infrastructure.) Once it's done that, it tells us who the
6742 diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
6743 index 3350ab65d892..b36b414cd7a7 100644
6744 --- a/fs/erofs/inode.c
6745 +++ b/fs/erofs/inode.c
6746 @@ -8,31 +8,80 @@
6747
6748 #include <trace/events/erofs.h>
6749
6750 -/* no locking */
6751 -static int erofs_read_inode(struct inode *inode, void *data)
6752 +/*
6753 + * if inode is successfully read, return its inode page (or sometimes
6754 + * the inode payload page if it's an extended inode) in order to fill
6755 + * inline data if possible.
6756 + */
6757 +static struct page *erofs_read_inode(struct inode *inode,
6758 + unsigned int *ofs)
6759 {
6760 + struct super_block *sb = inode->i_sb;
6761 + struct erofs_sb_info *sbi = EROFS_SB(sb);
6762 struct erofs_inode *vi = EROFS_I(inode);
6763 - struct erofs_inode_compact *dic = data;
6764 - struct erofs_inode_extended *die;
6765 + const erofs_off_t inode_loc = iloc(sbi, vi->nid);
6766 +
6767 + erofs_blk_t blkaddr, nblks = 0;
6768 + struct page *page;
6769 + struct erofs_inode_compact *dic;
6770 + struct erofs_inode_extended *die, *copied = NULL;
6771 + unsigned int ifmt;
6772 + int err;
6773
6774 - const unsigned int ifmt = le16_to_cpu(dic->i_format);
6775 - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
6776 - erofs_blk_t nblks = 0;
6777 + blkaddr = erofs_blknr(inode_loc);
6778 + *ofs = erofs_blkoff(inode_loc);
6779
6780 - vi->datalayout = erofs_inode_datalayout(ifmt);
6781 + erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
6782 + __func__, vi->nid, *ofs, blkaddr);
6783 +
6784 + page = erofs_get_meta_page(sb, blkaddr);
6785 + if (IS_ERR(page)) {
6786 + erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
6787 + vi->nid, PTR_ERR(page));
6788 + return page;
6789 + }
6790
6791 + dic = page_address(page) + *ofs;
6792 + ifmt = le16_to_cpu(dic->i_format);
6793 +
6794 + vi->datalayout = erofs_inode_datalayout(ifmt);
6795 if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
6796 erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
6797 vi->datalayout, vi->nid);
6798 - DBG_BUGON(1);
6799 - return -EOPNOTSUPP;
6800 + err = -EOPNOTSUPP;
6801 + goto err_out;
6802 }
6803
6804 switch (erofs_inode_version(ifmt)) {
6805 case EROFS_INODE_LAYOUT_EXTENDED:
6806 - die = data;
6807 -
6808 vi->inode_isize = sizeof(struct erofs_inode_extended);
6809 + /* check if the inode acrosses page boundary */
6810 + if (*ofs + vi->inode_isize <= PAGE_SIZE) {
6811 + *ofs += vi->inode_isize;
6812 + die = (struct erofs_inode_extended *)dic;
6813 + } else {
6814 + const unsigned int gotten = PAGE_SIZE - *ofs;
6815 +
6816 + copied = kmalloc(vi->inode_isize, GFP_NOFS);
6817 + if (!copied) {
6818 + err = -ENOMEM;
6819 + goto err_out;
6820 + }
6821 + memcpy(copied, dic, gotten);
6822 + unlock_page(page);
6823 + put_page(page);
6824 +
6825 + page = erofs_get_meta_page(sb, blkaddr + 1);
6826 + if (IS_ERR(page)) {
6827 + erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld",
6828 + vi->nid, PTR_ERR(page));
6829 + kfree(copied);
6830 + return page;
6831 + }
6832 + *ofs = vi->inode_isize - gotten;
6833 + memcpy((u8 *)copied + gotten, page_address(page), *ofs);
6834 + die = copied;
6835 + }
6836 vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
6837
6838 inode->i_mode = le16_to_cpu(die->i_mode);
6839 @@ -69,9 +118,12 @@ static int erofs_read_inode(struct inode *inode, void *data)
6840 /* total blocks for compressed files */
6841 if (erofs_inode_is_data_compressed(vi->datalayout))
6842 nblks = le32_to_cpu(die->i_u.compressed_blocks);
6843 +
6844 + kfree(copied);
6845 break;
6846 case EROFS_INODE_LAYOUT_COMPACT:
6847 vi->inode_isize = sizeof(struct erofs_inode_compact);
6848 + *ofs += vi->inode_isize;
6849 vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
6850
6851 inode->i_mode = le16_to_cpu(dic->i_mode);
6852 @@ -111,8 +163,8 @@ static int erofs_read_inode(struct inode *inode, void *data)
6853 erofs_err(inode->i_sb,
6854 "unsupported on-disk inode version %u of nid %llu",
6855 erofs_inode_version(ifmt), vi->nid);
6856 - DBG_BUGON(1);
6857 - return -EOPNOTSUPP;
6858 + err = -EOPNOTSUPP;
6859 + goto err_out;
6860 }
6861
6862 if (!nblks)
6863 @@ -120,13 +172,18 @@ static int erofs_read_inode(struct inode *inode, void *data)
6864 inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
6865 else
6866 inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
6867 - return 0;
6868 + return page;
6869
6870 bogusimode:
6871 erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
6872 inode->i_mode, vi->nid);
6873 + err = -EFSCORRUPTED;
6874 +err_out:
6875 DBG_BUGON(1);
6876 - return -EFSCORRUPTED;
6877 + kfree(copied);
6878 + unlock_page(page);
6879 + put_page(page);
6880 + return ERR_PTR(err);
6881 }
6882
6883 static int erofs_fill_symlink(struct inode *inode, void *data,
6884 @@ -146,7 +203,7 @@ static int erofs_fill_symlink(struct inode *inode, void *data,
6885 if (!lnk)
6886 return -ENOMEM;
6887
6888 - m_pofs += vi->inode_isize + vi->xattr_isize;
6889 + m_pofs += vi->xattr_isize;
6890 /* inline symlink data shouldn't cross page boundary as well */
6891 if (m_pofs + inode->i_size > PAGE_SIZE) {
6892 kfree(lnk);
6893 @@ -167,37 +224,17 @@ static int erofs_fill_symlink(struct inode *inode, void *data,
6894
6895 static int erofs_fill_inode(struct inode *inode, int isdir)
6896 {
6897 - struct super_block *sb = inode->i_sb;
6898 struct erofs_inode *vi = EROFS_I(inode);
6899 struct page *page;
6900 - void *data;
6901 - int err;
6902 - erofs_blk_t blkaddr;
6903 unsigned int ofs;
6904 - erofs_off_t inode_loc;
6905 + int err = 0;
6906
6907 trace_erofs_fill_inode(inode, isdir);
6908 - inode_loc = iloc(EROFS_SB(sb), vi->nid);
6909 - blkaddr = erofs_blknr(inode_loc);
6910 - ofs = erofs_blkoff(inode_loc);
6911 -
6912 - erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
6913 - __func__, vi->nid, ofs, blkaddr);
6914
6915 - page = erofs_get_meta_page(sb, blkaddr);
6916 -
6917 - if (IS_ERR(page)) {
6918 - erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
6919 - vi->nid, PTR_ERR(page));
6920 + /* read inode base data from disk */
6921 + page = erofs_read_inode(inode, &ofs);
6922 + if (IS_ERR(page))
6923 return PTR_ERR(page);
6924 - }
6925 -
6926 - DBG_BUGON(!PageUptodate(page));
6927 - data = page_address(page);
6928 -
6929 - err = erofs_read_inode(inode, data + ofs);
6930 - if (err)
6931 - goto out_unlock;
6932
6933 /* setup the new inode */
6934 switch (inode->i_mode & S_IFMT) {
6935 @@ -210,7 +247,7 @@ static int erofs_fill_inode(struct inode *inode, int isdir)
6936 inode->i_fop = &erofs_dir_fops;
6937 break;
6938 case S_IFLNK:
6939 - err = erofs_fill_symlink(inode, data, ofs);
6940 + err = erofs_fill_symlink(inode, page_address(page), ofs);
6941 if (err)
6942 goto out_unlock;
6943 inode_nohighmem(inode);
6944 diff --git a/fs/io_uring.c b/fs/io_uring.c
6945 index be3d595a607f..fada14ee1cdc 100644
6946 --- a/fs/io_uring.c
6947 +++ b/fs/io_uring.c
6948 @@ -1433,8 +1433,10 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
6949
6950 if (file->f_op->read_iter)
6951 ret2 = call_read_iter(file, kiocb, &iter);
6952 - else
6953 + else if (req->file->f_op->read)
6954 ret2 = loop_rw_iter(READ, file, kiocb, &iter);
6955 + else
6956 + ret2 = -EINVAL;
6957
6958 /*
6959 * In case of a short read, punt to async. This can happen
6960 @@ -1524,8 +1526,10 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
6961
6962 if (file->f_op->write_iter)
6963 ret2 = call_write_iter(file, kiocb, &iter);
6964 - else
6965 + else if (req->file->f_op->write)
6966 ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
6967 + else
6968 + ret2 = -EINVAL;
6969
6970 if (!force_nonblock)
6971 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
6972 @@ -2559,6 +2563,7 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
6973 goto err;
6974 }
6975
6976 + memcpy(&req->submit, s, sizeof(*s));
6977 ret = io_req_set_file(ctx, s, state, req);
6978 if (unlikely(ret)) {
6979 err_req:
6980 @@ -3390,6 +3395,9 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
6981 return SIZE_MAX;
6982 #endif
6983
6984 + if (sq_offset)
6985 + *sq_offset = off;
6986 +
6987 sq_array_size = array_size(sizeof(u32), sq_entries);
6988 if (sq_array_size == SIZE_MAX)
6989 return SIZE_MAX;
6990 @@ -3397,9 +3405,6 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
6991 if (check_add_overflow(off, sq_array_size, &off))
6992 return SIZE_MAX;
6993
6994 - if (sq_offset)
6995 - *sq_offset = off;
6996 -
6997 return off;
6998 }
6999
7000 @@ -3856,6 +3861,10 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
7001 struct io_rings *rings;
7002 size_t size, sq_array_offset;
7003
7004 + /* make sure these are sane, as we already accounted them */
7005 + ctx->sq_entries = p->sq_entries;
7006 + ctx->cq_entries = p->cq_entries;
7007 +
7008 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
7009 if (size == SIZE_MAX)
7010 return -EOVERFLOW;
7011 @@ -3872,8 +3881,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
7012 rings->cq_ring_entries = p->cq_entries;
7013 ctx->sq_mask = rings->sq_ring_mask;
7014 ctx->cq_mask = rings->cq_ring_mask;
7015 - ctx->sq_entries = rings->sq_ring_entries;
7016 - ctx->cq_entries = rings->cq_ring_entries;
7017
7018 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
7019 if (size == SIZE_MAX) {
7020 diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
7021 index e8c792b49616..c35bbaa19486 100644
7022 --- a/fs/kernfs/file.c
7023 +++ b/fs/kernfs/file.c
7024 @@ -912,7 +912,7 @@ repeat:
7025 }
7026
7027 fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
7028 - &name, 0);
7029 + NULL, 0);
7030 iput(inode);
7031 }
7032
7033 diff --git a/fs/minix/inode.c b/fs/minix/inode.c
7034 index 7cb5fd38eb14..0dd929346f3f 100644
7035 --- a/fs/minix/inode.c
7036 +++ b/fs/minix/inode.c
7037 @@ -150,6 +150,23 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
7038 return 0;
7039 }
7040
7041 +static bool minix_check_superblock(struct minix_sb_info *sbi)
7042 +{
7043 + if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
7044 + return false;
7045 +
7046 + /*
7047 + * s_max_size must not exceed the block mapping limitation. This check
7048 + * is only needed for V1 filesystems, since V2/V3 support an extra level
7049 + * of indirect blocks which places the limit well above U32_MAX.
7050 + */
7051 + if (sbi->s_version == MINIX_V1 &&
7052 + sbi->s_max_size > (7 + 512 + 512*512) * BLOCK_SIZE)
7053 + return false;
7054 +
7055 + return true;
7056 +}
7057 +
7058 static int minix_fill_super(struct super_block *s, void *data, int silent)
7059 {
7060 struct buffer_head *bh;
7061 @@ -228,11 +245,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
7062 } else
7063 goto out_no_fs;
7064
7065 + if (!minix_check_superblock(sbi))
7066 + goto out_illegal_sb;
7067 +
7068 /*
7069 * Allocate the buffer map to keep the superblock small.
7070 */
7071 - if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
7072 - goto out_illegal_sb;
7073 i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
7074 map = kzalloc(i, GFP_KERNEL);
7075 if (!map)
7076 @@ -468,6 +486,13 @@ static struct inode *V1_minix_iget(struct inode *inode)
7077 iget_failed(inode);
7078 return ERR_PTR(-EIO);
7079 }
7080 + if (raw_inode->i_nlinks == 0) {
7081 + printk("MINIX-fs: deleted inode referenced: %lu\n",
7082 + inode->i_ino);
7083 + brelse(bh);
7084 + iget_failed(inode);
7085 + return ERR_PTR(-ESTALE);
7086 + }
7087 inode->i_mode = raw_inode->i_mode;
7088 i_uid_write(inode, raw_inode->i_uid);
7089 i_gid_write(inode, raw_inode->i_gid);
7090 @@ -501,6 +526,13 @@ static struct inode *V2_minix_iget(struct inode *inode)
7091 iget_failed(inode);
7092 return ERR_PTR(-EIO);
7093 }
7094 + if (raw_inode->i_nlinks == 0) {
7095 + printk("MINIX-fs: deleted inode referenced: %lu\n",
7096 + inode->i_ino);
7097 + brelse(bh);
7098 + iget_failed(inode);
7099 + return ERR_PTR(-ESTALE);
7100 + }
7101 inode->i_mode = raw_inode->i_mode;
7102 i_uid_write(inode, raw_inode->i_uid);
7103 i_gid_write(inode, raw_inode->i_gid);
7104 diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
7105 index 043c3fdbc8e7..446148792f41 100644
7106 --- a/fs/minix/itree_common.c
7107 +++ b/fs/minix/itree_common.c
7108 @@ -75,6 +75,7 @@ static int alloc_branch(struct inode *inode,
7109 int n = 0;
7110 int i;
7111 int parent = minix_new_block(inode);
7112 + int err = -ENOSPC;
7113
7114 branch[0].key = cpu_to_block(parent);
7115 if (parent) for (n = 1; n < num; n++) {
7116 @@ -85,6 +86,11 @@ static int alloc_branch(struct inode *inode,
7117 break;
7118 branch[n].key = cpu_to_block(nr);
7119 bh = sb_getblk(inode->i_sb, parent);
7120 + if (!bh) {
7121 + minix_free_block(inode, nr);
7122 + err = -ENOMEM;
7123 + break;
7124 + }
7125 lock_buffer(bh);
7126 memset(bh->b_data, 0, bh->b_size);
7127 branch[n].bh = bh;
7128 @@ -103,7 +109,7 @@ static int alloc_branch(struct inode *inode,
7129 bforget(branch[i].bh);
7130 for (i = 0; i < n; i++)
7131 minix_free_block(inode, block_to_cpu(branch[i].key));
7132 - return -ENOSPC;
7133 + return err;
7134 }
7135
7136 static inline int splice_branch(struct inode *inode,
7137 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
7138 index 443639cbb0cf..9c2b07ce57b2 100644
7139 --- a/fs/nfs/pnfs.c
7140 +++ b/fs/nfs/pnfs.c
7141 @@ -1198,31 +1198,27 @@ out:
7142 return status;
7143 }
7144
7145 +static bool
7146 +pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo,
7147 + enum pnfs_iomode iomode,
7148 + u32 seq)
7149 +{
7150 + struct pnfs_layout_range recall_range = {
7151 + .length = NFS4_MAX_UINT64,
7152 + .iomode = iomode,
7153 + };
7154 + return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
7155 + &recall_range, seq) != -EBUSY;
7156 +}
7157 +
7158 /* Return true if layoutreturn is needed */
7159 static bool
7160 pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
7161 {
7162 - struct pnfs_layout_segment *s;
7163 - enum pnfs_iomode iomode;
7164 - u32 seq;
7165 -
7166 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
7167 return false;
7168 -
7169 - seq = lo->plh_return_seq;
7170 - iomode = lo->plh_return_iomode;
7171 -
7172 - /* Defer layoutreturn until all recalled lsegs are done */
7173 - list_for_each_entry(s, &lo->plh_segs, pls_list) {
7174 - if (seq && pnfs_seqid_is_newer(s->pls_seq, seq))
7175 - continue;
7176 - if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode)
7177 - continue;
7178 - if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
7179 - return false;
7180 - }
7181 -
7182 - return true;
7183 + return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode,
7184 + lo->plh_return_seq);
7185 }
7186
7187 static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
7188 @@ -2362,16 +2358,6 @@ out_forget:
7189 return ERR_PTR(-EAGAIN);
7190 }
7191
7192 -static int
7193 -mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg,
7194 - struct list_head *tmp_list)
7195 -{
7196 - if (!mark_lseg_invalid(lseg, tmp_list))
7197 - return 0;
7198 - pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg);
7199 - return 1;
7200 -}
7201 -
7202 /**
7203 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
7204 * @lo: pointer to layout header
7205 @@ -2408,7 +2394,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
7206 lseg, lseg->pls_range.iomode,
7207 lseg->pls_range.offset,
7208 lseg->pls_range.length);
7209 - if (mark_lseg_invalid_or_return(lseg, tmp_list))
7210 + if (mark_lseg_invalid(lseg, tmp_list))
7211 continue;
7212 remaining++;
7213 set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
7214 diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
7215 index e2c34c704185..50a863fc1779 100644
7216 --- a/fs/ocfs2/dlmglue.c
7217 +++ b/fs/ocfs2/dlmglue.c
7218 @@ -2871,9 +2871,15 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
7219
7220 status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
7221 0, 0);
7222 - if (status < 0)
7223 + if (status < 0) {
7224 mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
7225
7226 + if (ex)
7227 + up_write(&osb->nfs_sync_rwlock);
7228 + else
7229 + up_read(&osb->nfs_sync_rwlock);
7230 + }
7231 +
7232 return status;
7233 }
7234
7235 diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
7236 index cdf5b8ae2583..74a60bae2b23 100644
7237 --- a/fs/pstore/platform.c
7238 +++ b/fs/pstore/platform.c
7239 @@ -275,6 +275,9 @@ static int pstore_compress(const void *in, void *out,
7240 {
7241 int ret;
7242
7243 + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION))
7244 + return -EINVAL;
7245 +
7246 ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
7247 if (ret) {
7248 pr_err("crypto_comp_compress failed, ret = %d!\n", ret);
7249 @@ -661,7 +664,7 @@ static void decompress_record(struct pstore_record *record)
7250 int unzipped_len;
7251 char *unzipped, *workspace;
7252
7253 - if (!record->compressed)
7254 + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed)
7255 return;
7256
7257 /* Only PSTORE_TYPE_DMESG support compression. */
7258 diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h
7259 index 88221c7a04cc..c6df01a2a158 100644
7260 --- a/fs/xfs/libxfs/xfs_trans_space.h
7261 +++ b/fs/xfs/libxfs/xfs_trans_space.h
7262 @@ -57,7 +57,7 @@
7263 XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK)
7264 #define XFS_IALLOC_SPACE_RES(mp) \
7265 (M_IGEO(mp)->ialloc_blks + \
7266 - (xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1 * \
7267 + ((xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1) * \
7268 (M_IGEO(mp)->inobt_maxlevels - 1)))
7269
7270 /*
7271 diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
7272 index fa6ea6407992..392fb4df5c12 100644
7273 --- a/fs/xfs/scrub/bmap.c
7274 +++ b/fs/xfs/scrub/bmap.c
7275 @@ -45,9 +45,27 @@ xchk_setup_inode_bmap(
7276 */
7277 if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
7278 sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
7279 + struct address_space *mapping = VFS_I(sc->ip)->i_mapping;
7280 +
7281 inode_dio_wait(VFS_I(sc->ip));
7282 - error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
7283 - if (error)
7284 +
7285 + /*
7286 + * Try to flush all incore state to disk before we examine the
7287 + * space mappings for the data fork. Leave accumulated errors
7288 + * in the mapping for the writer threads to consume.
7289 + *
7290 + * On ENOSPC or EIO writeback errors, we continue into the
7291 + * extent mapping checks because write failures do not
7292 + * necessarily imply anything about the correctness of the file
7293 + * metadata. The metadata and the file data could be on
7294 + * completely separate devices; a media failure might only
7295 + * affect a subset of the disk, etc. We can handle delalloc
7296 + * extents in the scrubber, so leaving them in memory is fine.
7297 + */
7298 + error = filemap_fdatawrite(mapping);
7299 + if (!error)
7300 + error = filemap_fdatawait_keep_errors(mapping);
7301 + if (error && (error != -ENOSPC && error != -EIO))
7302 goto out;
7303 }
7304
7305 diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
7306 index 6a4fd1738b08..904d8285c226 100644
7307 --- a/fs/xfs/xfs_reflink.c
7308 +++ b/fs/xfs/xfs_reflink.c
7309 @@ -1005,6 +1005,7 @@ xfs_reflink_remap_extent(
7310 xfs_filblks_t rlen;
7311 xfs_filblks_t unmap_len;
7312 xfs_off_t newlen;
7313 + int64_t qres;
7314 int error;
7315
7316 unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
7317 @@ -1027,13 +1028,19 @@ xfs_reflink_remap_extent(
7318 xfs_ilock(ip, XFS_ILOCK_EXCL);
7319 xfs_trans_ijoin(tp, ip, 0);
7320
7321 - /* If we're not just clearing space, then do we have enough quota? */
7322 - if (real_extent) {
7323 - error = xfs_trans_reserve_quota_nblks(tp, ip,
7324 - irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS);
7325 - if (error)
7326 - goto out_cancel;
7327 - }
7328 + /*
7329 + * Reserve quota for this operation. We don't know if the first unmap
7330 + * in the dest file will cause a bmap btree split, so we always reserve
7331 + * at least enough blocks for that split. If the extent being mapped
7332 + * in is written, we need to reserve quota for that too.
7333 + */
7334 + qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
7335 + if (real_extent)
7336 + qres += irec->br_blockcount;
7337 + error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0,
7338 + XFS_QMOPT_RES_REGBLKS);
7339 + if (error)
7340 + goto out_cancel;
7341
7342 trace_xfs_reflink_remap(ip, irec->br_startoff,
7343 irec->br_blockcount, irec->br_startblock);
7344 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
7345 index d7616d08e863..f050039ca2c0 100644
7346 --- a/include/asm-generic/vmlinux.lds.h
7347 +++ b/include/asm-generic/vmlinux.lds.h
7348 @@ -340,6 +340,7 @@
7349 */
7350 #ifndef RO_AFTER_INIT_DATA
7351 #define RO_AFTER_INIT_DATA \
7352 + . = ALIGN(8); \
7353 __start_ro_after_init = .; \
7354 *(.data..ro_after_init) \
7355 JUMP_TABLE_DATA \
7356 diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
7357 index 4bbb5f1c8b5b..4c0224ff0a14 100644
7358 --- a/include/linux/bitfield.h
7359 +++ b/include/linux/bitfield.h
7360 @@ -64,7 +64,7 @@
7361 */
7362 #define FIELD_FIT(_mask, _val) \
7363 ({ \
7364 - __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
7365 + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \
7366 !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
7367 })
7368
7369 diff --git a/include/linux/tpm.h b/include/linux/tpm.h
7370 index 53c0ea9ec9df..77fdc988c610 100644
7371 --- a/include/linux/tpm.h
7372 +++ b/include/linux/tpm.h
7373 @@ -93,6 +93,7 @@ struct tpm_space {
7374 u8 *context_buf;
7375 u32 session_tbl[3];
7376 u8 *session_buf;
7377 + u32 buf_size;
7378 };
7379
7380 struct tpm_bios_log {
7381 diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
7382 index eccfd3a4e4c8..f3caeeb7a0d0 100644
7383 --- a/include/linux/tpm_eventlog.h
7384 +++ b/include/linux/tpm_eventlog.h
7385 @@ -211,9 +211,16 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
7386
7387 efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
7388
7389 - /* Check if event is malformed. */
7390 + /*
7391 + * Perform validation of the event in order to identify malformed
7392 + * events. This function may be asked to parse arbitrary byte sequences
7393 + * immediately following a valid event log. The caller expects this
7394 + * function to recognize that the byte sequence is not a valid event
7395 + * and to return an event size of 0.
7396 + */
7397 if (memcmp(efispecid->signature, TCG_SPECID_SIG,
7398 - sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) {
7399 + sizeof(TCG_SPECID_SIG)) ||
7400 + !efispecid->num_algs || count != efispecid->num_algs) {
7401 size = 0;
7402 goto out;
7403 }
7404 diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
7405 index 1fb11daa5c53..57ce5af258a3 100644
7406 --- a/include/linux/tracepoint.h
7407 +++ b/include/linux/tracepoint.h
7408 @@ -362,7 +362,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
7409 static const char *___tp_str __tracepoint_string = str; \
7410 ___tp_str; \
7411 })
7412 -#define __tracepoint_string __attribute__((section("__tracepoint_str")))
7413 +#define __tracepoint_string __attribute__((section("__tracepoint_str"), used))
7414 #else
7415 /*
7416 * tracepoint_string() is used to save the string address for userspace
7417 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
7418 index 895546058a20..c71eb294da95 100644
7419 --- a/include/net/inet_connection_sock.h
7420 +++ b/include/net/inet_connection_sock.h
7421 @@ -309,6 +309,10 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
7422 int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
7423 char __user *optval, unsigned int optlen);
7424
7425 +/* update the fast reuse flag when adding a socket */
7426 +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
7427 + struct sock *sk);
7428 +
7429 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
7430
7431 #define TCP_PINGPONG_THRESH 3
7432 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
7433 index 078887c8c586..7c37e3c3b1c7 100644
7434 --- a/include/net/ip_vs.h
7435 +++ b/include/net/ip_vs.h
7436 @@ -1624,18 +1624,16 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
7437 }
7438 #endif /* CONFIG_IP_VS_NFCT */
7439
7440 -/* Really using conntrack? */
7441 -static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
7442 - struct sk_buff *skb)
7443 +/* Using old conntrack that can not be redirected to another real server? */
7444 +static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp,
7445 + struct sk_buff *skb)
7446 {
7447 #ifdef CONFIG_IP_VS_NFCT
7448 enum ip_conntrack_info ctinfo;
7449 struct nf_conn *ct;
7450
7451 - if (!(cp->flags & IP_VS_CONN_F_NFCT))
7452 - return false;
7453 ct = nf_ct_get(skb, &ctinfo);
7454 - if (ct)
7455 + if (ct && nf_ct_is_confirmed(ct))
7456 return true;
7457 #endif
7458 return false;
7459 diff --git a/include/net/tcp.h b/include/net/tcp.h
7460 index 7cf1b4972c66..377179283c46 100644
7461 --- a/include/net/tcp.h
7462 +++ b/include/net/tcp.h
7463 @@ -1650,6 +1650,8 @@ void tcp_fastopen_destroy_cipher(struct sock *sk);
7464 void tcp_fastopen_ctx_destroy(struct net *net);
7465 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
7466 void *primary_key, void *backup_key);
7467 +int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
7468 + u64 *key);
7469 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
7470 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
7471 struct request_sock *req,
7472 diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
7473 index 90734aa5aa36..b5f901af79f0 100644
7474 --- a/include/uapi/linux/seccomp.h
7475 +++ b/include/uapi/linux/seccomp.h
7476 @@ -93,5 +93,6 @@ struct seccomp_notif_resp {
7477 #define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
7478 #define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
7479 struct seccomp_notif_resp)
7480 -#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64)
7481 +#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64)
7482 +
7483 #endif /* _UAPI_LINUX_SECCOMP_H */
7484 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
7485 index 38ae3cf9d173..b34b5c6e2524 100644
7486 --- a/kernel/sched/core.c
7487 +++ b/kernel/sched/core.c
7488 @@ -1238,6 +1238,20 @@ static void uclamp_fork(struct task_struct *p)
7489 }
7490 }
7491
7492 +static void __init init_uclamp_rq(struct rq *rq)
7493 +{
7494 + enum uclamp_id clamp_id;
7495 + struct uclamp_rq *uc_rq = rq->uclamp;
7496 +
7497 + for_each_clamp_id(clamp_id) {
7498 + uc_rq[clamp_id] = (struct uclamp_rq) {
7499 + .value = uclamp_none(clamp_id)
7500 + };
7501 + }
7502 +
7503 + rq->uclamp_flags = 0;
7504 +}
7505 +
7506 static void __init init_uclamp(void)
7507 {
7508 struct uclamp_se uc_max = {};
7509 @@ -1246,11 +1260,8 @@ static void __init init_uclamp(void)
7510
7511 mutex_init(&uclamp_mutex);
7512
7513 - for_each_possible_cpu(cpu) {
7514 - memset(&cpu_rq(cpu)->uclamp, 0,
7515 - sizeof(struct uclamp_rq)*UCLAMP_CNT);
7516 - cpu_rq(cpu)->uclamp_flags = 0;
7517 - }
7518 + for_each_possible_cpu(cpu)
7519 + init_uclamp_rq(cpu_rq(cpu));
7520
7521 for_each_clamp_id(clamp_id) {
7522 uclamp_se_set(&init_task.uclamp_req[clamp_id],
7523 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
7524 index 9b16080093be..20bf1f66733a 100644
7525 --- a/kernel/sched/fair.c
7526 +++ b/kernel/sched/fair.c
7527 @@ -9385,7 +9385,12 @@ static void kick_ilb(unsigned int flags)
7528 {
7529 int ilb_cpu;
7530
7531 - nohz.next_balance++;
7532 + /*
7533 + * Increase nohz.next_balance only when if full ilb is triggered but
7534 + * not if we only update stats.
7535 + */
7536 + if (flags & NOHZ_BALANCE_KICK)
7537 + nohz.next_balance = jiffies+1;
7538
7539 ilb_cpu = find_new_ilb();
7540
7541 @@ -9703,6 +9708,14 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
7542 }
7543 }
7544
7545 + /*
7546 + * next_balance will be updated only when there is a need.
7547 + * When the CPU is attached to null domain for ex, it will not be
7548 + * updated.
7549 + */
7550 + if (likely(update_next_balance))
7551 + nohz.next_balance = next_balance;
7552 +
7553 /* Newly idle CPU doesn't need an update */
7554 if (idle != CPU_NEWLY_IDLE) {
7555 update_blocked_averages(this_cpu);
7556 @@ -9723,14 +9736,6 @@ abort:
7557 if (has_blocked_load)
7558 WRITE_ONCE(nohz.has_blocked, 1);
7559
7560 - /*
7561 - * next_balance will be updated only when there is a need.
7562 - * When the CPU is attached to null domain for ex, it will not be
7563 - * updated.
7564 - */
7565 - if (likely(update_next_balance))
7566 - nohz.next_balance = next_balance;
7567 -
7568 return ret;
7569 }
7570
7571 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
7572 index 1fa1e13a5944..ffaa97a8d405 100644
7573 --- a/kernel/sched/topology.c
7574 +++ b/kernel/sched/topology.c
7575 @@ -1333,7 +1333,7 @@ sd_init(struct sched_domain_topology_level *tl,
7576 sd_flags = (*tl->sd_flags)();
7577 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
7578 "wrong sd_flags in topology description\n"))
7579 - sd_flags &= ~TOPOLOGY_SD_FLAGS;
7580 + sd_flags &= TOPOLOGY_SD_FLAGS;
7581
7582 /* Apply detected topology flags */
7583 sd_flags |= dflags;
7584 diff --git a/kernel/seccomp.c b/kernel/seccomp.c
7585 index 2c697ce7be21..e0fd97235653 100644
7586 --- a/kernel/seccomp.c
7587 +++ b/kernel/seccomp.c
7588 @@ -42,6 +42,14 @@
7589 #include <linux/uaccess.h>
7590 #include <linux/anon_inodes.h>
7591
7592 +/*
7593 + * When SECCOMP_IOCTL_NOTIF_ID_VALID was first introduced, it had the
7594 + * wrong direction flag in the ioctl number. This is the broken one,
7595 + * which the kernel needs to keep supporting until all userspaces stop
7596 + * using the wrong command number.
7597 + */
7598 +#define SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR SECCOMP_IOR(2, __u64)
7599 +
7600 enum notify_state {
7601 SECCOMP_NOTIFY_INIT,
7602 SECCOMP_NOTIFY_SENT,
7603 @@ -1168,6 +1176,7 @@ static long seccomp_notify_ioctl(struct file *file, unsigned int cmd,
7604 return seccomp_notify_recv(filter, buf);
7605 case SECCOMP_IOCTL_NOTIF_SEND:
7606 return seccomp_notify_send(filter, buf);
7607 + case SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR:
7608 case SECCOMP_IOCTL_NOTIF_ID_VALID:
7609 return seccomp_notify_id_valid(filter, buf);
7610 default:
7611 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
7612 index eaee960153e1..a4c8f9d9522e 100644
7613 --- a/kernel/trace/blktrace.c
7614 +++ b/kernel/trace/blktrace.c
7615 @@ -521,10 +521,18 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
7616 if (!bt->msg_data)
7617 goto err;
7618
7619 - ret = -ENOENT;
7620 -
7621 - dir = debugfs_lookup(buts->name, blk_debugfs_root);
7622 - if (!dir)
7623 +#ifdef CONFIG_BLK_DEBUG_FS
7624 + /*
7625 + * When tracing whole make_request drivers (multiqueue) block devices,
7626 + * reuse the existing debugfs directory created by the block layer on
7627 + * init. For request-based block devices, all partitions block devices,
7628 + * and scsi-generic block devices we create a temporary new debugfs
7629 + * directory that will be removed once the trace ends.
7630 + */
7631 + if (queue_is_mq(q) && bdev && bdev == bdev->bd_contains)
7632 + dir = q->debugfs_dir;
7633 + else
7634 +#endif
7635 bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
7636
7637 bt->dev = dev;
7638 @@ -565,8 +573,6 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
7639
7640 ret = 0;
7641 err:
7642 - if (dir && !bt->dir)
7643 - dput(dir);
7644 if (ret)
7645 blk_trace_free(bt);
7646 return ret;
7647 diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
7648 index 8cc01a603416..c9acf1c12cfc 100644
7649 --- a/lib/crc-t10dif.c
7650 +++ b/lib/crc-t10dif.c
7651 @@ -19,39 +19,46 @@
7652 static struct crypto_shash __rcu *crct10dif_tfm;
7653 static struct static_key crct10dif_fallback __read_mostly;
7654 static DEFINE_MUTEX(crc_t10dif_mutex);
7655 +static struct work_struct crct10dif_rehash_work;
7656
7657 -static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
7658 +static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
7659 {
7660 struct crypto_alg *alg = data;
7661 - struct crypto_shash *new, *old;
7662
7663 if (val != CRYPTO_MSG_ALG_LOADED ||
7664 static_key_false(&crct10dif_fallback) ||
7665 strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
7666 return 0;
7667
7668 + schedule_work(&crct10dif_rehash_work);
7669 + return 0;
7670 +}
7671 +
7672 +static void crc_t10dif_rehash(struct work_struct *work)
7673 +{
7674 + struct crypto_shash *new, *old;
7675 +
7676 mutex_lock(&crc_t10dif_mutex);
7677 old = rcu_dereference_protected(crct10dif_tfm,
7678 lockdep_is_held(&crc_t10dif_mutex));
7679 if (!old) {
7680 mutex_unlock(&crc_t10dif_mutex);
7681 - return 0;
7682 + return;
7683 }
7684 new = crypto_alloc_shash("crct10dif", 0, 0);
7685 if (IS_ERR(new)) {
7686 mutex_unlock(&crc_t10dif_mutex);
7687 - return 0;
7688 + return;
7689 }
7690 rcu_assign_pointer(crct10dif_tfm, new);
7691 mutex_unlock(&crc_t10dif_mutex);
7692
7693 synchronize_rcu();
7694 crypto_free_shash(old);
7695 - return 0;
7696 }
7697
7698 static struct notifier_block crc_t10dif_nb = {
7699 - .notifier_call = crc_t10dif_rehash,
7700 + .notifier_call = crc_t10dif_notify,
7701 };
7702
7703 __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
7704 @@ -86,19 +93,26 @@ EXPORT_SYMBOL(crc_t10dif);
7705
7706 static int __init crc_t10dif_mod_init(void)
7707 {
7708 + struct crypto_shash *tfm;
7709 +
7710 + INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
7711 crypto_register_notifier(&crc_t10dif_nb);
7712 - crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
7713 - if (IS_ERR(crct10dif_tfm)) {
7714 + mutex_lock(&crc_t10dif_mutex);
7715 + tfm = crypto_alloc_shash("crct10dif", 0, 0);
7716 + if (IS_ERR(tfm)) {
7717 static_key_slow_inc(&crct10dif_fallback);
7718 - crct10dif_tfm = NULL;
7719 + tfm = NULL;
7720 }
7721 + RCU_INIT_POINTER(crct10dif_tfm, tfm);
7722 + mutex_unlock(&crc_t10dif_mutex);
7723 return 0;
7724 }
7725
7726 static void __exit crc_t10dif_mod_fini(void)
7727 {
7728 crypto_unregister_notifier(&crc_t10dif_nb);
7729 - crypto_free_shash(crct10dif_tfm);
7730 + cancel_work_sync(&crct10dif_rehash_work);
7731 + crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1));
7732 }
7733
7734 module_init(crc_t10dif_mod_init);
7735 @@ -106,11 +120,27 @@ module_exit(crc_t10dif_mod_fini);
7736
7737 static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
7738 {
7739 + struct crypto_shash *tfm;
7740 + const char *name;
7741 + int len;
7742 +
7743 if (static_key_false(&crct10dif_fallback))
7744 return sprintf(buffer, "fallback\n");
7745
7746 - return sprintf(buffer, "%s\n",
7747 - crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
7748 + rcu_read_lock();
7749 + tfm = rcu_dereference(crct10dif_tfm);
7750 + if (!tfm) {
7751 + len = sprintf(buffer, "init\n");
7752 + goto unlock;
7753 + }
7754 +
7755 + name = crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
7756 + len = sprintf(buffer, "%s\n", name);
7757 +
7758 +unlock:
7759 + rcu_read_unlock();
7760 +
7761 + return len;
7762 }
7763
7764 module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
7765 diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
7766 index c60409138e13..ccf05719b1ad 100644
7767 --- a/lib/dynamic_debug.c
7768 +++ b/lib/dynamic_debug.c
7769 @@ -87,22 +87,22 @@ static struct { unsigned flag:8; char opt_char; } opt_array[] = {
7770 { _DPRINTK_FLAGS_NONE, '_' },
7771 };
7772
7773 +struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; };
7774 +
7775 /* format a string into buf[] which describes the _ddebug's flags */
7776 -static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
7777 - size_t maxlen)
7778 +static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb)
7779 {
7780 - char *p = buf;
7781 + char *p = fb->buf;
7782 int i;
7783
7784 - BUG_ON(maxlen < 6);
7785 for (i = 0; i < ARRAY_SIZE(opt_array); ++i)
7786 - if (dp->flags & opt_array[i].flag)
7787 + if (flags & opt_array[i].flag)
7788 *p++ = opt_array[i].opt_char;
7789 - if (p == buf)
7790 + if (p == fb->buf)
7791 *p++ = '_';
7792 *p = '\0';
7793
7794 - return buf;
7795 + return fb->buf;
7796 }
7797
7798 #define vpr_info(fmt, ...) \
7799 @@ -144,7 +144,7 @@ static int ddebug_change(const struct ddebug_query *query,
7800 struct ddebug_table *dt;
7801 unsigned int newflags;
7802 unsigned int nfound = 0;
7803 - char flagbuf[10];
7804 + struct flagsbuf fbuf;
7805
7806 /* search for matching ddebugs */
7807 mutex_lock(&ddebug_lock);
7808 @@ -201,8 +201,7 @@ static int ddebug_change(const struct ddebug_query *query,
7809 vpr_info("changed %s:%d [%s]%s =%s\n",
7810 trim_prefix(dp->filename), dp->lineno,
7811 dt->mod_name, dp->function,
7812 - ddebug_describe_flags(dp, flagbuf,
7813 - sizeof(flagbuf)));
7814 + ddebug_describe_flags(dp->flags, &fbuf));
7815 }
7816 }
7817 mutex_unlock(&ddebug_lock);
7818 @@ -816,7 +815,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
7819 {
7820 struct ddebug_iter *iter = m->private;
7821 struct _ddebug *dp = p;
7822 - char flagsbuf[10];
7823 + struct flagsbuf flags;
7824
7825 vpr_info("called m=%p p=%p\n", m, p);
7826
7827 @@ -829,7 +828,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
7828 seq_printf(m, "%s:%u [%s]%s =%s \"",
7829 trim_prefix(dp->filename), dp->lineno,
7830 iter->table->mod_name, dp->function,
7831 - ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
7832 + ddebug_describe_flags(dp->flags, &flags));
7833 seq_escape(m, dp->format, "\t\r\n\"");
7834 seq_puts(m, "\"\n");
7835
7836 diff --git a/lib/kobject.c b/lib/kobject.c
7837 index 83198cb37d8d..386873bdd51c 100644
7838 --- a/lib/kobject.c
7839 +++ b/lib/kobject.c
7840 @@ -599,14 +599,7 @@ out:
7841 }
7842 EXPORT_SYMBOL_GPL(kobject_move);
7843
7844 -/**
7845 - * kobject_del() - Unlink kobject from hierarchy.
7846 - * @kobj: object.
7847 - *
7848 - * This is the function that should be called to delete an object
7849 - * successfully added via kobject_add().
7850 - */
7851 -void kobject_del(struct kobject *kobj)
7852 +static void __kobject_del(struct kobject *kobj)
7853 {
7854 struct kernfs_node *sd;
7855 const struct kobj_type *ktype;
7856 @@ -625,9 +618,23 @@ void kobject_del(struct kobject *kobj)
7857
7858 kobj->state_in_sysfs = 0;
7859 kobj_kset_leave(kobj);
7860 - kobject_put(kobj->parent);
7861 kobj->parent = NULL;
7862 }
7863 +
7864 +/**
7865 + * kobject_del() - Unlink kobject from hierarchy.
7866 + * @kobj: object.
7867 + *
7868 + * This is the function that should be called to delete an object
7869 + * successfully added via kobject_add().
7870 + */
7871 +void kobject_del(struct kobject *kobj)
7872 +{
7873 + struct kobject *parent = kobj->parent;
7874 +
7875 + __kobject_del(kobj);
7876 + kobject_put(parent);
7877 +}
7878 EXPORT_SYMBOL(kobject_del);
7879
7880 /**
7881 @@ -663,6 +670,7 @@ EXPORT_SYMBOL(kobject_get_unless_zero);
7882 */
7883 static void kobject_cleanup(struct kobject *kobj)
7884 {
7885 + struct kobject *parent = kobj->parent;
7886 struct kobj_type *t = get_ktype(kobj);
7887 const char *name = kobj->name;
7888
7889 @@ -684,7 +692,10 @@ static void kobject_cleanup(struct kobject *kobj)
7890 if (kobj->state_in_sysfs) {
7891 pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n",
7892 kobject_name(kobj), kobj);
7893 - kobject_del(kobj);
7894 + __kobject_del(kobj);
7895 + } else {
7896 + /* avoid dropping the parent reference unnecessarily */
7897 + parent = NULL;
7898 }
7899
7900 if (t && t->release) {
7901 @@ -698,6 +709,8 @@ static void kobject_cleanup(struct kobject *kobj)
7902 pr_debug("kobject: '%s': free name\n", name);
7903 kfree_const(name);
7904 }
7905 +
7906 + kobject_put(parent);
7907 }
7908
7909 #ifdef CONFIG_DEBUG_KOBJECT_RELEASE
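The kobject hunks above reorder teardown so that the parent reference is dropped only after the child's release() callback has run. A minimal userspace model of that ordering (illustrative only, not kernel code; the struct and function names are invented for the sketch):

    struct obj {
        int refcount;                    /* simplified, non-atomic refcount */
        struct obj *parent;
        void (*release)(struct obj *);
    };

    static void obj_put(struct obj *o)
    {
        if (o && --o->refcount == 0) {
            struct obj *parent = o->parent;

            if (o->release)
                o->release(o);           /* release may still look at o->parent */
            obj_put(parent);             /* drop the parent only afterwards */
        }
    }

Dropping the parent before release(), as the old kobject_cleanup() effectively did by going through kobject_del(), risks release() touching an already-freed parent.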
7910 diff --git a/mm/mmap.c b/mm/mmap.c
7911 index ea1ba2db4f4f..a3584a90c55c 100644
7912 --- a/mm/mmap.c
7913 +++ b/mm/mmap.c
7914 @@ -3172,6 +3172,7 @@ void exit_mmap(struct mm_struct *mm)
7915 if (vma->vm_flags & VM_ACCOUNT)
7916 nr_accounted += vma_pages(vma);
7917 vma = remove_vma(vma);
7918 + cond_resched();
7919 }
7920 vm_unacct_memory(nr_accounted);
7921 }
7922 diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
7923 index 4febc82a7c76..52fb6d6d6d58 100644
7924 --- a/net/bluetooth/6lowpan.c
7925 +++ b/net/bluetooth/6lowpan.c
7926 @@ -50,6 +50,7 @@ static bool enable_6lowpan;
7927 /* We are listening incoming connections via this channel
7928 */
7929 static struct l2cap_chan *listen_chan;
7930 +static DEFINE_MUTEX(set_lock);
7931
7932 struct lowpan_peer {
7933 struct list_head list;
7934 @@ -1070,12 +1071,14 @@ static void do_enable_set(struct work_struct *work)
7935
7936 enable_6lowpan = set_enable->flag;
7937
7938 + mutex_lock(&set_lock);
7939 if (listen_chan) {
7940 l2cap_chan_close(listen_chan, 0);
7941 l2cap_chan_put(listen_chan);
7942 }
7943
7944 listen_chan = bt_6lowpan_listen();
7945 + mutex_unlock(&set_lock);
7946
7947 kfree(set_enable);
7948 }
7949 @@ -1127,11 +1130,13 @@ static ssize_t lowpan_control_write(struct file *fp,
7950 if (ret == -EINVAL)
7951 return ret;
7952
7953 + mutex_lock(&set_lock);
7954 if (listen_chan) {
7955 l2cap_chan_close(listen_chan, 0);
7956 l2cap_chan_put(listen_chan);
7957 listen_chan = NULL;
7958 }
7959 + mutex_unlock(&set_lock);
7960
7961 if (conn) {
7962 struct lowpan_peer *peer;
7963 diff --git a/net/core/sock.c b/net/core/sock.c
7964 index 33a232974374..991ab80234ce 100644
7965 --- a/net/core/sock.c
7966 +++ b/net/core/sock.c
7967 @@ -3337,6 +3337,16 @@ static void sock_inuse_add(struct net *net, int val)
7968 }
7969 #endif
7970
7971 +static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
7972 +{
7973 + if (!twsk_prot)
7974 + return;
7975 + kfree(twsk_prot->twsk_slab_name);
7976 + twsk_prot->twsk_slab_name = NULL;
7977 + kmem_cache_destroy(twsk_prot->twsk_slab);
7978 + twsk_prot->twsk_slab = NULL;
7979 +}
7980 +
7981 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
7982 {
7983 if (!rsk_prot)
7984 @@ -3407,7 +3417,7 @@ int proto_register(struct proto *prot, int alloc_slab)
7985 prot->slab_flags,
7986 NULL);
7987 if (prot->twsk_prot->twsk_slab == NULL)
7988 - goto out_free_timewait_sock_slab_name;
7989 + goto out_free_timewait_sock_slab;
7990 }
7991 }
7992
7993 @@ -3415,15 +3425,15 @@ int proto_register(struct proto *prot, int alloc_slab)
7994 ret = assign_proto_idx(prot);
7995 if (ret) {
7996 mutex_unlock(&proto_list_mutex);
7997 - goto out_free_timewait_sock_slab_name;
7998 + goto out_free_timewait_sock_slab;
7999 }
8000 list_add(&prot->node, &proto_list);
8001 mutex_unlock(&proto_list_mutex);
8002 return ret;
8003
8004 -out_free_timewait_sock_slab_name:
8005 +out_free_timewait_sock_slab:
8006 if (alloc_slab && prot->twsk_prot)
8007 - kfree(prot->twsk_prot->twsk_slab_name);
8008 + tw_prot_cleanup(prot->twsk_prot);
8009 out_free_request_sock_slab:
8010 if (alloc_slab) {
8011 req_prot_cleanup(prot->rsk_prot);
8012 @@ -3447,12 +3457,7 @@ void proto_unregister(struct proto *prot)
8013 prot->slab = NULL;
8014
8015 req_prot_cleanup(prot->rsk_prot);
8016 -
8017 - if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
8018 - kmem_cache_destroy(prot->twsk_prot->twsk_slab);
8019 - kfree(prot->twsk_prot->twsk_slab_name);
8020 - prot->twsk_prot->twsk_slab = NULL;
8021 - }
8022 + tw_prot_cleanup(prot->twsk_prot);
8023 }
8024 EXPORT_SYMBOL(proto_unregister);
8025
8026 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
8027 index 5e486895d67c..9745c52f49ca 100644
8028 --- a/net/ipv4/inet_connection_sock.c
8029 +++ b/net/ipv4/inet_connection_sock.c
8030 @@ -284,6 +284,57 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
8031 ipv6_only_sock(sk), true, false);
8032 }
8033
8034 +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
8035 + struct sock *sk)
8036 +{
8037 + kuid_t uid = sock_i_uid(sk);
8038 + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
8039 +
8040 + if (hlist_empty(&tb->owners)) {
8041 + tb->fastreuse = reuse;
8042 + if (sk->sk_reuseport) {
8043 + tb->fastreuseport = FASTREUSEPORT_ANY;
8044 + tb->fastuid = uid;
8045 + tb->fast_rcv_saddr = sk->sk_rcv_saddr;
8046 + tb->fast_ipv6_only = ipv6_only_sock(sk);
8047 + tb->fast_sk_family = sk->sk_family;
8048 +#if IS_ENABLED(CONFIG_IPV6)
8049 + tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
8050 +#endif
8051 + } else {
8052 + tb->fastreuseport = 0;
8053 + }
8054 + } else {
8055 + if (!reuse)
8056 + tb->fastreuse = 0;
8057 + if (sk->sk_reuseport) {
8058 + /* We didn't match or we don't have fastreuseport set on
8059 + * the tb, but we have sk_reuseport set on this socket
8060 + * and we know that there are no bind conflicts with
8061 + * this socket in this tb, so reset our tb's reuseport
8062 + * settings so that any subsequent sockets that match
8063 + * our current socket will be put on the fast path.
8064 + *
8065 + * If we reset we need to set FASTREUSEPORT_STRICT so we
8066 + * do extra checking for all subsequent sk_reuseport
8067 + * socks.
8068 + */
8069 + if (!sk_reuseport_match(tb, sk)) {
8070 + tb->fastreuseport = FASTREUSEPORT_STRICT;
8071 + tb->fastuid = uid;
8072 + tb->fast_rcv_saddr = sk->sk_rcv_saddr;
8073 + tb->fast_ipv6_only = ipv6_only_sock(sk);
8074 + tb->fast_sk_family = sk->sk_family;
8075 +#if IS_ENABLED(CONFIG_IPV6)
8076 + tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
8077 +#endif
8078 + }
8079 + } else {
8080 + tb->fastreuseport = 0;
8081 + }
8082 + }
8083 +}
8084 +
8085 /* Obtain a reference to a local port for the given sock,
8086 * if snum is zero it means select any available local port.
8087 * We try to allocate an odd port (and leave even ports for connect())
8088 @@ -296,7 +347,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
8089 struct inet_bind_hashbucket *head;
8090 struct net *net = sock_net(sk);
8091 struct inet_bind_bucket *tb = NULL;
8092 - kuid_t uid = sock_i_uid(sk);
8093 int l3mdev;
8094
8095 l3mdev = inet_sk_bound_l3mdev(sk);
8096 @@ -333,49 +383,8 @@ tb_found:
8097 goto fail_unlock;
8098 }
8099 success:
8100 - if (hlist_empty(&tb->owners)) {
8101 - tb->fastreuse = reuse;
8102 - if (sk->sk_reuseport) {
8103 - tb->fastreuseport = FASTREUSEPORT_ANY;
8104 - tb->fastuid = uid;
8105 - tb->fast_rcv_saddr = sk->sk_rcv_saddr;
8106 - tb->fast_ipv6_only = ipv6_only_sock(sk);
8107 - tb->fast_sk_family = sk->sk_family;
8108 -#if IS_ENABLED(CONFIG_IPV6)
8109 - tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
8110 -#endif
8111 - } else {
8112 - tb->fastreuseport = 0;
8113 - }
8114 - } else {
8115 - if (!reuse)
8116 - tb->fastreuse = 0;
8117 - if (sk->sk_reuseport) {
8118 - /* We didn't match or we don't have fastreuseport set on
8119 - * the tb, but we have sk_reuseport set on this socket
8120 - * and we know that there are no bind conflicts with
8121 - * this socket in this tb, so reset our tb's reuseport
8122 - * settings so that any subsequent sockets that match
8123 - * our current socket will be put on the fast path.
8124 - *
8125 - * If we reset we need to set FASTREUSEPORT_STRICT so we
8126 - * do extra checking for all subsequent sk_reuseport
8127 - * socks.
8128 - */
8129 - if (!sk_reuseport_match(tb, sk)) {
8130 - tb->fastreuseport = FASTREUSEPORT_STRICT;
8131 - tb->fastuid = uid;
8132 - tb->fast_rcv_saddr = sk->sk_rcv_saddr;
8133 - tb->fast_ipv6_only = ipv6_only_sock(sk);
8134 - tb->fast_sk_family = sk->sk_family;
8135 -#if IS_ENABLED(CONFIG_IPV6)
8136 - tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
8137 -#endif
8138 - }
8139 - } else {
8140 - tb->fastreuseport = 0;
8141 - }
8142 - }
8143 + inet_csk_update_fastreuse(tb, sk);
8144 +
8145 if (!inet_csk(sk)->icsk_bind_hash)
8146 inet_bind_hash(sk, tb, port);
8147 WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
8148 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
8149 index 2bbaaf0c7176..006a34b18537 100644
8150 --- a/net/ipv4/inet_hashtables.c
8151 +++ b/net/ipv4/inet_hashtables.c
8152 @@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
8153 return -ENOMEM;
8154 }
8155 }
8156 + inet_csk_update_fastreuse(tb, child);
8157 }
8158 inet_bind_hash(child, tb, port);
8159 spin_unlock(&head->lock);
8160 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
8161 index 0902cb32bbad..c83a5d05aeaa 100644
8162 --- a/net/ipv4/sysctl_net_ipv4.c
8163 +++ b/net/ipv4/sysctl_net_ipv4.c
8164 @@ -307,24 +307,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
8165 struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH *
8166 2 * TCP_FASTOPEN_KEY_MAX) +
8167 (TCP_FASTOPEN_KEY_MAX * 5)) };
8168 - struct tcp_fastopen_context *ctx;
8169 - u32 user_key[TCP_FASTOPEN_KEY_MAX * 4];
8170 - __le32 key[TCP_FASTOPEN_KEY_MAX * 4];
8171 + u32 user_key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u32)];
8172 + __le32 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(__le32)];
8173 char *backup_data;
8174 - int ret, i = 0, off = 0, n_keys = 0;
8175 + int ret, i = 0, off = 0, n_keys;
8176
8177 tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
8178 if (!tbl.data)
8179 return -ENOMEM;
8180
8181 - rcu_read_lock();
8182 - ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
8183 - if (ctx) {
8184 - n_keys = tcp_fastopen_context_len(ctx);
8185 - memcpy(&key[0], &ctx->key[0], TCP_FASTOPEN_KEY_LENGTH * n_keys);
8186 - }
8187 - rcu_read_unlock();
8188 -
8189 + n_keys = tcp_fastopen_get_cipher(net, NULL, (u64 *)key);
8190 if (!n_keys) {
8191 memset(&key[0], 0, TCP_FASTOPEN_KEY_LENGTH);
8192 n_keys = 1;
8193 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
8194 index 840901154210..01ddfb4156e4 100644
8195 --- a/net/ipv4/tcp.c
8196 +++ b/net/ipv4/tcp.c
8197 @@ -3527,22 +3527,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
8198 return 0;
8199
8200 case TCP_FASTOPEN_KEY: {
8201 - __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
8202 - struct tcp_fastopen_context *ctx;
8203 - unsigned int key_len = 0;
8204 + u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
8205 + unsigned int key_len;
8206
8207 if (get_user(len, optlen))
8208 return -EFAULT;
8209
8210 - rcu_read_lock();
8211 - ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
8212 - if (ctx) {
8213 - key_len = tcp_fastopen_context_len(ctx) *
8214 - TCP_FASTOPEN_KEY_LENGTH;
8215 - memcpy(&key[0], &ctx->key[0], key_len);
8216 - }
8217 - rcu_read_unlock();
8218 -
8219 + key_len = tcp_fastopen_get_cipher(net, icsk, key) *
8220 + TCP_FASTOPEN_KEY_LENGTH;
8221 len = min_t(unsigned int, len, key_len);
8222 if (put_user(len, optlen))
8223 return -EFAULT;
8224 diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
8225 index a915ade0c818..a9971e41f31b 100644
8226 --- a/net/ipv4/tcp_fastopen.c
8227 +++ b/net/ipv4/tcp_fastopen.c
8228 @@ -108,6 +108,29 @@ out:
8229 return err;
8230 }
8231
8232 +int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
8233 + u64 *key)
8234 +{
8235 + struct tcp_fastopen_context *ctx;
8236 + int n_keys = 0, i;
8237 +
8238 + rcu_read_lock();
8239 + if (icsk)
8240 + ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
8241 + else
8242 + ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
8243 + if (ctx) {
8244 + n_keys = tcp_fastopen_context_len(ctx);
8245 + for (i = 0; i < n_keys; i++) {
8246 + put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
8247 + put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
8248 + }
8249 + }
8250 + rcu_read_unlock();
8251 +
8252 + return n_keys;
8253 +}
8254 +
8255 static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
8256 struct sk_buff *syn,
8257 const siphash_key_t *key,
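The new tcp_fastopen_get_cipher() helper above copies each key word out with put_unaligned_le64(), so the exported byte layout is little-endian and alignment-safe regardless of the host. A standalone sketch of that kind of store (illustrative; store_le64 is a made-up name standing in for put_unaligned_le64):

    #include <stdint.h>

    static void store_le64(uint64_t v, unsigned char *p)
    {
        int i;

        for (i = 0; i < 8; i++)
            p[i] = (unsigned char)(v >> (8 * i));   /* least-significant byte first */
    }

Each fastopen context key contributes two such 64-bit words, which is why the helper writes to key + (i * 2) and key + (i * 2) + 1.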
8258 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
8259 index 8b80ab794a92..64a05906cc0e 100644
8260 --- a/net/netfilter/ipvs/ip_vs_core.c
8261 +++ b/net/netfilter/ipvs/ip_vs_core.c
8262 @@ -2061,14 +2061,14 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
8263
8264 conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
8265 if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
8266 - bool uses_ct = false, resched = false;
8267 + bool old_ct = false, resched = false;
8268
8269 if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
8270 unlikely(!atomic_read(&cp->dest->weight))) {
8271 resched = true;
8272 - uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
8273 + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
8274 } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
8275 - uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
8276 + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
8277 if (!atomic_read(&cp->n_control)) {
8278 resched = true;
8279 } else {
8280 @@ -2076,15 +2076,17 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
8281 * that uses conntrack while it is still
8282 * referenced by controlled connection(s).
8283 */
8284 - resched = !uses_ct;
8285 + resched = !old_ct;
8286 }
8287 }
8288
8289 if (resched) {
8290 + if (!old_ct)
8291 + cp->flags &= ~IP_VS_CONN_F_NFCT;
8292 if (!atomic_read(&cp->n_control))
8293 ip_vs_conn_expire_now(cp);
8294 __ip_vs_conn_put(cp);
8295 - if (uses_ct)
8296 + if (old_ct)
8297 return NF_DROP;
8298 cp = NULL;
8299 }
8300 diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
8301 index ba5ffd3badd3..b5c867fe3232 100644
8302 --- a/net/nfc/rawsock.c
8303 +++ b/net/nfc/rawsock.c
8304 @@ -332,10 +332,13 @@ static int rawsock_create(struct net *net, struct socket *sock,
8305 if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
8306 return -ESOCKTNOSUPPORT;
8307
8308 - if (sock->type == SOCK_RAW)
8309 + if (sock->type == SOCK_RAW) {
8310 + if (!capable(CAP_NET_RAW))
8311 + return -EPERM;
8312 sock->ops = &rawsock_raw_ops;
8313 - else
8314 + } else {
8315 sock->ops = &rawsock_ops;
8316 + }
8317
8318 sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
8319 if (!sk)
8320 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
8321 index 1d63ab3a878a..7735340c892e 100644
8322 --- a/net/packet/af_packet.c
8323 +++ b/net/packet/af_packet.c
8324 @@ -941,6 +941,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
8325 }
8326
8327 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
8328 + __releases(&pkc->blk_fill_in_prog_lock)
8329 {
8330 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
8331 atomic_dec(&pkc->blk_fill_in_prog);
8332 @@ -988,6 +989,7 @@ static void prb_fill_curr_block(char *curr,
8333 struct tpacket_kbdq_core *pkc,
8334 struct tpacket_block_desc *pbd,
8335 unsigned int len)
8336 + __acquires(&pkc->blk_fill_in_prog_lock)
8337 {
8338 struct tpacket3_hdr *ppd;
8339
8340 @@ -2285,8 +2287,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
8341 if (do_vnet &&
8342 virtio_net_hdr_from_skb(skb, h.raw + macoff -
8343 sizeof(struct virtio_net_hdr),
8344 - vio_le(), true, 0))
8345 + vio_le(), true, 0)) {
8346 + if (po->tp_version == TPACKET_V3)
8347 + prb_clear_blk_fill_status(&po->rx_ring);
8348 goto drop_n_account;
8349 + }
8350
8351 if (po->tp_version <= TPACKET_V2) {
8352 packet_increment_rx_head(po, &po->rx_ring);
8353 @@ -2392,7 +2397,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
8354 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
8355 spin_unlock(&sk->sk_receive_queue.lock);
8356 sk->sk_data_ready(sk);
8357 - } else {
8358 + } else if (po->tp_version == TPACKET_V3) {
8359 prb_clear_blk_fill_status(&po->rx_ring);
8360 }
8361
8362 diff --git a/net/socket.c b/net/socket.c
8363 index 432800b39ddb..d1a0264401b7 100644
8364 --- a/net/socket.c
8365 +++ b/net/socket.c
8366 @@ -485,7 +485,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
8367 if (f.file) {
8368 sock = sock_from_file(f.file, err);
8369 if (likely(sock)) {
8370 - *fput_needed = f.flags;
8371 + *fput_needed = f.flags & FDPUT_FPUT;
8372 return sock;
8373 }
8374 fdput(f);
8375 diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
8376 index 683755d95075..78ad41656996 100644
8377 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
8378 +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
8379 @@ -584,7 +584,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
8380 buf->head[0].iov_len);
8381 memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
8382 buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
8383 - buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip;
8384 + buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip);
8385
8386 /* Trim off the trailing "extra count" and checksum blob */
8387 xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
8388 diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
8389 index fd91274e834d..3645cd241d3e 100644
8390 --- a/net/sunrpc/auth_gss/svcauth_gss.c
8391 +++ b/net/sunrpc/auth_gss/svcauth_gss.c
8392 @@ -949,7 +949,6 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
8393
8394 maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
8395 pad = priv_len - buf->len;
8396 - buf->len -= pad;
8397 /* The upper layers assume the buffer is aligned on 4-byte boundaries.
8398 * In the krb5p case, at least, the data ends up offset, so we need to
8399 * move it around. */
8400 diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
8401 index 066af6b2eb01..0bb3f0dca80d 100644
8402 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
8403 +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
8404 @@ -677,7 +677,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
8405 struct svc_rdma_read_info *info,
8406 __be32 *p)
8407 {
8408 - unsigned int i;
8409 int ret;
8410
8411 ret = -EINVAL;
8412 @@ -700,12 +699,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
8413 info->ri_chunklen += rs_length;
8414 }
8415
8416 - /* Pages under I/O have been copied to head->rc_pages.
8417 - * Prevent their premature release by svc_xprt_release() .
8418 - */
8419 - for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
8420 - rqstp->rq_pages[i] = NULL;
8421 -
8422 return ret;
8423 }
8424
8425 @@ -800,6 +793,26 @@ out:
8426 return ret;
8427 }
8428
8429 +/* Pages under I/O have been copied to head->rc_pages. Ensure they
8430 + * are not released by svc_xprt_release() until the I/O is complete.
8431 + *
8432 + * This has to be done after all Read WRs are constructed to properly
8433 + * handle a page that is part of I/O on behalf of two different RDMA
8434 + * segments.
8435 + *
8436 + * Do this only if I/O has been posted. Otherwise, we do indeed want
8437 + * svc_xprt_release() to clean things up properly.
8438 + */
8439 +static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
8440 + const unsigned int start,
8441 + const unsigned int num_pages)
8442 +{
8443 + unsigned int i;
8444 +
8445 + for (i = start; i < num_pages + start; i++)
8446 + rqstp->rq_pages[i] = NULL;
8447 +}
8448 +
8449 /**
8450 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
8451 * @rdma: controlling RDMA transport
8452 @@ -853,6 +866,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
8453 ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
8454 if (ret < 0)
8455 goto out_err;
8456 + svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
8457 return 0;
8458
8459 out_err:
8460 diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
8461 index 1adeb1c0473b..25fca390cdcf 100644
8462 --- a/net/tls/tls_device.c
8463 +++ b/net/tls/tls_device.c
8464 @@ -549,7 +549,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
8465 {
8466 struct tls_context *tls_ctx = tls_get_ctx(sk);
8467 struct iov_iter msg_iter;
8468 - char *kaddr = kmap(page);
8469 + char *kaddr;
8470 struct kvec iov;
8471 int rc;
8472
8473 @@ -564,6 +564,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
8474 goto out;
8475 }
8476
8477 + kaddr = kmap(page);
8478 iov.iov_base = kaddr + offset;
8479 iov.iov_len = size;
8480 iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
8481 diff --git a/samples/bpf/fds_example.c b/samples/bpf/fds_example.c
8482 index 2d4b717726b6..34b3fca788e8 100644
8483 --- a/samples/bpf/fds_example.c
8484 +++ b/samples/bpf/fds_example.c
8485 @@ -30,6 +30,8 @@
8486 #define BPF_M_MAP 1
8487 #define BPF_M_PROG 2
8488
8489 +char bpf_log_buf[BPF_LOG_BUF_SIZE];
8490 +
8491 static void usage(void)
8492 {
8493 printf("Usage: fds_example [...]\n");
8494 @@ -57,7 +59,6 @@ static int bpf_prog_create(const char *object)
8495 BPF_EXIT_INSN(),
8496 };
8497 size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn);
8498 - char bpf_log_buf[BPF_LOG_BUF_SIZE];
8499 struct bpf_object *obj;
8500 int prog_fd;
8501
8502 diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
8503 index 7225107a9aaf..e59022b3f125 100644
8504 --- a/scripts/recordmcount.c
8505 +++ b/scripts/recordmcount.c
8506 @@ -434,6 +434,11 @@ static int arm_is_fake_mcount(Elf32_Rel const *rp)
8507 return 1;
8508 }
8509
8510 +static int arm64_is_fake_mcount(Elf64_Rel const *rp)
8511 +{
8512 + return ELF64_R_TYPE(w(rp->r_info)) != R_AARCH64_CALL26;
8513 +}
8514 +
8515 /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
8516 * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
8517 * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
8518 @@ -547,6 +552,7 @@ static int do_file(char const *const fname)
8519 make_nop = make_nop_arm64;
8520 rel_type_nop = R_AARCH64_NONE;
8521 ideal_nop = ideal_nop4_arm64;
8522 + is_fake_mcount64 = arm64_is_fake_mcount;
8523 break;
8524 case EM_IA_64: reltype = R_IA64_IMM64; break;
8525 case EM_MIPS: /* reltype: e_class */ break;
8526 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
8527 index be469fce19e1..8173982e00ab 100644
8528 --- a/security/integrity/ima/ima.h
8529 +++ b/security/integrity/ima/ima.h
8530 @@ -362,6 +362,7 @@ static inline void ima_free_modsig(struct modsig *modsig)
8531 #ifdef CONFIG_IMA_LSM_RULES
8532
8533 #define security_filter_rule_init security_audit_rule_init
8534 +#define security_filter_rule_free security_audit_rule_free
8535 #define security_filter_rule_match security_audit_rule_match
8536
8537 #else
8538 @@ -372,6 +373,10 @@ static inline int security_filter_rule_init(u32 field, u32 op, char *rulestr,
8539 return -EINVAL;
8540 }
8541
8542 +static inline void security_filter_rule_free(void *lsmrule)
8543 +{
8544 +}
8545 +
8546 static inline int security_filter_rule_match(u32 secid, u32 field, u32 op,
8547 void *lsmrule)
8548 {
8549 diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
8550 index 558a7607bf93..e725d4187271 100644
8551 --- a/security/integrity/ima/ima_policy.c
8552 +++ b/security/integrity/ima/ima_policy.c
8553 @@ -254,7 +254,7 @@ static void ima_lsm_free_rule(struct ima_rule_entry *entry)
8554 int i;
8555
8556 for (i = 0; i < MAX_LSM_RULES; i++) {
8557 - kfree(entry->lsm[i].rule);
8558 + security_filter_rule_free(entry->lsm[i].rule);
8559 kfree(entry->lsm[i].args_p);
8560 }
8561 kfree(entry);
8562 diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
8563 index 840a192e9337..9c4308077574 100644
8564 --- a/security/smack/smackfs.c
8565 +++ b/security/smack/smackfs.c
8566 @@ -884,7 +884,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
8567 }
8568
8569 ret = sscanf(rule, "%d", &maplevel);
8570 - if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
8571 + if (ret != 1 || maplevel < 0 || maplevel > SMACK_CIPSO_MAXLEVEL)
8572 goto out;
8573
8574 rule += SMK_DIGITLEN;
8575 @@ -905,6 +905,10 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
8576
8577 for (i = 0; i < catlen; i++) {
8578 rule += SMK_DIGITLEN;
8579 + if (rule > data + count) {
8580 + rc = -EOVERFLOW;
8581 + goto out;
8582 + }
8583 ret = sscanf(rule, "%u", &cat);
8584 if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM)
8585 goto out;
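The smk_set_cipso() hunks add two guards: maplevel may not be negative, and the per-category cursor may not step past the user-supplied buffer. The pattern being guarded against, as a hedged standalone sketch (names and the stride value are illustrative, not the Smack code):

    /* A parser that advances a cursor by a fixed stride per field must
     * bound the cursor against the end of the caller's buffer, otherwise
     * a large field count walks off the end of the copied data. */
    static int parse_cats(const char *data, size_t count, int catlen)
    {
        const char *rule = data;
        int i;

        for (i = 0; i < catlen; i++) {
            rule += 4;                   /* fixed per-field stride */
            if (rule > data + count)
                return -1;               /* -EOVERFLOW in the real code */
            /* sscanf(rule, "%u", &cat) would read here */
        }
        return 0;
    }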
8586 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
8587 index ea25b8d0350d..88629906f314 100644
8588 --- a/sound/pci/hda/patch_realtek.c
8589 +++ b/sound/pci/hda/patch_realtek.c
8590 @@ -4391,6 +4391,7 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
8591 {
8592 struct alc_spec *spec = codec->spec;
8593
8594 + spec->micmute_led_polarity = 1;
8595 alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
8596 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
8597 spec->init_amp = ALC_INIT_DEFAULT;
8598 diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
8599 index 41b83ecaf008..914b75c23d1b 100644
8600 --- a/sound/soc/fsl/fsl_sai.c
8601 +++ b/sound/soc/fsl/fsl_sai.c
8602 @@ -680,10 +680,11 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
8603 regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0);
8604
8605 regmap_update_bits(sai->regmap, FSL_SAI_TCR1(ofs),
8606 - FSL_SAI_CR1_RFW_MASK,
8607 + FSL_SAI_CR1_RFW_MASK(sai->soc_data->fifo_depth),
8608 sai->soc_data->fifo_depth - FSL_SAI_MAXBURST_TX);
8609 regmap_update_bits(sai->regmap, FSL_SAI_RCR1(ofs),
8610 - FSL_SAI_CR1_RFW_MASK, FSL_SAI_MAXBURST_RX - 1);
8611 + FSL_SAI_CR1_RFW_MASK(sai->soc_data->fifo_depth),
8612 + FSL_SAI_MAXBURST_RX - 1);
8613
8614 snd_soc_dai_init_dma_data(cpu_dai, &sai->dma_params_tx,
8615 &sai->dma_params_rx);
8616 diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
8617 index 76b15deea80c..6aba7d28f5f3 100644
8618 --- a/sound/soc/fsl/fsl_sai.h
8619 +++ b/sound/soc/fsl/fsl_sai.h
8620 @@ -94,7 +94,7 @@
8621 #define FSL_SAI_CSR_FRDE BIT(0)
8622
8623 /* SAI Transmit and Receive Configuration 1 Register */
8624 -#define FSL_SAI_CR1_RFW_MASK 0x1f
8625 +#define FSL_SAI_CR1_RFW_MASK(x) ((x) - 1)
8626
8627 /* SAI Transmit and Receive Configuration 2 Register */
8628 #define FSL_SAI_CR2_SYNC BIT(30)
8629 diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
8630 index adf416a49b48..60fb87495050 100644
8631 --- a/sound/soc/intel/boards/bxt_rt298.c
8632 +++ b/sound/soc/intel/boards/bxt_rt298.c
8633 @@ -556,6 +556,7 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
8634 /* broxton audio machine driver for SPT + RT298S */
8635 static struct snd_soc_card broxton_rt298 = {
8636 .name = "broxton-rt298",
8637 + .owner = THIS_MODULE,
8638 .dai_link = broxton_rt298_dais,
8639 .num_links = ARRAY_SIZE(broxton_rt298_dais),
8640 .controls = broxton_controls,
8641 @@ -571,6 +572,7 @@ static struct snd_soc_card broxton_rt298 = {
8642
8643 static struct snd_soc_card geminilake_rt298 = {
8644 .name = "geminilake-rt298",
8645 + .owner = THIS_MODULE,
8646 .dai_link = broxton_rt298_dais,
8647 .num_links = ARRAY_SIZE(broxton_rt298_dais),
8648 .controls = broxton_controls,
8649 diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
8650 index 1f698adde506..7126344017fa 100644
8651 --- a/sound/soc/meson/axg-card.c
8652 +++ b/sound/soc/meson/axg-card.c
8653 @@ -266,7 +266,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card,
8654
8655 lb = &card->dai_link[*index + 1];
8656
8657 - lb->name = kasprintf(GFP_KERNEL, "%s-lb", pad->name);
8658 + lb->name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-lb", pad->name);
8659 if (!lb->name)
8660 return -ENOMEM;
8661
8662 diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c
8663 index 358c8c0d861c..f7e8e9da68a0 100644
8664 --- a/sound/soc/meson/axg-tdm-formatter.c
8665 +++ b/sound/soc/meson/axg-tdm-formatter.c
8666 @@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks);
8667 static int axg_tdm_formatter_enable(struct axg_tdm_formatter *formatter)
8668 {
8669 struct axg_tdm_stream *ts = formatter->stream;
8670 - bool invert = formatter->drv->quirks->invert_sclk;
8671 + bool invert;
8672 int ret;
8673
8674 /* Do nothing if the formatter is already enabled */
8675 @@ -96,11 +96,12 @@ static int axg_tdm_formatter_enable(struct axg_tdm_formatter *formatter)
8676 return ret;
8677
8678 /*
8679 - * If sclk is inverted, invert it back and provide the inversion
8680 - * required by the formatter
8681 + * If sclk is inverted, it means the bit should latched on the
8682 + * rising edge which is what our HW expects. If not, we need to
8683 + * invert it before the formatter.
8684 */
8685 - invert ^= axg_tdm_sclk_invert(ts->iface->fmt);
8686 - ret = clk_set_phase(formatter->sclk, invert ? 180 : 0);
8687 + invert = axg_tdm_sclk_invert(ts->iface->fmt);
8688 + ret = clk_set_phase(formatter->sclk, invert ? 0 : 180);
8689 if (ret)
8690 return ret;
8691
8692 diff --git a/sound/soc/meson/axg-tdm-formatter.h b/sound/soc/meson/axg-tdm-formatter.h
8693 index 9ef98e955cb2..a1f0dcc0ff13 100644
8694 --- a/sound/soc/meson/axg-tdm-formatter.h
8695 +++ b/sound/soc/meson/axg-tdm-formatter.h
8696 @@ -16,7 +16,6 @@ struct snd_kcontrol;
8697
8698 struct axg_tdm_formatter_hw {
8699 unsigned int skew_offset;
8700 - bool invert_sclk;
8701 };
8702
8703 struct axg_tdm_formatter_ops {
8704 diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
8705 index d51f3344be7c..e25336f73912 100644
8706 --- a/sound/soc/meson/axg-tdm-interface.c
8707 +++ b/sound/soc/meson/axg-tdm-interface.c
8708 @@ -119,18 +119,25 @@ static int axg_tdm_iface_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
8709 {
8710 struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
8711
8712 - /* These modes are not supported */
8713 - if (fmt & (SND_SOC_DAIFMT_CBS_CFM | SND_SOC_DAIFMT_CBM_CFS)) {
8714 + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
8715 + case SND_SOC_DAIFMT_CBS_CFS:
8716 + if (!iface->mclk) {
8717 + dev_err(dai->dev, "cpu clock master: mclk missing\n");
8718 + return -ENODEV;
8719 + }
8720 + break;
8721 +
8722 + case SND_SOC_DAIFMT_CBM_CFM:
8723 + break;
8724 +
8725 + case SND_SOC_DAIFMT_CBS_CFM:
8726 + case SND_SOC_DAIFMT_CBM_CFS:
8727 dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n");
8728 + /* Fall-through */
8729 + default:
8730 return -EINVAL;
8731 }
8732
8733 - /* If the TDM interface is the clock master, it requires mclk */
8734 - if (!iface->mclk && (fmt & SND_SOC_DAIFMT_CBS_CFS)) {
8735 - dev_err(dai->dev, "cpu clock master: mclk missing\n");
8736 - return -ENODEV;
8737 - }
8738 -
8739 iface->fmt = fmt;
8740 return 0;
8741 }
8742 @@ -319,7 +326,8 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream,
8743 if (ret)
8744 return ret;
8745
8746 - if (iface->fmt & SND_SOC_DAIFMT_CBS_CFS) {
8747 + if ((iface->fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
8748 + SND_SOC_DAIFMT_CBS_CFS) {
8749 ret = axg_tdm_iface_set_sclk(dai, params);
8750 if (ret)
8751 return ret;
8752 diff --git a/sound/soc/meson/axg-tdmin.c b/sound/soc/meson/axg-tdmin.c
8753 index 973d4c02ef8d..88ed95ae886b 100644
8754 --- a/sound/soc/meson/axg-tdmin.c
8755 +++ b/sound/soc/meson/axg-tdmin.c
8756 @@ -228,15 +228,29 @@ static const struct axg_tdm_formatter_driver axg_tdmin_drv = {
8757 .regmap_cfg = &axg_tdmin_regmap_cfg,
8758 .ops = &axg_tdmin_ops,
8759 .quirks = &(const struct axg_tdm_formatter_hw) {
8760 - .invert_sclk = false,
8761 .skew_offset = 2,
8762 },
8763 };
8764
8765 +static const struct axg_tdm_formatter_driver g12a_tdmin_drv = {
8766 + .component_drv = &axg_tdmin_component_drv,
8767 + .regmap_cfg = &axg_tdmin_regmap_cfg,
8768 + .ops = &axg_tdmin_ops,
8769 + .quirks = &(const struct axg_tdm_formatter_hw) {
8770 + .skew_offset = 3,
8771 + },
8772 +};
8773 +
8774 static const struct of_device_id axg_tdmin_of_match[] = {
8775 {
8776 .compatible = "amlogic,axg-tdmin",
8777 .data = &axg_tdmin_drv,
8778 + }, {
8779 + .compatible = "amlogic,g12a-tdmin",
8780 + .data = &g12a_tdmin_drv,
8781 + }, {
8782 + .compatible = "amlogic,sm1-tdmin",
8783 + .data = &g12a_tdmin_drv,
8784 }, {}
8785 };
8786 MODULE_DEVICE_TABLE(of, axg_tdmin_of_match);
8787 diff --git a/sound/soc/meson/axg-tdmout.c b/sound/soc/meson/axg-tdmout.c
8788 index 418ec314b37d..3ceabddae629 100644
8789 --- a/sound/soc/meson/axg-tdmout.c
8790 +++ b/sound/soc/meson/axg-tdmout.c
8791 @@ -238,7 +238,6 @@ static const struct axg_tdm_formatter_driver axg_tdmout_drv = {
8792 .regmap_cfg = &axg_tdmout_regmap_cfg,
8793 .ops = &axg_tdmout_ops,
8794 .quirks = &(const struct axg_tdm_formatter_hw) {
8795 - .invert_sclk = true,
8796 .skew_offset = 1,
8797 },
8798 };
8799 @@ -248,7 +247,6 @@ static const struct axg_tdm_formatter_driver g12a_tdmout_drv = {
8800 .regmap_cfg = &axg_tdmout_regmap_cfg,
8801 .ops = &axg_tdmout_ops,
8802 .quirks = &(const struct axg_tdm_formatter_hw) {
8803 - .invert_sclk = true,
8804 .skew_offset = 2,
8805 },
8806 };
8807 @@ -309,7 +307,6 @@ static const struct axg_tdm_formatter_driver sm1_tdmout_drv = {
8808 .regmap_cfg = &axg_tdmout_regmap_cfg,
8809 .ops = &axg_tdmout_ops,
8810 .quirks = &(const struct axg_tdm_formatter_hw) {
8811 - .invert_sclk = true,
8812 .skew_offset = 2,
8813 },
8814 };
8815 diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c
8816 index ea0fe9a09f3f..71410116add1 100644
8817 --- a/sound/soc/sof/nocodec.c
8818 +++ b/sound/soc/sof/nocodec.c
8819 @@ -14,6 +14,7 @@
8820
8821 static struct snd_soc_card sof_nocodec_card = {
8822 .name = "nocodec", /* the sof- prefix is added by the core */
8823 + .owner = THIS_MODULE
8824 };
8825
8826 static int sof_nocodec_bes_setup(struct device *dev,
8827 diff --git a/sound/usb/card.h b/sound/usb/card.h
8828 index f39f23e3525d..d8ec5caf464d 100644
8829 --- a/sound/usb/card.h
8830 +++ b/sound/usb/card.h
8831 @@ -133,6 +133,7 @@ struct snd_usb_substream {
8832 unsigned int tx_length_quirk:1; /* add length specifier to transfers */
8833 unsigned int fmt_type; /* USB audio format type (1-3) */
8834 unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */
8835 + unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) */
8836
8837 unsigned int running: 1; /* running status */
8838
8839 diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
8840 index d39bf5b648d1..49f0dc0e3e4d 100644
8841 --- a/sound/usb/mixer_quirks.c
8842 +++ b/sound/usb/mixer_quirks.c
8843 @@ -184,6 +184,7 @@ static const struct rc_config {
8844 { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */
8845 { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
8846 { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
8847 + { USB_ID(0x041e, 0x3263), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
8848 { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
8849 };
8850
8851 diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
8852 index d11d00efc574..7b41f9748978 100644
8853 --- a/sound/usb/pcm.c
8854 +++ b/sound/usb/pcm.c
8855 @@ -1417,6 +1417,12 @@ static void retire_capture_urb(struct snd_usb_substream *subs,
8856 // continue;
8857 }
8858 bytes = urb->iso_frame_desc[i].actual_length;
8859 + if (subs->stream_offset_adj > 0) {
8860 + unsigned int adj = min(subs->stream_offset_adj, bytes);
8861 + cp += adj;
8862 + bytes -= adj;
8863 + subs->stream_offset_adj -= adj;
8864 + }
8865 frames = bytes / stride;
8866 if (!subs->txfr_quirk)
8867 bytes = frames * stride;
8868 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
8869 index 562179492a33..1573229d8cf4 100644
8870 --- a/sound/usb/quirks-table.h
8871 +++ b/sound/usb/quirks-table.h
8872 @@ -3570,6 +3570,62 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
8873 }
8874 }
8875 },
8876 +{
8877 + /*
8878 + * PIONEER DJ DDJ-RB
8879 + * PCM is 4 channels out, 2 dummy channels in @ 44.1 fixed
8880 + * The feedback for the output is the dummy input.
8881 + */
8882 + USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000e),
8883 + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
8884 + .ifnum = QUIRK_ANY_INTERFACE,
8885 + .type = QUIRK_COMPOSITE,
8886 + .data = (const struct snd_usb_audio_quirk[]) {
8887 + {
8888 + .ifnum = 0,
8889 + .type = QUIRK_AUDIO_FIXED_ENDPOINT,
8890 + .data = &(const struct audioformat) {
8891 + .formats = SNDRV_PCM_FMTBIT_S24_3LE,
8892 + .channels = 4,
8893 + .iface = 0,
8894 + .altsetting = 1,
8895 + .altset_idx = 1,
8896 + .endpoint = 0x01,
8897 + .ep_attr = USB_ENDPOINT_XFER_ISOC|
8898 + USB_ENDPOINT_SYNC_ASYNC,
8899 + .rates = SNDRV_PCM_RATE_44100,
8900 + .rate_min = 44100,
8901 + .rate_max = 44100,
8902 + .nr_rates = 1,
8903 + .rate_table = (unsigned int[]) { 44100 }
8904 + }
8905 + },
8906 + {
8907 + .ifnum = 0,
8908 + .type = QUIRK_AUDIO_FIXED_ENDPOINT,
8909 + .data = &(const struct audioformat) {
8910 + .formats = SNDRV_PCM_FMTBIT_S24_3LE,
8911 + .channels = 2,
8912 + .iface = 0,
8913 + .altsetting = 1,
8914 + .altset_idx = 1,
8915 + .endpoint = 0x82,
8916 + .ep_attr = USB_ENDPOINT_XFER_ISOC|
8917 + USB_ENDPOINT_SYNC_ASYNC|
8918 + USB_ENDPOINT_USAGE_IMPLICIT_FB,
8919 + .rates = SNDRV_PCM_RATE_44100,
8920 + .rate_min = 44100,
8921 + .rate_max = 44100,
8922 + .nr_rates = 1,
8923 + .rate_table = (unsigned int[]) { 44100 }
8924 + }
8925 + },
8926 + {
8927 + .ifnum = -1
8928 + }
8929 + }
8930 + }
8931 +},
8932
8933 #define ALC1220_VB_DESKTOP(vend, prod) { \
8934 USB_DEVICE(vend, prod), \
8935 @@ -3623,7 +3679,13 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
8936 * with.
8937 */
8938 {
8939 - USB_DEVICE(0x534d, 0x2109),
8940 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
8941 + USB_DEVICE_ID_MATCH_INT_CLASS |
8942 + USB_DEVICE_ID_MATCH_INT_SUBCLASS,
8943 + .idVendor = 0x534d,
8944 + .idProduct = 0x2109,
8945 + .bInterfaceClass = USB_CLASS_AUDIO,
8946 + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
8947 .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
8948 .vendor_name = "MacroSilicon",
8949 .product_name = "MS2109",
8950 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
8951 index a8bb953cc468..a756f50d9f07 100644
8952 --- a/sound/usb/quirks.c
8953 +++ b/sound/usb/quirks.c
8954 @@ -1432,6 +1432,9 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
8955 case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
8956 set_format_emu_quirk(subs, fmt);
8957 break;
8958 + case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
8959 + subs->stream_offset_adj = 2;
8960 + break;
8961 }
8962 }
8963
8964 diff --git a/sound/usb/stream.c b/sound/usb/stream.c
8965 index 11785f9652ad..d01edd5da6cf 100644
8966 --- a/sound/usb/stream.c
8967 +++ b/sound/usb/stream.c
8968 @@ -94,6 +94,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as,
8969 subs->tx_length_quirk = as->chip->tx_length_quirk;
8970 subs->speed = snd_usb_get_speed(subs->dev);
8971 subs->pkt_offset_adj = 0;
8972 + subs->stream_offset_adj = 0;
8973
8974 snd_usb_set_pcm_ops(as->pcm, stream);
8975
8976 diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
8977 index 9a9376d1d3df..66765f970bc5 100644
8978 --- a/tools/bpf/bpftool/btf.c
8979 +++ b/tools/bpf/bpftool/btf.c
8980 @@ -510,7 +510,7 @@ static int do_dump(int argc, char **argv)
8981 goto done;
8982 }
8983 if (!btf) {
8984 - err = ENOENT;
8985 + err = -ENOENT;
8986 p_err("can't find btf with ID (%u)", btf_id);
8987 goto done;
8988 }
8989 diff --git a/tools/build/Build.include b/tools/build/Build.include
8990 index 9ec01f4454f9..585486e40995 100644
8991 --- a/tools/build/Build.include
8992 +++ b/tools/build/Build.include
8993 @@ -74,7 +74,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
8994 # dependencies in the cmd file
8995 if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \
8996 @set -e; \
8997 - $(echo-cmd) $(cmd_$(1)) && $(dep-cmd))
8998 + $(echo-cmd) $(cmd_$(1)); \
8999 + $(dep-cmd))
9000
9001 # if_changed - execute command if any prerequisite is newer than
9002 # target, or command line has changed
9003 diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
9004 index a2e8c9da7fa5..d50cc05df495 100644
9005 --- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c
9006 +++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
9007 @@ -19,6 +19,7 @@
9008 #include <limits.h>
9009 #include <sys/time.h>
9010 #include <sys/syscall.h>
9011 +#include <sys/sysinfo.h>
9012 #include <sys/types.h>
9013 #include <sys/shm.h>
9014 #include <linux/futex.h>
9015 @@ -104,8 +105,9 @@ static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
9016
9017 static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
9018 {
9019 - int pid;
9020 - cpu_set_t cpuset;
9021 + int pid, ncpus;
9022 + cpu_set_t *cpuset;
9023 + size_t size;
9024
9025 pid = fork();
9026 if (pid == -1) {
9027 @@ -116,14 +118,23 @@ static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
9028 if (pid)
9029 return;
9030
9031 - CPU_ZERO(&cpuset);
9032 - CPU_SET(cpu, &cpuset);
9033 + ncpus = get_nprocs();
9034 + size = CPU_ALLOC_SIZE(ncpus);
9035 + cpuset = CPU_ALLOC(ncpus);
9036 + if (!cpuset) {
9037 + perror("malloc");
9038 + exit(1);
9039 + }
9040 + CPU_ZERO_S(size, cpuset);
9041 + CPU_SET_S(cpu, size, cpuset);
9042
9043 - if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
9044 + if (sched_setaffinity(0, size, cpuset)) {
9045 perror("sched_setaffinity");
9046 + CPU_FREE(cpuset);
9047 exit(1);
9048 }
9049
9050 + CPU_FREE(cpuset);
9051 fn(arg);
9052
9053 exit(0);
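This selftest change (like the utils.c change further down) moves from a fixed-size cpu_set_t to glibc's dynamically sized CPU set API, so affinity keeps working on machines with more CPUs than CPU_SETSIZE. A minimal standalone usage sketch of that API:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <sys/sysinfo.h>

    int main(void)
    {
        int ncpus = get_nprocs();
        size_t size = CPU_ALLOC_SIZE(ncpus);
        cpu_set_t *set = CPU_ALLOC(ncpus);

        if (!set) {
            perror("CPU_ALLOC");
            return 1;
        }
        CPU_ZERO_S(size, set);
        CPU_SET_S(0, size, set);                 /* pin to CPU 0 */
        if (sched_setaffinity(0, size, set)) {
            perror("sched_setaffinity");
            CPU_FREE(set);
            return 1;
        }
        CPU_FREE(set);
        return 0;
    }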
9054 diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
9055 index f52ed92b53e7..00dc32c0ed75 100755
9056 --- a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
9057 +++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
9058 @@ -5,12 +5,17 @@ pe_ok() {
9059 local dev="$1"
9060 local path="/sys/bus/pci/devices/$dev/eeh_pe_state"
9061
9062 - if ! [ -e "$path" ] ; then
9063 + # if a driver doesn't support the error handling callbacks then the
9064 + # device is recovered by removing and re-probing it. This causes the
9065 + # sysfs directory to disappear so read the PE state once and squash
9066 + # any potential error messages
9067 + local eeh_state="$(cat $path 2>/dev/null)"
9068 + if [ -z "$eeh_state" ]; then
9069 return 1;
9070 fi
9071
9072 - local fw_state="$(cut -d' ' -f1 < $path)"
9073 - local sw_state="$(cut -d' ' -f2 < $path)"
9074 + local fw_state="$(echo $eeh_state | cut -d' ' -f1)"
9075 + local sw_state="$(echo $eeh_state | cut -d' ' -f2)"
9076
9077 # If EEH_PE_ISOLATED or EEH_PE_RECOVERING are set then the PE is in an
9078 # error state or being recovered. Either way, not ok.
9079 diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
9080 index c02d24835db4..176102eca994 100644
9081 --- a/tools/testing/selftests/powerpc/utils.c
9082 +++ b/tools/testing/selftests/powerpc/utils.c
9083 @@ -16,6 +16,7 @@
9084 #include <string.h>
9085 #include <sys/ioctl.h>
9086 #include <sys/stat.h>
9087 +#include <sys/sysinfo.h>
9088 #include <sys/types.h>
9089 #include <sys/utsname.h>
9090 #include <unistd.h>
9091 @@ -88,28 +89,40 @@ void *get_auxv_entry(int type)
9092
9093 int pick_online_cpu(void)
9094 {
9095 - cpu_set_t mask;
9096 - int cpu;
9097 + int ncpus, cpu = -1;
9098 + cpu_set_t *mask;
9099 + size_t size;
9100 +
9101 + ncpus = get_nprocs_conf();
9102 + size = CPU_ALLOC_SIZE(ncpus);
9103 + mask = CPU_ALLOC(ncpus);
9104 + if (!mask) {
9105 + perror("malloc");
9106 + return -1;
9107 + }
9108
9109 - CPU_ZERO(&mask);
9110 + CPU_ZERO_S(size, mask);
9111
9112 - if (sched_getaffinity(0, sizeof(mask), &mask)) {
9113 + if (sched_getaffinity(0, size, mask)) {
9114 perror("sched_getaffinity");
9115 - return -1;
9116 + goto done;
9117 }
9118
9119 /* We prefer a primary thread, but skip 0 */
9120 - for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8)
9121 - if (CPU_ISSET(cpu, &mask))
9122 - return cpu;
9123 + for (cpu = 8; cpu < ncpus; cpu += 8)
9124 + if (CPU_ISSET_S(cpu, size, mask))
9125 + goto done;
9126
9127 /* Search for anything, but in reverse */
9128 - for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--)
9129 - if (CPU_ISSET(cpu, &mask))
9130 - return cpu;
9131 + for (cpu = ncpus - 1; cpu >= 0; cpu--)
9132 + if (CPU_ISSET_S(cpu, size, mask))
9133 + goto done;
9134
9135 printf("No cpus in affinity mask?!\n");
9136 - return -1;
9137 +
9138 +done:
9139 + CPU_FREE(mask);
9140 + return cpu;
9141 }
9142
9143 bool is_ppc64le(void)
9144 diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
9145 index 96bbda4f10fc..19c7351eeb74 100644
9146 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c
9147 +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
9148 @@ -177,7 +177,7 @@ struct seccomp_metadata {
9149 #define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
9150 #define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
9151 struct seccomp_notif_resp)
9152 -#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64)
9153 +#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64)
9154
9155 struct seccomp_notif {
9156 __u64 id;