Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0201-5.4.102-all-fixes.patch

Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 383461 bytes
-add missing
1 diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt
2 index d412b236a9d6f..7cf7143921a1f 100644
3 --- a/Documentation/filesystems/seq_file.txt
4 +++ b/Documentation/filesystems/seq_file.txt
5 @@ -192,6 +192,12 @@ between the calls to start() and stop(), so holding a lock during that time
6 is a reasonable thing to do. The seq_file code will also avoid taking any
7 other locks while the iterator is active.
8
9 +The iterator value returned by start() or next() is guaranteed to be
10 +passed to a subsequent next() or stop() call. This allows resources
11 +such as locks that were taken to be reliably released. There is *no*
12 +guarantee that the iterator will be passed to show(), though in practice
13 +it often will be.
14 +
15
16 Formatted output
17
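
Aside: the guarantee documented above is what makes it safe to take a
lock in start() and release it in stop(). Below is a minimal sketch of
such an iterator (the ex_* names are hypothetical, not from the patch);
note that the x86/mm/pat.c hunk later in this patch applies the same
rule, freeing the iterator value in next()/stop() rather than in show().

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

static DEFINE_MUTEX(ex_lock);
static int ex_items[] = { 1, 2, 3 };

static void *ex_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ex_lock);		/* held for the whole iteration */
	return *pos < ARRAY_SIZE(ex_items) ? &ex_items[*pos] : NULL;
}

static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return *pos < ARRAY_SIZE(ex_items) ? &ex_items[*pos] : NULL;
}

static void ex_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&ex_lock);		/* reached even on early exit */
}

static int ex_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations ex_ops = {
	.start	= ex_start,
	.next	= ex_next,
	.stop	= ex_stop,
	.show	= ex_show,
};
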
18 diff --git a/Makefile b/Makefile
19 index f56442751d2c3..80ff67e5f73a6 100644
20 --- a/Makefile
21 +++ b/Makefile
22 @@ -1,7 +1,7 @@
23 # SPDX-License-Identifier: GPL-2.0
24 VERSION = 5
25 PATCHLEVEL = 4
26 -SUBLEVEL = 101
27 +SUBLEVEL = 102
28 EXTRAVERSION =
29 NAME = Kleptomaniac Octopus
30
31 diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
32 index 93dffed0ac6e0..cbe126297f549 100644
33 --- a/arch/arm/boot/compressed/head.S
34 +++ b/arch/arm/boot/compressed/head.S
35 @@ -1142,9 +1142,9 @@ __armv4_mmu_cache_off:
36 __armv7_mmu_cache_off:
37 mrc p15, 0, r0, c1, c0
38 #ifdef CONFIG_MMU
39 - bic r0, r0, #0x000d
40 + bic r0, r0, #0x0005
41 #else
42 - bic r0, r0, #0x000c
43 + bic r0, r0, #0x0004
44 #endif
45 mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
46 mov r12, lr
47 diff --git a/arch/arm/boot/dts/armada-388-helios4.dts b/arch/arm/boot/dts/armada-388-helios4.dts
48 index 705adfa8c680f..a94758090fb0d 100644
49 --- a/arch/arm/boot/dts/armada-388-helios4.dts
50 +++ b/arch/arm/boot/dts/armada-388-helios4.dts
51 @@ -70,6 +70,9 @@
52
53 system-leds {
54 compatible = "gpio-leds";
55 + pinctrl-names = "default";
56 + pinctrl-0 = <&helios_system_led_pins>;
57 +
58 status-led {
59 label = "helios4:green:status";
60 gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
61 @@ -86,6 +89,9 @@
62
63 io-leds {
64 compatible = "gpio-leds";
65 + pinctrl-names = "default";
66 + pinctrl-0 = <&helios_io_led_pins>;
67 +
68 sata1-led {
69 label = "helios4:green:ata1";
70 gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
71 @@ -121,11 +127,15 @@
72 fan1: j10-pwm {
73 compatible = "pwm-fan";
74 pwms = <&gpio1 9 40000>; /* Target freq:25 kHz */
75 + pinctrl-names = "default";
76 + pinctrl-0 = <&helios_fan1_pins>;
77 };
78
79 fan2: j17-pwm {
80 compatible = "pwm-fan";
81 pwms = <&gpio1 23 40000>; /* Target freq:25 kHz */
82 + pinctrl-names = "default";
83 + pinctrl-0 = <&helios_fan2_pins>;
84 };
85
86 usb2_phy: usb2-phy {
87 @@ -291,16 +301,22 @@
88 "mpp39", "mpp40";
89 marvell,function = "sd0";
90 };
91 - helios_led_pins: helios-led-pins {
92 - marvell,pins = "mpp24", "mpp25",
93 - "mpp49", "mpp50",
94 + helios_system_led_pins: helios-system-led-pins {
95 + marvell,pins = "mpp24", "mpp25";
96 + marvell,function = "gpio";
97 + };
98 + helios_io_led_pins: helios-io-led-pins {
99 + marvell,pins = "mpp49", "mpp50",
100 "mpp52", "mpp53",
101 "mpp54";
102 marvell,function = "gpio";
103 };
104 - helios_fan_pins: helios-fan-pins {
105 - marvell,pins = "mpp41", "mpp43",
106 - "mpp48", "mpp55";
107 + helios_fan1_pins: helios_fan1_pins {
108 + marvell,pins = "mpp41", "mpp43";
109 + marvell,function = "gpio";
110 + };
111 + helios_fan2_pins: helios_fan2_pins {
112 + marvell,pins = "mpp48", "mpp55";
113 marvell,function = "gpio";
114 };
115 microsom_spi1_cs_pins: spi1-cs-pins {
116 diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
117 index dffb595d30e40..679d04d585a4a 100644
118 --- a/arch/arm/boot/dts/aspeed-g4.dtsi
119 +++ b/arch/arm/boot/dts/aspeed-g4.dtsi
120 @@ -371,6 +371,7 @@
121 compatible = "aspeed,ast2400-ibt-bmc";
122 reg = <0xc0 0x18>;
123 interrupts = <8>;
124 + clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
125 status = "disabled";
126 };
127 };
128 diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
129 index e8feb8b66a2f7..412c96b3c3ac0 100644
130 --- a/arch/arm/boot/dts/aspeed-g5.dtsi
131 +++ b/arch/arm/boot/dts/aspeed-g5.dtsi
132 @@ -464,6 +464,7 @@
133 compatible = "aspeed,ast2500-ibt-bmc";
134 reg = <0xc0 0x18>;
135 interrupts = <8>;
136 + clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
137 status = "disabled";
138 };
139 };
140 diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi
141 index dee35e3a5c4ba..69d134db6e94e 100644
142 --- a/arch/arm/boot/dts/exynos3250-artik5.dtsi
143 +++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi
144 @@ -75,7 +75,7 @@
145 s2mps14_pmic@66 {
146 compatible = "samsung,s2mps14-pmic";
147 interrupt-parent = <&gpx3>;
148 - interrupts = <5 IRQ_TYPE_NONE>;
149 + interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
150 pinctrl-names = "default";
151 pinctrl-0 = <&s2mps14_irq>;
152 reg = <0x66>;
153 diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
154 index 248bd372fe705..a23a8749c94e4 100644
155 --- a/arch/arm/boot/dts/exynos3250-monk.dts
156 +++ b/arch/arm/boot/dts/exynos3250-monk.dts
157 @@ -195,7 +195,7 @@
158 s2mps14_pmic@66 {
159 compatible = "samsung,s2mps14-pmic";
160 interrupt-parent = <&gpx0>;
161 - interrupts = <7 IRQ_TYPE_NONE>;
162 + interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
163 reg = <0x66>;
164 wakeup-source;
165
166 diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
167 index 86c26a4edfd72..468932f452895 100644
168 --- a/arch/arm/boot/dts/exynos3250-rinato.dts
169 +++ b/arch/arm/boot/dts/exynos3250-rinato.dts
170 @@ -260,7 +260,7 @@
171 s2mps14_pmic@66 {
172 compatible = "samsung,s2mps14-pmic";
173 interrupt-parent = <&gpx0>;
174 - interrupts = <7 IRQ_TYPE_NONE>;
175 + interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
176 reg = <0x66>;
177 wakeup-source;
178
179 diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
180 index 3d501926c2278..2355c53164840 100644
181 --- a/arch/arm/boot/dts/exynos5250-spring.dts
182 +++ b/arch/arm/boot/dts/exynos5250-spring.dts
183 @@ -108,7 +108,7 @@
184 compatible = "samsung,s5m8767-pmic";
185 reg = <0x66>;
186 interrupt-parent = <&gpx3>;
187 - interrupts = <2 IRQ_TYPE_NONE>;
188 + interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
189 pinctrl-names = "default";
190 pinctrl-0 = <&s5m8767_irq &s5m8767_dvs &s5m8767_ds>;
191 wakeup-source;
192 diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
193 index 592d7b45ecc87..53bf988855e0d 100644
194 --- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
195 +++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
196 @@ -349,7 +349,7 @@
197 reg = <0x66>;
198
199 interrupt-parent = <&gpx3>;
200 - interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
201 + interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
202 pinctrl-names = "default";
203 pinctrl-0 = <&s2mps11_irq>;
204
205 diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
206 index 829147e320e08..9e64a4ab94940 100644
207 --- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
208 +++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
209 @@ -141,7 +141,7 @@
210 samsung,s2mps11-acokb-ground;
211
212 interrupt-parent = <&gpx0>;
213 - interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
214 + interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
215 pinctrl-names = "default";
216 pinctrl-0 = <&s2mps11_irq>;
217
218 diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi
219 index 86b9caf461dfa..6e320efd9fc1d 100644
220 --- a/arch/arm/boot/dts/omap443x.dtsi
221 +++ b/arch/arm/boot/dts/omap443x.dtsi
222 @@ -33,10 +33,12 @@
223 };
224
225 ocp {
226 + /* 4430 has only gpio_86 tshut and no talert interrupt */
227 bandgap: bandgap@4a002260 {
228 reg = <0x4a002260 0x4
229 0x4a00232C 0x4>;
230 compatible = "ti,omap4430-bandgap";
231 + gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
232
233 #thermal-sensor-cells = <0>;
234 };
235 diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
236 index f7211b57b1e78..165c184801e19 100644
237 --- a/arch/arm/mach-ixp4xx/Kconfig
238 +++ b/arch/arm/mach-ixp4xx/Kconfig
239 @@ -13,7 +13,6 @@ config MACH_IXP4XX_OF
240 select I2C
241 select I2C_IOP3XX
242 select PCI
243 - select TIMER_OF
244 select USE_OF
245 help
246 Say 'Y' here to support Device Tree-based IXP4xx platforms.
247 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
248 index a0bc9bbb92f34..9c8ea59398658 100644
249 --- a/arch/arm64/Kconfig
250 +++ b/arch/arm64/Kconfig
251 @@ -489,7 +489,7 @@ config ARM64_ERRATUM_1024718
252 help
253 This option adds a workaround for ARM Cortex-A55 Erratum 1024718.
254
255 - Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
256 + Affected Cortex-A55 cores (all revisions) could cause incorrect
257 update of the hardware dirty bit when the DBM/AP bits are updated
258 without a break-before-make. The workaround is to disable the usage
259 of hardware DBM locally on the affected cores. CPUs not affected by
260 diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
261 index 78c82a665c84a..bb1de8217b86d 100644
262 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
263 +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
264 @@ -103,8 +103,6 @@
265 };
266
267 &ehci0 {
268 - phys = <&usbphy 0>;
269 - phy-names = "usb";
270 status = "okay";
271 };
272
273 @@ -142,6 +140,7 @@
274 pinctrl-0 = <&mmc2_pins>, <&mmc2_ds_pin>;
275 vmmc-supply = <&reg_dcdc1>;
276 vqmmc-supply = <&reg_eldo1>;
277 + max-frequency = <200000000>;
278 bus-width = <8>;
279 non-removable;
280 cap-mmc-hw-reset;
281 @@ -150,8 +149,6 @@
282 };
283
284 &ohci0 {
285 - phys = <&usbphy 0>;
286 - phy-names = "usb";
287 status = "okay";
288 };
289
290 diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
291 index 9d20e13f0c02b..d935e3028fcb6 100644
292 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
293 +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
294 @@ -55,7 +55,6 @@
295 pinctrl-names = "default";
296 pinctrl-0 = <&mmc0_pins>;
297 vmmc-supply = <&reg_dcdc1>;
298 - non-removable;
299 disable-wp;
300 bus-width = <4>;
301 cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
302 diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
303 index 367699c8c9028..cf9e3234afaf8 100644
304 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
305 +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
306 @@ -476,7 +476,7 @@
307 resets = <&ccu RST_BUS_MMC2>;
308 reset-names = "ahb";
309 interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
310 - max-frequency = <200000000>;
311 + max-frequency = <150000000>;
312 status = "disabled";
313 #address-cells = <1>;
314 #size-cells = <0>;
315 @@ -530,6 +530,8 @@
316 <&ccu CLK_USB_OHCI0>;
317 resets = <&ccu RST_BUS_OHCI0>,
318 <&ccu RST_BUS_EHCI0>;
319 + phys = <&usbphy 0>;
320 + phy-names = "usb";
321 status = "disabled";
322 };
323
324 @@ -540,6 +542,8 @@
325 clocks = <&ccu CLK_BUS_OHCI0>,
326 <&ccu CLK_USB_OHCI0>;
327 resets = <&ccu RST_BUS_OHCI0>;
328 + phys = <&usbphy 0>;
329 + phy-names = "usb";
330 status = "disabled";
331 };
332
333 diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
334 index ab081efd59718..1583cd5915214 100644
335 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
336 +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
337 @@ -332,6 +332,7 @@
338 interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
339 pinctrl-names = "default";
340 pinctrl-0 = <&mmc0_pins>;
341 + max-frequency = <150000000>;
342 status = "disabled";
343 #address-cells = <1>;
344 #size-cells = <0>;
345 @@ -348,6 +349,7 @@
346 interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
347 pinctrl-names = "default";
348 pinctrl-0 = <&mmc1_pins>;
349 + max-frequency = <150000000>;
350 status = "disabled";
351 #address-cells = <1>;
352 #size-cells = <0>;
353 @@ -364,6 +366,7 @@
354 interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
355 pinctrl-names = "default";
356 pinctrl-0 = <&mmc2_pins>;
357 + max-frequency = <150000000>;
358 status = "disabled";
359 #address-cells = <1>;
360 #size-cells = <0>;
361 @@ -533,6 +536,8 @@
362 <&ccu CLK_USB_OHCI0>;
363 resets = <&ccu RST_BUS_OHCI0>,
364 <&ccu RST_BUS_EHCI0>;
365 + phys = <&usb2phy 0>;
366 + phy-names = "usb";
367 status = "disabled";
368 };
369
370 @@ -543,6 +548,8 @@
371 clocks = <&ccu CLK_BUS_OHCI0>,
372 <&ccu CLK_USB_OHCI0>;
373 resets = <&ccu RST_BUS_OHCI0>;
374 + phys = <&usb2phy 0>;
375 + phy-names = "usb";
376 status = "disabled";
377 };
378
379 diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
380 index 6f90b0e62cba6..148bdca8d9c96 100644
381 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
382 +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
383 @@ -389,7 +389,7 @@
384 s2mps13-pmic@66 {
385 compatible = "samsung,s2mps13-pmic";
386 interrupt-parent = <&gpa0>;
387 - interrupts = <7 IRQ_TYPE_NONE>;
388 + interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
389 reg = <0x66>;
390 samsung,s2mps11-wrstbi-ground;
391
392 diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
393 index 61ee7b6a31594..09aead2be000c 100644
394 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
395 +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
396 @@ -90,7 +90,7 @@
397 s2mps15_pmic@66 {
398 compatible = "samsung,s2mps15-pmic";
399 reg = <0x66>;
400 - interrupts = <2 IRQ_TYPE_NONE>;
401 + interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
402 interrupt-parent = <&gpa0>;
403 pinctrl-names = "default";
404 pinctrl-0 = <&pmic_irq>;
405 diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
406 index aa52927e2e9c2..fad70c2df7bc0 100644
407 --- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
408 +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
409 @@ -202,7 +202,7 @@
410 };
411
412 partition@20000 {
413 - label = "u-boot";
414 + label = "a53-firmware";
415 reg = <0x20000 0x160000>;
416 };
417
418 diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
419 index 7cd8c3f52b471..e7e002d8b1089 100644
420 --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
421 +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
422 @@ -698,6 +698,8 @@
423 clocks = <&pericfg CLK_PERI_MSDC30_1_PD>,
424 <&topckgen CLK_TOP_AXI_SEL>;
425 clock-names = "source", "hclk";
426 + resets = <&pericfg MT7622_PERI_MSDC1_SW_RST>;
427 + reset-names = "hrst";
428 status = "disabled";
429 };
430
431 diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
432 index d95273af9f1e4..449843f2184d8 100644
433 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
434 +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
435 @@ -53,7 +53,7 @@
436 no-map;
437 };
438
439 - reserved@8668000 {
440 + reserved@86680000 {
441 reg = <0x0 0x86680000 0x0 0x80000>;
442 no-map;
443 };
444 @@ -66,7 +66,7 @@
445 qcom,client-id = <1>;
446 };
447
448 - rfsa@867e00000 {
449 + rfsa@867e0000 {
450 reg = <0x0 0x867e0000 0x0 0x20000>;
451 no-map;
452 };
453 diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
454 index aa57dc639f77f..aa13344a3a5e8 100644
455 --- a/arch/arm64/crypto/aes-glue.c
456 +++ b/arch/arm64/crypto/aes-glue.c
457 @@ -55,7 +55,7 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
458 #define aes_mac_update neon_aes_mac_update
459 MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
460 #endif
461 -#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
462 +#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
463 MODULE_ALIAS_CRYPTO("ecb(aes)");
464 MODULE_ALIAS_CRYPTO("cbc(aes)");
465 MODULE_ALIAS_CRYPTO("ctr(aes)");
466 @@ -668,7 +668,7 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
467 }
468
469 static struct skcipher_alg aes_algs[] = { {
470 -#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
471 +#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
472 .base = {
473 .cra_name = "__ecb(aes)",
474 .cra_driver_name = "__ecb-aes-" MODE,
475 diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
476 index bdc1b6d7aff79..05cdad31b0225 100644
477 --- a/arch/arm64/crypto/sha1-ce-glue.c
478 +++ b/arch/arm64/crypto/sha1-ce-glue.c
479 @@ -19,6 +19,7 @@
480 MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
481 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
482 MODULE_LICENSE("GPL v2");
483 +MODULE_ALIAS_CRYPTO("sha1");
484
485 struct sha1_ce_state {
486 struct sha1_state sst;
487 diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
488 index 604a01a4ede6f..1de80293ac312 100644
489 --- a/arch/arm64/crypto/sha2-ce-glue.c
490 +++ b/arch/arm64/crypto/sha2-ce-glue.c
491 @@ -19,6 +19,8 @@
492 MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
493 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
494 MODULE_LICENSE("GPL v2");
495 +MODULE_ALIAS_CRYPTO("sha224");
496 +MODULE_ALIAS_CRYPTO("sha256");
497
498 struct sha256_ce_state {
499 struct sha256_state sst;
500 diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
501 index 9a4bbfc45f407..ddf7aca9ff459 100644
502 --- a/arch/arm64/crypto/sha3-ce-glue.c
503 +++ b/arch/arm64/crypto/sha3-ce-glue.c
504 @@ -23,6 +23,10 @@
505 MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions");
506 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
507 MODULE_LICENSE("GPL v2");
508 +MODULE_ALIAS_CRYPTO("sha3-224");
509 +MODULE_ALIAS_CRYPTO("sha3-256");
510 +MODULE_ALIAS_CRYPTO("sha3-384");
511 +MODULE_ALIAS_CRYPTO("sha3-512");
512
513 asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks,
514 int md_len);
515 diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
516 index 2369540040aa9..6dfcb4f3e7768 100644
517 --- a/arch/arm64/crypto/sha512-ce-glue.c
518 +++ b/arch/arm64/crypto/sha512-ce-glue.c
519 @@ -23,6 +23,8 @@
520 MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions");
521 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
522 MODULE_LICENSE("GPL v2");
523 +MODULE_ALIAS_CRYPTO("sha384");
524 +MODULE_ALIAS_CRYPTO("sha512");
525
526 asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
527 int blocks);
528 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
529 index f2ec845404149..79caab15ccbf7 100644
530 --- a/arch/arm64/kernel/cpufeature.c
531 +++ b/arch/arm64/kernel/cpufeature.c
532 @@ -1092,7 +1092,7 @@ static bool cpu_has_broken_dbm(void)
533 /* List of CPUs which have broken DBM support. */
534 static const struct midr_range cpus[] = {
535 #ifdef CONFIG_ARM64_ERRATUM_1024718
536 - MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0
537 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
538 #endif
539 {},
540 };
541 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
542 index bdb5ec3419006..438de2301cfe3 100644
543 --- a/arch/arm64/kernel/head.S
544 +++ b/arch/arm64/kernel/head.S
545 @@ -970,6 +970,7 @@ __primary_switch:
546
547 tlbi vmalle1 // Remove any stale TLB entries
548 dsb nsh
549 + isb
550
551 msr sctlr_el1, x19 // re-enable the MMU
552 isb
553 diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
554 index 7b08bf9499b6b..d2a62dd17d79d 100644
555 --- a/arch/arm64/kernel/machine_kexec_file.c
556 +++ b/arch/arm64/kernel/machine_kexec_file.c
557 @@ -150,8 +150,10 @@ static int create_dtb(struct kimage *image,
558
559 /* duplicate a device tree blob */
560 ret = fdt_open_into(initial_boot_params, buf, buf_size);
561 - if (ret)
562 + if (ret) {
563 + vfree(buf);
564 return -EINVAL;
565 + }
566
567 ret = setup_dtb(image, initrd_load_addr, initrd_len,
568 cmdline, buf);
569 diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
570 index a412d8edbcd24..2c247634552b1 100644
571 --- a/arch/arm64/kernel/probes/uprobes.c
572 +++ b/arch/arm64/kernel/probes/uprobes.c
573 @@ -38,7 +38,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
574
575 /* TODO: Currently we do not support AARCH32 instruction probing */
576 if (mm->context.flags & MMCF_AARCH32)
577 - return -ENOTSUPP;
578 + return -EOPNOTSUPP;
579 else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
580 return -EINVAL;
581
582 diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
583 index c23527ba65d09..64bffc1f75e0c 100644
584 --- a/arch/mips/include/asm/asm.h
585 +++ b/arch/mips/include/asm/asm.h
586 @@ -20,10 +20,27 @@
587 #include <asm/sgidefs.h>
588 #include <asm/asm-eva.h>
589
590 +#ifndef __VDSO__
591 +/*
592 + * Emit CFI data in .debug_frame sections, not .eh_frame sections.
593 + * We don't do DWARF unwinding at runtime, so only the offline DWARF
594 + * information is useful to anyone. Note we should change this if we
595 + * ever decide to enable DWARF unwinding at runtime.
596 + */
597 +#define CFI_SECTIONS .cfi_sections .debug_frame
598 +#else
599 + /*
600 + * For the vDSO, emit both runtime unwind information and debug
601 + * symbols for the .dbg file.
602 + */
603 +#define CFI_SECTIONS
604 +#endif
605 +
606 /*
607 * LEAF - declare leaf routine
608 */
609 #define LEAF(symbol) \
610 + CFI_SECTIONS; \
611 .globl symbol; \
612 .align 2; \
613 .type symbol, @function; \
614 @@ -36,6 +53,7 @@ symbol: .frame sp, 0, ra; \
615 * NESTED - declare nested routine entry point
616 */
617 #define NESTED(symbol, framesize, rpc) \
618 + CFI_SECTIONS; \
619 .globl symbol; \
620 .align 2; \
621 .type symbol, @function; \
622 diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
623 index eb9d7af938365..faf98f209b3f4 100644
624 --- a/arch/mips/kernel/vmlinux.lds.S
625 +++ b/arch/mips/kernel/vmlinux.lds.S
626 @@ -93,6 +93,7 @@ SECTIONS
627
628 INIT_TASK_DATA(THREAD_SIZE)
629 NOSAVE_DATA
630 + PAGE_ALIGNED_DATA(PAGE_SIZE)
631 CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
632 READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
633 DATA_DATA
634 @@ -225,6 +226,5 @@ SECTIONS
635 *(.options)
636 *(.pdr)
637 *(.reginfo)
638 - *(.eh_frame)
639 }
640 }
641 diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
642 index 115b417dfb8e3..9fcc118312cb9 100644
643 --- a/arch/mips/lantiq/irq.c
644 +++ b/arch/mips/lantiq/irq.c
645 @@ -302,7 +302,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
646 generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
647
648 /* if this is a EBU irq, we need to ack it or get a deadlock */
649 - if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
650 + if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
651 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
652 LTQ_EBU_PCC_ISTAT);
653 }
654 diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
655 index 504fd61592405..3375bbe63284e 100644
656 --- a/arch/mips/mm/c-r4k.c
657 +++ b/arch/mips/mm/c-r4k.c
658 @@ -1560,7 +1560,7 @@ static int probe_scache(void)
659 return 1;
660 }
661
662 -static void __init loongson2_sc_init(void)
663 +static void loongson2_sc_init(void)
664 {
665 struct cpuinfo_mips *c = &current_cpu_data;
666
667 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
668 index cb285e474c880..c4cbb65e742f4 100644
669 --- a/arch/powerpc/Kconfig
670 +++ b/arch/powerpc/Kconfig
671 @@ -723,7 +723,7 @@ config PPC_64K_PAGES
672
673 config PPC_256K_PAGES
674 bool "256k page size"
675 - depends on 44x && !STDBINUTILS
676 + depends on 44x && !STDBINUTILS && !PPC_47x
677 help
678 Make the page size 256k.
679
680 diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
681 index f29bb176381f1..c72894ff9d614 100644
682 --- a/arch/powerpc/kernel/entry_32.S
683 +++ b/arch/powerpc/kernel/entry_32.S
684 @@ -336,6 +336,9 @@ trace_syscall_entry_irq_off:
685
686 .globl transfer_to_syscall
687 transfer_to_syscall:
688 +#ifdef CONFIG_PPC_BOOK3S_32
689 + kuep_lock r11, r12
690 +#endif
691 #ifdef CONFIG_TRACE_IRQFLAGS
692 andi. r12,r9,MSR_EE
693 beq- trace_syscall_entry_irq_off
694 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
695 index f6428b90a6c77..6f3e417f55a35 100644
696 --- a/arch/powerpc/kernel/head_8xx.S
697 +++ b/arch/powerpc/kernel/head_8xx.S
698 @@ -191,7 +191,7 @@ SystemCall:
699 /* On the MPC8xx, this is a software emulation interrupt. It occurs
700 * for all unimplemented and illegal instructions.
701 */
702 - EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
703 + EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD)
704
705 /* Called from DataStoreTLBMiss when perf TLB misses events are activated */
706 #ifdef CONFIG_PERF_EVENTS
707 diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
708 index 689664cd4e79b..1b65fb7c0bdaa 100644
709 --- a/arch/powerpc/kernel/prom_init.c
710 +++ b/arch/powerpc/kernel/prom_init.c
711 @@ -1305,14 +1305,10 @@ static void __init prom_check_platform_support(void)
712 if (prop_len > sizeof(vec))
713 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
714 prop_len);
715 - prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
716 - &vec, sizeof(vec));
717 - for (i = 0; i < sizeof(vec); i += 2) {
718 - prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
719 - , vec[i]
720 - , vec[i + 1]);
721 - prom_parse_platform_support(vec[i], vec[i + 1],
722 - &supported);
723 + prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
724 + for (i = 0; i < prop_len; i += 2) {
725 + prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
726 + prom_parse_platform_support(vec[i], vec[i + 1], &supported);
727 }
728 }
729
730 diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
731 index 3a77bb6434521..e03c064716789 100644
732 --- a/arch/powerpc/kvm/powerpc.c
733 +++ b/arch/powerpc/kvm/powerpc.c
734 @@ -1513,7 +1513,7 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
735 return emulated;
736 }
737
738 -int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
739 +static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
740 {
741 union kvmppc_one_reg reg;
742 int vmx_offset = 0;
743 @@ -1531,7 +1531,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
744 return result;
745 }
746
747 -int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
748 +static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
749 {
750 union kvmppc_one_reg reg;
751 int vmx_offset = 0;
752 @@ -1549,7 +1549,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
753 return result;
754 }
755
756 -int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
757 +static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
758 {
759 union kvmppc_one_reg reg;
760 int vmx_offset = 0;
761 @@ -1567,7 +1567,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
762 return result;
763 }
764
765 -int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
766 +static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
767 {
768 union kvmppc_one_reg reg;
769 int vmx_offset = 0;
770 diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
771 index 16e86ba8aa209..f6b7749d6ada7 100644
772 --- a/arch/powerpc/platforms/pseries/dlpar.c
773 +++ b/arch/powerpc/platforms/pseries/dlpar.c
774 @@ -127,7 +127,6 @@ void dlpar_free_cc_nodes(struct device_node *dn)
775 #define NEXT_PROPERTY 3
776 #define PREV_PARENT 4
777 #define MORE_MEMORY 5
778 -#define CALL_AGAIN -2
779 #define ERR_CFG_USE -9003
780
781 struct device_node *dlpar_configure_connector(__be32 drc_index,
782 @@ -168,6 +167,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
783
784 spin_unlock(&rtas_data_buf_lock);
785
786 + if (rtas_busy_delay(rc))
787 + continue;
788 +
789 switch (rc) {
790 case COMPLETE:
791 break;
792 @@ -216,9 +218,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
793 last_dn = last_dn->parent;
794 break;
795
796 - case CALL_AGAIN:
797 - break;
798 -
799 case MORE_MEMORY:
800 case ERR_CFG_USE:
801 default:
802 diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
803 index c475ca49cfc6b..3e72f955bff7f 100644
804 --- a/arch/s390/kernel/vtime.c
805 +++ b/arch/s390/kernel/vtime.c
806 @@ -136,7 +136,8 @@ static int do_account_vtime(struct task_struct *tsk)
807 " stck %1" /* Store current tod clock value */
808 #endif
809 : "=Q" (S390_lowcore.last_update_timer),
810 - "=Q" (S390_lowcore.last_update_clock));
811 + "=Q" (S390_lowcore.last_update_clock)
812 + : : "cc");
813 clock = S390_lowcore.last_update_clock - clock;
814 timer -= S390_lowcore.last_update_timer;
815
816 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
817 index 18e9fb6fcf1bf..349e27771ceaf 100644
818 --- a/arch/sparc/Kconfig
819 +++ b/arch/sparc/Kconfig
820 @@ -524,7 +524,7 @@ config COMPAT
821 bool
822 depends on SPARC64
823 default y
824 - select COMPAT_BINFMT_ELF
825 + select COMPAT_BINFMT_ELF if BINFMT_ELF
826 select HAVE_UID16
827 select ARCH_WANT_OLD_COMPAT_IPC
828 select COMPAT_OLD_SIGACTION
829 diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
830 index b89d42b29e344..f427f34b8b79b 100644
831 --- a/arch/sparc/lib/memset.S
832 +++ b/arch/sparc/lib/memset.S
833 @@ -142,6 +142,7 @@ __bzero:
834 ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
835 ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
836 13:
837 + EXT(12b, 13b, 21f)
838 be 8f
839 andcc %o1, 4, %g0
840
841 diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
842 index b7eaf655635cd..11499136720d8 100644
843 --- a/arch/um/kernel/tlb.c
844 +++ b/arch/um/kernel/tlb.c
845 @@ -126,6 +126,9 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
846 struct host_vm_op *last;
847 int fd = -1, ret = 0;
848
849 + if (virt + len > STUB_START && virt < STUB_END)
850 + return -EINVAL;
851 +
852 if (hvc->userspace)
853 fd = phys_mapping(phys, &offset);
854 else
855 @@ -163,7 +166,7 @@ static int add_munmap(unsigned long addr, unsigned long len,
856 struct host_vm_op *last;
857 int ret = 0;
858
859 - if ((addr >= STUB_START) && (addr < STUB_END))
860 + if (addr + len > STUB_START && addr < STUB_END)
861 return -EINVAL;
862
863 if (hvc->index != 0) {
864 @@ -193,6 +196,9 @@ static int add_mprotect(unsigned long addr, unsigned long len,
865 struct host_vm_op *last;
866 int ret = 0;
867
868 + if (addr + len > STUB_START && addr < STUB_END)
869 + return -EINVAL;
870 +
871 if (hvc->index != 0) {
872 last = &hvc->ops[hvc->index - 1];
873 if ((last->type == MPROTECT) &&
874 @@ -433,6 +439,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
875 struct mm_id *mm_id;
876
877 address &= PAGE_MASK;
878 +
879 + if (address >= STUB_START && address < STUB_END)
880 + goto kill;
881 +
882 pgd = pgd_offset(mm, address);
883 if (!pgd_present(*pgd))
884 goto kill;
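
Aside: the three checks added above are the usual half-open interval
overlap test — [addr, addr + len) intersects [STUB_START, STUB_END)
exactly when addr + len > STUB_START and addr < STUB_END. As a sketch
(the helper name is hypothetical):

#include <linux/types.h>

/* True iff [addr, addr + len) overlaps the half-open stub range. */
static inline bool overlaps_stub(unsigned long addr, unsigned long len,
				 unsigned long stub_start,
				 unsigned long stub_end)
{
	return addr + len > stub_start && addr < stub_end;
}
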
885 diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
886 index 3e707e81afdb4..88ad272aa2b46 100644
887 --- a/arch/x86/crypto/aesni-intel_glue.c
888 +++ b/arch/x86/crypto/aesni-intel_glue.c
889 @@ -707,7 +707,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
890 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
891 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
892 const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
893 - struct gcm_context_data data AESNI_ALIGN_ATTR;
894 + u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
895 + struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
896 struct scatter_walk dst_sg_walk = {};
897 unsigned long left = req->cryptlen;
898 unsigned long len, srclen, dstlen;
899 @@ -760,8 +761,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
900 }
901
902 kernel_fpu_begin();
903 - gcm_tfm->init(aes_ctx, &data, iv,
904 - hash_subkey, assoc, assoclen);
905 + gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
906 if (req->src != req->dst) {
907 while (left) {
908 src = scatterwalk_map(&src_sg_walk);
909 @@ -771,10 +771,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
910 len = min(srclen, dstlen);
911 if (len) {
912 if (enc)
913 - gcm_tfm->enc_update(aes_ctx, &data,
914 + gcm_tfm->enc_update(aes_ctx, data,
915 dst, src, len);
916 else
917 - gcm_tfm->dec_update(aes_ctx, &data,
918 + gcm_tfm->dec_update(aes_ctx, data,
919 dst, src, len);
920 }
921 left -= len;
922 @@ -792,10 +792,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
923 len = scatterwalk_clamp(&src_sg_walk, left);
924 if (len) {
925 if (enc)
926 - gcm_tfm->enc_update(aes_ctx, &data,
927 + gcm_tfm->enc_update(aes_ctx, data,
928 src, src, len);
929 else
930 - gcm_tfm->dec_update(aes_ctx, &data,
931 + gcm_tfm->dec_update(aes_ctx, data,
932 src, src, len);
933 }
934 left -= len;
935 @@ -804,7 +804,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
936 scatterwalk_done(&src_sg_walk, 1, left);
937 }
938 }
939 - gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
940 + gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
941 kernel_fpu_end();
942
943 if (!assocmem)
944 @@ -853,7 +853,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
945 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
946 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
947 void *aes_ctx = &(ctx->aes_key_expanded);
948 - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
949 + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
950 + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
951 unsigned int i;
952 __be32 counter = cpu_to_be32(1);
953
954 @@ -880,7 +881,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
955 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
956 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
957 void *aes_ctx = &(ctx->aes_key_expanded);
958 - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
959 + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
960 + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
961 unsigned int i;
962
963 if (unlikely(req->assoclen != 16 && req->assoclen != 20))
964 @@ -1010,7 +1012,8 @@ static int generic_gcmaes_encrypt(struct aead_request *req)
965 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
966 struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
967 void *aes_ctx = &(ctx->aes_key_expanded);
968 - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
969 + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
970 + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
971 __be32 counter = cpu_to_be32(1);
972
973 memcpy(iv, req->iv, 12);
974 @@ -1026,7 +1029,8 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
975 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
976 struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
977 void *aes_ctx = &(ctx->aes_key_expanded);
978 - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
979 + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
980 + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
981
982 memcpy(iv, req->iv, 12);
983 *((__be32 *)(iv+12)) = counter;
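
Aside: the recurring ivbuf/PTR_ALIGN pattern introduced above works
around the fact that a 16-byte alignment attribute on a stack variable
cannot always be honoured; over-allocating an 8-byte-aligned buffer and
rounding the pointer up enforces the alignment by hand. A minimal
sketch (DEMO_ALIGN and demo_aligned_iv are hypothetical):

#include <linux/kernel.h>	/* PTR_ALIGN */
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_ALIGN 16

static void demo_aligned_iv(void)
{
	/* 8 bytes of slack let PTR_ALIGN round up to a 16-byte boundary
	 * wherever the 8-byte-aligned buffer lands on the stack */
	u8 ivbuf[16 + (DEMO_ALIGN - 8)] __aligned(8);
	u8 *iv = PTR_ALIGN(&ivbuf[0], DEMO_ALIGN);

	memset(iv, 0, 16);	/* iv: 16 usable, DEMO_ALIGN-aligned bytes */
}
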
984 diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
985 index 9aad0e0876fba..fda3e7747c223 100644
986 --- a/arch/x86/include/asm/virtext.h
987 +++ b/arch/x86/include/asm/virtext.h
988 @@ -30,15 +30,22 @@ static inline int cpu_has_vmx(void)
989 }
990
991
992 -/** Disable VMX on the current CPU
993 +/**
994 + * cpu_vmxoff() - Disable VMX on the current CPU
995 *
996 - * vmxoff causes a undefined-opcode exception if vmxon was not run
997 - * on the CPU previously. Only call this function if you know VMX
998 - * is enabled.
999 + * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
1000 + *
1001 + * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
1002 + * atomically track post-VMXON state, e.g. this may be called in NMI context.
1003 + * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
1004 + * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
1005 + * magically in RM, VM86, compat mode, or at CPL>0.
1006 */
1007 static inline void cpu_vmxoff(void)
1008 {
1009 - asm volatile ("vmxoff");
1010 + asm_volatile_goto("1: vmxoff\n\t"
1011 + _ASM_EXTABLE(1b, %l[fault]) :::: fault);
1012 +fault:
1013 cr4_clear_bits(X86_CR4_VMXE);
1014 }
1015
1016 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
1017 index 762f5c1465a6f..835b6fc0c1bbf 100644
1018 --- a/arch/x86/kernel/reboot.c
1019 +++ b/arch/x86/kernel/reboot.c
1020 @@ -538,29 +538,20 @@ static void emergency_vmx_disable_all(void)
1021 local_irq_disable();
1022
1023 /*
1024 - * We need to disable VMX on all CPUs before rebooting, otherwise
1025 - * we risk hanging up the machine, because the CPU ignore INIT
1026 - * signals when VMX is enabled.
1027 + * Disable VMX on all CPUs before rebooting, otherwise we risk hanging
1028 + * the machine, because the CPU blocks INIT when it's in VMX root.
1029 *
1030 - * We can't take any locks and we may be on an inconsistent
1031 - * state, so we use NMIs as IPIs to tell the other CPUs to disable
1032 - * VMX and halt.
1033 + * We can't take any locks and we may be on an inconsistent state, so
1034 + * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
1035 *
1036 - * For safety, we will avoid running the nmi_shootdown_cpus()
1037 - * stuff unnecessarily, but we don't have a way to check
1038 - * if other CPUs have VMX enabled. So we will call it only if the
1039 - * CPU we are running on has VMX enabled.
1040 - *
1041 - * We will miss cases where VMX is not enabled on all CPUs. This
1042 - * shouldn't do much harm because KVM always enable VMX on all
1043 - * CPUs anyway. But we can miss it on the small window where KVM
1044 - * is still enabling VMX.
1045 + * Do the NMI shootdown even if VMX if off on _this_ CPU, as that
1046 + * doesn't prevent a different CPU from being in VMX root operation.
1047 */
1048 - if (cpu_has_vmx() && cpu_vmx_enabled()) {
1049 - /* Disable VMX on this CPU. */
1050 - cpu_vmxoff();
1051 + if (cpu_has_vmx()) {
1052 + /* Safely force _this_ CPU out of VMX root operation. */
1053 + __cpu_emergency_vmxoff();
1054
1055 - /* Halt and disable VMX on the other CPUs */
1056 + /* Halt and exit VMX root operation on the other CPUs. */
1057 nmi_shootdown_cpus(vmxoff_nmi);
1058
1059 }
1060 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1061 index 1da558f28aa57..b9d14fdbd2d81 100644
1062 --- a/arch/x86/kvm/svm.c
1063 +++ b/arch/x86/kvm/svm.c
1064 @@ -4327,7 +4327,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1065 !guest_has_spec_ctrl_msr(vcpu))
1066 return 1;
1067
1068 - if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
1069 + if (kvm_spec_ctrl_test_value(data))
1070 return 1;
1071
1072 svm->spec_ctrl = data;
1073 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
1074 index e7fd2f00edc11..e177848a36313 100644
1075 --- a/arch/x86/kvm/vmx/vmx.c
1076 +++ b/arch/x86/kvm/vmx/vmx.c
1077 @@ -1974,7 +1974,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1078 !guest_has_spec_ctrl_msr(vcpu))
1079 return 1;
1080
1081 - if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
1082 + if (kvm_spec_ctrl_test_value(data))
1083 return 1;
1084
1085 vmx->spec_ctrl = data;
1086 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1087 index 73095d7213993..153659e8f4039 100644
1088 --- a/arch/x86/kvm/x86.c
1089 +++ b/arch/x86/kvm/x86.c
1090 @@ -10374,28 +10374,32 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
1091 }
1092 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
1093
1094 -u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
1095 +
1096 +int kvm_spec_ctrl_test_value(u64 value)
1097 {
1098 - uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
1099 + /*
1100 + * test that setting IA32_SPEC_CTRL to given value
1101 + * is allowed by the host processor
1102 + */
1103 +
1104 + u64 saved_value;
1105 + unsigned long flags;
1106 + int ret = 0;
1107
1108 - /* The STIBP bit doesn't fault even if it's not advertised */
1109 - if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
1110 - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
1111 - bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
1112 - if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
1113 - !boot_cpu_has(X86_FEATURE_AMD_IBRS))
1114 - bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
1115 + local_irq_save(flags);
1116
1117 - if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
1118 - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
1119 - bits &= ~SPEC_CTRL_SSBD;
1120 - if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
1121 - !boot_cpu_has(X86_FEATURE_AMD_SSBD))
1122 - bits &= ~SPEC_CTRL_SSBD;
1123 + if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
1124 + ret = 1;
1125 + else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
1126 + ret = 1;
1127 + else
1128 + wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);
1129
1130 - return bits;
1131 + local_irq_restore(flags);
1132 +
1133 + return ret;
1134 }
1135 -EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
1136 +EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
1137
1138 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
1139 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
1140 diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
1141 index 301286d924320..c520d373790a2 100644
1142 --- a/arch/x86/kvm/x86.h
1143 +++ b/arch/x86/kvm/x86.h
1144 @@ -368,6 +368,6 @@ static inline bool kvm_pat_valid(u64 data)
1145
1146 void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
1147 void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
1148 -u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
1149 +int kvm_spec_ctrl_test_value(u64 value);
1150
1151 #endif
1152 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
1153 index d9fbd4f699202..35b2e35c22035 100644
1154 --- a/arch/x86/mm/pat.c
1155 +++ b/arch/x86/mm/pat.c
1156 @@ -1132,12 +1132,14 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
1157
1158 static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1159 {
1160 + kfree(v);
1161 ++*pos;
1162 return memtype_get_idx(*pos);
1163 }
1164
1165 static void memtype_seq_stop(struct seq_file *seq, void *v)
1166 {
1167 + kfree(v);
1168 }
1169
1170 static int memtype_seq_show(struct seq_file *seq, void *v)
1171 @@ -1146,7 +1148,6 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
1172
1173 seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
1174 print_entry->start, print_entry->end);
1175 - kfree(print_entry);
1176
1177 return 0;
1178 }
1179 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
1180 index ba32adaeefdd0..c19006d59b791 100644
1181 --- a/block/bfq-iosched.c
1182 +++ b/block/bfq-iosched.c
1183 @@ -2937,6 +2937,7 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
1184 }
1185
1186 bfqd->in_service_queue = bfqq;
1187 + bfqd->in_serv_last_pos = 0;
1188 }
1189
1190 /*
1191 diff --git a/block/blk-settings.c b/block/blk-settings.c
1192 index be1dca0103a45..13be635300a85 100644
1193 --- a/block/blk-settings.c
1194 +++ b/block/blk-settings.c
1195 @@ -473,6 +473,14 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
1196 }
1197 EXPORT_SYMBOL(blk_queue_stack_limits);
1198
1199 +static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
1200 +{
1201 + sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
1202 + if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
1203 + sectors = PAGE_SIZE >> SECTOR_SHIFT;
1204 + return sectors;
1205 +}
1206 +
1207 /**
1208 * blk_stack_limits - adjust queue_limits for stacked devices
1209 * @t: the stacking driver limits (top device)
1210 @@ -586,6 +594,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1211 ret = -1;
1212 }
1213
1214 + t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
1215 + t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
1216 + t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
1217 +
1218 /* Discard alignment and granularity */
1219 if (b->discard_granularity) {
1220 alignment = queue_limit_discard_alignment(b, start);
1221 diff --git a/block/bsg.c b/block/bsg.c
1222 index 833c44b3d458e..0d012efef5274 100644
1223 --- a/block/bsg.c
1224 +++ b/block/bsg.c
1225 @@ -157,8 +157,10 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
1226 return PTR_ERR(rq);
1227
1228 ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
1229 - if (ret)
1230 + if (ret) {
1231 + blk_put_request(rq);
1232 return ret;
1233 + }
1234
1235 rq->timeout = msecs_to_jiffies(hdr.timeout);
1236 if (!rq->timeout)
1237 diff --git a/certs/blacklist.c b/certs/blacklist.c
1238 index ec00bf337eb67..025a41de28fda 100644
1239 --- a/certs/blacklist.c
1240 +++ b/certs/blacklist.c
1241 @@ -153,7 +153,7 @@ static int __init blacklist_init(void)
1242 KEY_USR_VIEW | KEY_USR_READ |
1243 KEY_USR_SEARCH,
1244 KEY_ALLOC_NOT_IN_QUOTA |
1245 - KEY_FLAG_KEEP,
1246 + KEY_ALLOC_SET_KEEP,
1247 NULL, NULL);
1248 if (IS_ERR(blacklist_keyring))
1249 panic("Can't allocate system blacklist keyring\n");
1250 diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
1251 index 66fcb2ea81544..fca63b559f655 100644
1252 --- a/crypto/ecdh_helper.c
1253 +++ b/crypto/ecdh_helper.c
1254 @@ -67,6 +67,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
1255 if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
1256 return -EINVAL;
1257
1258 + if (unlikely(len < secret.len))
1259 + return -EINVAL;
1260 +
1261 ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
1262 ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
1263 if (secret.len != crypto_ecdh_key_len(params))
1264 diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
1265 index 01738d8e888e3..06c756651425e 100644
1266 --- a/drivers/acpi/acpi_configfs.c
1267 +++ b/drivers/acpi/acpi_configfs.c
1268 @@ -267,7 +267,12 @@ static int __init acpi_configfs_init(void)
1269
1270 acpi_table_group = configfs_register_default_group(root, "table",
1271 &acpi_tables_type);
1272 - return PTR_ERR_OR_ZERO(acpi_table_group);
1273 + if (IS_ERR(acpi_table_group)) {
1274 + configfs_unregister_subsystem(&acpi_configfs);
1275 + return PTR_ERR(acpi_table_group);
1276 + }
1277 +
1278 + return 0;
1279 }
1280 module_init(acpi_configfs_init);
1281
1282 diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
1283 index 3eacf474e1e39..a08e3eb2a6f9f 100644
1284 --- a/drivers/acpi/property.c
1285 +++ b/drivers/acpi/property.c
1286 @@ -794,9 +794,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
1287 const union acpi_object *obj;
1288 int ret;
1289
1290 - if (!val)
1291 - return -EINVAL;
1292 -
1293 if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) {
1294 ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj);
1295 if (ret)
1296 @@ -806,28 +803,43 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
1297 case DEV_PROP_U8:
1298 if (obj->integer.value > U8_MAX)
1299 return -EOVERFLOW;
1300 - *(u8 *)val = obj->integer.value;
1301 +
1302 + if (val)
1303 + *(u8 *)val = obj->integer.value;
1304 +
1305 break;
1306 case DEV_PROP_U16:
1307 if (obj->integer.value > U16_MAX)
1308 return -EOVERFLOW;
1309 - *(u16 *)val = obj->integer.value;
1310 +
1311 + if (val)
1312 + *(u16 *)val = obj->integer.value;
1313 +
1314 break;
1315 case DEV_PROP_U32:
1316 if (obj->integer.value > U32_MAX)
1317 return -EOVERFLOW;
1318 - *(u32 *)val = obj->integer.value;
1319 +
1320 + if (val)
1321 + *(u32 *)val = obj->integer.value;
1322 +
1323 break;
1324 default:
1325 - *(u64 *)val = obj->integer.value;
1326 + if (val)
1327 + *(u64 *)val = obj->integer.value;
1328 +
1329 break;
1330 }
1331 +
1332 + if (!val)
1333 + return 1;
1334 } else if (proptype == DEV_PROP_STRING) {
1335 ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj);
1336 if (ret)
1337 return ret;
1338
1339 - *(char **)val = obj->string.pointer;
1340 + if (val)
1341 + *(char **)val = obj->string.pointer;
1342
1343 return 1;
1344 } else {
1345 @@ -841,7 +853,7 @@ int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
1346 {
1347 int ret;
1348
1349 - if (!adev)
1350 + if (!adev || !val)
1351 return -EINVAL;
1352
1353 ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val);
1354 @@ -935,10 +947,20 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
1355 const union acpi_object *items;
1356 int ret;
1357
1358 - if (val && nval == 1) {
1359 + if (nval == 1 || !val) {
1360 ret = acpi_data_prop_read_single(data, propname, proptype, val);
1361 - if (ret >= 0)
1362 + /*
1363 + * The overflow error means that the property is there and it is
1364 + * single-value, but its type does not match, so return.
1365 + */
1366 + if (ret >= 0 || ret == -EOVERFLOW)
1367 return ret;
1368 +
1369 + /*
1370 + * Reading this property as a single-value one failed, but its
1371 + * value may still be represented as one-element array, so
1372 + * continue.
1373 + */
1374 }
1375
1376 ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
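
Aside: the val checks added above appear to let callers pass a NULL
buffer to probe or size a property before reading it. A hypothetical
caller fragment (the "channels" property name is made up):

#include <linux/property.h>

static int read_channels(struct device *dev, u32 *buf, int max)
{
	int n = device_property_count_u32(dev, "channels");

	if (n < 0 || n > max)
		return n < 0 ? n : -EINVAL;
	return device_property_read_u32_array(dev, "channels", buf, n);
}
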
1377 diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
1378 index fe1523664816a..af58768a03937 100644
1379 --- a/drivers/amba/bus.c
1380 +++ b/drivers/amba/bus.c
1381 @@ -299,10 +299,11 @@ static int amba_remove(struct device *dev)
1382 {
1383 struct amba_device *pcdev = to_amba_device(dev);
1384 struct amba_driver *drv = to_amba_driver(dev->driver);
1385 - int ret;
1386 + int ret = 0;
1387
1388 pm_runtime_get_sync(dev);
1389 - ret = drv->remove(pcdev);
1390 + if (drv->remove)
1391 + ret = drv->remove(pcdev);
1392 pm_runtime_put_noidle(dev);
1393
1394 /* Undo the runtime PM settings in amba_probe() */
1395 @@ -319,7 +320,9 @@ static int amba_remove(struct device *dev)
1396 static void amba_shutdown(struct device *dev)
1397 {
1398 struct amba_driver *drv = to_amba_driver(dev->driver);
1399 - drv->shutdown(to_amba_device(dev));
1400 +
1401 + if (drv->shutdown)
1402 + drv->shutdown(to_amba_device(dev));
1403 }
1404
1405 /**
1406 @@ -332,12 +335,13 @@ static void amba_shutdown(struct device *dev)
1407 */
1408 int amba_driver_register(struct amba_driver *drv)
1409 {
1410 - drv->drv.bus = &amba_bustype;
1411 + if (!drv->probe)
1412 + return -EINVAL;
1413
1414 -#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn
1415 - SETFN(probe);
1416 - SETFN(remove);
1417 - SETFN(shutdown);
1418 + drv->drv.bus = &amba_bustype;
1419 + drv->drv.probe = amba_probe;
1420 + drv->drv.remove = amba_remove;
1421 + drv->drv.shutdown = amba_shutdown;
1422
1423 return driver_register(&drv->drv);
1424 }
1425 diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
1426 index 66a570d0da837..067b55cc157ef 100644
1427 --- a/drivers/ata/ahci_brcm.c
1428 +++ b/drivers/ata/ahci_brcm.c
1429 @@ -361,6 +361,10 @@ static int brcm_ahci_resume(struct device *dev)
1430 if (ret)
1431 return ret;
1432
1433 + ret = ahci_platform_enable_regulators(hpriv);
1434 + if (ret)
1435 + goto out_disable_clks;
1436 +
1437 brcm_sata_init(priv);
1438 brcm_sata_phys_enable(priv);
1439 brcm_sata_alpm_init(hpriv);
1440 @@ -390,6 +394,8 @@ out_disable_platform_phys:
1441 ahci_platform_disable_phys(hpriv);
1442 out_disable_phys:
1443 brcm_sata_phys_disable(priv);
1444 + ahci_platform_disable_regulators(hpriv);
1445 +out_disable_clks:
1446 ahci_platform_disable_clks(hpriv);
1447 return ret;
1448 }
1449 @@ -463,6 +469,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
1450 if (ret)
1451 goto out_reset;
1452
1453 + ret = ahci_platform_enable_regulators(hpriv);
1454 + if (ret)
1455 + goto out_disable_clks;
1456 +
1457 /* Must be first so as to configure endianness including that
1458 * of the standard AHCI register space.
1459 */
1460 @@ -472,7 +482,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
1461 priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
1462 if (!priv->port_mask) {
1463 ret = -ENODEV;
1464 - goto out_disable_clks;
1465 + goto out_disable_regulators;
1466 }
1467
1468 /* Must be done before ahci_platform_enable_phys() */
1469 @@ -497,6 +507,8 @@ out_disable_platform_phys:
1470 ahci_platform_disable_phys(hpriv);
1471 out_disable_phys:
1472 brcm_sata_phys_disable(priv);
1473 +out_disable_regulators:
1474 + ahci_platform_disable_regulators(hpriv);
1475 out_disable_clks:
1476 ahci_platform_disable_clks(hpriv);
1477 out_reset:
1478 diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
1479 index a2fcde582e2a1..33b887b389061 100644
1480 --- a/drivers/auxdisplay/ht16k33.c
1481 +++ b/drivers/auxdisplay/ht16k33.c
1482 @@ -117,8 +117,7 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
1483 {
1484 struct ht16k33_fbdev *fbdev = &priv->fbdev;
1485
1486 - schedule_delayed_work(&fbdev->work,
1487 - msecs_to_jiffies(HZ / fbdev->refresh_rate));
1488 + schedule_delayed_work(&fbdev->work, HZ / fbdev->refresh_rate);
1489 }
1490
1491 /*
1492 diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
1493 index 50a66382d87d0..e75168b941d0c 100644
1494 --- a/drivers/base/regmap/regmap-sdw.c
1495 +++ b/drivers/base/regmap/regmap-sdw.c
1496 @@ -12,7 +12,7 @@ static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
1497 struct device *dev = context;
1498 struct sdw_slave *slave = dev_to_sdw_dev(dev);
1499
1500 - return sdw_write(slave, reg, val);
1501 + return sdw_write_no_pm(slave, reg, val);
1502 }
1503
1504 static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
1505 @@ -21,7 +21,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
1506 struct sdw_slave *slave = dev_to_sdw_dev(dev);
1507 int read;
1508
1509 - read = sdw_read(slave, reg);
1510 + read = sdw_read_no_pm(slave, reg);
1511 if (read < 0)
1512 return read;
1513
1514 diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
1515 index 77cc138d138cd..7d5236eafe845 100644
1516 --- a/drivers/base/swnode.c
1517 +++ b/drivers/base/swnode.c
1518 @@ -534,14 +534,18 @@ software_node_get_next_child(const struct fwnode_handle *fwnode,
1519 struct swnode *c = to_swnode(child);
1520
1521 if (!p || list_empty(&p->children) ||
1522 - (c && list_is_last(&c->entry, &p->children)))
1523 + (c && list_is_last(&c->entry, &p->children))) {
1524 + fwnode_handle_put(child);
1525 return NULL;
1526 + }
1527
1528 if (c)
1529 c = list_next_entry(c, entry);
1530 else
1531 c = list_first_entry(&p->children, struct swnode, entry);
1532 - return &c->fwnode;
1533 +
1534 + fwnode_handle_put(child);
1535 + return fwnode_handle_get(&c->fwnode);
1536 }
1537
1538 static struct fwnode_handle *
1539 diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
1540 index ac97a1e2e5ddc..40ea1a425c431 100644
1541 --- a/drivers/block/floppy.c
1542 +++ b/drivers/block/floppy.c
1543 @@ -4063,21 +4063,22 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
1544 if (UFDCS->rawcmd == 1)
1545 UFDCS->rawcmd = 2;
1546
1547 - if (!(mode & FMODE_NDELAY)) {
1548 - if (mode & (FMODE_READ|FMODE_WRITE)) {
1549 - UDRS->last_checked = 0;
1550 - clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
1551 - check_disk_change(bdev);
1552 - if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
1553 - goto out;
1554 - if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
1555 - goto out;
1556 - }
1557 - res = -EROFS;
1558 - if ((mode & FMODE_WRITE) &&
1559 - !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
1560 + if (mode & (FMODE_READ|FMODE_WRITE)) {
1561 + UDRS->last_checked = 0;
1562 + clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
1563 + check_disk_change(bdev);
1564 + if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
1565 + goto out;
1566 + if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
1567 goto out;
1568 }
1569 +
1570 + res = -EROFS;
1571 +
1572 + if ((mode & FMODE_WRITE) &&
1573 + !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
1574 + goto out;
1575 +
1576 mutex_unlock(&open_lock);
1577 mutex_unlock(&floppy_mutex);
1578 return 0;
1579 diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
1580 index 98d53764871f5..2acb719e596f5 100644
1581 --- a/drivers/bluetooth/btqcomsmd.c
1582 +++ b/drivers/bluetooth/btqcomsmd.c
1583 @@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev)
1584
1585 btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
1586 btqcomsmd_cmd_callback, btq);
1587 - if (IS_ERR(btq->cmd_channel))
1588 - return PTR_ERR(btq->cmd_channel);
1589 + if (IS_ERR(btq->cmd_channel)) {
1590 + ret = PTR_ERR(btq->cmd_channel);
1591 + goto destroy_acl_channel;
1592 + }
1593
1594 hdev = hci_alloc_dev();
1595 - if (!hdev)
1596 - return -ENOMEM;
1597 + if (!hdev) {
1598 + ret = -ENOMEM;
1599 + goto destroy_cmd_channel;
1600 + }
1601
1602 hci_set_drvdata(hdev, btq);
1603 btq->hdev = hdev;
1604 @@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev)
1605 hdev->set_bdaddr = qca_set_bdaddr_rome;
1606
1607 ret = hci_register_dev(hdev);
1608 - if (ret < 0) {
1609 - hci_free_dev(hdev);
1610 - return ret;
1611 - }
1612 + if (ret < 0)
1613 + goto hci_free_dev;
1614
1615 platform_set_drvdata(pdev, btq);
1616
1617 return 0;
1618 +
1619 +hci_free_dev:
1620 + hci_free_dev(hdev);
1621 +destroy_cmd_channel:
1622 + rpmsg_destroy_ept(btq->cmd_channel);
1623 +destroy_acl_channel:
1624 + rpmsg_destroy_ept(btq->acl_channel);
1625 +
1626 + return ret;
1627 }
1628
1629 static int btqcomsmd_remove(struct platform_device *pdev)
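
The probe rework above is the standard goto-unwind ladder: each acquired resource gets a label, failures jump to the label that releases everything acquired so far, and teardown runs in reverse order of acquisition. A generic sketch of the pattern, with hypothetical acquire_a()/acquire_b()/release_*() helpers:

    static int example_probe(struct platform_device *pdev)
    {
            int ret;

            ret = acquire_a();          /* first resource */
            if (ret)
                    return ret;         /* nothing to unwind yet */

            ret = acquire_b();          /* second resource */
            if (ret)
                    goto err_a;

            ret = register_thing();     /* last step, may fail */
            if (ret)
                    goto err_b;

            return 0;

    err_b:                              /* unwind in reverse order */
            release_b();
    err_a:
            release_a();
            return ret;
    }
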
1630 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1631 index b92bd97b1c399..b467fd05c5e82 100644
1632 --- a/drivers/bluetooth/btusb.c
1633 +++ b/drivers/bluetooth/btusb.c
1634 @@ -2568,7 +2568,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
1635 skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
1636 if (!skb) {
1637 hdev->stat.err_rx++;
1638 - goto err_out;
1639 + return;
1640 }
1641
1642 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
1643 @@ -2586,13 +2586,18 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
1644 */
1645 if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
1646 data->evt_skb = skb_clone(skb, GFP_ATOMIC);
1647 - if (!data->evt_skb)
1648 - goto err_out;
1649 + if (!data->evt_skb) {
1650 + kfree_skb(skb);
1651 + return;
1652 + }
1653 }
1654
1655 err = hci_recv_frame(hdev, skb);
1656 - if (err < 0)
1657 - goto err_free_skb;
1658 + if (err < 0) {
1659 + kfree_skb(data->evt_skb);
1660 + data->evt_skb = NULL;
1661 + return;
1662 + }
1663
1664 if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
1665 &data->flags)) {
1666 @@ -2601,11 +2606,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
1667 wake_up_bit(&data->flags,
1668 BTUSB_TX_WAIT_VND_EVT);
1669 }
1670 -err_out:
1671 - return;
1672 -err_free_skb:
1673 - kfree_skb(data->evt_skb);
1674 - data->evt_skb = NULL;
1675 return;
1676 } else if (urb->status == -ENOENT) {
1677 /* Avoid suspend failed when usb_kill_urb */
1678 diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
1679 index f83d67eafc9f0..8be4d807d1370 100644
1680 --- a/drivers/bluetooth/hci_ldisc.c
1681 +++ b/drivers/bluetooth/hci_ldisc.c
1682 @@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
1683 if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
1684 goto no_schedule;
1685
1686 - if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
1687 - set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
1688 + set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
1689 + if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
1690 goto no_schedule;
1691 - }
1692
1693 BT_DBG("");
1694
1695 @@ -174,10 +173,10 @@ restart:
1696 kfree_skb(skb);
1697 }
1698
1699 + clear_bit(HCI_UART_SENDING, &hu->tx_state);
1700 if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
1701 goto restart;
1702
1703 - clear_bit(HCI_UART_SENDING, &hu->tx_state);
1704 wake_up_bit(&hu->tx_state, HCI_UART_SENDING);
1705 }
1706
1707 diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
1708 index 5b9aa73ff2b7f..1b4ad231e6ed3 100644
1709 --- a/drivers/bluetooth/hci_serdev.c
1710 +++ b/drivers/bluetooth/hci_serdev.c
1711 @@ -85,9 +85,9 @@ static void hci_uart_write_work(struct work_struct *work)
1712 hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
1713 kfree_skb(skb);
1714 }
1715 - } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
1716
1717 - clear_bit(HCI_UART_SENDING, &hu->tx_state);
1718 + clear_bit(HCI_UART_SENDING, &hu->tx_state);
1719 + } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
1720 }
1721
1722 /* ------- Interface to HCI layer ------ */
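
Both Bluetooth UART hunks above close the same lost-wakeup race: the TX_WAKEUP request must be published before SENDING is tested on the sender side, and SENDING must be dropped before TX_WAKEUP is re-checked on the worker side, so a request that arrives while the worker is finishing is never missed. Reduced to a skeleton that mirrors the serdev worker:

    /* Sender: request work first, then try to become the active sender. */
    set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
    if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
            return 0;   /* active sender will see TX_WAKEUP and loop again */

    /* Worker: drop SENDING before the final re-check of TX_WAKEUP. */
    do {
            clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
            /* ... drain the queue ... */
            clear_bit(HCI_UART_SENDING, &hu->tx_state);
    } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
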
1723 diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
1724 index e262445fed5f5..f35f0f31f52ad 100644
1725 --- a/drivers/char/hw_random/timeriomem-rng.c
1726 +++ b/drivers/char/hw_random/timeriomem-rng.c
1727 @@ -69,7 +69,7 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
1728 */
1729 if (retval > 0)
1730 usleep_range(period_us,
1731 - period_us + min(1, period_us / 100));
1732 + period_us + max(1, period_us / 100));
1733
1734 *(u32 *)data = readl(priv->io_base);
1735 retval += sizeof(u32);
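
The min()-to-max() swap above is needed because min(1, period_us / 100) collapses to 0 for any period below 100 us, turning the usleep_range() window into a single point; max() guarantees at least 1 us of slack, which lets the timer code coalesce wakeups. Worked out:

    /* period_us = 40:   min(1, 40/100)  = 0 -> usleep_range(40, 40)   no slack
     *                   max(1, 40/100)  = 1 -> usleep_range(40, 41)
     * period_us = 500:  min(1, 500/100) = 1 -> usleep_range(500, 501)
     *                   max(1, 500/100) = 5 -> usleep_range(500, 505)
     */
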
1736 diff --git a/drivers/char/random.c b/drivers/char/random.c
1737 index 2c29f83ae3d5a..ffd61aadb7614 100644
1738 --- a/drivers/char/random.c
1739 +++ b/drivers/char/random.c
1740 @@ -2149,7 +2149,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1741 return -EPERM;
1742 if (crng_init < 2)
1743 return -ENODATA;
1744 - crng_reseed(&primary_crng, NULL);
1745 + crng_reseed(&primary_crng, &input_pool);
1746 crng_global_init_time = jiffies - 1;
1747 return 0;
1748 default:
1749 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
1750 index 63f6bed78d893..46d1fac247db7 100644
1751 --- a/drivers/char/tpm/tpm_tis_core.c
1752 +++ b/drivers/char/tpm/tpm_tis_core.c
1753 @@ -125,7 +125,8 @@ static bool check_locality(struct tpm_chip *chip, int l)
1754 if (rc < 0)
1755 return false;
1756
1757 - if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
1758 + if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID
1759 + | TPM_ACCESS_REQUEST_USE)) ==
1760 (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
1761 priv->locality = l;
1762 return true;
1763 @@ -134,58 +135,13 @@ static bool check_locality(struct tpm_chip *chip, int l)
1764 return false;
1765 }
1766
1767 -static bool locality_inactive(struct tpm_chip *chip, int l)
1768 -{
1769 - struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
1770 - int rc;
1771 - u8 access;
1772 -
1773 - rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
1774 - if (rc < 0)
1775 - return false;
1776 -
1777 - if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
1778 - == TPM_ACCESS_VALID)
1779 - return true;
1780 -
1781 - return false;
1782 -}
1783 -
1784 static int release_locality(struct tpm_chip *chip, int l)
1785 {
1786 struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
1787 - unsigned long stop, timeout;
1788 - long rc;
1789
1790 tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
1791
1792 - stop = jiffies + chip->timeout_a;
1793 -
1794 - if (chip->flags & TPM_CHIP_FLAG_IRQ) {
1795 -again:
1796 - timeout = stop - jiffies;
1797 - if ((long)timeout <= 0)
1798 - return -1;
1799 -
1800 - rc = wait_event_interruptible_timeout(priv->int_queue,
1801 - (locality_inactive(chip, l)),
1802 - timeout);
1803 -
1804 - if (rc > 0)
1805 - return 0;
1806 -
1807 - if (rc == -ERESTARTSYS && freezing(current)) {
1808 - clear_thread_flag(TIF_SIGPENDING);
1809 - goto again;
1810 - }
1811 - } else {
1812 - do {
1813 - if (locality_inactive(chip, l))
1814 - return 0;
1815 - tpm_msleep(TPM_TIMEOUT);
1816 - } while (time_before(jiffies, stop));
1817 - }
1818 - return -1;
1819 + return 0;
1820 }
1821
1822 static int request_locality(struct tpm_chip *chip, int l)
1823 diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
1824 index 7015974f24b43..84ca38450d021 100644
1825 --- a/drivers/clk/clk-ast2600.c
1826 +++ b/drivers/clk/clk-ast2600.c
1827 @@ -17,7 +17,8 @@
1828
1829 #define ASPEED_G6_NUM_CLKS 67
1830
1831 -#define ASPEED_G6_SILICON_REV 0x004
1832 +#define ASPEED_G6_SILICON_REV 0x014
1833 +#define CHIP_REVISION_ID GENMASK(23, 16)
1834
1835 #define ASPEED_G6_RESET_CTRL 0x040
1836 #define ASPEED_G6_RESET_CTRL2 0x050
1837 @@ -189,18 +190,34 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 val)
1838 static struct clk_hw *ast2600_calc_apll(const char *name, u32 val)
1839 {
1840 unsigned int mult, div;
1841 + u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV);
1842
1843 - if (val & BIT(20)) {
1844 - /* Pass through mode */
1845 - mult = div = 1;
1846 + if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) {
1847 + if (val & BIT(24)) {
1848 + /* Pass through mode */
1849 + mult = div = 1;
1850 + } else {
1851 + /* F = 25Mhz * [(m + 1) / (n + 1)] / (p + 1) */
1852 + u32 m = val & 0x1fff;
1853 + u32 n = (val >> 13) & 0x3f;
1854 + u32 p = (val >> 19) & 0xf;
1855 +
1856 + mult = (m + 1);
1857 + div = (n + 1) * (p + 1);
1858 + }
1859 } else {
1860 - /* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
1861 - u32 m = (val >> 5) & 0x3f;
1862 - u32 od = (val >> 4) & 0x1;
1863 - u32 n = val & 0xf;
1864 + if (val & BIT(20)) {
1865 + /* Pass through mode */
1866 + mult = div = 1;
1867 + } else {
1868 + /* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
1869 + u32 m = (val >> 5) & 0x3f;
1870 + u32 od = (val >> 4) & 0x1;
1871 + u32 n = val & 0xf;
1872
1873 - mult = (2 - od) * (m + 2);
1874 - div = n + 1;
1875 + mult = (2 - od) * (m + 2);
1876 + div = n + 1;
1877 + }
1878 }
1879 return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
1880 mult, div);
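
ast2600_calc_apll() now decodes the APLL register according to silicon revision (SCU register 0x14, CHIP_REVISION_ID in bits 23:16). A worked decode for the new rev >= 2 layout, using a hypothetical register value:

    /* val = 0x201F on an A2 or later part:
     *   m = val & 0x1fff         = 31
     *   n = (val >> 13) & 0x3f   = 1
     *   p = (val >> 19) & 0xf    = 0
     *   F = 25 MHz * (m + 1) / ((n + 1) * (p + 1))
     *     = 25 MHz * 32 / 2 = 400 MHz    (mult = 32, div = 2)
     */
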
1881 diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
1882 index 3a5853ca98c6c..e8df254f8085b 100644
1883 --- a/drivers/clk/meson/clk-pll.c
1884 +++ b/drivers/clk/meson/clk-pll.c
1885 @@ -363,13 +363,14 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
1886 {
1887 struct clk_regmap *clk = to_clk_regmap(hw);
1888 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
1889 - unsigned int enabled, m, n, frac = 0, ret;
1890 + unsigned int enabled, m, n, frac = 0;
1891 unsigned long old_rate;
1892 + int ret;
1893
1894 if (parent_rate == 0 || rate == 0)
1895 return -EINVAL;
1896
1897 - old_rate = rate;
1898 + old_rate = clk_hw_get_rate(hw);
1899
1900 ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll);
1901 if (ret)
1902 @@ -391,7 +392,8 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
1903 if (!enabled)
1904 return 0;
1905
1906 - if (meson_clk_pll_enable(hw)) {
1907 + ret = meson_clk_pll_enable(hw);
1908 + if (ret) {
1909 pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
1910 __func__, old_rate);
1911 /*
1912 @@ -403,7 +405,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
1913 meson_clk_pll_set_rate(hw, old_rate, parent_rate);
1914 }
1915
1916 - return 0;
1917 + return ret;
1918 }
1919
1920 /*
1921 diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
1922 index 091acd59c1d64..752f267b2881a 100644
1923 --- a/drivers/clk/qcom/gcc-msm8998.c
1924 +++ b/drivers/clk/qcom/gcc-msm8998.c
1925 @@ -135,7 +135,7 @@ static struct pll_vco fabia_vco[] = {
1926
1927 static struct clk_alpha_pll gpll0 = {
1928 .offset = 0x0,
1929 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
1930 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
1931 .vco_table = fabia_vco,
1932 .num_vco = ARRAY_SIZE(fabia_vco),
1933 .clkr = {
1934 @@ -145,58 +145,58 @@ static struct clk_alpha_pll gpll0 = {
1935 .name = "gpll0",
1936 .parent_names = (const char *[]){ "xo" },
1937 .num_parents = 1,
1938 - .ops = &clk_alpha_pll_ops,
1939 + .ops = &clk_alpha_pll_fixed_fabia_ops,
1940 }
1941 },
1942 };
1943
1944 static struct clk_alpha_pll_postdiv gpll0_out_even = {
1945 .offset = 0x0,
1946 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
1947 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
1948 .clkr.hw.init = &(struct clk_init_data){
1949 .name = "gpll0_out_even",
1950 .parent_names = (const char *[]){ "gpll0" },
1951 .num_parents = 1,
1952 - .ops = &clk_alpha_pll_postdiv_ops,
1953 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
1954 },
1955 };
1956
1957 static struct clk_alpha_pll_postdiv gpll0_out_main = {
1958 .offset = 0x0,
1959 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
1960 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
1961 .clkr.hw.init = &(struct clk_init_data){
1962 .name = "gpll0_out_main",
1963 .parent_names = (const char *[]){ "gpll0" },
1964 .num_parents = 1,
1965 - .ops = &clk_alpha_pll_postdiv_ops,
1966 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
1967 },
1968 };
1969
1970 static struct clk_alpha_pll_postdiv gpll0_out_odd = {
1971 .offset = 0x0,
1972 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
1973 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
1974 .clkr.hw.init = &(struct clk_init_data){
1975 .name = "gpll0_out_odd",
1976 .parent_names = (const char *[]){ "gpll0" },
1977 .num_parents = 1,
1978 - .ops = &clk_alpha_pll_postdiv_ops,
1979 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
1980 },
1981 };
1982
1983 static struct clk_alpha_pll_postdiv gpll0_out_test = {
1984 .offset = 0x0,
1985 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
1986 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
1987 .clkr.hw.init = &(struct clk_init_data){
1988 .name = "gpll0_out_test",
1989 .parent_names = (const char *[]){ "gpll0" },
1990 .num_parents = 1,
1991 - .ops = &clk_alpha_pll_postdiv_ops,
1992 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
1993 },
1994 };
1995
1996 static struct clk_alpha_pll gpll1 = {
1997 .offset = 0x1000,
1998 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
1999 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2000 .vco_table = fabia_vco,
2001 .num_vco = ARRAY_SIZE(fabia_vco),
2002 .clkr = {
2003 @@ -206,58 +206,58 @@ static struct clk_alpha_pll gpll1 = {
2004 .name = "gpll1",
2005 .parent_names = (const char *[]){ "xo" },
2006 .num_parents = 1,
2007 - .ops = &clk_alpha_pll_ops,
2008 + .ops = &clk_alpha_pll_fixed_fabia_ops,
2009 }
2010 },
2011 };
2012
2013 static struct clk_alpha_pll_postdiv gpll1_out_even = {
2014 .offset = 0x1000,
2015 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2016 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2017 .clkr.hw.init = &(struct clk_init_data){
2018 .name = "gpll1_out_even",
2019 .parent_names = (const char *[]){ "gpll1" },
2020 .num_parents = 1,
2021 - .ops = &clk_alpha_pll_postdiv_ops,
2022 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2023 },
2024 };
2025
2026 static struct clk_alpha_pll_postdiv gpll1_out_main = {
2027 .offset = 0x1000,
2028 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2029 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2030 .clkr.hw.init = &(struct clk_init_data){
2031 .name = "gpll1_out_main",
2032 .parent_names = (const char *[]){ "gpll1" },
2033 .num_parents = 1,
2034 - .ops = &clk_alpha_pll_postdiv_ops,
2035 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2036 },
2037 };
2038
2039 static struct clk_alpha_pll_postdiv gpll1_out_odd = {
2040 .offset = 0x1000,
2041 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2042 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2043 .clkr.hw.init = &(struct clk_init_data){
2044 .name = "gpll1_out_odd",
2045 .parent_names = (const char *[]){ "gpll1" },
2046 .num_parents = 1,
2047 - .ops = &clk_alpha_pll_postdiv_ops,
2048 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2049 },
2050 };
2051
2052 static struct clk_alpha_pll_postdiv gpll1_out_test = {
2053 .offset = 0x1000,
2054 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2055 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2056 .clkr.hw.init = &(struct clk_init_data){
2057 .name = "gpll1_out_test",
2058 .parent_names = (const char *[]){ "gpll1" },
2059 .num_parents = 1,
2060 - .ops = &clk_alpha_pll_postdiv_ops,
2061 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2062 },
2063 };
2064
2065 static struct clk_alpha_pll gpll2 = {
2066 .offset = 0x2000,
2067 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2068 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2069 .vco_table = fabia_vco,
2070 .num_vco = ARRAY_SIZE(fabia_vco),
2071 .clkr = {
2072 @@ -267,58 +267,58 @@ static struct clk_alpha_pll gpll2 = {
2073 .name = "gpll2",
2074 .parent_names = (const char *[]){ "xo" },
2075 .num_parents = 1,
2076 - .ops = &clk_alpha_pll_ops,
2077 + .ops = &clk_alpha_pll_fixed_fabia_ops,
2078 }
2079 },
2080 };
2081
2082 static struct clk_alpha_pll_postdiv gpll2_out_even = {
2083 .offset = 0x2000,
2084 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2085 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2086 .clkr.hw.init = &(struct clk_init_data){
2087 .name = "gpll2_out_even",
2088 .parent_names = (const char *[]){ "gpll2" },
2089 .num_parents = 1,
2090 - .ops = &clk_alpha_pll_postdiv_ops,
2091 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2092 },
2093 };
2094
2095 static struct clk_alpha_pll_postdiv gpll2_out_main = {
2096 .offset = 0x2000,
2097 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2098 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2099 .clkr.hw.init = &(struct clk_init_data){
2100 .name = "gpll2_out_main",
2101 .parent_names = (const char *[]){ "gpll2" },
2102 .num_parents = 1,
2103 - .ops = &clk_alpha_pll_postdiv_ops,
2104 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2105 },
2106 };
2107
2108 static struct clk_alpha_pll_postdiv gpll2_out_odd = {
2109 .offset = 0x2000,
2110 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2111 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2112 .clkr.hw.init = &(struct clk_init_data){
2113 .name = "gpll2_out_odd",
2114 .parent_names = (const char *[]){ "gpll2" },
2115 .num_parents = 1,
2116 - .ops = &clk_alpha_pll_postdiv_ops,
2117 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2118 },
2119 };
2120
2121 static struct clk_alpha_pll_postdiv gpll2_out_test = {
2122 .offset = 0x2000,
2123 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2124 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2125 .clkr.hw.init = &(struct clk_init_data){
2126 .name = "gpll2_out_test",
2127 .parent_names = (const char *[]){ "gpll2" },
2128 .num_parents = 1,
2129 - .ops = &clk_alpha_pll_postdiv_ops,
2130 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2131 },
2132 };
2133
2134 static struct clk_alpha_pll gpll3 = {
2135 .offset = 0x3000,
2136 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2137 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2138 .vco_table = fabia_vco,
2139 .num_vco = ARRAY_SIZE(fabia_vco),
2140 .clkr = {
2141 @@ -328,58 +328,58 @@ static struct clk_alpha_pll gpll3 = {
2142 .name = "gpll3",
2143 .parent_names = (const char *[]){ "xo" },
2144 .num_parents = 1,
2145 - .ops = &clk_alpha_pll_ops,
2146 + .ops = &clk_alpha_pll_fixed_fabia_ops,
2147 }
2148 },
2149 };
2150
2151 static struct clk_alpha_pll_postdiv gpll3_out_even = {
2152 .offset = 0x3000,
2153 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2154 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2155 .clkr.hw.init = &(struct clk_init_data){
2156 .name = "gpll3_out_even",
2157 .parent_names = (const char *[]){ "gpll3" },
2158 .num_parents = 1,
2159 - .ops = &clk_alpha_pll_postdiv_ops,
2160 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2161 },
2162 };
2163
2164 static struct clk_alpha_pll_postdiv gpll3_out_main = {
2165 .offset = 0x3000,
2166 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2167 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2168 .clkr.hw.init = &(struct clk_init_data){
2169 .name = "gpll3_out_main",
2170 .parent_names = (const char *[]){ "gpll3" },
2171 .num_parents = 1,
2172 - .ops = &clk_alpha_pll_postdiv_ops,
2173 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2174 },
2175 };
2176
2177 static struct clk_alpha_pll_postdiv gpll3_out_odd = {
2178 .offset = 0x3000,
2179 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2180 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2181 .clkr.hw.init = &(struct clk_init_data){
2182 .name = "gpll3_out_odd",
2183 .parent_names = (const char *[]){ "gpll3" },
2184 .num_parents = 1,
2185 - .ops = &clk_alpha_pll_postdiv_ops,
2186 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2187 },
2188 };
2189
2190 static struct clk_alpha_pll_postdiv gpll3_out_test = {
2191 .offset = 0x3000,
2192 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2193 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2194 .clkr.hw.init = &(struct clk_init_data){
2195 .name = "gpll3_out_test",
2196 .parent_names = (const char *[]){ "gpll3" },
2197 .num_parents = 1,
2198 - .ops = &clk_alpha_pll_postdiv_ops,
2199 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2200 },
2201 };
2202
2203 static struct clk_alpha_pll gpll4 = {
2204 .offset = 0x77000,
2205 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2206 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2207 .vco_table = fabia_vco,
2208 .num_vco = ARRAY_SIZE(fabia_vco),
2209 .clkr = {
2210 @@ -389,52 +389,52 @@ static struct clk_alpha_pll gpll4 = {
2211 .name = "gpll4",
2212 .parent_names = (const char *[]){ "xo" },
2213 .num_parents = 1,
2214 - .ops = &clk_alpha_pll_ops,
2215 + .ops = &clk_alpha_pll_fixed_fabia_ops,
2216 }
2217 },
2218 };
2219
2220 static struct clk_alpha_pll_postdiv gpll4_out_even = {
2221 .offset = 0x77000,
2222 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2223 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2224 .clkr.hw.init = &(struct clk_init_data){
2225 .name = "gpll4_out_even",
2226 .parent_names = (const char *[]){ "gpll4" },
2227 .num_parents = 1,
2228 - .ops = &clk_alpha_pll_postdiv_ops,
2229 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2230 },
2231 };
2232
2233 static struct clk_alpha_pll_postdiv gpll4_out_main = {
2234 .offset = 0x77000,
2235 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2236 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2237 .clkr.hw.init = &(struct clk_init_data){
2238 .name = "gpll4_out_main",
2239 .parent_names = (const char *[]){ "gpll4" },
2240 .num_parents = 1,
2241 - .ops = &clk_alpha_pll_postdiv_ops,
2242 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2243 },
2244 };
2245
2246 static struct clk_alpha_pll_postdiv gpll4_out_odd = {
2247 .offset = 0x77000,
2248 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2249 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2250 .clkr.hw.init = &(struct clk_init_data){
2251 .name = "gpll4_out_odd",
2252 .parent_names = (const char *[]){ "gpll4" },
2253 .num_parents = 1,
2254 - .ops = &clk_alpha_pll_postdiv_ops,
2255 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2256 },
2257 };
2258
2259 static struct clk_alpha_pll_postdiv gpll4_out_test = {
2260 .offset = 0x77000,
2261 - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
2262 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
2263 .clkr.hw.init = &(struct clk_init_data){
2264 .name = "gpll4_out_test",
2265 .parent_names = (const char *[]){ "gpll4" },
2266 .num_parents = 1,
2267 - .ops = &clk_alpha_pll_postdiv_ops,
2268 + .ops = &clk_alpha_pll_postdiv_fabia_ops,
2269 },
2270 };
2271
2272 diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
2273 index d89353a3cdec7..2f00f1b7b9c00 100644
2274 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
2275 +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
2276 @@ -228,7 +228,7 @@ static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k",
2277 static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
2278 psi_ahb1_ahb2_parents,
2279 0x510,
2280 - 0, 5, /* M */
2281 + 0, 2, /* M */
2282 8, 2, /* P */
2283 24, 2, /* mux */
2284 0);
2285 @@ -237,19 +237,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
2286 "psi-ahb1-ahb2",
2287 "pll-periph0" };
2288 static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
2289 - 0, 5, /* M */
2290 + 0, 2, /* M */
2291 8, 2, /* P */
2292 24, 2, /* mux */
2293 0);
2294
2295 static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
2296 - 0, 5, /* M */
2297 + 0, 2, /* M */
2298 8, 2, /* P */
2299 24, 2, /* mux */
2300 0);
2301
2302 static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
2303 - 0, 5, /* M */
2304 + 0, 2, /* M */
2305 8, 2, /* P */
2306 24, 2, /* mux */
2307 0);
2308 @@ -673,7 +673,7 @@ static struct ccu_mux hdmi_cec_clk = {
2309
2310 .common = {
2311 .reg = 0xb10,
2312 - .features = CCU_FEATURE_VARIABLE_PREDIV,
2313 + .features = CCU_FEATURE_FIXED_PREDIV,
2314 .hw.init = CLK_HW_INIT_PARENTS("hdmi-cec",
2315 hdmi_cec_parents,
2316 &ccu_mux_ops,
2317 diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
2318 index f35a53ce8988a..3bb5625504e2f 100644
2319 --- a/drivers/clocksource/Kconfig
2320 +++ b/drivers/clocksource/Kconfig
2321 @@ -79,6 +79,7 @@ config IXP4XX_TIMER
2322 bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
2323 depends on HAS_IOMEM
2324 select CLKSRC_MMIO
2325 + select TIMER_OF if OF
2326 help
2327 Enables support for the Intel XScale IXP4xx SoC timer.
2328
2329 diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
2330 index f6ddae30933f7..dae8c0c2e606f 100644
2331 --- a/drivers/clocksource/mxs_timer.c
2332 +++ b/drivers/clocksource/mxs_timer.c
2333 @@ -138,10 +138,7 @@ static void mxs_irq_clear(char *state)
2334
2335 /* Clear pending interrupt */
2336 timrot_irq_acknowledge();
2337 -
2338 -#ifdef DEBUG
2339 - pr_info("%s: changing mode to %s\n", __func__, state)
2340 -#endif /* DEBUG */
2341 + pr_debug("%s: changing mode to %s\n", __func__, state);
2342 }
2343
2344 static int mxs_shutdown(struct clock_event_device *evt)
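
Besides being tidier, the pr_debug() conversion above fixes a latent build break: the removed pr_info() line had no trailing semicolon, so the driver failed to compile whenever DEBUG was defined. pr_debug() gives the same conditional output with no preprocessor guard to bit-rot:

    /* Compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG enables it,
     * but always parsed, so syntax errors cannot hide behind #ifdef.
     */
    pr_debug("%s: changing mode to %s\n", __func__, state);
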
2345 diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
2346 index 77b0e5d0fb134..a3c82f530d608 100644
2347 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
2348 +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
2349 @@ -566,6 +566,16 @@ unmap_base:
2350 return ret;
2351 }
2352
2353 +static void brcm_avs_prepare_uninit(struct platform_device *pdev)
2354 +{
2355 + struct private_data *priv;
2356 +
2357 + priv = platform_get_drvdata(pdev);
2358 +
2359 + iounmap(priv->avs_intr_base);
2360 + iounmap(priv->base);
2361 +}
2362 +
2363 static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
2364 {
2365 struct cpufreq_frequency_table *freq_table;
2366 @@ -701,21 +711,21 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
2367
2368 brcm_avs_driver.driver_data = pdev;
2369
2370 - return cpufreq_register_driver(&brcm_avs_driver);
2371 + ret = cpufreq_register_driver(&brcm_avs_driver);
2372 + if (ret)
2373 + brcm_avs_prepare_uninit(pdev);
2374 +
2375 + return ret;
2376 }
2377
2378 static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
2379 {
2380 - struct private_data *priv;
2381 int ret;
2382
2383 ret = cpufreq_unregister_driver(&brcm_avs_driver);
2384 - if (ret)
2385 - return ret;
2386 + WARN_ON(ret);
2387
2388 - priv = platform_get_drvdata(pdev);
2389 - iounmap(priv->base);
2390 - iounmap(priv->avs_intr_base);
2391 + brcm_avs_prepare_uninit(pdev);
2392
2393 return 0;
2394 }
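
The new brcm_avs_prepare_uninit() helper gives the probe error path and remove one shared teardown, so a cpufreq_register_driver() failure no longer leaks the two ioremap()ed regions. The resulting symmetry, in outline:

    /* probe: prepare, register; on failure, undo the prepare */
    ret = cpufreq_register_driver(&brcm_avs_driver);
    if (ret)
            brcm_avs_prepare_uninit(pdev);
    return ret;

    /* remove: the same teardown, unmapping in reverse of the map order */
    brcm_avs_prepare_uninit(pdev);
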
2395 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
2396 index b9ca89dc75c7d..88fe803a044d5 100644
2397 --- a/drivers/cpufreq/intel_pstate.c
2398 +++ b/drivers/cpufreq/intel_pstate.c
2399 @@ -1566,11 +1566,9 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
2400 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
2401 {
2402 cpu->pstate.min_pstate = pstate_funcs.get_min();
2403 - cpu->pstate.max_pstate = pstate_funcs.get_max();
2404 cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
2405 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
2406 cpu->pstate.scaling = pstate_funcs.get_scaling();
2407 - cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
2408
2409 if (hwp_active && !hwp_mode_bdw) {
2410 unsigned int phy_max, current_max;
2411 @@ -1578,9 +1576,12 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
2412 intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
2413 cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
2414 cpu->pstate.turbo_pstate = phy_max;
2415 + cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
2416 } else {
2417 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
2418 + cpu->pstate.max_pstate = pstate_funcs.get_max();
2419 }
2420 + cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
2421
2422 if (pstate_funcs.get_aperf_mperf_shift)
2423 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
2424 diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
2425 index ec4b5033013eb..98b8483577ce2 100644
2426 --- a/drivers/crypto/bcm/cipher.c
2427 +++ b/drivers/crypto/bcm/cipher.c
2428 @@ -41,7 +41,7 @@
2429
2430 /* ================= Device Structure ================== */
2431
2432 -struct device_private iproc_priv;
2433 +struct bcm_device_private iproc_priv;
2434
2435 /* ==================== Parameters ===================== */
2436
2437 diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
2438 index 766452b24d0ab..01feed268a0d4 100644
2439 --- a/drivers/crypto/bcm/cipher.h
2440 +++ b/drivers/crypto/bcm/cipher.h
2441 @@ -418,7 +418,7 @@ struct spu_hw {
2442 u32 num_chan;
2443 };
2444
2445 -struct device_private {
2446 +struct bcm_device_private {
2447 struct platform_device *pdev;
2448
2449 struct spu_hw spu;
2450 @@ -465,6 +465,6 @@ struct device_private {
2451 struct mbox_chan **mbox;
2452 };
2453
2454 -extern struct device_private iproc_priv;
2455 +extern struct bcm_device_private iproc_priv;
2456
2457 #endif
2458 diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
2459 index cd7504101acde..7227dbf8f46c7 100644
2460 --- a/drivers/crypto/bcm/util.c
2461 +++ b/drivers/crypto/bcm/util.c
2462 @@ -348,7 +348,7 @@ char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
2463 static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
2464 size_t count, loff_t *offp)
2465 {
2466 - struct device_private *ipriv;
2467 + struct bcm_device_private *ipriv;
2468 char *buf;
2469 ssize_t ret, out_offset, out_count;
2470 int i;
2471 diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
2472 index 3fac0c74a41fa..df4451b306495 100644
2473 --- a/drivers/crypto/chelsio/chtls/chtls_cm.h
2474 +++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
2475 @@ -50,9 +50,6 @@
2476 #define MIN_RCV_WND (24 * 1024U)
2477 #define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
2478
2479 -/* ulp_mem_io + ulptx_idata + payload + padding */
2480 -#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
2481 -
2482 /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
2483 #define TX_HEADER_LEN \
2484 (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
2485 diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
2486 index 7e5e092a23b3c..dce3a6f96c97e 100644
2487 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
2488 +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
2489 @@ -30,6 +30,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
2490 unsigned int ileft = areq->cryptlen;
2491 unsigned int oleft = areq->cryptlen;
2492 unsigned int todo;
2493 + unsigned long pi = 0, po = 0; /* progress for in and out */
2494 + bool miter_err;
2495 struct sg_mapping_iter mi, mo;
2496 unsigned int oi, oo; /* offset for in and out */
2497 unsigned long flags;
2498 @@ -44,50 +46,62 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
2499
2500 spin_lock_irqsave(&ss->slock, flags);
2501
2502 - for (i = 0; i < op->keylen; i += 4)
2503 - writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
2504 + for (i = 0; i < op->keylen / 4; i++)
2505 + writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
2506
2507 if (areq->iv) {
2508 for (i = 0; i < 4 && i < ivsize / 4; i++) {
2509 v = *(u32 *)(areq->iv + i * 4);
2510 - writel(v, ss->base + SS_IV0 + i * 4);
2511 + writesl(ss->base + SS_IV0 + i * 4, &v, 1);
2512 }
2513 }
2514 writel(mode, ss->base + SS_CTL);
2515
2516 - sg_miter_start(&mi, areq->src, sg_nents(areq->src),
2517 - SG_MITER_FROM_SG | SG_MITER_ATOMIC);
2518 - sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
2519 - SG_MITER_TO_SG | SG_MITER_ATOMIC);
2520 - sg_miter_next(&mi);
2521 - sg_miter_next(&mo);
2522 - if (!mi.addr || !mo.addr) {
2523 - dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
2524 - err = -EINVAL;
2525 - goto release_ss;
2526 - }
2527
2528 ileft = areq->cryptlen / 4;
2529 oleft = areq->cryptlen / 4;
2530 oi = 0;
2531 oo = 0;
2532 do {
2533 - todo = min(rx_cnt, ileft);
2534 - todo = min_t(size_t, todo, (mi.length - oi) / 4);
2535 - if (todo) {
2536 - ileft -= todo;
2537 - writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
2538 - oi += todo * 4;
2539 - }
2540 - if (oi == mi.length) {
2541 - sg_miter_next(&mi);
2542 - oi = 0;
2543 + if (ileft) {
2544 + sg_miter_start(&mi, areq->src, sg_nents(areq->src),
2545 + SG_MITER_FROM_SG | SG_MITER_ATOMIC);
2546 + if (pi)
2547 + sg_miter_skip(&mi, pi);
2548 + miter_err = sg_miter_next(&mi);
2549 + if (!miter_err || !mi.addr) {
2550 + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
2551 + err = -EINVAL;
2552 + goto release_ss;
2553 + }
2554 + todo = min(rx_cnt, ileft);
2555 + todo = min_t(size_t, todo, (mi.length - oi) / 4);
2556 + if (todo) {
2557 + ileft -= todo;
2558 + writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
2559 + oi += todo * 4;
2560 + }
2561 + if (oi == mi.length) {
2562 + pi += mi.length;
2563 + oi = 0;
2564 + }
2565 + sg_miter_stop(&mi);
2566 }
2567
2568 spaces = readl(ss->base + SS_FCSR);
2569 rx_cnt = SS_RXFIFO_SPACES(spaces);
2570 tx_cnt = SS_TXFIFO_SPACES(spaces);
2571
2572 + sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
2573 + SG_MITER_TO_SG | SG_MITER_ATOMIC);
2574 + if (po)
2575 + sg_miter_skip(&mo, po);
2576 + miter_err = sg_miter_next(&mo);
2577 + if (!miter_err || !mo.addr) {
2578 + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
2579 + err = -EINVAL;
2580 + goto release_ss;
2581 + }
2582 todo = min(tx_cnt, oleft);
2583 todo = min_t(size_t, todo, (mo.length - oo) / 4);
2584 if (todo) {
2585 @@ -96,9 +110,10 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
2586 oo += todo * 4;
2587 }
2588 if (oo == mo.length) {
2589 - sg_miter_next(&mo);
2590 oo = 0;
2591 + po += mo.length;
2592 }
2593 + sg_miter_stop(&mo);
2594 } while (oleft);
2595
2596 if (areq->iv) {
2597 @@ -109,8 +124,6 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
2598 }
2599
2600 release_ss:
2601 - sg_miter_stop(&mi);
2602 - sg_miter_stop(&mo);
2603 writel(0, ss->base + SS_CTL);
2604 spin_unlock_irqrestore(&ss->slock, flags);
2605 return err;
2606 @@ -164,12 +177,14 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
2607 unsigned int oleft = areq->cryptlen;
2608 unsigned int todo;
2609 struct sg_mapping_iter mi, mo;
2610 + unsigned long pi = 0, po = 0; /* progress for in and out */
2611 + bool miter_err;
2612 unsigned int oi, oo; /* offset for in and out */
2613 unsigned int ob = 0; /* offset in buf */
2614 unsigned int obo = 0; /* offset in bufo*/
2615 unsigned int obl = 0; /* length of data in bufo */
2616 unsigned long flags;
2617 - bool need_fallback;
2618 + bool need_fallback = false;
2619
2620 if (!areq->cryptlen)
2621 return 0;
2622 @@ -188,12 +203,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
2623 * we can use the SS optimized function
2624 */
2625 while (in_sg && no_chunk == 1) {
2626 - if (in_sg->length % 4)
2627 + if ((in_sg->length | in_sg->offset) & 3u)
2628 no_chunk = 0;
2629 in_sg = sg_next(in_sg);
2630 }
2631 while (out_sg && no_chunk == 1) {
2632 - if (out_sg->length % 4)
2633 + if ((out_sg->length | out_sg->offset) & 3u)
2634 no_chunk = 0;
2635 out_sg = sg_next(out_sg);
2636 }
2637 @@ -206,28 +221,17 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
2638
2639 spin_lock_irqsave(&ss->slock, flags);
2640
2641 - for (i = 0; i < op->keylen; i += 4)
2642 - writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
2643 + for (i = 0; i < op->keylen / 4; i++)
2644 + writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
2645
2646 if (areq->iv) {
2647 for (i = 0; i < 4 && i < ivsize / 4; i++) {
2648 v = *(u32 *)(areq->iv + i * 4);
2649 - writel(v, ss->base + SS_IV0 + i * 4);
2650 + writesl(ss->base + SS_IV0 + i * 4, &v, 1);
2651 }
2652 }
2653 writel(mode, ss->base + SS_CTL);
2654
2655 - sg_miter_start(&mi, areq->src, sg_nents(areq->src),
2656 - SG_MITER_FROM_SG | SG_MITER_ATOMIC);
2657 - sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
2658 - SG_MITER_TO_SG | SG_MITER_ATOMIC);
2659 - sg_miter_next(&mi);
2660 - sg_miter_next(&mo);
2661 - if (!mi.addr || !mo.addr) {
2662 - dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
2663 - err = -EINVAL;
2664 - goto release_ss;
2665 - }
2666 ileft = areq->cryptlen;
2667 oleft = areq->cryptlen;
2668 oi = 0;
2669 @@ -235,8 +239,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
2670
2671 while (oleft) {
2672 if (ileft) {
2673 - char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
2674 -
2675 + sg_miter_start(&mi, areq->src, sg_nents(areq->src),
2676 + SG_MITER_FROM_SG | SG_MITER_ATOMIC);
2677 + if (pi)
2678 + sg_miter_skip(&mi, pi);
2679 + miter_err = sg_miter_next(&mi);
2680 + if (!miter_err || !mi.addr) {
2681 + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
2682 + err = -EINVAL;
2683 + goto release_ss;
2684 + }
2685 /*
2686 * todo is the number of consecutive 4byte word that we
2687 * can read from current SG
2688 @@ -258,52 +270,57 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
2689 */
2690 todo = min(rx_cnt * 4 - ob, ileft);
2691 todo = min_t(size_t, todo, mi.length - oi);
2692 - memcpy(buf + ob, mi.addr + oi, todo);
2693 + memcpy(ss->buf + ob, mi.addr + oi, todo);
2694 ileft -= todo;
2695 oi += todo;
2696 ob += todo;
2697 if (!(ob % 4)) {
2698 - writesl(ss->base + SS_RXFIFO, buf,
2699 + writesl(ss->base + SS_RXFIFO, ss->buf,
2700 ob / 4);
2701 ob = 0;
2702 }
2703 }
2704 if (oi == mi.length) {
2705 - sg_miter_next(&mi);
2706 + pi += mi.length;
2707 oi = 0;
2708 }
2709 + sg_miter_stop(&mi);
2710 }
2711
2712 spaces = readl(ss->base + SS_FCSR);
2713 rx_cnt = SS_RXFIFO_SPACES(spaces);
2714 tx_cnt = SS_TXFIFO_SPACES(spaces);
2715 - dev_dbg(ss->dev,
2716 - "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
2717 - mode,
2718 - oi, mi.length, ileft, areq->cryptlen, rx_cnt,
2719 - oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
2720
2721 if (!tx_cnt)
2722 continue;
2723 + sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
2724 + SG_MITER_TO_SG | SG_MITER_ATOMIC);
2725 + if (po)
2726 + sg_miter_skip(&mo, po);
2727 + miter_err = sg_miter_next(&mo);
2728 + if (!miter_err || !mo.addr) {
2729 + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
2730 + err = -EINVAL;
2731 + goto release_ss;
2732 + }
2733 /* todo in 4bytes word */
2734 todo = min(tx_cnt, oleft / 4);
2735 todo = min_t(size_t, todo, (mo.length - oo) / 4);
2736 +
2737 if (todo) {
2738 readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
2739 oleft -= todo * 4;
2740 oo += todo * 4;
2741 if (oo == mo.length) {
2742 - sg_miter_next(&mo);
2743 + po += mo.length;
2744 oo = 0;
2745 }
2746 } else {
2747 - char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
2748 -
2749 /*
2750 * read obl bytes in bufo, we read at maximum for
2751 * emptying the device
2752 */
2753 - readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
2754 + readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
2755 obl = tx_cnt * 4;
2756 obo = 0;
2757 do {
2758 @@ -315,17 +332,19 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
2759 */
2760 todo = min_t(size_t,
2761 mo.length - oo, obl - obo);
2762 - memcpy(mo.addr + oo, bufo + obo, todo);
2763 + memcpy(mo.addr + oo, ss->bufo + obo, todo);
2764 oleft -= todo;
2765 obo += todo;
2766 oo += todo;
2767 if (oo == mo.length) {
2768 + po += mo.length;
2769 sg_miter_next(&mo);
2770 oo = 0;
2771 }
2772 } while (obo < obl);
2773 /* bufo must be fully used here */
2774 }
2775 + sg_miter_stop(&mo);
2776 }
2777 if (areq->iv) {
2778 for (i = 0; i < 4 && i < ivsize / 4; i++) {
2779 @@ -335,8 +354,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
2780 }
2781
2782 release_ss:
2783 - sg_miter_stop(&mi);
2784 - sg_miter_stop(&mo);
2785 writel(0, ss->base + SS_CTL);
2786 spin_unlock_irqrestore(&ss->slock, flags);
2787
2788 diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
2789 index 35a27a7145f84..9a2adc130d9aa 100644
2790 --- a/drivers/crypto/sunxi-ss/sun4i-ss.h
2791 +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
2792 @@ -138,6 +138,8 @@ struct sun4i_ss_ctx {
2793 struct reset_control *reset;
2794 struct device *dev;
2795 struct resource *res;
2796 + char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
2797 + char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
2798 spinlock_t slock; /* control the use of the device */
2799 #ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
2800 u32 seed[SS_SEED_LEN / BITS_PER_LONG];
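
The sun4i-ss rework above stops holding an atomic sg_mapping_iter mapping across the FIFO polling; each pass instead restarts the iterator, fast-forwards with sg_miter_skip() to the progress recorded in pi/po, and stops the iterator before polling again. The per-pass skeleton, with error handling elided:

    struct sg_mapping_iter m;
    unsigned long done = 0;   /* bytes fully consumed in earlier passes */
    unsigned int off = 0;     /* offset inside the current SG entry */

    while (bytes_left) {
            sg_miter_start(&m, sgl, sg_nents(sgl),
                           SG_MITER_FROM_SG | SG_MITER_ATOMIC);
            if (done)
                    sg_miter_skip(&m, done);  /* resume prior progress */
            if (!sg_miter_next(&m))
                    break;                    /* real code bails with -EINVAL */

            /* ... move up to (m.length - off) bytes at m.addr + off ... */

            if (off == m.length) {            /* current entry exhausted */
                    done += m.length;
                    off = 0;
            }
            sg_miter_stop(&m);                /* safe to poll the FIFO now */
    }
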
2801 diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
2802 index b7c66fc0ae0c2..8ef6e93e43f3c 100644
2803 --- a/drivers/crypto/talitos.c
2804 +++ b/drivers/crypto/talitos.c
2805 @@ -1097,11 +1097,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
2806 */
2807 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
2808 unsigned int offset, int datalen, int elen,
2809 - struct talitos_ptr *link_tbl_ptr)
2810 + struct talitos_ptr *link_tbl_ptr, int align)
2811 {
2812 int n_sg = elen ? sg_count + 1 : sg_count;
2813 int count = 0;
2814 int cryptlen = datalen + elen;
2815 + int padding = ALIGN(cryptlen, align) - cryptlen;
2816
2817 while (cryptlen && sg && n_sg--) {
2818 unsigned int len = sg_dma_len(sg);
2819 @@ -1125,7 +1126,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
2820 offset += datalen;
2821 }
2822 to_talitos_ptr(link_tbl_ptr + count,
2823 - sg_dma_address(sg) + offset, len, 0);
2824 + sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
2825 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
2826 count++;
2827 cryptlen -= len;
2828 @@ -1148,10 +1149,11 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
2829 unsigned int len, struct talitos_edesc *edesc,
2830 struct talitos_ptr *ptr, int sg_count,
2831 unsigned int offset, int tbl_off, int elen,
2832 - bool force)
2833 + bool force, int align)
2834 {
2835 struct talitos_private *priv = dev_get_drvdata(dev);
2836 bool is_sec1 = has_ftr_sec1(priv);
2837 + int aligned_len = ALIGN(len, align);
2838
2839 if (!src) {
2840 to_talitos_ptr(ptr, 0, 0, is_sec1);
2841 @@ -1159,22 +1161,22 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
2842 }
2843 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
2844 if (sg_count == 1 && !force) {
2845 - to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
2846 + to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
2847 return sg_count;
2848 }
2849 if (is_sec1) {
2850 - to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
2851 + to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
2852 return sg_count;
2853 }
2854 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
2855 - &edesc->link_tbl[tbl_off]);
2856 + &edesc->link_tbl[tbl_off], align);
2857 if (sg_count == 1 && !force) {
2858 /* Only one segment now, so no link tbl needed*/
2859 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
2860 return sg_count;
2861 }
2862 to_talitos_ptr(ptr, edesc->dma_link_tbl +
2863 - tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
2864 + tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
2865 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
2866
2867 return sg_count;
2868 @@ -1186,7 +1188,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
2869 unsigned int offset, int tbl_off)
2870 {
2871 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
2872 - tbl_off, 0, false);
2873 + tbl_off, 0, false, 1);
2874 }
2875
2876 /*
2877 @@ -1255,7 +1257,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
2878
2879 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
2880 sg_count, areq->assoclen, tbl_off, elen,
2881 - false);
2882 + false, 1);
2883
2884 if (ret > 1) {
2885 tbl_off += ret;
2886 @@ -1275,7 +1277,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
2887 elen = 0;
2888 ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
2889 sg_count, areq->assoclen, tbl_off, elen,
2890 - is_ipsec_esp && !encrypt);
2891 + is_ipsec_esp && !encrypt, 1);
2892 tbl_off += ret;
2893
2894 if (!encrypt && is_ipsec_esp) {
2895 @@ -1583,6 +1585,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
2896 bool sync_needed = false;
2897 struct talitos_private *priv = dev_get_drvdata(dev);
2898 bool is_sec1 = has_ftr_sec1(priv);
2899 + bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
2900 + (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
2901
2902 /* first DWORD empty */
2903
2904 @@ -1603,8 +1607,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
2905 /*
2906 * cipher in
2907 */
2908 - sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
2909 - &desc->ptr[3], sg_count, 0, 0);
2910 + sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
2911 + sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
2912 if (sg_count > 1)
2913 sync_needed = true;
2914
2915 diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
2916 index 1469b956948ab..32825119e8805 100644
2917 --- a/drivers/crypto/talitos.h
2918 +++ b/drivers/crypto/talitos.h
2919 @@ -344,6 +344,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
2920
2921 /* primary execution unit mode (MODE0) and derivatives */
2922 #define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000)
2923 +#define DESC_HDR_MODE0_AESU_MASK cpu_to_be32(0x00600000)
2924 #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
2925 #define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000)
2926 #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
2927 diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
2928 index ad72b3f42ffa0..eae385a312b88 100644
2929 --- a/drivers/dma/fsldma.c
2930 +++ b/drivers/dma/fsldma.c
2931 @@ -1214,6 +1214,7 @@ static int fsldma_of_probe(struct platform_device *op)
2932 {
2933 struct fsldma_device *fdev;
2934 struct device_node *child;
2935 + unsigned int i;
2936 int err;
2937
2938 fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
2939 @@ -1292,6 +1293,10 @@ static int fsldma_of_probe(struct platform_device *op)
2940 return 0;
2941
2942 out_free_fdev:
2943 + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
2944 + if (fdev->chan[i])
2945 + fsl_dma_chan_remove(fdev->chan[i]);
2946 + }
2947 irq_dispose_mapping(fdev->irq);
2948 iounmap(fdev->regs);
2949 out_free:
2950 @@ -1314,6 +1319,7 @@ static int fsldma_of_remove(struct platform_device *op)
2951 if (fdev->chan[i])
2952 fsl_dma_chan_remove(fdev->chan[i]);
2953 }
2954 + irq_dispose_mapping(fdev->irq);
2955
2956 iounmap(fdev->regs);
2957 kfree(fdev);
2958 diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
2959 index 07cc7320a614f..9045a6f7f5893 100644
2960 --- a/drivers/dma/hsu/pci.c
2961 +++ b/drivers/dma/hsu/pci.c
2962 @@ -26,22 +26,12 @@
2963 static irqreturn_t hsu_pci_irq(int irq, void *dev)
2964 {
2965 struct hsu_dma_chip *chip = dev;
2966 - struct pci_dev *pdev = to_pci_dev(chip->dev);
2967 u32 dmaisr;
2968 u32 status;
2969 unsigned short i;
2970 int ret = 0;
2971 int err;
2972
2973 - /*
2974 - * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
2975 - * to have different numbers, is shared between HSU DMA and UART IPs.
2976 - * Thus on such SoCs we are expecting that IRQ handler is called in
2977 - * UART driver only.
2978 - */
2979 - if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
2980 - return IRQ_HANDLED;
2981 -
2982 dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
2983 for (i = 0; i < chip->hsu->nr_channels; i++) {
2984 if (dmaisr & 0x1) {
2985 @@ -105,6 +95,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2986 if (ret)
2987 goto err_register_irq;
2988
2989 + /*
2990 + * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
2991 + * to have different numbers, is shared between HSU DMA and UART IPs.
2992 + * Thus on such SoCs we are expecting that IRQ handler is called in
2993 + * UART driver only. Instead of handling the spurious interrupt
2994 + * from HSU DMA here and waste CPU time and delay HSU UART interrupt
2995 + * handling, disable the interrupt entirely.
2996 + */
2997 + if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
2998 + disable_irq_nosync(chip->irq);
2999 +
3000 pci_set_drvdata(pdev, chip);
3001
3002 return 0;
3003 diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
3004 index af20e9a790a2a..bb9c361e224bc 100644
3005 --- a/drivers/dma/owl-dma.c
3006 +++ b/drivers/dma/owl-dma.c
3007 @@ -1201,6 +1201,7 @@ static int owl_dma_remove(struct platform_device *pdev)
3008 owl_dma_free(od);
3009
3010 clk_disable_unprepare(od->clk);
3011 + dma_pool_destroy(od->lli_pool);
3012
3013 return 0;
3014 }
3015 diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
3016 index 14fb8f6a1ad29..abcc8c0136c4e 100644
3017 --- a/drivers/gpio/gpio-pcf857x.c
3018 +++ b/drivers/gpio/gpio-pcf857x.c
3019 @@ -332,7 +332,7 @@ static int pcf857x_probe(struct i2c_client *client,
3020 * reset state. Otherwise it flags pins to be driven low.
3021 */
3022 gpio->out = ~n_latch;
3023 - gpio->status = gpio->out;
3024 + gpio->status = gpio->read(gpio->client);
3025
3026 status = devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio);
3027 if (status < 0)
3028 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
3029 index 3f744e72912f1..bcb7ab5c602d1 100644
3030 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
3031 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
3032 @@ -870,7 +870,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
3033 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
3034 {
3035 int ret;
3036 - long level;
3037 + unsigned long level;
3038 char *sub_str = NULL;
3039 char *tmp;
3040 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
3041 @@ -886,8 +886,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
3042 while (tmp[0]) {
3043 sub_str = strsep(&tmp, delimiter);
3044 if (strlen(sub_str)) {
3045 - ret = kstrtol(sub_str, 0, &level);
3046 - if (ret)
3047 + ret = kstrtoul(sub_str, 0, &level);
3048 + if (ret || level > 31)
3049 return -EINVAL;
3050 *mask |= 1 << level;
3051 } else
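
The kstrtol()-to-kstrtoul() switch plus the new level > 31 bound rejects negative input and keeps the shift inside the 32-bit mask; shifting 1 by 32 or more is undefined behaviour in C. The hardened parse, sketched in isolation:

    while ((sub_str = strsep(&tmp, delimiter))) {
            if (!strlen(sub_str))
                    continue;
            if (kstrtoul(sub_str, 0, &level) || level > 31)
                    return -EINVAL;           /* non-numeric or out of range */
            *mask |= 1U << level;             /* shift now provably < 32 */
    }
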
3052 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
3053 index 91899d28fa722..e8132210c244c 100644
3054 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
3055 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
3056 @@ -21,7 +21,7 @@
3057 *
3058 */
3059
3060 -#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
3061 +#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
3062 #define _AMDGPU_TRACE_H_
3063
3064 #include <linux/stringify.h>
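
The one-character fix above makes the #if test name the macro that is actually #defined; with _AMDGPU_TRACE_H tested but _AMDGPU_TRACE_H_ defined, the guard could never trip and the header body was re-read on every plain inclusion. The canonical shape for a tracepoint header:

    #if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
    #define _AMDGPU_TRACE_H_   /* must be the same macro the test checks */

    /* ... TRACE_EVENT() definitions ... */

    #endif /* _AMDGPU_TRACE_H_ || TRACE_HEADER_MULTI_READ */
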
3065 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
3066 index 317aa257c06bb..41631271d64ca 100644
3067 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c
3068 +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
3069 @@ -276,6 +276,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
3070 {
3071 u32 reference_clock = adev->clock.spll.reference_freq;
3072
3073 + if (adev->asic_type == CHIP_RENOIR)
3074 + return 10000;
3075 if (adev->asic_type == CHIP_RAVEN)
3076 return reference_clock / 4;
3077
3078 diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
3079 index 5815983caaf80..0d2e13627c647 100644
3080 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
3081 +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
3082 @@ -245,6 +245,23 @@ static enum bp_result encoder_control_digx_v3(
3083 cntl->enable_dp_audio);
3084 params.ucLaneNum = (uint8_t)(cntl->lanes_number);
3085
3086 + switch (cntl->color_depth) {
3087 + case COLOR_DEPTH_888:
3088 + params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
3089 + break;
3090 + case COLOR_DEPTH_101010:
3091 + params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
3092 + break;
3093 + case COLOR_DEPTH_121212:
3094 + params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
3095 + break;
3096 + case COLOR_DEPTH_161616:
3097 + params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
3098 + break;
3099 + default:
3100 + break;
3101 + }
3102 +
3103 if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
3104 result = BP_RESULT_OK;
3105
3106 @@ -274,6 +291,23 @@ static enum bp_result encoder_control_digx_v4(
3107 cntl->enable_dp_audio));
3108 params.ucLaneNum = (uint8_t)(cntl->lanes_number);
3109
3110 + switch (cntl->color_depth) {
3111 + case COLOR_DEPTH_888:
3112 + params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
3113 + break;
3114 + case COLOR_DEPTH_101010:
3115 + params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
3116 + break;
3117 + case COLOR_DEPTH_121212:
3118 + params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
3119 + break;
3120 + case COLOR_DEPTH_161616:
3121 + params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
3122 + break;
3123 + default:
3124 + break;
3125 + }
3126 +
3127 if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
3128 result = BP_RESULT_OK;
3129
3130 @@ -1057,6 +1091,19 @@ static enum bp_result set_pixel_clock_v5(
3131 * driver choose program it itself, i.e. here we program it
3132 * to 888 by default.
3133 */
3134 + if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
3135 + switch (bp_params->color_depth) {
3136 + case TRANSMITTER_COLOR_DEPTH_30:
3137 + /* yes this is correct, the atom define is wrong */
3138 + clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
3139 + break;
3140 + case TRANSMITTER_COLOR_DEPTH_36:
3141 + /* yes this is correct, the atom define is wrong */
3142 + clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
3143 + break;
3144 + default:
3145 + break;
3146 + }
3147
3148 if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
3149 result = BP_RESULT_OK;
3150 @@ -1135,6 +1182,20 @@ static enum bp_result set_pixel_clock_v6(
3151 * driver choose program it itself, i.e. here we pass required
3152 * target rate that includes deep color.
3153 */
3154 + if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
3155 + switch (bp_params->color_depth) {
3156 + case TRANSMITTER_COLOR_DEPTH_30:
3157 + clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
3158 + break;
3159 + case TRANSMITTER_COLOR_DEPTH_36:
3160 + clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
3161 + break;
3162 + case TRANSMITTER_COLOR_DEPTH_48:
3163 + clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
3164 + break;
3165 + default:
3166 + break;
3167 + }
3168
3169 if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
3170 result = BP_RESULT_OK;
3171 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
3172 index f787a6b947812..eca67d5d5b10d 100644
3173 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
3174 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
3175 @@ -871,6 +871,20 @@ static bool dce110_program_pix_clk(
3176 bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
3177 pll_settings->use_external_clk;
3178
3179 + switch (pix_clk_params->color_depth) {
3180 + case COLOR_DEPTH_101010:
3181 + bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_30;
3182 + break;
3183 + case COLOR_DEPTH_121212:
3184 + bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_36;
3185 + break;
3186 + case COLOR_DEPTH_161616:
3187 + bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_48;
3188 + break;
3189 + default:
3190 + break;
3191 + }
3192 +
3193 if (clk_src->bios->funcs->set_pixel_clock(
3194 clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
3195 return false;
3196 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
3197 index 6ed922a3c1cd5..c25840a774f94 100644
3198 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
3199 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
3200 @@ -563,6 +563,7 @@ static void dce110_stream_encoder_hdmi_set_stream_attribute(
3201 cntl.enable_dp_audio = enable_audio;
3202 cntl.pixel_clock = actual_pix_clk_khz;
3203 cntl.lanes_number = LANE_COUNT_FOUR;
3204 + cntl.color_depth = crtc_timing->display_color_depth;
3205
3206 if (enc110->base.bp->funcs->encoder_control(
3207 enc110->base.bp, &cntl) != BP_RESULT_OK)
3208 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
3209 index ab63d0d0304cb..6fd57cfb112f5 100644
3210 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
3211 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
3212 @@ -429,12 +429,12 @@ static void set_clamp(
3213 clamp_max = 0x3FC0;
3214 break;
3215 case COLOR_DEPTH_101010:
3216 - /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
3217 - clamp_max = 0x3FFC;
3218 + /* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */
3219 + clamp_max = 0x3FF0;
3220 break;
3221 case COLOR_DEPTH_121212:
3222 - /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
3223 - clamp_max = 0x3FFF;
3224 + /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
3225 + clamp_max = 0x3FFC;
3226 break;
3227 default:
3228 clamp_max = 0x3FC0;
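
The corrected clamp values follow from MSB-aligning an N-bit code word on the
transform's 14-bit bus: clamp_max = ((1 << N) - 1) << (14 - N). A standalone
check of that arithmetic (plain C, not driver code):

    #include <stdio.h>

    static unsigned int clamp_max_14bit(unsigned int bits)
    {
        return ((1u << bits) - 1) << (14 - bits);
    }

    int main(void)
    {
        printf("0x%04X 0x%04X 0x%04X\n",
               clamp_max_14bit(8),    /* 0x3FC0, the default above */
               clamp_max_14bit(10),   /* 0x3FF0, the fixed 10-bit value */
               clamp_max_14bit(12));  /* 0x3FFC, the fixed 12-bit value */
        return 0;
    }
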
3229 diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
3230 index cbe7818529bbf..623455cd75203 100644
3231 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
3232 +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
3233 @@ -168,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
3234 .ack = NULL
3235 };
3236
3237 +static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
3238 + .set = NULL,
3239 + .ack = NULL
3240 +};
3241 +
3242 #undef BASE_INNER
3243 #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
3244
3245 @@ -230,6 +235,17 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
3246 .funcs = &vblank_irq_info_funcs\
3247 }
3248
3249 +/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
3250 + * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
3251 + */
3252 +#define vupdate_no_lock_int_entry(reg_num)\
3253 + [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
3254 + IRQ_REG_ENTRY(OTG, reg_num,\
3255 + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
3256 + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
3257 + .funcs = &vupdate_no_lock_irq_info_funcs\
3258 + }
3259 +
3260 #define vblank_int_entry(reg_num)\
3261 [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
3262 IRQ_REG_ENTRY(OTG, reg_num,\
3263 @@ -338,6 +354,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
3264 vupdate_int_entry(3),
3265 vupdate_int_entry(4),
3266 vupdate_int_entry(5),
3267 + vupdate_no_lock_int_entry(0),
3268 + vupdate_no_lock_int_entry(1),
3269 + vupdate_no_lock_int_entry(2),
3270 + vupdate_no_lock_int_entry(3),
3271 + vupdate_no_lock_int_entry(4),
3272 + vupdate_no_lock_int_entry(5),
3273 vblank_int_entry(0),
3274 vblank_int_entry(1),
3275 vblank_int_entry(2),
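
The vupdate_no_lock entries rely on C99 designated initializers at computed
array indices, one table slot per macro invocation. A minimal standalone
sketch of the idiom (hypothetical names, not the DC interrupt tables):

    #include <stdio.h>

    enum irq_source { SRC_VUPDATE1 = 4, SRC_VBLANK1 = 10, SRC_COUNT = 16 };

    struct irq_info { const char *name; int inst; };

    #define vupdate_entry(n) [SRC_VUPDATE1 + (n)] = { "vupdate", (n) }
    #define vblank_entry(n)  [SRC_VBLANK1 + (n)]  = { "vblank",  (n) }

    static const struct irq_info table[SRC_COUNT] = {
        vupdate_entry(0), vupdate_entry(1),
        vblank_entry(0),  vblank_entry(1),
    };

    int main(void)
    {
        printf("%s %d\n", table[SRC_VBLANK1].name, table[SRC_VBLANK1].inst);
        return 0;
    }
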
3276 diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
3277 index 6b8502bcf0fd3..02ffde5fd7226 100644
3278 --- a/drivers/gpu/drm/drm_fb_helper.c
3279 +++ b/drivers/gpu/drm/drm_fb_helper.c
3280 @@ -965,11 +965,15 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
3281 drm_modeset_lock_all(fb_helper->dev);
3282 drm_client_for_each_modeset(modeset, &fb_helper->client) {
3283 crtc = modeset->crtc;
3284 - if (!crtc->funcs->gamma_set || !crtc->gamma_size)
3285 - return -EINVAL;
3286 + if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
3287 + ret = -EINVAL;
3288 + goto out;
3289 + }
3290
3291 - if (cmap->start + cmap->len > crtc->gamma_size)
3292 - return -EINVAL;
3293 + if (cmap->start + cmap->len > crtc->gamma_size) {
3294 + ret = -EINVAL;
3295 + goto out;
3296 + }
3297
3298 r = crtc->gamma_store;
3299 g = r + crtc->gamma_size;
3300 @@ -982,8 +986,9 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
3301 ret = crtc->funcs->gamma_set(crtc, r, g, b,
3302 crtc->gamma_size, NULL);
3303 if (ret)
3304 - return ret;
3305 + goto out;
3306 }
3307 +out:
3308 drm_modeset_unlock_all(fb_helper->dev);
3309
3310 return ret;
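
The setcmap_legacy change matters because the early returns exited with the
modeset locks still held; routing every failure through one exit label keeps
the lock/unlock calls balanced. A standalone sketch of the pattern, with a
pthread mutex standing in for the modeset lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int update(int value)
    {
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (value < 0) {
            ret = -1;   /* a bare "return -1" here would leak the lock */
            goto out;
        }
        /* ... modify state protected by the lock ... */
    out:
        pthread_mutex_unlock(&lock);
        return ret;
    }

    int main(void)
    {
        printf("%d\n", update(1));   /* 0 */
        printf("%d\n", update(-1));  /* -1, lock still released */
        return 0;
    }
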
3311 diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
3312 index e281070611480..fc9a34ed58bd1 100644
3313 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
3314 +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
3315 @@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
3316 hdmi_dev = pci_get_drvdata(dev);
3317
3318 i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
3319 - if (i2c_dev == NULL) {
3320 - DRM_ERROR("Can't allocate interface\n");
3321 - ret = -ENOMEM;
3322 - goto exit;
3323 - }
3324 + if (!i2c_dev)
3325 + return -ENOMEM;
3326
3327 i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
3328 i2c_dev->status = I2C_STAT_INIT;
3329 @@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
3330 oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
3331 if (ret) {
3332 DRM_ERROR("Failed to request IRQ for I2C controller\n");
3333 - goto err;
3334 + goto free_dev;
3335 }
3336
3337 /* Adapter registration */
3338 ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
3339 - return ret;
3340 + if (ret) {
3341 + DRM_ERROR("Failed to add I2C adapter\n");
3342 + goto free_irq;
3343 + }
3344
3345 -err:
3346 + return 0;
3347 +
3348 +free_irq:
3349 + free_irq(dev->irq, hdmi_dev);
3350 +free_dev:
3351 kfree(i2c_dev);
3352 -exit:
3353 +
3354 return ret;
3355 }
3356
3357 diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
3358 index 7005f8f69c683..d414525eccf6d 100644
3359 --- a/drivers/gpu/drm/gma500/psb_drv.c
3360 +++ b/drivers/gpu/drm/gma500/psb_drv.c
3361 @@ -313,6 +313,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
3362 if (ret)
3363 goto out_err;
3364
3365 + ret = -ENOMEM;
3366 +
3367 dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
3368 if (!dev_priv->mmu)
3369 goto out_err;
3370 diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
3371 index b030f7ae33029..4cf95e8031e38 100644
3372 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c
3373 +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
3374 @@ -2129,7 +2129,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
3375 if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, force_dvi))
3376 return MODE_CLOCK_HIGH;
3377
3378 - /* BXT DPLL can't generate 223-240 MHz */
3379 + /* GLK DPLL can't generate 446-480 MHz */
3380 + if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
3381 + return MODE_CLOCK_RANGE;
3382 +
3383 + /* BXT/GLK DPLL can't generate 223-240 MHz */
3384 if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
3385 return MODE_CLOCK_RANGE;
3386
3387 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
3388 index 03c6d6157e4d0..395146884a222 100644
3389 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
3390 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
3391 @@ -1099,7 +1099,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
3392 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
3393 pp_done);
3394
3395 - complete(&mdp5_crtc->pp_completion);
3396 + complete_all(&mdp5_crtc->pp_completion);
3397 }
3398
3399 static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
3400 diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
3401 index 1afb7c579dbbb..eca86bf448f74 100644
3402 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
3403 +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
3404 @@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
3405 .disable = dsi_20nm_phy_disable,
3406 .init = msm_dsi_phy_init_common,
3407 },
3408 - .io_start = { 0xfd998300, 0xfd9a0300 },
3409 + .io_start = { 0xfd998500, 0xfd9a0500 },
3410 .num_dsi_phy = 2,
3411 };
3412
3413 diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
3414 index f5f59261ea819..d1beaad0c82b6 100644
3415 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
3416 +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
3417 @@ -14,6 +14,7 @@ enum dcb_connector_type {
3418 DCB_CONNECTOR_LVDS_SPWG = 0x41,
3419 DCB_CONNECTOR_DP = 0x46,
3420 DCB_CONNECTOR_eDP = 0x47,
3421 + DCB_CONNECTOR_mDP = 0x48,
3422 DCB_CONNECTOR_HDMI_0 = 0x60,
3423 DCB_CONNECTOR_HDMI_1 = 0x61,
3424 DCB_CONNECTOR_HDMI_C = 0x63,
3425 diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
3426 index 282fd90b65e13..9ce7b0d4b8764 100644
3427 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c
3428 +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
3429 @@ -497,6 +497,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
3430 if (ret) {
3431 NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
3432 nouveau_channel_del(pchan);
3433 + goto done;
3434 }
3435
3436 ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
3437 diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
3438 index 0994aee7671ad..496e7dcd6b7dc 100644
3439 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c
3440 +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
3441 @@ -1240,6 +1240,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
3442 case DCB_CONNECTOR_DMS59_DP0:
3443 case DCB_CONNECTOR_DMS59_DP1:
3444 case DCB_CONNECTOR_DP :
3445 + case DCB_CONNECTOR_mDP :
3446 case DCB_CONNECTOR_USB_C : return DRM_MODE_CONNECTOR_DisplayPort;
3447 case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
3448 case DCB_CONNECTOR_HDMI_0 :
3449 diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
3450 index 134e9106ebac1..37679507f9432 100644
3451 --- a/drivers/gpu/drm/scheduler/sched_main.c
3452 +++ b/drivers/gpu/drm/scheduler/sched_main.c
3453 @@ -851,6 +851,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
3454 if (sched->thread)
3455 kthread_stop(sched->thread);
3456
3457 + /* Confirm no work left behind accessing device structures */
3458 + cancel_delayed_work_sync(&sched->work_tdr);
3459 +
3460 sched->ready = false;
3461 }
3462 EXPORT_SYMBOL(drm_sched_fini);
3463 diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
3464 index 6bf1425e8b0ca..eb3b2350687fb 100644
3465 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
3466 +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
3467 @@ -545,30 +545,13 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
3468 if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
3469 val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
3470
3471 - /*
3472 - * On A20 and similar SoCs, the only way to achieve Positive Edge
3473 - * (Rising Edge), is setting dclk clock phase to 2/3(240°).
3474 - * By default TCON works in Negative Edge(Falling Edge),
3475 - * this is why phase is set to 0 in that case.
3476 - * Unfortunately there's no way to logically invert dclk through
3477 - * IO_POL register.
3478 - * The only acceptable way to work, triple checked with scope,
3479 - * is using clock phase set to 0° for Negative Edge and set to 240°
3480 - * for Positive Edge.
3481 - * On A33 and similar SoCs there would be a 90° phase option,
3482 - * but it divides also dclk by 2.
3483 - * Following code is a way to avoid quirks all around TCON
3484 - * and DOTCLOCK drivers.
3485 - */
3486 - if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
3487 - clk_set_phase(tcon->dclk, 240);
3488 -
3489 if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
3490 - clk_set_phase(tcon->dclk, 0);
3491 + val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE;
3492
3493 regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
3494 SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
3495 SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
3496 + SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE |
3497 SUN4I_TCON0_IO_POL_DE_NEGATIVE,
3498 val);
3499
3500 diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
3501 index 5bdbaf0847824..ce500c8dd4c72 100644
3502 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
3503 +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
3504 @@ -113,6 +113,7 @@
3505 #define SUN4I_TCON0_IO_POL_REG 0x88
3506 #define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
3507 #define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27)
3508 +#define SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE BIT(26)
3509 #define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
3510 #define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
3511
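
The TCON fix programs dclk polarity as an IO_POL register bit instead of
abusing the clock phase, and writes it with regmap_update_bits(), a
read-modify-write that touches only the bits in the given mask. A standalone
sketch of those semantics (plain C):

    #include <stdio.h>
    #include <stdint.h>

    /* what update_bits-style helpers compute before writing back */
    static uint32_t update_bits(uint32_t old, uint32_t mask, uint32_t val)
    {
        return (old & ~mask) | (val & mask);
    }

    int main(void)
    {
        uint32_t reg = 0x0000000F;

        /* set bit 26, clear bits 24-25, leave everything else alone */
        reg = update_bits(reg, (1u << 26) | (3u << 24), 1u << 26);
        printf("0x%08X\n", reg); /* 0x0400000F */
        return 0;
    }
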
3512 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
3513 index 263eca119ff0f..8d202011b2db5 100644
3514 --- a/drivers/hid/hid-core.c
3515 +++ b/drivers/hid/hid-core.c
3516 @@ -1300,6 +1300,9 @@ EXPORT_SYMBOL_GPL(hid_open_report);
3517
3518 static s32 snto32(__u32 value, unsigned n)
3519 {
3520 + if (!value || !n)
3521 + return 0;
3522 +
3523 switch (n) {
3524 case 8: return ((__s8)value);
3525 case 16: return ((__s16)value);
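
snto32() reinterprets the low n bits of a report field as a signed
two's-complement value; the added guard bails out for a zero value or a zero
width before any extension happens. A standalone version of the same logic
(the generic branch for odd widths mirrors the rest of the driver's
function, which the hunk does not show):

    #include <stdio.h>
    #include <stdint.h>

    static int32_t snto32(uint32_t value, unsigned int n)
    {
        if (!value || !n)   /* the fix: nothing to sign-extend */
            return 0;

        switch (n) {
        case 8:  return (int8_t)value;
        case 16: return (int16_t)value;
        case 32: return (int32_t)value;
        }
        /* odd widths: extend from bit n-1 */
        return value & (1u << (n - 1)) ?
               (int32_t)(value | (~0u << n)) : (int32_t)value;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               snto32(0xFF, 8), snto32(0x1F, 5), snto32(0x0F, 5)); /* -1 -1 15 */
        return 0;
    }
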
3526 diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
3527 index e5550a5bf49d0..86001cfbdb6f1 100644
3528 --- a/drivers/hid/hid-logitech-dj.c
3529 +++ b/drivers/hid/hid-logitech-dj.c
3530 @@ -980,6 +980,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
3531 case 0x07:
3532 device_type = "eQUAD step 4 Gaming";
3533 logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
3534 + workitem.reports_supported |= STD_KEYBOARD;
3535 break;
3536 case 0x08:
3537 device_type = "eQUAD step 4 for gamepads";
3538 diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
3539 index b74acbd5997b5..f1928c1ac139c 100644
3540 --- a/drivers/hid/wacom_wac.c
3541 +++ b/drivers/hid/wacom_wac.c
3542 @@ -2600,7 +2600,12 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
3543 wacom_wac->is_invalid_bt_frame = !value;
3544 return;
3545 case HID_DG_CONTACTMAX:
3546 - features->touch_max = value;
3547 + if (!features->touch_max) {
3548 + features->touch_max = value;
3549 + } else {
3550 + hid_warn(hdev, "%s: ignoring attempt to overwrite non-zero touch_max "
3551 + "%d -> %d\n", __func__, features->touch_max, value);
3552 + }
3553 return;
3554 }
3555
3556 diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
3557 index 2be9c01e175ca..f36036be7f032 100644
3558 --- a/drivers/hsi/controllers/omap_ssi_core.c
3559 +++ b/drivers/hsi/controllers/omap_ssi_core.c
3560 @@ -424,7 +424,7 @@ static int ssi_hw_init(struct hsi_controller *ssi)
3561 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
3562 int err;
3563
3564 - err = pm_runtime_get_sync(ssi->device.parent);
3565 + err = pm_runtime_resume_and_get(ssi->device.parent);
3566 if (err < 0) {
3567 dev_err(&ssi->device, "runtime PM failed %d\n", err);
3568 return err;
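
The switch away from pm_runtime_get_sync() matters because that call bumps
the device usage count even when the resume fails, so error paths that just
return leak a reference. pm_runtime_resume_and_get() is essentially the
following wrapper (sketched from its upstream definition):

    static inline int pm_runtime_resume_and_get(struct device *dev)
    {
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
            /* drop the reference taken despite the failure */
            pm_runtime_put_noidle(dev);
            return ret;
        }

        return 0;
    }
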
3569 diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
3570 index 452307c79e4b9..0b55bc146b292 100644
3571 --- a/drivers/hv/channel_mgmt.c
3572 +++ b/drivers/hv/channel_mgmt.c
3573 @@ -1101,8 +1101,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
3574 vmbus_device_unregister(channel->device_obj);
3575 put_device(dev);
3576 }
3577 - }
3578 - if (channel->primary_channel != NULL) {
3579 + } else if (channel->primary_channel != NULL) {
3580 /*
3581 * Sub-channel is being rescinded. Following is the channel
3582 * close sequence when initiated from the driveri (refer to
3583 diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
3584 index dd9661c11782a..70cd9fc7fb869 100644
3585 --- a/drivers/i2c/busses/i2c-bcm-iproc.c
3586 +++ b/drivers/i2c/busses/i2c-bcm-iproc.c
3587 @@ -157,6 +157,11 @@
3588
3589 #define IE_S_ALL_INTERRUPT_SHIFT 21
3590 #define IE_S_ALL_INTERRUPT_MASK 0x3f
3591 +/*
3592 + * It takes ~18us to read 10 bytes of data, so to keep the tasklet
3593 + * running for less time, the max slave read per tasklet is set to 10 bytes.
3594 + */
3595 +#define MAX_SLAVE_RX_PER_INT 10
3596
3597 enum i2c_slave_read_status {
3598 I2C_SLAVE_RX_FIFO_EMPTY = 0,
3599 @@ -203,8 +208,18 @@ struct bcm_iproc_i2c_dev {
3600 /* bytes that have been read */
3601 unsigned int rx_bytes;
3602 unsigned int thld_bytes;
3603 +
3604 + bool slave_rx_only;
3605 + bool rx_start_rcvd;
3606 + bool slave_read_complete;
3607 + u32 tx_underrun;
3608 + u32 slave_int_mask;
3609 + struct tasklet_struct slave_rx_tasklet;
3610 };
3611
3612 +/* tasklet to process slave rx data */
3613 +static void slave_rx_tasklet_fn(unsigned long);
3614 +
3615 /*
3616 * Can be expanded in the future if more interrupt status bits are utilized
3617 */
3618 @@ -213,7 +228,8 @@ struct bcm_iproc_i2c_dev {
3619
3620 #define ISR_MASK_SLAVE (BIT(IS_S_START_BUSY_SHIFT)\
3621 | BIT(IS_S_RX_EVENT_SHIFT) | BIT(IS_S_RD_EVENT_SHIFT)\
3622 - | BIT(IS_S_TX_UNDERRUN_SHIFT))
3623 + | BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\
3624 + | BIT(IS_S_RX_THLD_SHIFT))
3625
3626 static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
3627 static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
3628 @@ -257,6 +273,7 @@ static void bcm_iproc_i2c_slave_init(
3629 {
3630 u32 val;
3631
3632 + iproc_i2c->tx_underrun = 0;
3633 if (need_reset) {
3634 /* put controller in reset */
3635 val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
3636 @@ -293,8 +310,11 @@ static void bcm_iproc_i2c_slave_init(
3637
3638 /* Enable interrupt register to indicate a valid byte in receive fifo */
3639 val = BIT(IE_S_RX_EVENT_SHIFT);
3640 + /* Enable interrupt register to indicate a Master read transaction */
3641 + val |= BIT(IE_S_RD_EVENT_SHIFT);
3642 /* Enable interrupt register for the Slave BUSY command */
3643 val |= BIT(IE_S_START_BUSY_SHIFT);
3644 + iproc_i2c->slave_int_mask = val;
3645 iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
3646 }
3647
3648 @@ -319,76 +339,176 @@ static void bcm_iproc_i2c_check_slave_status(
3649 }
3650 }
3651
3652 -static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
3653 - u32 status)
3654 +static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
3655 {
3656 + u8 rx_data, rx_status;
3657 + u32 rx_bytes = 0;
3658 u32 val;
3659 - u8 value, rx_status;
3660
3661 - /* Slave RX byte receive */
3662 - if (status & BIT(IS_S_RX_EVENT_SHIFT)) {
3663 + while (rx_bytes < MAX_SLAVE_RX_PER_INT) {
3664 val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
3665 rx_status = (val >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK;
3666 + rx_data = ((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
3667 +
3668 if (rx_status == I2C_SLAVE_RX_START) {
3669 - /* Start of SMBUS for Master write */
3670 + /* Start of SMBUS Master write */
3671 i2c_slave_event(iproc_i2c->slave,
3672 - I2C_SLAVE_WRITE_REQUESTED, &value);
3673 -
3674 - val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
3675 - value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
3676 + I2C_SLAVE_WRITE_REQUESTED, &rx_data);
3677 + iproc_i2c->rx_start_rcvd = true;
3678 + iproc_i2c->slave_read_complete = false;
3679 + } else if (rx_status == I2C_SLAVE_RX_DATA &&
3680 + iproc_i2c->rx_start_rcvd) {
3681 + /* Middle of SMBUS Master write */
3682 i2c_slave_event(iproc_i2c->slave,
3683 - I2C_SLAVE_WRITE_RECEIVED, &value);
3684 - } else if (status & BIT(IS_S_RD_EVENT_SHIFT)) {
3685 - /* Start of SMBUS for Master Read */
3686 - i2c_slave_event(iproc_i2c->slave,
3687 - I2C_SLAVE_READ_REQUESTED, &value);
3688 - iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
3689 + I2C_SLAVE_WRITE_RECEIVED, &rx_data);
3690 + } else if (rx_status == I2C_SLAVE_RX_END &&
3691 + iproc_i2c->rx_start_rcvd) {
3692 + /* End of SMBUS Master write */
3693 + if (iproc_i2c->slave_rx_only)
3694 + i2c_slave_event(iproc_i2c->slave,
3695 + I2C_SLAVE_WRITE_RECEIVED,
3696 + &rx_data);
3697 +
3698 + i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP,
3699 + &rx_data);
3700 + } else if (rx_status == I2C_SLAVE_RX_FIFO_EMPTY) {
3701 + iproc_i2c->rx_start_rcvd = false;
3702 + iproc_i2c->slave_read_complete = true;
3703 + break;
3704 + }
3705
3706 - val = BIT(S_CMD_START_BUSY_SHIFT);
3707 - iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
3708 + rx_bytes++;
3709 + }
3710 +}
3711
3712 - /*
3713 - * Enable interrupt for TX FIFO becomes empty and
3714 - * less than PKT_LENGTH bytes were output on the SMBUS
3715 - */
3716 - val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
3717 - val |= BIT(IE_S_TX_UNDERRUN_SHIFT);
3718 - iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
3719 - } else {
3720 - /* Master write other than start */
3721 - value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
3722 +static void slave_rx_tasklet_fn(unsigned long data)
3723 +{
3724 + struct bcm_iproc_i2c_dev *iproc_i2c = (struct bcm_iproc_i2c_dev *)data;
3725 + u32 int_clr;
3726 +
3727 + bcm_iproc_i2c_slave_read(iproc_i2c);
3728 +
3729 + /* clear pending IS_S_RX_EVENT_SHIFT interrupt */
3730 + int_clr = BIT(IS_S_RX_EVENT_SHIFT);
3731 +
3732 + if (!iproc_i2c->slave_rx_only && iproc_i2c->slave_read_complete) {
3733 + /*
3734 + * In case of single byte master-read request,
3735 + * IS_S_TX_UNDERRUN_SHIFT event is generated before
3736 + * IS_S_START_BUSY_SHIFT event. Hence start slave data send
3737 + * from first IS_S_TX_UNDERRUN_SHIFT event.
3738 + *
3739 + * This means don't send any data from slave when
3740 + * IS_S_RD_EVENT_SHIFT event is generated else it will increment
3741 + * eeprom or other backend slave driver read pointer twice.
3742 + */
3743 + iproc_i2c->tx_underrun = 0;
3744 + iproc_i2c->slave_int_mask |= BIT(IE_S_TX_UNDERRUN_SHIFT);
3745 +
3746 + /* clear IS_S_RD_EVENT_SHIFT interrupt */
3747 + int_clr |= BIT(IS_S_RD_EVENT_SHIFT);
3748 + }
3749 +
3750 + /* clear slave interrupt */
3751 + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, int_clr);
3752 + /* enable slave interrupts */
3753 + iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, iproc_i2c->slave_int_mask);
3754 +}
3755 +
3756 +static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
3757 + u32 status)
3758 +{
3759 + u32 val;
3760 + u8 value;
3761 +
3762 + /*
3763 + * Slave events in case of master-write, master-write-read and,
3764 + * master-read
3765 + *
3766 + * Master-write : only IS_S_RX_EVENT_SHIFT event
3767 + * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
3768 + * events
3769 + * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
3770 + * events or only IS_S_RD_EVENT_SHIFT
3771 + */
3772 + if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
3773 + status & BIT(IS_S_RD_EVENT_SHIFT)) {
3774 + /* disable slave interrupts */
3775 + val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
3776 + val &= ~iproc_i2c->slave_int_mask;
3777 + iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
3778 +
3779 + if (status & BIT(IS_S_RD_EVENT_SHIFT))
3780 + /* Master-write-read request */
3781 + iproc_i2c->slave_rx_only = false;
3782 + else
3783 + /* Master-write request only */
3784 + iproc_i2c->slave_rx_only = true;
3785 +
3786 + /* schedule tasklet to read data later */
3787 + tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
3788 +
3789 + /* clear only IS_S_RX_EVENT_SHIFT interrupt */
3790 + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
3791 + BIT(IS_S_RX_EVENT_SHIFT));
3792 + }
3793 +
3794 + if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
3795 + iproc_i2c->tx_underrun++;
3796 + if (iproc_i2c->tx_underrun == 1)
3797 + /* Start of SMBUS for Master Read */
3798 i2c_slave_event(iproc_i2c->slave,
3799 - I2C_SLAVE_WRITE_RECEIVED, &value);
3800 - if (rx_status == I2C_SLAVE_RX_END)
3801 - i2c_slave_event(iproc_i2c->slave,
3802 - I2C_SLAVE_STOP, &value);
3803 - }
3804 - } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
3805 - /* Master read other than start */
3806 - i2c_slave_event(iproc_i2c->slave,
3807 - I2C_SLAVE_READ_PROCESSED, &value);
3808 + I2C_SLAVE_READ_REQUESTED,
3809 + &value);
3810 + else
3811 + /* Master read other than start */
3812 + i2c_slave_event(iproc_i2c->slave,
3813 + I2C_SLAVE_READ_PROCESSED,
3814 + &value);
3815
3816 iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
3817 + /* start transfer */
3818 val = BIT(S_CMD_START_BUSY_SHIFT);
3819 iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
3820 +
3821 + /* clear interrupt */
3822 + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
3823 + BIT(IS_S_TX_UNDERRUN_SHIFT));
3824 }
3825
3826 - /* Stop */
3827 + /* Stop received from master in case of master read transaction */
3828 if (status & BIT(IS_S_START_BUSY_SHIFT)) {
3829 - i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
3830 /*
3831 * Enable interrupt for TX FIFO becomes empty and
3832 * less than PKT_LENGTH bytes were output on the SMBUS
3833 */
3834 - val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
3835 - val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
3836 - iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
3837 + iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
3838 + iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
3839 + iproc_i2c->slave_int_mask);
3840 +
3841 + /* End of SMBUS for Master Read */
3842 + val = BIT(S_TX_WR_STATUS_SHIFT);
3843 + iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, val);
3844 +
3845 + val = BIT(S_CMD_START_BUSY_SHIFT);
3846 + iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
3847 +
3848 + /* flush TX FIFOs */
3849 + val = iproc_i2c_rd_reg(iproc_i2c, S_FIFO_CTRL_OFFSET);
3850 + val |= (BIT(S_FIFO_TX_FLUSH_SHIFT));
3851 + iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, val);
3852 +
3853 + i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
3854 +
3855 + /* clear interrupt */
3856 + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
3857 + BIT(IS_S_START_BUSY_SHIFT));
3858 }
3859
3860 - /* clear interrupt status */
3861 - iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
3862 + /* check slave transmit status only if slave is transmitting */
3863 + if (!iproc_i2c->slave_rx_only)
3864 + bcm_iproc_i2c_check_slave_status(iproc_i2c);
3865
3866 - bcm_iproc_i2c_check_slave_status(iproc_i2c);
3867 return true;
3868 }
3869
3870 @@ -503,12 +623,17 @@ static void bcm_iproc_i2c_process_m_event(struct bcm_iproc_i2c_dev *iproc_i2c,
3871 static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
3872 {
3873 struct bcm_iproc_i2c_dev *iproc_i2c = data;
3874 - u32 status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
3875 + u32 slave_status;
3876 + u32 status;
3877 bool ret;
3878 - u32 sl_status = status & ISR_MASK_SLAVE;
3879
3880 - if (sl_status) {
3881 - ret = bcm_iproc_i2c_slave_isr(iproc_i2c, sl_status);
3882 + status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
3883 + /* process only slave interrupt which are enabled */
3884 + slave_status = status & iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET) &
3885 + ISR_MASK_SLAVE;
3886 +
3887 + if (slave_status) {
3888 + ret = bcm_iproc_i2c_slave_isr(iproc_i2c, slave_status);
3889 if (ret)
3890 return IRQ_HANDLED;
3891 else
3892 @@ -1025,6 +1150,10 @@ static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
3893 return -EAFNOSUPPORT;
3894
3895 iproc_i2c->slave = slave;
3896 +
3897 + tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
3898 + (unsigned long)iproc_i2c);
3899 +
3900 bcm_iproc_i2c_slave_init(iproc_i2c, false);
3901 return 0;
3902 }
3903 @@ -1045,6 +1174,8 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
3904 IE_S_ALL_INTERRUPT_SHIFT);
3905 iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
3906
3907 + tasklet_kill(&iproc_i2c->slave_rx_tasklet);
3908 +
3909 /* Erase the slave address programmed */
3910 tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
3911 tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
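
The iproc rework moves FIFO draining out of hard-IRQ context: the ISR masks
the noisy slave interrupts and schedules a tasklet, and the tasklet drains a
bounded number of bytes (MAX_SLAVE_RX_PER_INT), clears the status bits, and
unmasks again. A condensed kernel-style sketch of the pattern, assuming the
5.4 tasklet API; drain_rx_fifo() and the mask helpers are hypothetical, not
the driver's functions:

    static void rx_tasklet_fn(unsigned long data)
    {
        struct my_dev *dev = (struct my_dev *)data;

        drain_rx_fifo(dev);           /* bounded work, e.g. 10 bytes */
        ack_and_unmask_rx_irqs(dev);  /* re-enable what the ISR masked */
    }

    static irqreturn_t my_isr(int irq, void *data)
    {
        struct my_dev *dev = data;

        mask_rx_irqs(dev);                   /* quiet the interrupt source */
        tasklet_schedule(&dev->rx_tasklet);  /* defer the drain */
        return IRQ_HANDLED;
    }

    /* paired with:
     *   tasklet_init(&dev->rx_tasklet, rx_tasklet_fn, (unsigned long)dev);
     * at registration and tasklet_kill(&dev->rx_tasklet) at teardown,
     * exactly as the hunks above do for the slave path.
     */
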
3912 diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
3913 index 506991596b68d..5e89cd6b690ce 100644
3914 --- a/drivers/i2c/busses/i2c-brcmstb.c
3915 +++ b/drivers/i2c/busses/i2c-brcmstb.c
3916 @@ -316,7 +316,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev,
3917 goto cmd_out;
3918 }
3919
3920 - if ((CMD_RD || CMD_WR) &&
3921 + if ((cmd == CMD_RD || cmd == CMD_WR) &&
3922 bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) {
3923 rc = -EREMOTEIO;
3924 dev_dbg(dev->device, "controller received NOACK intr for %s\n",
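
The brcmstb fix is a classic always-true condition: (CMD_RD || CMD_WR) ORs
two nonzero constants, so it never tested cmd at all. A standalone demo of
the bug class (enum values made up, not the driver's):

    #include <stdio.h>

    enum { CMD_RD = 1, CMD_WR = 2, CMD_IDLE = 3 };

    int main(void)
    {
        int cmd = CMD_IDLE;

        if (CMD_RD || CMD_WR)  /* constant-folds to true for any cmd */
            printf("buggy check fires even for cmd=%d\n", cmd);

        if (cmd == CMD_RD || cmd == CMD_WR)  /* the intended comparison */
            printf("fixed check fires\n");
        else
            printf("fixed check correctly skips cmd=%d\n", cmd);
        return 0;
    }
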
3925 diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
3926 index 17abf60c94aeb..b56a427fb928f 100644
3927 --- a/drivers/i2c/busses/i2c-qcom-geni.c
3928 +++ b/drivers/i2c/busses/i2c-qcom-geni.c
3929 @@ -87,6 +87,9 @@ struct geni_i2c_dev {
3930 u32 clk_freq_out;
3931 const struct geni_i2c_clk_fld *clk_fld;
3932 int suspended;
3933 + void *dma_buf;
3934 + size_t xfer_len;
3935 + dma_addr_t dma_addr;
3936 };
3937
3938 struct geni_i2c_err_log {
3939 @@ -350,14 +353,39 @@ static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c)
3940 dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
3941 }
3942
3943 +static void geni_i2c_rx_msg_cleanup(struct geni_i2c_dev *gi2c,
3944 + struct i2c_msg *cur)
3945 +{
3946 + gi2c->cur_rd = 0;
3947 + if (gi2c->dma_buf) {
3948 + if (gi2c->err)
3949 + geni_i2c_rx_fsm_rst(gi2c);
3950 + geni_se_rx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
3951 + i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
3952 + }
3953 +}
3954 +
3955 +static void geni_i2c_tx_msg_cleanup(struct geni_i2c_dev *gi2c,
3956 + struct i2c_msg *cur)
3957 +{
3958 + gi2c->cur_wr = 0;
3959 + if (gi2c->dma_buf) {
3960 + if (gi2c->err)
3961 + geni_i2c_tx_fsm_rst(gi2c);
3962 + geni_se_tx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
3963 + i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
3964 + }
3965 +}
3966 +
3967 static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
3968 u32 m_param)
3969 {
3970 - dma_addr_t rx_dma;
3971 + dma_addr_t rx_dma = 0;
3972 unsigned long time_left;
3973 void *dma_buf = NULL;
3974 struct geni_se *se = &gi2c->se;
3975 size_t len = msg->len;
3976 + struct i2c_msg *cur;
3977
3978 if (!of_machine_is_compatible("lenovo,yoga-c630"))
3979 dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
3980 @@ -374,19 +402,18 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
3981 geni_se_select_mode(se, GENI_SE_FIFO);
3982 i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
3983 dma_buf = NULL;
3984 + } else {
3985 + gi2c->xfer_len = len;
3986 + gi2c->dma_addr = rx_dma;
3987 + gi2c->dma_buf = dma_buf;
3988 }
3989
3990 + cur = gi2c->cur;
3991 time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
3992 if (!time_left)
3993 geni_i2c_abort_xfer(gi2c);
3994
3995 - gi2c->cur_rd = 0;
3996 - if (dma_buf) {
3997 - if (gi2c->err)
3998 - geni_i2c_rx_fsm_rst(gi2c);
3999 - geni_se_rx_dma_unprep(se, rx_dma, len);
4000 - i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
4001 - }
4002 + geni_i2c_rx_msg_cleanup(gi2c, cur);
4003
4004 return gi2c->err;
4005 }
4006 @@ -394,11 +421,12 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
4007 static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
4008 u32 m_param)
4009 {
4010 - dma_addr_t tx_dma;
4011 + dma_addr_t tx_dma = 0;
4012 unsigned long time_left;
4013 void *dma_buf = NULL;
4014 struct geni_se *se = &gi2c->se;
4015 size_t len = msg->len;
4016 + struct i2c_msg *cur;
4017
4018 if (!of_machine_is_compatible("lenovo,yoga-c630"))
4019 dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
4020 @@ -415,22 +443,21 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
4021 geni_se_select_mode(se, GENI_SE_FIFO);
4022 i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
4023 dma_buf = NULL;
4024 + } else {
4025 + gi2c->xfer_len = len;
4026 + gi2c->dma_addr = tx_dma;
4027 + gi2c->dma_buf = dma_buf;
4028 }
4029
4030 if (!dma_buf) /* Get FIFO IRQ */
4031 writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);
4032
4033 + cur = gi2c->cur;
4034 time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
4035 if (!time_left)
4036 geni_i2c_abort_xfer(gi2c);
4037
4038 - gi2c->cur_wr = 0;
4039 - if (dma_buf) {
4040 - if (gi2c->err)
4041 - geni_i2c_tx_fsm_rst(gi2c);
4042 - geni_se_tx_dma_unprep(se, tx_dma, len);
4043 - i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
4044 - }
4045 + geni_i2c_tx_msg_cleanup(gi2c, cur);
4046
4047 return gi2c->err;
4048 }
4049 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
4050 index fd7c84721b0de..c933c1c7ddd8e 100644
4051 --- a/drivers/infiniband/core/cm.c
4052 +++ b/drivers/infiniband/core/cm.c
4053 @@ -4336,7 +4336,7 @@ static void cm_add_one(struct ib_device *ib_device)
4054 unsigned long flags;
4055 int ret;
4056 int count = 0;
4057 - u8 i;
4058 + unsigned int i;
4059
4060 cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
4061 GFP_KERNEL);
4062 @@ -4348,7 +4348,7 @@ static void cm_add_one(struct ib_device *ib_device)
4063 cm_dev->going_down = 0;
4064
4065 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
4066 - for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4067 + rdma_for_each_port (ib_device, i) {
4068 if (!rdma_cap_ib_cm(ib_device, i))
4069 continue;
4070
4071 @@ -4427,7 +4427,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4072 .clr_port_cap_mask = IB_PORT_CM_SUP
4073 };
4074 unsigned long flags;
4075 - int i;
4076 + unsigned int i;
4077
4078 if (!cm_dev)
4079 return;
4080 @@ -4440,7 +4440,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4081 cm_dev->going_down = 1;
4082 spin_unlock_irq(&cm.lock);
4083
4084 - for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4085 + rdma_for_each_port (ib_device, i) {
4086 if (!rdma_cap_ib_cm(ib_device, i))
4087 continue;
4088
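
Besides matching the rdma_for_each_port() iterator, widening i from u8 is a
real fix: a u8 can never exceed 255, so with phys_port_cnt == 255 the old
"i <= phys_port_cnt" condition could never become false. A standalone demo:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t cnt = 255;  /* hypothetical port count */
        uint8_t i = 255;
        unsigned int j;

        i++;  /* wraps to 0 instead of reaching 256 */
        printf("u8 after 255 is %u, so (i <= %u) never fails\n", i, cnt);

        for (j = 1; j <= cnt; j++)
            ;  /* a wider counter terminates normally */
        printf("unsigned int counter exits with j == %u\n", j); /* 256 */
        return 0;
    }
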
4089 diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
4090 index da229eab59032..ad3a092b8b5c3 100644
4091 --- a/drivers/infiniband/core/user_mad.c
4092 +++ b/drivers/infiniband/core/user_mad.c
4093 @@ -379,6 +379,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
4094
4095 mutex_lock(&file->mutex);
4096
4097 + if (file->agents_dead) {
4098 + mutex_unlock(&file->mutex);
4099 + return -EIO;
4100 + }
4101 +
4102 while (list_empty(&file->recv_list)) {
4103 mutex_unlock(&file->mutex);
4104
4105 @@ -392,6 +397,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
4106 mutex_lock(&file->mutex);
4107 }
4108
4109 + if (file->agents_dead) {
4110 + mutex_unlock(&file->mutex);
4111 + return -EIO;
4112 + }
4113 +
4114 packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
4115 list_del(&packet->list);
4116
4117 @@ -524,7 +534,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
4118
4119 agent = __get_agent(file, packet->mad.hdr.id);
4120 if (!agent) {
4121 - ret = -EINVAL;
4122 + ret = -EIO;
4123 goto err_up;
4124 }
4125
4126 @@ -653,10 +663,14 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
4127 /* we will always be able to post a MAD send */
4128 __poll_t mask = EPOLLOUT | EPOLLWRNORM;
4129
4130 + mutex_lock(&file->mutex);
4131 poll_wait(filp, &file->recv_wait, wait);
4132
4133 if (!list_empty(&file->recv_list))
4134 mask |= EPOLLIN | EPOLLRDNORM;
4135 + if (file->agents_dead)
4136 + mask = EPOLLERR;
4137 + mutex_unlock(&file->mutex);
4138
4139 return mask;
4140 }
4141 @@ -1336,6 +1350,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
4142 list_for_each_entry(file, &port->file_list, port_list) {
4143 mutex_lock(&file->mutex);
4144 file->agents_dead = 1;
4145 + wake_up_interruptible(&file->recv_wait);
4146 mutex_unlock(&file->mutex);
4147
4148 for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
4149 diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
4150 index e36d315690819..3e68ba9dab45d 100644
4151 --- a/drivers/infiniband/hw/hns/hns_roce_device.h
4152 +++ b/drivers/infiniband/hw/hns/hns_roce_device.h
4153 @@ -657,7 +657,7 @@ struct hns_roce_qp {
4154 u8 rdb_en;
4155 u8 sdb_en;
4156 u32 doorbell_qpn;
4157 - u32 sq_signal_bits;
4158 + enum ib_sig_type sq_signal_bits;
4159 struct hns_roce_wq sq;
4160
4161 struct ib_umem *umem;
4162 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4163 index e8933daab4995..d01e3222c00cf 100644
4164 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4165 +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4166 @@ -1009,7 +1009,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
4167 u32 timeout = 0;
4168 int handle = 0;
4169 u16 desc_ret;
4170 - int ret = 0;
4171 + int ret;
4172 int ntc;
4173
4174 spin_lock_bh(&csq->lock);
4175 @@ -1054,15 +1054,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
4176 if (hns_roce_cmq_csq_done(hr_dev)) {
4177 complete = true;
4178 handle = 0;
4179 + ret = 0;
4180 while (handle < num) {
4181 /* get the result of hardware write back */
4182 desc_to_use = &csq->desc[ntc];
4183 desc[handle] = *desc_to_use;
4184 dev_dbg(hr_dev->dev, "Get cmq desc:\n");
4185 desc_ret = le16_to_cpu(desc[handle].retval);
4186 - if (desc_ret == CMD_EXEC_SUCCESS)
4187 - ret = 0;
4188 - else
4189 + if (unlikely(desc_ret != CMD_EXEC_SUCCESS))
4190 ret = -EIO;
4191 priv->cmq.last_status = desc_ret;
4192 ntc++;
4193 diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
4194 index b5d196c119eec..f23a341400c06 100644
4195 --- a/drivers/infiniband/hw/hns/hns_roce_main.c
4196 +++ b/drivers/infiniband/hw/hns/hns_roce_main.c
4197 @@ -848,8 +848,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
4198 return 0;
4199
4200 err_qp_table_free:
4201 - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
4202 - hns_roce_cleanup_qp_table(hr_dev);
4203 + hns_roce_cleanup_qp_table(hr_dev);
4204
4205 err_cq_table_free:
4206 hns_roce_cleanup_cq_table(hr_dev);
4207 diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
4208 index fd75a9043bf15..4d6f25fdcc0ef 100644
4209 --- a/drivers/infiniband/hw/mlx5/devx.c
4210 +++ b/drivers/infiniband/hw/mlx5/devx.c
4211 @@ -1118,7 +1118,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
4212 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
4213 break;
4214 case MLX5_CMD_OP_CREATE_TIR:
4215 - MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
4216 + *obj_id = MLX5_GET(create_tir_out, out, tirn);
4217 + MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
4218 + MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
4219 break;
4220 case MLX5_CMD_OP_CREATE_TIS:
4221 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
4222 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
4223 index c9e583c05ef27..e2656b68ec222 100644
4224 --- a/drivers/infiniband/hw/mlx5/main.c
4225 +++ b/drivers/infiniband/hw/mlx5/main.c
4226 @@ -6213,8 +6213,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
4227
4228 err_mp:
4229 mlx5_ib_cleanup_multiport_master(dev);
4230 -
4231 - return -ENOMEM;
4232 + return err;
4233 }
4234
4235 static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
4236 diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
4237 index 312c2fc961c00..d411356828911 100644
4238 --- a/drivers/infiniband/sw/rxe/rxe_net.c
4239 +++ b/drivers/infiniband/sw/rxe/rxe_net.c
4240 @@ -453,6 +453,11 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
4241
4242 void rxe_loopback(struct sk_buff *skb)
4243 {
4244 + if (skb->protocol == htons(ETH_P_IP))
4245 + skb_pull(skb, sizeof(struct iphdr));
4246 + else
4247 + skb_pull(skb, sizeof(struct ipv6hdr));
4248 +
4249 rxe_rcv(skb);
4250 }
4251
4252 diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
4253 index 9bfb98056fc2a..369ba76f1605e 100644
4254 --- a/drivers/infiniband/sw/rxe/rxe_recv.c
4255 +++ b/drivers/infiniband/sw/rxe/rxe_recv.c
4256 @@ -36,21 +36,26 @@
4257 #include "rxe.h"
4258 #include "rxe_loc.h"
4259
4260 +/* check that QP matches packet opcode type and is in a valid state */
4261 static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
4262 struct rxe_qp *qp)
4263 {
4264 + unsigned int pkt_type;
4265 +
4266 if (unlikely(!qp->valid))
4267 goto err1;
4268
4269 + pkt_type = pkt->opcode & 0xe0;
4270 +
4271 switch (qp_type(qp)) {
4272 case IB_QPT_RC:
4273 - if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
4274 + if (unlikely(pkt_type != IB_OPCODE_RC)) {
4275 pr_warn_ratelimited("bad qp type\n");
4276 goto err1;
4277 }
4278 break;
4279 case IB_QPT_UC:
4280 - if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
4281 + if (unlikely(pkt_type != IB_OPCODE_UC)) {
4282 pr_warn_ratelimited("bad qp type\n");
4283 goto err1;
4284 }
4285 @@ -58,7 +63,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
4286 case IB_QPT_UD:
4287 case IB_QPT_SMI:
4288 case IB_QPT_GSI:
4289 - if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
4290 + if (unlikely(pkt_type != IB_OPCODE_UD)) {
4291 pr_warn_ratelimited("bad qp type\n");
4292 goto err1;
4293 }
4294 @@ -300,7 +305,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
4295
4296 list_for_each_entry(mce, &mcg->qp_list, qp_list) {
4297 qp = mce->qp;
4298 - pkt = SKB_TO_PKT(skb);
4299
4300 /* validate qp for incoming packet */
4301 err = check_type_state(rxe, pkt, qp);
4302 @@ -312,12 +316,18 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
4303 continue;
4304
4305 /* for all but the last qp create a new clone of the
4306 - * skb and pass to the qp.
4307 + * skb and pass to the qp. If an error occurs in the
4308 + * checks for the last qp in the list we need to
4309 + * free the skb since it hasn't been passed on to
4310 + * rxe_rcv_pkt() which would free it later.
4311 */
4312 - if (mce->qp_list.next != &mcg->qp_list)
4313 + if (mce->qp_list.next != &mcg->qp_list) {
4314 per_qp_skb = skb_clone(skb, GFP_ATOMIC);
4315 - else
4316 + } else {
4317 per_qp_skb = skb;
4318 + /* show we have consumed the skb */
4319 + skb = NULL;
4320 + }
4321
4322 if (unlikely(!per_qp_skb))
4323 continue;
4324 @@ -332,9 +342,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
4325
4326 rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
4327
4328 - return;
4329 -
4330 err1:
4331 + /* free skb if not consumed */
4332 kfree_skb(skb);
4333 }
4334
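
check_type_state() now derives the transport class from the top three bits
of the BTH opcode (mask 0xe0) and compares the result. The old tests were
broken, e.g. IB_OPCODE_RC is 0x00, so "opcode & IB_OPCODE_RC" was always
zero. A standalone sketch of the corrected classification (prefix values
follow the IBA opcode layout):

    #include <stdio.h>

    enum { OP_RC = 0x00, OP_UC = 0x20, OP_UD = 0x60 }; /* class prefixes */

    static int is_class(unsigned int opcode, unsigned int class_prefix)
    {
        return (opcode & 0xe0) == class_prefix;
    }

    int main(void)
    {
        unsigned int opcode = 0x64; /* hypothetical UD-class opcode */

        printf("UC? %d  UD? %d\n",
               is_class(opcode, OP_UC), is_class(opcode, OP_UD)); /* 0 1 */
        return 0;
    }
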
4335 diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
4336 index dba4535494abd..4d8bc995b4503 100644
4337 --- a/drivers/infiniband/sw/siw/siw.h
4338 +++ b/drivers/infiniband/sw/siw/siw.h
4339 @@ -667,7 +667,7 @@ static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
4340 {
4341 struct siw_sqe *orq_e = orq_get_tail(qp);
4342
4343 - if (orq_e && READ_ONCE(orq_e->flags) == 0)
4344 + if (READ_ONCE(orq_e->flags) == 0)
4345 return orq_e;
4346
4347 return NULL;
4348 diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
4349 index fb66d67572787..dbbf8c6c16d38 100644
4350 --- a/drivers/infiniband/sw/siw/siw_main.c
4351 +++ b/drivers/infiniband/sw/siw/siw_main.c
4352 @@ -134,7 +134,7 @@ static struct {
4353
4354 static int siw_init_cpulist(void)
4355 {
4356 - int i, num_nodes = num_possible_nodes();
4357 + int i, num_nodes = nr_node_ids;
4358
4359 memset(siw_tx_thread, 0, sizeof(siw_tx_thread));
4360
4361 diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
4362 index b4317480cee74..5927ac5923dd8 100644
4363 --- a/drivers/infiniband/sw/siw/siw_qp.c
4364 +++ b/drivers/infiniband/sw/siw/siw_qp.c
4365 @@ -199,26 +199,26 @@ void siw_qp_llp_write_space(struct sock *sk)
4366
4367 static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
4368 {
4369 - irq_size = roundup_pow_of_two(irq_size);
4370 - orq_size = roundup_pow_of_two(orq_size);
4371 -
4372 - qp->attrs.irq_size = irq_size;
4373 - qp->attrs.orq_size = orq_size;
4374 -
4375 - qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
4376 - if (!qp->irq) {
4377 - siw_dbg_qp(qp, "irq malloc for %d failed\n", irq_size);
4378 - qp->attrs.irq_size = 0;
4379 - return -ENOMEM;
4380 + if (irq_size) {
4381 + irq_size = roundup_pow_of_two(irq_size);
4382 + qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
4383 + if (!qp->irq) {
4384 + qp->attrs.irq_size = 0;
4385 + return -ENOMEM;
4386 + }
4387 }
4388 - qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
4389 - if (!qp->orq) {
4390 - siw_dbg_qp(qp, "orq malloc for %d failed\n", orq_size);
4391 - qp->attrs.orq_size = 0;
4392 - qp->attrs.irq_size = 0;
4393 - vfree(qp->irq);
4394 - return -ENOMEM;
4395 + if (orq_size) {
4396 + orq_size = roundup_pow_of_two(orq_size);
4397 + qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
4398 + if (!qp->orq) {
4399 + qp->attrs.orq_size = 0;
4400 + qp->attrs.irq_size = 0;
4401 + vfree(qp->irq);
4402 + return -ENOMEM;
4403 + }
4404 }
4405 + qp->attrs.irq_size = irq_size;
4406 + qp->attrs.orq_size = orq_size;
4407 siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
4408 return 0;
4409 }
4410 @@ -288,13 +288,14 @@ int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
4411 if (ctrl & MPA_V2_RDMA_WRITE_RTR)
4412 wqe->sqe.opcode = SIW_OP_WRITE;
4413 else if (ctrl & MPA_V2_RDMA_READ_RTR) {
4414 - struct siw_sqe *rreq;
4415 + struct siw_sqe *rreq = NULL;
4416
4417 wqe->sqe.opcode = SIW_OP_READ;
4418
4419 spin_lock(&qp->orq_lock);
4420
4421 - rreq = orq_get_free(qp);
4422 + if (qp->attrs.orq_size)
4423 + rreq = orq_get_free(qp);
4424 if (rreq) {
4425 siw_read_to_orq(rreq, &wqe->sqe);
4426 qp->orq_put++;
4427 @@ -877,135 +878,88 @@ void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
4428 rreq->num_sge = 1;
4429 }
4430
4431 -/*
4432 - * Must be called with SQ locked.
4433 - * To avoid complete SQ starvation by constant inbound READ requests,
4434 - * the active IRQ will not be served after qp->irq_burst, if the
4435 - * SQ has pending work.
4436 - */
4437 -int siw_activate_tx(struct siw_qp *qp)
4438 +static int siw_activate_tx_from_sq(struct siw_qp *qp)
4439 {
4440 - struct siw_sqe *irqe, *sqe;
4441 + struct siw_sqe *sqe;
4442 struct siw_wqe *wqe = tx_wqe(qp);
4443 int rv = 1;
4444
4445 - irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
4446 -
4447 - if (irqe->flags & SIW_WQE_VALID) {
4448 - sqe = sq_get_next(qp);
4449 -
4450 - /*
4451 - * Avoid local WQE processing starvation in case
4452 - * of constant inbound READ request stream
4453 - */
4454 - if (sqe && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
4455 - qp->irq_burst = 0;
4456 - goto skip_irq;
4457 - }
4458 - memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
4459 - wqe->wr_status = SIW_WR_QUEUED;
4460 -
4461 - /* start READ RESPONSE */
4462 - wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
4463 - wqe->sqe.flags = 0;
4464 - if (irqe->num_sge) {
4465 - wqe->sqe.num_sge = 1;
4466 - wqe->sqe.sge[0].length = irqe->sge[0].length;
4467 - wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
4468 - wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
4469 - } else {
4470 - wqe->sqe.num_sge = 0;
4471 - }
4472 -
4473 - /* Retain original RREQ's message sequence number for
4474 - * potential error reporting cases.
4475 - */
4476 - wqe->sqe.sge[1].length = irqe->sge[1].length;
4477 -
4478 - wqe->sqe.rkey = irqe->rkey;
4479 - wqe->sqe.raddr = irqe->raddr;
4480 + sqe = sq_get_next(qp);
4481 + if (!sqe)
4482 + return 0;
4483
4484 - wqe->processed = 0;
4485 - qp->irq_get++;
4486 + memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
4487 + wqe->wr_status = SIW_WR_QUEUED;
4488
4489 - /* mark current IRQ entry free */
4490 - smp_store_mb(irqe->flags, 0);
4491 + /* First copy SQE to kernel private memory */
4492 + memcpy(&wqe->sqe, sqe, sizeof(*sqe));
4493
4494 + if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
4495 + rv = -EINVAL;
4496 goto out;
4497 }
4498 - sqe = sq_get_next(qp);
4499 - if (sqe) {
4500 -skip_irq:
4501 - memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
4502 - wqe->wr_status = SIW_WR_QUEUED;
4503 -
4504 - /* First copy SQE to kernel private memory */
4505 - memcpy(&wqe->sqe, sqe, sizeof(*sqe));
4506 -
4507 - if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
4508 + if (wqe->sqe.flags & SIW_WQE_INLINE) {
4509 + if (wqe->sqe.opcode != SIW_OP_SEND &&
4510 + wqe->sqe.opcode != SIW_OP_WRITE) {
4511 rv = -EINVAL;
4512 goto out;
4513 }
4514 - if (wqe->sqe.flags & SIW_WQE_INLINE) {
4515 - if (wqe->sqe.opcode != SIW_OP_SEND &&
4516 - wqe->sqe.opcode != SIW_OP_WRITE) {
4517 - rv = -EINVAL;
4518 - goto out;
4519 - }
4520 - if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
4521 - rv = -EINVAL;
4522 - goto out;
4523 - }
4524 - wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
4525 - wqe->sqe.sge[0].lkey = 0;
4526 - wqe->sqe.num_sge = 1;
4527 + if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
4528 + rv = -EINVAL;
4529 + goto out;
4530 }
4531 - if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
4532 - /* A READ cannot be fenced */
4533 - if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
4534 - wqe->sqe.opcode ==
4535 - SIW_OP_READ_LOCAL_INV)) {
4536 - siw_dbg_qp(qp, "cannot fence read\n");
4537 - rv = -EINVAL;
4538 - goto out;
4539 - }
4540 - spin_lock(&qp->orq_lock);
4541 + wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
4542 + wqe->sqe.sge[0].lkey = 0;
4543 + wqe->sqe.num_sge = 1;
4544 + }
4545 + if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
4546 + /* A READ cannot be fenced */
4547 + if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
4548 + wqe->sqe.opcode ==
4549 + SIW_OP_READ_LOCAL_INV)) {
4550 + siw_dbg_qp(qp, "cannot fence read\n");
4551 + rv = -EINVAL;
4552 + goto out;
4553 + }
4554 + spin_lock(&qp->orq_lock);
4555
4556 - if (!siw_orq_empty(qp)) {
4557 - qp->tx_ctx.orq_fence = 1;
4558 - rv = 0;
4559 - }
4560 - spin_unlock(&qp->orq_lock);
4561 + if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
4562 + qp->tx_ctx.orq_fence = 1;
4563 + rv = 0;
4564 + }
4565 + spin_unlock(&qp->orq_lock);
4566
4567 - } else if (wqe->sqe.opcode == SIW_OP_READ ||
4568 - wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
4569 - struct siw_sqe *rreq;
4570 + } else if (wqe->sqe.opcode == SIW_OP_READ ||
4571 + wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
4572 + struct siw_sqe *rreq;
4573
4574 - wqe->sqe.num_sge = 1;
4575 + if (unlikely(!qp->attrs.orq_size)) {
4576 + /* We negotiated not to send READ req's */
4577 + rv = -EINVAL;
4578 + goto out;
4579 + }
4580 + wqe->sqe.num_sge = 1;
4581
4582 - spin_lock(&qp->orq_lock);
4583 + spin_lock(&qp->orq_lock);
4584
4585 - rreq = orq_get_free(qp);
4586 - if (rreq) {
4587 - /*
4588 - * Make an immediate copy in ORQ to be ready
4589 - * to process loopback READ reply
4590 - */
4591 - siw_read_to_orq(rreq, &wqe->sqe);
4592 - qp->orq_put++;
4593 - } else {
4594 - qp->tx_ctx.orq_fence = 1;
4595 - rv = 0;
4596 - }
4597 - spin_unlock(&qp->orq_lock);
4598 + rreq = orq_get_free(qp);
4599 + if (rreq) {
4600 + /*
4601 + * Make an immediate copy in ORQ to be ready
4602 + * to process loopback READ reply
4603 + */
4604 + siw_read_to_orq(rreq, &wqe->sqe);
4605 + qp->orq_put++;
4606 + } else {
4607 + qp->tx_ctx.orq_fence = 1;
4608 + rv = 0;
4609 }
4610 -
4611 - /* Clear SQE, can be re-used by application */
4612 - smp_store_mb(sqe->flags, 0);
4613 - qp->sq_get++;
4614 - } else {
4615 - rv = 0;
4616 + spin_unlock(&qp->orq_lock);
4617 }
4618 +
4619 + /* Clear SQE, can be re-used by application */
4620 + smp_store_mb(sqe->flags, 0);
4621 + qp->sq_get++;
4622 out:
4623 if (unlikely(rv < 0)) {
4624 siw_dbg_qp(qp, "error %d\n", rv);
4625 @@ -1014,6 +968,65 @@ out:
4626 return rv;
4627 }
4628
4629 +/*
4630 + * Must be called with SQ locked.
4631 + * To avoid complete SQ starvation by constant inbound READ requests,
4632 + * the active IRQ will not be served after qp->irq_burst, if the
4633 + * SQ has pending work.
4634 + */
4635 +int siw_activate_tx(struct siw_qp *qp)
4636 +{
4637 + struct siw_sqe *irqe;
4638 + struct siw_wqe *wqe = tx_wqe(qp);
4639 +
4640 + if (!qp->attrs.irq_size)
4641 + return siw_activate_tx_from_sq(qp);
4642 +
4643 + irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
4644 +
4645 + if (!(irqe->flags & SIW_WQE_VALID))
4646 + return siw_activate_tx_from_sq(qp);
4647 +
4648 + /*
4649 + * Avoid local WQE processing starvation in case
4650 + * of constant inbound READ request stream
4651 + */
4652 + if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
4653 + qp->irq_burst = 0;
4654 + return siw_activate_tx_from_sq(qp);
4655 + }
4656 + memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
4657 + wqe->wr_status = SIW_WR_QUEUED;
4658 +
4659 + /* start READ RESPONSE */
4660 + wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
4661 + wqe->sqe.flags = 0;
4662 + if (irqe->num_sge) {
4663 + wqe->sqe.num_sge = 1;
4664 + wqe->sqe.sge[0].length = irqe->sge[0].length;
4665 + wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
4666 + wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
4667 + } else {
4668 + wqe->sqe.num_sge = 0;
4669 + }
4670 +
4671 + /* Retain original RREQ's message sequence number for
4672 + * potential error reporting cases.
4673 + */
4674 + wqe->sqe.sge[1].length = irqe->sge[1].length;
4675 +
4676 + wqe->sqe.rkey = irqe->rkey;
4677 + wqe->sqe.raddr = irqe->raddr;
4678 +
4679 + wqe->processed = 0;
4680 + qp->irq_get++;
4681 +
4682 + /* mark current IRQ entry free */
4683 + smp_store_mb(irqe->flags, 0);
4684 +
4685 + return 1;
4686 +}
4687 +
4688 /*
4689 * Check if current CQ state qualifies for calling CQ completion
4690 * handler. Must be called with CQ lock held.
4691 diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
4692 index 0520e70084f97..c7c38f7fd29d6 100644
4693 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c
4694 +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
4695 @@ -680,6 +680,10 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
4696 }
4697 spin_lock_irqsave(&qp->sq_lock, flags);
4698
4699 + if (unlikely(!qp->attrs.irq_size)) {
4700 + run_sq = 0;
4701 + goto error_irq;
4702 + }
4703 if (tx_work->wr_status == SIW_WR_IDLE) {
4704 /*
4705 * immediately schedule READ response w/o
4706 @@ -712,8 +716,9 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
4707 /* RRESP now valid as current TX wqe or placed into IRQ */
4708 smp_store_mb(resp->flags, SIW_WQE_VALID);
4709 } else {
4710 - pr_warn("siw: [QP %u]: irq %d exceeded %d\n", qp_id(qp),
4711 - qp->irq_put % qp->attrs.irq_size, qp->attrs.irq_size);
4712 +error_irq:
4713 + pr_warn("siw: [QP %u]: IRQ exceeded or null, size %d\n",
4714 + qp_id(qp), qp->attrs.irq_size);
4715
4716 siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
4717 RDMAP_ETYPE_REMOTE_OPERATION,
4718 @@ -740,6 +745,9 @@ static int siw_orqe_start_rx(struct siw_qp *qp)
4719 struct siw_sqe *orqe;
4720 struct siw_wqe *wqe = NULL;
4721
4722 + if (unlikely(!qp->attrs.orq_size))
4723 + return -EPROTO;
4724 +
4725 /* make sure ORQ indices are current */
4726 smp_mb();
4727
4728 @@ -796,8 +804,8 @@ int siw_proc_rresp(struct siw_qp *qp)
4729 */
4730 rv = siw_orqe_start_rx(qp);
4731 if (rv) {
4732 - pr_warn("siw: [QP %u]: ORQ empty at idx %d\n",
4733 - qp_id(qp), qp->orq_get % qp->attrs.orq_size);
4734 + pr_warn("siw: [QP %u]: ORQ empty, size %d\n",
4735 + qp_id(qp), qp->attrs.orq_size);
4736 goto error_term;
4737 }
4738 rv = siw_rresp_check_ntoh(srx, frx);
4739 @@ -1290,11 +1298,13 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
4740 wc_status);
4741 siw_wqe_put_mem(wqe, SIW_OP_READ);
4742
4743 - if (!error)
4744 + if (!error) {
4745 rv = siw_check_tx_fence(qp);
4746 - else
4747 - /* Disable current ORQ eleement */
4748 - WRITE_ONCE(orq_get_current(qp)->flags, 0);
4749 + } else {
4750 + /* Disable current ORQ element */
4751 + if (qp->attrs.orq_size)
4752 + WRITE_ONCE(orq_get_current(qp)->flags, 0);
4753 + }
4754 break;
4755
4756 case RDMAP_RDMA_READ_REQ:
4757 diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
4758 index e7cd04eda04ac..424918eb1cd4a 100644
4759 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c
4760 +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
4761 @@ -1107,8 +1107,8 @@ next_wqe:
4762 /*
4763 * RREQ may have already been completed by inbound RRESP!
4764 */
4765 - if (tx_type == SIW_OP_READ ||
4766 - tx_type == SIW_OP_READ_LOCAL_INV) {
4767 + if ((tx_type == SIW_OP_READ ||
4768 + tx_type == SIW_OP_READ_LOCAL_INV) && qp->attrs.orq_size) {
4769 /* Cleanup pending entry in ORQ */
4770 qp->orq_put--;
4771 qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0;
4772 diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
4773 index 1b1a40db529c6..2c3704f0f10fa 100644
4774 --- a/drivers/infiniband/sw/siw/siw_verbs.c
4775 +++ b/drivers/infiniband/sw/siw/siw_verbs.c
4776 @@ -387,13 +387,23 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
4777 if (rv)
4778 goto err_out;
4779
4780 + num_sqe = attrs->cap.max_send_wr;
4781 + num_rqe = attrs->cap.max_recv_wr;
4782 +
4783 /* All queue indices are derived from modulo operations
4784 * on a free running 'get' (consumer) and 'put' (producer)
4785 * unsigned counter. Having queue sizes at power of two
4786 * avoids handling counter wrap around.
4787 */
4788 - num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
4789 - num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);
4790 + if (num_sqe)
4791 + num_sqe = roundup_pow_of_two(num_sqe);
4792 + else {
4793 + /* Zero sized SQ is not supported */
4794 + rv = -EINVAL;
4795 + goto err_out;
4796 + }
4797 + if (num_rqe)
4798 + num_rqe = roundup_pow_of_two(num_rqe);
4799
4800 if (qp->kernel_verbs)
4801 qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
4802 @@ -401,7 +411,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
4803 qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
4804
4805 if (qp->sendq == NULL) {
4806 - siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
4807 rv = -ENOMEM;
4808 goto err_out_xa;
4809 }
4810 @@ -434,7 +443,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
4811 vmalloc_user(num_rqe * sizeof(struct siw_rqe));
4812
4813 if (qp->recvq == NULL) {
4814 - siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
4815 rv = -ENOMEM;
4816 goto err_out_xa;
4817 }
4818 @@ -982,9 +990,9 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
4819 unsigned long flags;
4820 int rv = 0;
4821
4822 - if (qp->srq) {
4823 + if (qp->srq || qp->attrs.rq_size == 0) {
4824 *bad_wr = wr;
4825 - return -EOPNOTSUPP; /* what else from errno.h? */
4826 + return -EINVAL;
4827 }
4828 if (!qp->kernel_verbs) {
4829 siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
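
The create-path check matters because the kernel's roundup_pow_of_two() expands to 1UL << fls_long(n - 1), which is undefined for n == 0, so a zero max_send_wr has to be rejected before any rounding happens. A sketch of the validate-then-round order (helper names are mine):

#include <errno.h>

/* Userspace stand-in for the kernel helper; unlike the real macro this
 * one is well defined for 0, but the order of checks below still
 * mirrors the driver: reject a zero SQ, tolerate a zero RQ. */
static unsigned long round_up_pow2(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static int validate_queue_sizes(unsigned long *num_sqe, unsigned long *num_rqe)
{
	if (*num_sqe == 0)
		return -EINVAL;		/* zero-sized SQ is not supported */
	*num_sqe = round_up_pow2(*num_sqe);
	if (*num_rqe)			/* an RQ may legitimately be absent */
		*num_rqe = round_up_pow2(*num_rqe);
	return 0;
}
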
4830 diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
4831 index a2b5fbba2d3b3..430dc69750048 100644
4832 --- a/drivers/input/joydev.c
4833 +++ b/drivers/input/joydev.c
4834 @@ -456,7 +456,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
4835 if (IS_ERR(abspam))
4836 return PTR_ERR(abspam);
4837
4838 - for (i = 0; i < joydev->nabs; i++) {
4839 + for (i = 0; i < len && i < joydev->nabs; i++) {
4840 if (abspam[i] > ABS_MAX) {
4841 retval = -EINVAL;
4842 goto out;
4843 @@ -480,6 +480,9 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
4844 int i;
4845 int retval = 0;
4846
4847 + if (len % sizeof(*keypam))
4848 + return -EINVAL;
4849 +
4850 len = min(len, sizeof(joydev->keypam));
4851
4852 /* Validate the map. */
4853 @@ -487,7 +490,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
4854 if (IS_ERR(keypam))
4855 return PTR_ERR(keypam);
4856
4857 - for (i = 0; i < joydev->nkey; i++) {
4858 + for (i = 0; i < (len / 2) && i < joydev->nkey; i++) {
4859 if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) {
4860 retval = -EINVAL;
4861 goto out;
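
Both joydev changes bound the validation loop by what userspace actually supplied: len comes from the ioctl size, so iterating all nabs/nkey entries could read past the end of the buffer that was copied in, and a length that is not a multiple of the entry size is rejected outright. A sketch of the clamped loop (constant and names are illustrative):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define AXIS_CODE_MAX 0x3f	/* stand-in for ABS_MAX */

/* Validate only the entries the caller actually provided (len of them),
 * never the device's full capacity (nabs). */
static int validate_axis_map(const uint8_t *map, size_t len, size_t nabs)
{
	size_t i;

	for (i = 0; i < len && i < nabs; i++)
		if (map[i] > AXIS_CODE_MAX)
			return -EINVAL;
	return 0;
}
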
4862 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
4863 index 3d004ca76b6ed..e5f1e3cf9179f 100644
4864 --- a/drivers/input/joystick/xpad.c
4865 +++ b/drivers/input/joystick/xpad.c
4866 @@ -305,6 +305,7 @@ static const struct xpad_device {
4867 { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
4868 { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
4869 { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
4870 + { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
4871 { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
4872 { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
4873 { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
4874 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
4875 index b7dbcbac3a1a5..e7346c5f4738a 100644
4876 --- a/drivers/input/serio/i8042-x86ia64io.h
4877 +++ b/drivers/input/serio/i8042-x86ia64io.h
4878 @@ -588,6 +588,10 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
4879 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
4880 DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
4881 },
4882 + .matches = {
4883 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
4884 + DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
4885 + },
4886 },
4887 { }
4888 };
4889 diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
4890 index d6772a2c2d096..e396857cb4c1b 100644
4891 --- a/drivers/input/touchscreen/elo.c
4892 +++ b/drivers/input/touchscreen/elo.c
4893 @@ -341,8 +341,10 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
4894 switch (elo->id) {
4895
4896 case 0: /* 10-byte protocol */
4897 - if (elo_setup_10(elo))
4898 + if (elo_setup_10(elo)) {
4899 + err = -EIO;
4900 goto fail3;
4901 + }
4902
4903 break;
4904
4905 diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
4906 index fe245439adee0..2c67f8eacc7c5 100644
4907 --- a/drivers/input/touchscreen/raydium_i2c_ts.c
4908 +++ b/drivers/input/touchscreen/raydium_i2c_ts.c
4909 @@ -410,6 +410,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
4910 enum raydium_bl_ack state)
4911 {
4912 int error;
4913 + static const u8 cmd[] = { 0xFF, 0x39 };
4914
4915 error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len);
4916 if (error) {
4917 @@ -418,7 +419,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
4918 return error;
4919 }
4920
4921 - error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0);
4922 + error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, cmd, sizeof(cmd));
4923 if (error) {
4924 dev_err(&client->dev, "Ack obj command failed: %d\n", error);
4925 return error;
4926 diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
4927 index 2e2ea5719c90e..902522df03592 100644
4928 --- a/drivers/input/touchscreen/sur40.c
4929 +++ b/drivers/input/touchscreen/sur40.c
4930 @@ -774,6 +774,7 @@ static int sur40_probe(struct usb_interface *interface,
4931 dev_err(&interface->dev,
4932 "Unable to register video controls.");
4933 v4l2_ctrl_handler_free(&sur40->hdl);
4934 + error = sur40->hdl.error;
4935 goto err_unreg_v4l2;
4936 }
4937
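
The sur40 one-liner fixes a classic stale-errno bug: the failure path jumped to the unwind label without loading the failure reason into the variable that gets returned, so the probe could report success (or a leftover error) after control registration failed. Every failure site that shares a goto label must set the return value first; a compressed illustration (types and helpers are hypothetical):

#include <errno.h>

struct ctrl_handler { int error; };

static int register_controls(struct ctrl_handler *hdl)
{
	hdl->error = -EINVAL;		/* pretend a control failed to init */
	return hdl->error;
}

static void unwind(void) { }

static int probe(struct ctrl_handler *hdl)
{
	int error = 0;

	if (register_controls(hdl) < 0) {
		error = hdl->error;	/* capture the cause before the goto */
		goto err_unwind;
	}
	return 0;

err_unwind:
	unwind();
	return error;			/* never stale, never silently 0 */
}
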
4938 diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
4939 index 859567ad3db4e..36de6f7ddf221 100644
4940 --- a/drivers/md/bcache/bcache.h
4941 +++ b/drivers/md/bcache/bcache.h
4942 @@ -986,6 +986,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
4943
4944 extern struct workqueue_struct *bcache_wq;
4945 extern struct workqueue_struct *bch_journal_wq;
4946 +extern struct workqueue_struct *bch_flush_wq;
4947 extern struct mutex bch_register_lock;
4948 extern struct list_head bch_cache_sets;
4949
4950 @@ -1027,5 +1028,7 @@ void bch_debug_exit(void);
4951 void bch_debug_init(void);
4952 void bch_request_exit(void);
4953 int bch_request_init(void);
4954 +void bch_btree_exit(void);
4955 +int bch_btree_init(void);
4956
4957 #endif /* _BCACHE_H */
4958 diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
4959 index 8d06105fc9ff5..5a33910aea788 100644
4960 --- a/drivers/md/bcache/btree.c
4961 +++ b/drivers/md/bcache/btree.c
4962 @@ -99,6 +99,8 @@
4963 #define PTR_HASH(c, k) \
4964 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
4965
4966 +static struct workqueue_struct *btree_io_wq;
4967 +
4968 #define insert_lock(s, b) ((b)->level <= (s)->lock)
4969
4970 /*
4971 @@ -366,7 +368,7 @@ static void __btree_node_write_done(struct closure *cl)
4972 btree_complete_write(b, w);
4973
4974 if (btree_node_dirty(b))
4975 - schedule_delayed_work(&b->work, 30 * HZ);
4976 + queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
4977
4978 closure_return_with_destructor(cl, btree_node_write_unlock);
4979 }
4980 @@ -539,7 +541,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
4981 BUG_ON(!i->keys);
4982
4983 if (!btree_node_dirty(b))
4984 - schedule_delayed_work(&b->work, 30 * HZ);
4985 + queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
4986
4987 set_btree_node_dirty(b);
4988
4989 @@ -2659,3 +2661,18 @@ void bch_keybuf_init(struct keybuf *buf)
4990 spin_lock_init(&buf->lock);
4991 array_allocator_init(&buf->freelist);
4992 }
4993 +
4994 +void bch_btree_exit(void)
4995 +{
4996 + if (btree_io_wq)
4997 + destroy_workqueue(btree_io_wq);
4998 +}
4999 +
5000 +int __init bch_btree_init(void)
5001 +{
5002 + btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
5003 + if (!btree_io_wq)
5004 + return -ENOMEM;
5005 +
5006 + return 0;
5007 +}
5008 diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
5009 index 8250d2d1d780c..b4fd923e0d401 100644
5010 --- a/drivers/md/bcache/journal.c
5011 +++ b/drivers/md/bcache/journal.c
5012 @@ -958,8 +958,8 @@ atomic_t *bch_journal(struct cache_set *c,
5013 journal_try_write(c);
5014 } else if (!w->dirty) {
5015 w->dirty = true;
5016 - schedule_delayed_work(&c->journal.work,
5017 - msecs_to_jiffies(c->journal_delay_ms));
5018 + queue_delayed_work(bch_flush_wq, &c->journal.work,
5019 + msecs_to_jiffies(c->journal_delay_ms));
5020 spin_unlock(&c->journal.lock);
5021 } else {
5022 spin_unlock(&c->journal.lock);
5023 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
5024 index 63f5ce18311bb..b0d569032dd4e 100644
5025 --- a/drivers/md/bcache/super.c
5026 +++ b/drivers/md/bcache/super.c
5027 @@ -48,6 +48,7 @@ static int bcache_major;
5028 static DEFINE_IDA(bcache_device_idx);
5029 static wait_queue_head_t unregister_wait;
5030 struct workqueue_struct *bcache_wq;
5031 +struct workqueue_struct *bch_flush_wq;
5032 struct workqueue_struct *bch_journal_wq;
5033
5034
5035 @@ -2652,6 +2653,9 @@ static void bcache_exit(void)
5036 destroy_workqueue(bcache_wq);
5037 if (bch_journal_wq)
5038 destroy_workqueue(bch_journal_wq);
5039 + if (bch_flush_wq)
5040 + destroy_workqueue(bch_flush_wq);
5041 + bch_btree_exit();
5042
5043 if (bcache_major)
5044 unregister_blkdev(bcache_major, "bcache");
5045 @@ -2707,10 +2711,26 @@ static int __init bcache_init(void)
5046 return bcache_major;
5047 }
5048
5049 + if (bch_btree_init())
5050 + goto err;
5051 +
5052 bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
5053 if (!bcache_wq)
5054 goto err;
5055
5056 + /*
5057 + * Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
5058 + *
5059 + * 1. It used `system_wq` before, which also does no memory reclaim.
5060 + * 2. With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and
5061 + * reduced throughput can be observed.
5062 + *
5063 + * We still want to use our own queue to not congest the `system_wq`.
5064 + */
5065 + bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
5066 + if (!bch_flush_wq)
5067 + goto err;
5068 +
5069 bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
5070 if (!bch_journal_wq)
5071 goto err;
5072 diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
5073 index c4ef1fceead6e..3fea121fcbcf9 100644
5074 --- a/drivers/md/dm-core.h
5075 +++ b/drivers/md/dm-core.h
5076 @@ -106,6 +106,10 @@ struct mapped_device {
5077
5078 struct block_device *bdev;
5079
5080 + int swap_bios;
5081 + struct semaphore swap_bios_semaphore;
5082 + struct mutex swap_bios_lock;
5083 +
5084 struct dm_stats stats;
5085
5086 /* for blk-mq request-based DM support */
5087 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
5088 index 1af82fbbac0c4..d85648b9c247a 100644
5089 --- a/drivers/md/dm-crypt.c
5090 +++ b/drivers/md/dm-crypt.c
5091 @@ -2737,6 +2737,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
5092 wake_up_process(cc->write_thread);
5093
5094 ti->num_flush_bios = 1;
5095 + ti->limit_swap_bios = true;
5096
5097 return 0;
5098
5099 diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
5100 index bdb84b8e71621..6b0b3a13ab4a2 100644
5101 --- a/drivers/md/dm-era-target.c
5102 +++ b/drivers/md/dm-era-target.c
5103 @@ -47,6 +47,7 @@ struct writeset {
5104 static void writeset_free(struct writeset *ws)
5105 {
5106 vfree(ws->bits);
5107 + ws->bits = NULL;
5108 }
5109
5110 static int setup_on_disk_bitset(struct dm_disk_bitset *info,
5111 @@ -71,8 +72,6 @@ static size_t bitset_size(unsigned nr_bits)
5112 */
5113 static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
5114 {
5115 - ws->md.nr_bits = nr_blocks;
5116 - ws->md.root = INVALID_WRITESET_ROOT;
5117 ws->bits = vzalloc(bitset_size(nr_blocks));
5118 if (!ws->bits) {
5119 DMERR("%s: couldn't allocate in memory bitset", __func__);
5120 @@ -85,12 +84,14 @@ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
5121 /*
5122 * Wipes the in-core bitset, and creates a new on disk bitset.
5123 */
5124 -static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws)
5125 +static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
5126 + dm_block_t nr_blocks)
5127 {
5128 int r;
5129
5130 - memset(ws->bits, 0, bitset_size(ws->md.nr_bits));
5131 + memset(ws->bits, 0, bitset_size(nr_blocks));
5132
5133 + ws->md.nr_bits = nr_blocks;
5134 r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
5135 if (r) {
5136 DMERR("%s: setup_on_disk_bitset failed", __func__);
5137 @@ -134,7 +135,7 @@ static int writeset_test_and_set(struct dm_disk_bitset *info,
5138 {
5139 int r;
5140
5141 - if (!test_and_set_bit(block, ws->bits)) {
5142 + if (!test_bit(block, ws->bits)) {
5143 r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
5144 if (r) {
5145 /* FIXME: fail mode */
5146 @@ -388,7 +389,7 @@ static void ws_dec(void *context, const void *value)
5147
5148 static int ws_eq(void *context, const void *value1, const void *value2)
5149 {
5150 - return !memcmp(value1, value2, sizeof(struct writeset_metadata));
5151 + return !memcmp(value1, value2, sizeof(struct writeset_disk));
5152 }
5153
5154 /*----------------------------------------------------------------*/
5155 @@ -564,6 +565,15 @@ static int open_metadata(struct era_metadata *md)
5156 }
5157
5158 disk = dm_block_data(sblock);
5159 +
5160 + /* Verify the data block size hasn't changed */
5161 + if (le32_to_cpu(disk->data_block_size) != md->block_size) {
5162 + DMERR("changing the data block size (from %u to %llu) is not supported",
5163 + le32_to_cpu(disk->data_block_size), md->block_size);
5164 + r = -EINVAL;
5165 + goto bad;
5166 + }
5167 +
5168 r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
5169 disk->metadata_space_map_root,
5170 sizeof(disk->metadata_space_map_root),
5171 @@ -575,10 +585,10 @@ static int open_metadata(struct era_metadata *md)
5172
5173 setup_infos(md);
5174
5175 - md->block_size = le32_to_cpu(disk->data_block_size);
5176 md->nr_blocks = le32_to_cpu(disk->nr_blocks);
5177 md->current_era = le32_to_cpu(disk->current_era);
5178
5179 + ws_unpack(&disk->current_writeset, &md->current_writeset->md);
5180 md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
5181 md->era_array_root = le64_to_cpu(disk->era_array_root);
5182 md->metadata_snap = le64_to_cpu(disk->metadata_snap);
5183 @@ -746,6 +756,12 @@ static int metadata_digest_lookup_writeset(struct era_metadata *md,
5184 ws_unpack(&disk, &d->writeset);
5185 d->value = cpu_to_le32(key);
5186
5187 + /*
5188 + * We initialise another bitset info to avoid any caching side effects
5189 + * with the previous one.
5190 + */
5191 + dm_disk_bitset_init(md->tm, &d->info);
5192 +
5193 d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
5194 d->current_bit = 0;
5195 d->step = metadata_digest_transcribe_writeset;
5196 @@ -759,12 +775,6 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d)
5197 return 0;
5198
5199 memset(d, 0, sizeof(*d));
5200 -
5201 - /*
5202 - * We initialise another bitset info to avoid any caching side
5203 - * effects with the previous one.
5204 - */
5205 - dm_disk_bitset_init(md->tm, &d->info);
5206 d->step = metadata_digest_lookup_writeset;
5207
5208 return 0;
5209 @@ -802,6 +812,8 @@ static struct era_metadata *metadata_open(struct block_device *bdev,
5210
5211 static void metadata_close(struct era_metadata *md)
5212 {
5213 + writeset_free(&md->writesets[0]);
5214 + writeset_free(&md->writesets[1]);
5215 destroy_persistent_data_objects(md);
5216 kfree(md);
5217 }
5218 @@ -839,6 +851,7 @@ static int metadata_resize(struct era_metadata *md, void *arg)
5219 r = writeset_alloc(&md->writesets[1], *new_size);
5220 if (r) {
5221 DMERR("%s: writeset_alloc failed for writeset 1", __func__);
5222 + writeset_free(&md->writesets[0]);
5223 return r;
5224 }
5225
5226 @@ -849,6 +862,8 @@ static int metadata_resize(struct era_metadata *md, void *arg)
5227 &value, &md->era_array_root);
5228 if (r) {
5229 DMERR("%s: dm_array_resize failed", __func__);
5230 + writeset_free(&md->writesets[0]);
5231 + writeset_free(&md->writesets[1]);
5232 return r;
5233 }
5234
5235 @@ -870,7 +885,6 @@ static int metadata_era_archive(struct era_metadata *md)
5236 }
5237
5238 ws_pack(&md->current_writeset->md, &value);
5239 - md->current_writeset->md.root = INVALID_WRITESET_ROOT;
5240
5241 keys[0] = md->current_era;
5242 __dm_bless_for_disk(&value);
5243 @@ -882,6 +896,7 @@ static int metadata_era_archive(struct era_metadata *md)
5244 return r;
5245 }
5246
5247 + md->current_writeset->md.root = INVALID_WRITESET_ROOT;
5248 md->archived_writesets = true;
5249
5250 return 0;
5251 @@ -898,7 +913,7 @@ static int metadata_new_era(struct era_metadata *md)
5252 int r;
5253 struct writeset *new_writeset = next_writeset(md);
5254
5255 - r = writeset_init(&md->bitset_info, new_writeset);
5256 + r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks);
5257 if (r) {
5258 DMERR("%s: writeset_init failed", __func__);
5259 return r;
5260 @@ -951,7 +966,7 @@ static int metadata_commit(struct era_metadata *md)
5261 int r;
5262 struct dm_block *sblock;
5263
5264 - if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) {
5265 + if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
5266 r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
5267 &md->current_writeset->md.root);
5268 if (r) {
5269 @@ -1226,8 +1241,10 @@ static void process_deferred_bios(struct era *era)
5270 int r;
5271 struct bio_list deferred_bios, marked_bios;
5272 struct bio *bio;
5273 + struct blk_plug plug;
5274 bool commit_needed = false;
5275 bool failed = false;
5276 + struct writeset *ws = era->md->current_writeset;
5277
5278 bio_list_init(&deferred_bios);
5279 bio_list_init(&marked_bios);
5280 @@ -1237,9 +1254,11 @@ static void process_deferred_bios(struct era *era)
5281 bio_list_init(&era->deferred_bios);
5282 spin_unlock(&era->deferred_lock);
5283
5284 + if (bio_list_empty(&deferred_bios))
5285 + return;
5286 +
5287 while ((bio = bio_list_pop(&deferred_bios))) {
5288 - r = writeset_test_and_set(&era->md->bitset_info,
5289 - era->md->current_writeset,
5290 + r = writeset_test_and_set(&era->md->bitset_info, ws,
5291 get_block(era, bio));
5292 if (r < 0) {
5293 /*
5294 @@ -1247,7 +1266,6 @@ static void process_deferred_bios(struct era *era)
5295 * FIXME: finish.
5296 */
5297 failed = true;
5298 -
5299 } else if (r == 0)
5300 commit_needed = true;
5301
5302 @@ -1263,9 +1281,19 @@ static void process_deferred_bios(struct era *era)
5303 if (failed)
5304 while ((bio = bio_list_pop(&marked_bios)))
5305 bio_io_error(bio);
5306 - else
5307 - while ((bio = bio_list_pop(&marked_bios)))
5308 + else {
5309 + blk_start_plug(&plug);
5310 + while ((bio = bio_list_pop(&marked_bios))) {
5311 + /*
5312 + * Only update the in-core writeset if the on-disk one
5313 + * was updated too.
5314 + */
5315 + if (commit_needed)
5316 + set_bit(get_block(era, bio), ws->bits);
5317 generic_make_request(bio);
5318 + }
5319 + blk_finish_plug(&plug);
5320 + }
5321 }
5322
5323 static void process_rpc_calls(struct era *era)
5324 @@ -1486,15 +1514,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
5325 }
5326 era->md = md;
5327
5328 - era->nr_blocks = calc_nr_blocks(era);
5329 -
5330 - r = metadata_resize(era->md, &era->nr_blocks);
5331 - if (r) {
5332 - ti->error = "couldn't resize metadata";
5333 - era_destroy(era);
5334 - return -ENOMEM;
5335 - }
5336 -
5337 era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
5338 if (!era->wq) {
5339 ti->error = "could not create workqueue for metadata object";
5340 @@ -1571,16 +1590,24 @@ static int era_preresume(struct dm_target *ti)
5341 dm_block_t new_size = calc_nr_blocks(era);
5342
5343 if (era->nr_blocks != new_size) {
5344 - r = in_worker1(era, metadata_resize, &new_size);
5345 - if (r)
5346 + r = metadata_resize(era->md, &new_size);
5347 + if (r) {
5348 + DMERR("%s: metadata_resize failed", __func__);
5349 + return r;
5350 + }
5351 +
5352 + r = metadata_commit(era->md);
5353 + if (r) {
5354 + DMERR("%s: metadata_commit failed", __func__);
5355 return r;
5356 + }
5357
5358 era->nr_blocks = new_size;
5359 }
5360
5361 start_worker(era);
5362
5363 - r = in_worker0(era, metadata_new_era);
5364 + r = in_worker0(era, metadata_era_rollover);
5365 if (r) {
5366 DMERR("%s: metadata_era_rollover failed", __func__);
5367 return r;
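
Several of the dm-era hunks enforce a single ordering rule: the in-core writeset bit may only be set once the matching on-disk bit is known to be committed, which is why writeset_test_and_set() now only tests and process_deferred_bios() sets the bit after the commit decision. A condensed sketch of that ordering, with the storage side stubbed out:

#include <stdbool.h>

#define NR_BLOCKS 1024

static bool in_core[NR_BLOCKS];		/* the in-memory writeset */

/* Stub for the metadata update; dm-era uses dm_bitset_set_bit()
 * followed by a commit here. */
static int disk_set_and_commit(unsigned int block)
{
	(void)block;
	return 0;
}

/* Mark a block as written in the current era. A crash between the two
 * updates can never leave memory claiming more than the metadata. */
static int mark_block(unsigned int block)
{
	if (in_core[block])
		return 0;		/* already recorded this era */
	if (disk_set_and_commit(block))
		return -1;		/* metadata update failed */
	in_core[block] = true;		/* safe to mirror in core now */
	return 0;
}
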
5368 diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
5369 index 08ae59a6e8734..4c2971835d330 100644
5370 --- a/drivers/md/dm-writecache.c
5371 +++ b/drivers/md/dm-writecache.c
5372 @@ -142,6 +142,7 @@ struct dm_writecache {
5373 size_t metadata_sectors;
5374 size_t n_blocks;
5375 uint64_t seq_count;
5376 + sector_t data_device_sectors;
5377 void *block_start;
5378 struct wc_entry *entries;
5379 unsigned block_size;
5380 @@ -918,6 +919,8 @@ static void writecache_resume(struct dm_target *ti)
5381
5382 wc_lock(wc);
5383
5384 + wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;
5385 +
5386 if (WC_MODE_PMEM(wc)) {
5387 persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
5388 } else {
5389 @@ -1488,6 +1491,10 @@ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t
5390 void *address = memory_data(wc, e);
5391
5392 persistent_memory_flush_cache(address, block_size);
5393 +
5394 + if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
5395 + return true;
5396 +
5397 return bio_add_page(&wb->bio, persistent_memory_page(address),
5398 block_size, persistent_memory_page_offset(address)) != 0;
5399 }
5400 @@ -1559,6 +1566,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
5401 if (writecache_has_error(wc)) {
5402 bio->bi_status = BLK_STS_IOERR;
5403 bio_endio(bio);
5404 + } else if (unlikely(!bio_sectors(bio))) {
5405 + bio->bi_status = BLK_STS_OK;
5406 + bio_endio(bio);
5407 } else {
5408 submit_bio(bio);
5409 }
5410 @@ -1602,6 +1612,14 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac
5411 e = f;
5412 }
5413
5414 + if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
5415 + if (to.sector >= wc->data_device_sectors) {
5416 + writecache_copy_endio(0, 0, c);
5417 + continue;
5418 + }
5419 + from.count = to.count = wc->data_device_sectors - to.sector;
5420 + }
5421 +
5422 dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
5423
5424 __writeback_throttle(wc, wbl);
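
The writecache additions cap writeback at data_device_sectors because the cache can legitimately hold blocks that now lie past the end of a shrunk origin device; partial ranges are trimmed and fully out-of-range copies are completed as no-ops. The clamp itself is worth spelling out (a sketch, with sector_t reduced to a plain integer):

#include <stdint.h>

typedef uint64_t sector_t;

/* Clamp [sector, sector + count) to a device of dev_sectors sectors;
 * returns how many sectors may actually be written, 0 meaning skip. */
static sector_t clamp_to_device(sector_t sector, sector_t count,
				sector_t dev_sectors)
{
	if (sector >= dev_sectors)
		return 0;
	if (sector + count > dev_sectors)
		count = dev_sectors - sector;
	return count;
}
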
5425 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
5426 index c6ce42daff27b..de32f8553735f 100644
5427 --- a/drivers/md/dm.c
5428 +++ b/drivers/md/dm.c
5429 @@ -146,6 +146,16 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
5430 #define DM_NUMA_NODE NUMA_NO_NODE
5431 static int dm_numa_node = DM_NUMA_NODE;
5432
5433 +#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
5434 +static int swap_bios = DEFAULT_SWAP_BIOS;
5435 +static int get_swap_bios(void)
5436 +{
5437 + int latch = READ_ONCE(swap_bios);
5438 + if (unlikely(latch <= 0))
5439 + latch = DEFAULT_SWAP_BIOS;
5440 + return latch;
5441 +}
5442 +
5443 /*
5444 * For mempools pre-allocation at the table loading time.
5445 */
5446 @@ -972,6 +982,11 @@ void disable_write_zeroes(struct mapped_device *md)
5447 limits->max_write_zeroes_sectors = 0;
5448 }
5449
5450 +static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
5451 +{
5452 + return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
5453 +}
5454 +
5455 static void clone_endio(struct bio *bio)
5456 {
5457 blk_status_t error = bio->bi_status;
5458 @@ -1009,6 +1024,11 @@ static void clone_endio(struct bio *bio)
5459 }
5460 }
5461
5462 + if (unlikely(swap_bios_limit(tio->ti, bio))) {
5463 + struct mapped_device *md = io->md;
5464 + up(&md->swap_bios_semaphore);
5465 + }
5466 +
5467 free_tio(tio);
5468 dec_pending(io, error);
5469 }
5470 @@ -1263,6 +1283,22 @@ void dm_remap_zone_report(struct dm_target *ti, sector_t start,
5471 }
5472 EXPORT_SYMBOL_GPL(dm_remap_zone_report);
5473
5474 +static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
5475 +{
5476 + mutex_lock(&md->swap_bios_lock);
5477 + while (latch < md->swap_bios) {
5478 + cond_resched();
5479 + down(&md->swap_bios_semaphore);
5480 + md->swap_bios--;
5481 + }
5482 + while (latch > md->swap_bios) {
5483 + cond_resched();
5484 + up(&md->swap_bios_semaphore);
5485 + md->swap_bios++;
5486 + }
5487 + mutex_unlock(&md->swap_bios_lock);
5488 +}
5489 +
5490 static blk_qc_t __map_bio(struct dm_target_io *tio)
5491 {
5492 int r;
5493 @@ -1283,6 +1319,14 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
5494 atomic_inc(&io->io_count);
5495 sector = clone->bi_iter.bi_sector;
5496
5497 + if (unlikely(swap_bios_limit(ti, clone))) {
5498 + struct mapped_device *md = io->md;
5499 + int latch = get_swap_bios();
5500 + if (unlikely(latch != md->swap_bios))
5501 + __set_swap_bios_limit(md, latch);
5502 + down(&md->swap_bios_semaphore);
5503 + }
5504 +
5505 r = ti->type->map(ti, clone);
5506 switch (r) {
5507 case DM_MAPIO_SUBMITTED:
5508 @@ -1297,10 +1341,18 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
5509 ret = generic_make_request(clone);
5510 break;
5511 case DM_MAPIO_KILL:
5512 + if (unlikely(swap_bios_limit(ti, clone))) {
5513 + struct mapped_device *md = io->md;
5514 + up(&md->swap_bios_semaphore);
5515 + }
5516 free_tio(tio);
5517 dec_pending(io, BLK_STS_IOERR);
5518 break;
5519 case DM_MAPIO_REQUEUE:
5520 + if (unlikely(swap_bios_limit(ti, clone))) {
5521 + struct mapped_device *md = io->md;
5522 + up(&md->swap_bios_semaphore);
5523 + }
5524 free_tio(tio);
5525 dec_pending(io, BLK_STS_DM_REQUEUE);
5526 break;
5527 @@ -1894,6 +1946,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
5528 mutex_destroy(&md->suspend_lock);
5529 mutex_destroy(&md->type_lock);
5530 mutex_destroy(&md->table_devices_lock);
5531 + mutex_destroy(&md->swap_bios_lock);
5532
5533 dm_mq_cleanup_mapped_device(md);
5534 }
5535 @@ -1963,6 +2016,10 @@ static struct mapped_device *alloc_dev(int minor)
5536 init_waitqueue_head(&md->eventq);
5537 init_completion(&md->kobj_holder.completion);
5538
5539 + md->swap_bios = get_swap_bios();
5540 + sema_init(&md->swap_bios_semaphore, md->swap_bios);
5541 + mutex_init(&md->swap_bios_lock);
5542 +
5543 md->disk->major = _major;
5544 md->disk->first_minor = minor;
5545 md->disk->fops = &dm_blk_dops;
5546 @@ -3245,6 +3302,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
5547 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
5548 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
5549
5550 +module_param(swap_bios, int, S_IRUGO | S_IWUSR);
5551 +MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
5552 +
5553 MODULE_DESCRIPTION(DM_NAME " driver");
5554 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
5555 MODULE_LICENSE("GPL");
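
The swap_bios machinery is a counting-semaphore throttle: __map_bio() takes a token for every REQ_SWAP bio on a limiting target, clone_endio() and the error paths return it, and __set_swap_bios_limit() retunes the limit at runtime by absorbing or releasing tokens under a mutex. A userspace sketch of that resizable limiter using POSIX semaphores (names are mine):

#include <pthread.h>
#include <semaphore.h>

struct limiter {
	sem_t sem;		/* available tokens */
	int limit;		/* current token count */
	pthread_mutex_t lock;	/* serializes resizes */
};

static int limiter_init(struct limiter *l, int limit)
{
	l->limit = limit;
	pthread_mutex_init(&l->lock, NULL);
	return sem_init(&l->sem, 0, (unsigned int)limit);
}

/* Grow or shrink the number of outstanding tokens to match latch,
 * mirroring __set_swap_bios_limit() above. */
static void limiter_resize(struct limiter *l, int latch)
{
	pthread_mutex_lock(&l->lock);
	while (latch < l->limit) {	/* shrink: absorb tokens */
		sem_wait(&l->sem);
		l->limit--;
	}
	while (latch > l->limit) {	/* grow: hand out new tokens */
		sem_post(&l->sem);
		l->limit++;
	}
	pthread_mutex_unlock(&l->lock);
}

Issuers then simply sem_wait() before submitting and sem_post() on completion, which is the down()/up() pairing visible in the dm.c hunks.
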
5556 diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
5557 index 041fcbb4eebdf..79e608dba4b6d 100644
5558 --- a/drivers/media/i2c/ov5670.c
5559 +++ b/drivers/media/i2c/ov5670.c
5560 @@ -2081,7 +2081,8 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
5561
5562 /* By default, V4L2_CID_PIXEL_RATE is read only */
5563 ov5670->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops,
5564 - V4L2_CID_PIXEL_RATE, 0,
5565 + V4L2_CID_PIXEL_RATE,
5566 + link_freq_configs[0].pixel_rate,
5567 link_freq_configs[0].pixel_rate,
5568 1,
5569 link_freq_configs[0].pixel_rate);
5570 diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
5571 index 41be22ce66f3e..44839a6461e88 100644
5572 --- a/drivers/media/pci/cx25821/cx25821-core.c
5573 +++ b/drivers/media/pci/cx25821/cx25821-core.c
5574 @@ -976,8 +976,10 @@ int cx25821_riscmem_alloc(struct pci_dev *pci,
5575 __le32 *cpu;
5576 dma_addr_t dma = 0;
5577
5578 - if (NULL != risc->cpu && risc->size < size)
5579 + if (risc->cpu && risc->size < size) {
5580 pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
5581 + risc->cpu = NULL;
5582 + }
5583 if (NULL == risc->cpu) {
5584 cpu = pci_zalloc_consistent(pci, size, &dma);
5585 if (NULL == cpu)
5586 diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
5587 index 253f05aef3b1f..7808ec1052bf6 100644
5588 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
5589 +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
5590 @@ -1288,7 +1288,7 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
5591 fmt->format.code = formats[0].mbus_code;
5592
5593 for (i = 0; i < ARRAY_SIZE(formats); i++) {
5594 - if (formats[i].mbus_code == fmt->format.code) {
5595 + if (formats[i].mbus_code == mbus_code) {
5596 fmt->format.code = mbus_code;
5597 break;
5598 }
5599 diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
5600 index cb65d345fd3e9..e2666d1c68964 100644
5601 --- a/drivers/media/pci/saa7134/saa7134-empress.c
5602 +++ b/drivers/media/pci/saa7134/saa7134-empress.c
5603 @@ -282,8 +282,11 @@ static int empress_init(struct saa7134_dev *dev)
5604 q->lock = &dev->lock;
5605 q->dev = &dev->pci->dev;
5606 err = vb2_queue_init(q);
5607 - if (err)
5608 + if (err) {
5609 + video_device_release(dev->empress_dev);
5610 + dev->empress_dev = NULL;
5611 return err;
5612 + }
5613 dev->empress_dev->queue = q;
5614 dev->empress_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
5615 V4L2_CAP_VIDEO_CAPTURE;
5616 diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
5617 index 9445d792bfc98..731aa702e2b79 100644
5618 --- a/drivers/media/pci/smipcie/smipcie-ir.c
5619 +++ b/drivers/media/pci/smipcie/smipcie-ir.c
5620 @@ -60,39 +60,45 @@ static void smi_ir_decode(struct smi_rc *ir)
5621 {
5622 struct smi_dev *dev = ir->dev;
5623 struct rc_dev *rc_dev = ir->rc_dev;
5624 - u32 dwIRControl, dwIRData;
5625 - u8 index, ucIRCount, readLoop;
5626 + u32 control, data;
5627 + u8 index, ir_count, read_loop;
5628
5629 - dwIRControl = smi_read(IR_Init_Reg);
5630 + control = smi_read(IR_Init_Reg);
5631
5632 - if (dwIRControl & rbIRVld) {
5633 - ucIRCount = (u8) smi_read(IR_Data_Cnt);
5634 + dev_dbg(&rc_dev->dev, "ircontrol: 0x%08x\n", control);
5635
5636 - readLoop = ucIRCount/4;
5637 - if (ucIRCount % 4)
5638 - readLoop += 1;
5639 - for (index = 0; index < readLoop; index++) {
5640 - dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
5641 + if (control & rbIRVld) {
5642 + ir_count = (u8)smi_read(IR_Data_Cnt);
5643
5644 - ir->irData[index*4 + 0] = (u8)(dwIRData);
5645 - ir->irData[index*4 + 1] = (u8)(dwIRData >> 8);
5646 - ir->irData[index*4 + 2] = (u8)(dwIRData >> 16);
5647 - ir->irData[index*4 + 3] = (u8)(dwIRData >> 24);
5648 + dev_dbg(&rc_dev->dev, "ircount %d\n", ir_count);
5649 +
5650 + read_loop = ir_count / 4;
5651 + if (ir_count % 4)
5652 + read_loop += 1;
5653 + for (index = 0; index < read_loop; index++) {
5654 + data = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
5655 + dev_dbg(&rc_dev->dev, "IRData 0x%08x\n", data);
5656 +
5657 + ir->irData[index * 4 + 0] = (u8)(data);
5658 + ir->irData[index * 4 + 1] = (u8)(data >> 8);
5659 + ir->irData[index * 4 + 2] = (u8)(data >> 16);
5660 + ir->irData[index * 4 + 3] = (u8)(data >> 24);
5661 }
5662 - smi_raw_process(rc_dev, ir->irData, ucIRCount);
5663 - smi_set(IR_Init_Reg, rbIRVld);
5664 + smi_raw_process(rc_dev, ir->irData, ir_count);
5665 }
5666
5667 - if (dwIRControl & rbIRhighidle) {
5668 + if (control & rbIRhighidle) {
5669 struct ir_raw_event rawir = {};
5670
5671 + dev_dbg(&rc_dev->dev, "high idle\n");
5672 +
5673 rawir.pulse = 0;
5674 rawir.duration = US_TO_NS(SMI_SAMPLE_PERIOD *
5675 SMI_SAMPLE_IDLEMIN);
5676 ir_raw_event_store_with_filter(rc_dev, &rawir);
5677 - smi_set(IR_Init_Reg, rbIRhighidle);
5678 }
5679
5680 + smi_set(IR_Init_Reg, rbIRVld);
5681 ir_raw_event_handle(rc_dev);
5682 }
5683
5684 @@ -151,7 +157,7 @@ int smi_ir_init(struct smi_dev *dev)
5685 rc_dev->dev.parent = &dev->pci_dev->dev;
5686
5687 rc_dev->map_name = dev->info->rc_map;
5688 - rc_dev->timeout = MS_TO_NS(100);
5689 + rc_dev->timeout = US_TO_NS(SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN);
5690 rc_dev->rx_resolution = US_TO_NS(SMI_SAMPLE_PERIOD);
5691
5692 ir->rc_dev = rc_dev;
5693 @@ -174,7 +180,7 @@ void smi_ir_exit(struct smi_dev *dev)
5694 struct smi_rc *ir = &dev->ir;
5695 struct rc_dev *rc_dev = ir->rc_dev;
5696
5697 - smi_ir_stop(ir);
5698 rc_unregister_device(rc_dev);
5699 + smi_ir_stop(ir);
5700 ir->rc_dev = NULL;
5701 }
5702 diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
5703 index 4eaaf39b9223c..e0299a7899231 100644
5704 --- a/drivers/media/platform/aspeed-video.c
5705 +++ b/drivers/media/platform/aspeed-video.c
5706 @@ -1529,12 +1529,12 @@ static int aspeed_video_setup_video(struct aspeed_video *video)
5707 V4L2_JPEG_CHROMA_SUBSAMPLING_420, mask,
5708 V4L2_JPEG_CHROMA_SUBSAMPLING_444);
5709
5710 - if (video->ctrl_handler.error) {
5711 + rc = video->ctrl_handler.error;
5712 + if (rc) {
5713 v4l2_ctrl_handler_free(&video->ctrl_handler);
5714 v4l2_device_unregister(v4l2_dev);
5715
5716 - dev_err(video->dev, "Failed to init controls: %d\n",
5717 - video->ctrl_handler.error);
5718 + dev_err(video->dev, "Failed to init controls: %d\n", rc);
5719 return rc;
5720 }
5721
5722 diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
5723 index 8d47ea0c33f84..6e04e3ec61bac 100644
5724 --- a/drivers/media/platform/pxa_camera.c
5725 +++ b/drivers/media/platform/pxa_camera.c
5726 @@ -1447,6 +1447,9 @@ static int pxac_vb2_prepare(struct vb2_buffer *vb)
5727 struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
5728 struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
5729 int ret = 0;
5730 +#ifdef DEBUG
5731 + int i;
5732 +#endif
5733
5734 switch (pcdev->channels) {
5735 case 1:
5736 diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
5737 index 1d50dfbbb762e..4c2675b437181 100644
5738 --- a/drivers/media/platform/qcom/camss/camss-video.c
5739 +++ b/drivers/media/platform/qcom/camss/camss-video.c
5740 @@ -901,6 +901,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
5741 video->nformats = ARRAY_SIZE(formats_rdi_8x96);
5742 }
5743 } else {
5744 + ret = -EINVAL;
5745 goto error_video_register;
5746 }
5747
5748 diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
5749 index dc62533cf32ce..aa66e4f5f3f34 100644
5750 --- a/drivers/media/platform/vsp1/vsp1_drv.c
5751 +++ b/drivers/media/platform/vsp1/vsp1_drv.c
5752 @@ -882,8 +882,10 @@ static int vsp1_probe(struct platform_device *pdev)
5753 }
5754
5755 done:
5756 - if (ret)
5757 + if (ret) {
5758 pm_runtime_disable(&pdev->dev);
5759 + rcar_fcp_put(vsp1->fcp);
5760 + }
5761
5762 return ret;
5763 }
5764 diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
5765 index f9616158bcf44..867f5fb6fbe11 100644
5766 --- a/drivers/media/rc/mceusb.c
5767 +++ b/drivers/media/rc/mceusb.c
5768 @@ -1169,7 +1169,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
5769 switch (subcmd) {
5770 /* the one and only 5-byte return value command */
5771 case MCE_RSP_GETPORTSTATUS:
5772 - if (buf_in[5] == 0)
5773 + if (buf_in[5] == 0 && *hi < 8)
5774 ir->txports_cabled |= 1 << *hi;
5775 break;
5776
5777 diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
5778 index 83ca5dc047ea2..baa9950783b66 100644
5779 --- a/drivers/media/tuners/qm1d1c0042.c
5780 +++ b/drivers/media/tuners/qm1d1c0042.c
5781 @@ -343,8 +343,10 @@ static int qm1d1c0042_init(struct dvb_frontend *fe)
5782 if (val == reg_initval[reg_index][0x00])
5783 break;
5784 }
5785 - if (reg_index >= QM1D1C0042_NUM_REG_ROWS)
5786 + if (reg_index >= QM1D1C0042_NUM_REG_ROWS) {
5787 + ret = -EINVAL;
5788 goto failed;
5789 + }
5790 memcpy(state->regs, reg_initval[reg_index], QM1D1C0042_NUM_REGS);
5791 usleep_range(2000, 3000);
5792
5793 diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
5794 index 62d3566bf7eeb..5ac1a6af87826 100644
5795 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
5796 +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
5797 @@ -391,7 +391,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
5798 ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
5799
5800 if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
5801 - lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
5802 + lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
5803
5804 usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
5805 info("INT Interrupt Service Started");
5806 diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
5807 index e6088b5d1b805..3daa64bb1e1d9 100644
5808 --- a/drivers/media/usb/em28xx/em28xx-core.c
5809 +++ b/drivers/media/usb/em28xx/em28xx-core.c
5810 @@ -956,14 +956,10 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
5811
5812 usb_bufs->buf[i] = kzalloc(sb_size, GFP_KERNEL);
5813 if (!usb_bufs->buf[i]) {
5814 - em28xx_uninit_usb_xfer(dev, mode);
5815 -
5816 for (i--; i >= 0; i--)
5817 kfree(usb_bufs->buf[i]);
5818
5819 - kfree(usb_bufs->buf);
5820 - usb_bufs->buf = NULL;
5821 -
5822 + em28xx_uninit_usb_xfer(dev, mode);
5823 return -ENOMEM;
5824 }
5825
5826 diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
5827 index 19c90fa9e443d..293a460f4616c 100644
5828 --- a/drivers/media/usb/tm6000/tm6000-dvb.c
5829 +++ b/drivers/media/usb/tm6000/tm6000-dvb.c
5830 @@ -141,6 +141,10 @@ static int tm6000_start_stream(struct tm6000_core *dev)
5831 if (ret < 0) {
5832 printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n",
5833 ret, __func__);
5834 +
5835 + kfree(dvb->bulk_urb->transfer_buffer);
5836 + usb_free_urb(dvb->bulk_urb);
5837 + dvb->bulk_urb = NULL;
5838 return ret;
5839 } else
5840 printk(KERN_ERR "tm6000: pipe reset\n");
5841 diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
5842 index 5e6f3153b5ff8..7d60dd3b0bd85 100644
5843 --- a/drivers/media/usb/uvc/uvc_v4l2.c
5844 +++ b/drivers/media/usb/uvc/uvc_v4l2.c
5845 @@ -248,7 +248,9 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
5846 goto done;
5847
5848 /* After the probe, update fmt with the values returned from
5849 - * negotiation with the device.
5850 + * negotiation with the device. Some devices return invalid bFormatIndex
5851 + * and bFrameIndex values, in which case we can only assume they have
5852 + * accepted the requested format as-is.
5853 */
5854 for (i = 0; i < stream->nformats; ++i) {
5855 if (probe->bFormatIndex == stream->format[i].index) {
5856 @@ -257,11 +259,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
5857 }
5858 }
5859
5860 - if (i == stream->nformats) {
5861 - uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
5862 + if (i == stream->nformats)
5863 + uvc_trace(UVC_TRACE_FORMAT,
5864 + "Unknown bFormatIndex %u, using default\n",
5865 probe->bFormatIndex);
5866 - return -EINVAL;
5867 - }
5868
5869 for (i = 0; i < format->nframes; ++i) {
5870 if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
5871 @@ -270,11 +271,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
5872 }
5873 }
5874
5875 - if (i == format->nframes) {
5876 - uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
5877 + if (i == format->nframes)
5878 + uvc_trace(UVC_TRACE_FORMAT,
5879 + "Unknown bFrameIndex %u, using default\n",
5880 probe->bFrameIndex);
5881 - return -EINVAL;
5882 - }
5883
5884 fmt->fmt.pix.width = frame->wWidth;
5885 fmt->fmt.pix.height = frame->wHeight;
5886 diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
5887 index a113e811faabe..da1ce7fd4cf5c 100644
5888 --- a/drivers/memory/mtk-smi.c
5889 +++ b/drivers/memory/mtk-smi.c
5890 @@ -127,7 +127,7 @@ static void mtk_smi_clk_disable(const struct mtk_smi *smi)
5891
5892 int mtk_smi_larb_get(struct device *larbdev)
5893 {
5894 - int ret = pm_runtime_get_sync(larbdev);
5895 + int ret = pm_runtime_resume_and_get(larbdev);
5896
5897 return (ret < 0) ? ret : 0;
5898 }
5899 @@ -336,7 +336,7 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
5900 int ret;
5901
5902 /* Power on smi-common. */
5903 - ret = pm_runtime_get_sync(larb->smi_common_dev);
5904 + ret = pm_runtime_resume_and_get(larb->smi_common_dev);
5905 if (ret < 0) {
5906 dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret);
5907 return ret;
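
pm_runtime_get_sync() increments the device usage counter even when the resume fails, so an error path that just returns leaks a reference and can keep the device powered indefinitely; pm_runtime_resume_and_get() drops the counter on failure, which is why the mtk-smi conversion needs no further cleanup. The open-coded equivalent, for drivers still on the old helper:

#include <linux/pm_runtime.h>

static int my_runtime_get(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		/* get_sync bumped the usage count even on failure */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	return 0;
}
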
5908 diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
5909 index db526dbf71eed..94219d2a2773d 100644
5910 --- a/drivers/memory/ti-aemif.c
5911 +++ b/drivers/memory/ti-aemif.c
5912 @@ -378,8 +378,10 @@ static int aemif_probe(struct platform_device *pdev)
5913 */
5914 for_each_available_child_of_node(np, child_np) {
5915 ret = of_aemif_parse_abus_config(pdev, child_np);
5916 - if (ret < 0)
5917 + if (ret < 0) {
5918 + of_node_put(child_np);
5919 goto error;
5920 + }
5921 }
5922 } else if (pdata && pdata->num_abus_data > 0) {
5923 for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) {
5924 @@ -405,8 +407,10 @@ static int aemif_probe(struct platform_device *pdev)
5925 for_each_available_child_of_node(np, child_np) {
5926 ret = of_platform_populate(child_np, NULL,
5927 dev_lookup, dev);
5928 - if (ret < 0)
5929 + if (ret < 0) {
5930 + of_node_put(child_np);
5931 goto error;
5932 + }
5933 }
5934 } else if (pdata) {
5935 for (i = 0; i < pdata->num_sub_devices; i++) {
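
for_each_available_child_of_node() takes a reference on each child and drops it when advancing to the next one, so breaking out of the loop early leaves the current child's refcount elevated; both ti-aemif error paths (and the hisi-sfc hunk later in this patch) therefore add an explicit of_node_put(). The shape of the fix (register_one() is a hypothetical helper):

#include <linux/of.h>

static int register_one(struct device_node *child);

static int register_children(struct device_node *np)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(np, child) {
		ret = register_one(child);
		if (ret) {
			of_node_put(child);	/* drop the ref the iterator holds */
			return ret;
		}
	}
	return 0;
}
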
5936 diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
5937 index fab3cdc27ed64..19d57a45134c6 100644
5938 --- a/drivers/mfd/bd9571mwv.c
5939 +++ b/drivers/mfd/bd9571mwv.c
5940 @@ -185,9 +185,9 @@ static int bd9571mwv_probe(struct i2c_client *client,
5941 return ret;
5942 }
5943
5944 - ret = mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO, bd9571mwv_cells,
5945 - ARRAY_SIZE(bd9571mwv_cells), NULL, 0,
5946 - regmap_irq_get_domain(bd->irq_data));
5947 + ret = devm_mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO,
5948 + bd9571mwv_cells, ARRAY_SIZE(bd9571mwv_cells),
5949 + NULL, 0, regmap_irq_get_domain(bd->irq_data));
5950 if (ret) {
5951 regmap_del_irq_chip(bd->irq, bd->irq_data);
5952 return ret;
5953 diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
5954 index 8a7cc0f86958b..65b98f3fbd929 100644
5955 --- a/drivers/mfd/wm831x-auxadc.c
5956 +++ b/drivers/mfd/wm831x-auxadc.c
5957 @@ -93,11 +93,10 @@ static int wm831x_auxadc_read_irq(struct wm831x *wm831x,
5958 wait_for_completion_timeout(&req->done, msecs_to_jiffies(500));
5959
5960 mutex_lock(&wm831x->auxadc_lock);
5961 -
5962 - list_del(&req->list);
5963 ret = req->val;
5964
5965 out:
5966 + list_del(&req->list);
5967 mutex_unlock(&wm831x->auxadc_lock);
5968
5969 kfree(req);
5970 diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
5971 index 3a9467aaa4356..c3e3907a0c2f1 100644
5972 --- a/drivers/misc/cardreader/rts5227.c
5973 +++ b/drivers/misc/cardreader/rts5227.c
5974 @@ -338,6 +338,11 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
5975 {
5976 rts5227_extra_init_hw(pcr);
5977
5978 + /* Power down OCP for power consumption */
5979 + if (!pcr->card_exist)
5980 + rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
5981 + OC_POWER_DOWN);
5982 +
5983 rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG,
5984 FUNC_FORCE_UPME_XMT_DBG);
5985 rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04);
5986 diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
5987 index 94cfb675fe4ed..414dcbd3c3c25 100644
5988 --- a/drivers/misc/eeprom/eeprom_93xx46.c
5989 +++ b/drivers/misc/eeprom/eeprom_93xx46.c
5990 @@ -511,3 +511,4 @@ MODULE_LICENSE("GPL");
5991 MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
5992 MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
5993 MODULE_ALIAS("spi:93xx46");
5994 +MODULE_ALIAS("spi:eeprom-93xx46");
5995 diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
5996 index a44094cdbc36c..d20b2b99c6f24 100644
5997 --- a/drivers/misc/mei/hbm.c
5998 +++ b/drivers/misc/mei/hbm.c
5999 @@ -1300,7 +1300,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
6000 return -EPROTO;
6001 }
6002
6003 - dev->dev_state = MEI_DEV_POWER_DOWN;
6004 + mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
6005 dev_info(dev->dev, "hbm: stop response: resetting.\n");
6006 /* force the reset */
6007 return -EPROTO;
6008 diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
6009 index c49065887e8f5..c2338750313c4 100644
6010 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
6011 +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
6012 @@ -537,6 +537,9 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
6013
6014 queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
6015
6016 + if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
6017 + return NULL;
6018 +
6019 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
6020 if (queue) {
6021 queue->q_header = NULL;
6022 @@ -630,7 +633,7 @@ static void qp_release_pages(struct page **pages,
6023
6024 for (i = 0; i < num_pages; i++) {
6025 if (dirty)
6026 - set_page_dirty(pages[i]);
6027 + set_page_dirty_lock(pages[i]);
6028
6029 put_page(pages[i]);
6030 pages[i] = NULL;
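
The vmci queue-pair check rejects oversized allocations up front: num_pages derives from a (possibly hostile) queue size, and handing kzalloc() a sum beyond KMALLOC_MAX_SIZE would fail with a warning splat at best. The second hunk switches to set_page_dirty_lock(), the variant that is safe when the caller does not already hold the page lock. A sketch of the size guard (function shape is illustrative):

#include <linux/slab.h>

static void *alloc_queue(size_t queue_size, size_t num_pages)
{
	size_t page_tbl_size = num_pages * sizeof(struct page *);

	/* Refuse attacker-influenced sizes the allocator cannot satisfy. */
	if (queue_size + page_tbl_size > KMALLOC_MAX_SIZE)
		return NULL;

	return kzalloc(queue_size + page_tbl_size, GFP_KERNEL);
}
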
6031 diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
6032 index cb89f0578d425..f54d0427e9c00 100644
6033 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
6034 +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
6035 @@ -186,8 +186,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
6036 mmc_get_dma_dir(data)))
6037 goto force_pio;
6038
6039 - /* This DMAC cannot handle if buffer is not 8-bytes alignment */
6040 - if (!IS_ALIGNED(sg_dma_address(sg), 8))
6041 + /* This DMAC cannot handle if buffer is not 128-bytes alignment */
6042 + if (!IS_ALIGNED(sg_dma_address(sg), 128))
6043 goto force_pio_with_unmap;
6044
6045 if (data->flags & MMC_DATA_READ) {
6046 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
6047 index b03d652226225..771676209005b 100644
6048 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
6049 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
6050 @@ -1589,9 +1589,10 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
6051 struct sdhci_host *host = platform_get_drvdata(pdev);
6052 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
6053 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
6054 - int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
6055 + int dead;
6056
6057 pm_runtime_get_sync(&pdev->dev);
6058 + dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
6059 pm_runtime_disable(&pdev->dev);
6060 pm_runtime_put_noidle(&pdev->dev);
6061
6062 diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
6063 index d07b9793380f0..10705e5fa90ee 100644
6064 --- a/drivers/mmc/host/sdhci-sprd.c
6065 +++ b/drivers/mmc/host/sdhci-sprd.c
6066 @@ -665,14 +665,14 @@ static int sdhci_sprd_remove(struct platform_device *pdev)
6067 {
6068 struct sdhci_host *host = platform_get_drvdata(pdev);
6069 struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
6070 - struct mmc_host *mmc = host->mmc;
6071
6072 - mmc_remove_host(mmc);
6073 + sdhci_remove_host(host, 0);
6074 +
6075 clk_disable_unprepare(sprd_host->clk_sdio);
6076 clk_disable_unprepare(sprd_host->clk_enable);
6077 clk_disable_unprepare(sprd_host->clk_2x_enable);
6078
6079 - mmc_free_host(mmc);
6080 + sdhci_pltfm_free(pdev);
6081
6082 return 0;
6083 }
6084 diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
6085 index b11ac2314328d..6eba2441c7efd 100644
6086 --- a/drivers/mmc/host/usdhi6rol0.c
6087 +++ b/drivers/mmc/host/usdhi6rol0.c
6088 @@ -1860,10 +1860,12 @@ static int usdhi6_probe(struct platform_device *pdev)
6089
6090 ret = mmc_add_host(mmc);
6091 if (ret < 0)
6092 - goto e_clk_off;
6093 + goto e_release_dma;
6094
6095 return 0;
6096
6097 +e_release_dma:
6098 + usdhi6_dma_release(host);
6099 e_clk_off:
6100 clk_disable_unprepare(host->clk);
6101 e_free_mmc:
6102 diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
6103 index 752b6cf005f71..8fd61767af831 100644
6104 --- a/drivers/mtd/parsers/afs.c
6105 +++ b/drivers/mtd/parsers/afs.c
6106 @@ -370,10 +370,8 @@ static int parse_afs_partitions(struct mtd_info *mtd,
6107 return i;
6108
6109 out_free_parts:
6110 - while (i >= 0) {
6111 + while (--i >= 0)
6112 kfree(parts[i].name);
6113 - i--;
6114 - }
6115 kfree(parts);
6116 *pparts = NULL;
6117 return ret;
6118 diff --git a/drivers/mtd/parsers/parser_imagetag.c b/drivers/mtd/parsers/parser_imagetag.c
6119 index d69607b482272..fab0949aabba1 100644
6120 --- a/drivers/mtd/parsers/parser_imagetag.c
6121 +++ b/drivers/mtd/parsers/parser_imagetag.c
6122 @@ -83,6 +83,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
6123 pr_err("invalid rootfs address: %*ph\n",
6124 (int)sizeof(buf->flash_image_start),
6125 buf->flash_image_start);
6126 + ret = -EINVAL;
6127 goto out;
6128 }
6129
6130 @@ -92,6 +93,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
6131 pr_err("invalid kernel address: %*ph\n",
6132 (int)sizeof(buf->kernel_address),
6133 buf->kernel_address);
6134 + ret = -EINVAL;
6135 goto out;
6136 }
6137
6138 @@ -100,6 +102,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
6139 pr_err("invalid kernel length: %*ph\n",
6140 (int)sizeof(buf->kernel_length),
6141 buf->kernel_length);
6142 + ret = -EINVAL;
6143 goto out;
6144 }
6145
6146 @@ -108,6 +111,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
6147 pr_err("invalid total length: %*ph\n",
6148 (int)sizeof(buf->total_length),
6149 buf->total_length);
6150 + ret = -EINVAL;
6151 goto out;
6152 }
6153
6154 diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
6155 index 7bef63947b29f..97a5e1eaeefdf 100644
6156 --- a/drivers/mtd/spi-nor/cadence-quadspi.c
6157 +++ b/drivers/mtd/spi-nor/cadence-quadspi.c
6158 @@ -475,7 +475,7 @@ static int cqspi_read_setup(struct spi_nor *nor)
6159 /* Setup dummy clock cycles */
6160 dummy_clk = nor->read_dummy;
6161 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
6162 - dummy_clk = CQSPI_DUMMY_CLKS_MAX;
6163 + return -EOPNOTSUPP;
6164
6165 if (dummy_clk / 8) {
6166 reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
6167 diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
6168 index 6dac9dd8bf42d..8fcc48056a8bc 100644
6169 --- a/drivers/mtd/spi-nor/hisi-sfc.c
6170 +++ b/drivers/mtd/spi-nor/hisi-sfc.c
6171 @@ -396,8 +396,10 @@ static int hisi_spi_nor_register_all(struct hifmc_host *host)
6172
6173 for_each_available_child_of_node(dev->of_node, np) {
6174 ret = hisi_spi_nor_register(np, host);
6175 - if (ret)
6176 + if (ret) {
6177 + of_node_put(np);
6178 goto fail;
6179 + }
6180
6181 if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
6182 dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
6183 diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
6184 index 40586ad17f522..dd6963e4af2c7 100644
6185 --- a/drivers/mtd/spi-nor/spi-nor.c
6186 +++ b/drivers/mtd/spi-nor/spi-nor.c
6187 @@ -1011,14 +1011,15 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
6188
6189 erase = &map->erase_type[i];
6190
6191 + /* Alignment is not mandatory for overlaid regions */
6192 + if (region->offset & SNOR_OVERLAID_REGION &&
6193 + region->size <= len)
6194 + return erase;
6195 +
6196 /* Don't erase more than what the user has asked for. */
6197 if (erase->size > len)
6198 continue;
6199
6200 - /* Alignment is not mandatory for overlaid regions */
6201 - if (region->offset & SNOR_OVERLAID_REGION)
6202 - return erase;
6203 -
6204 spi_nor_div_by_erase_size(erase, addr, &rem);
6205 if (rem)
6206 continue;
6207 @@ -1152,6 +1153,7 @@ static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
6208 goto destroy_erase_cmd_list;
6209
6210 if (prev_erase != erase ||
6211 + erase->size != cmd->size ||
6212 region->offset & SNOR_OVERLAID_REGION) {
6213 cmd = spi_nor_init_erase_cmd(region, erase);
6214 if (IS_ERR(cmd)) {
6215 @@ -3700,7 +3702,7 @@ spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
6216 int i;
6217
6218 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
6219 - if (!(erase_type & BIT(i)))
6220 + if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
6221 continue;
6222 if (region->size & erase[i].size_mask) {
6223 spi_nor_region_mark_overlay(region);
6224 @@ -3770,6 +3772,7 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
6225 offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
6226 region[i].size;
6227 }
6228 + spi_nor_region_mark_end(&region[i - 1]);
6229
6230 save_uniform_erase_type = map->uniform_erase_type;
6231 map->uniform_erase_type = spi_nor_sort_erase_mask(map,
6232 @@ -3793,8 +3796,6 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
6233 if (!(regions_erase_type & BIT(erase[i].idx)))
6234 spi_nor_set_erase_type(&erase[i], 0, 0xFF);
6235
6236 - spi_nor_region_mark_end(&region[i - 1]);
6237 -
6238 return 0;
6239 }
6240
6241 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
6242 index b40d4377cc71d..b2cd3bdba9f89 100644
6243 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
6244 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
6245 @@ -1279,10 +1279,18 @@
6246 #define MDIO_PMA_10GBR_FECCTRL 0x00ab
6247 #endif
6248
6249 +#ifndef MDIO_PMA_RX_CTRL1
6250 +#define MDIO_PMA_RX_CTRL1 0x8051
6251 +#endif
6252 +
6253 #ifndef MDIO_PCS_DIG_CTRL
6254 #define MDIO_PCS_DIG_CTRL 0x8000
6255 #endif
6256
6257 +#ifndef MDIO_PCS_DIGITAL_STAT
6258 +#define MDIO_PCS_DIGITAL_STAT 0x8010
6259 +#endif
6260 +
6261 #ifndef MDIO_AN_XNP
6262 #define MDIO_AN_XNP 0x0016
6263 #endif
6264 @@ -1358,6 +1366,8 @@
6265 #define XGBE_KR_TRAINING_ENABLE BIT(1)
6266
6267 #define XGBE_PCS_CL37_BP BIT(12)
6268 +#define XGBE_PCS_PSEQ_STATE_MASK 0x1c
6269 +#define XGBE_PCS_PSEQ_STATE_POWER_GOOD 0x10
6270
6271 #define XGBE_AN_CL37_INT_CMPLT BIT(0)
6272 #define XGBE_AN_CL37_INT_MASK 0x01
6273 @@ -1375,6 +1385,10 @@
6274 #define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
6275 #define XGBE_PMA_CDR_TRACK_EN_ON 0x01
6276
6277 +#define XGBE_PMA_RX_RST_0_MASK BIT(4)
6278 +#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
6279 +#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
6280 +
6281 /* Bit setting and getting macros
6282 * The get macro will extract the current bit field value from within
6283 * the variable
6284 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
6285 index 3bd20f7651207..da8c2c4aca7ef 100644
6286 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
6287 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
6288 @@ -1443,6 +1443,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
6289 return;
6290
6291 netif_tx_stop_all_queues(netdev);
6292 + netif_carrier_off(pdata->netdev);
6293
6294 xgbe_stop_timers(pdata);
6295 flush_workqueue(pdata->dev_workqueue);
6296 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
6297 index 8a3a60bb26888..156a0bc8ab01d 100644
6298 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
6299 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
6300 @@ -1345,7 +1345,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
6301 &an_restart);
6302 if (an_restart) {
6303 xgbe_phy_config_aneg(pdata);
6304 - return;
6305 + goto adjust_link;
6306 }
6307
6308 if (pdata->phy.link) {
6309 @@ -1396,7 +1396,6 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
6310 pdata->phy_if.phy_impl.stop(pdata);
6311
6312 pdata->phy.link = 0;
6313 - netif_carrier_off(pdata->netdev);
6314
6315 xgbe_phy_adjust_link(pdata);
6316 }
6317 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
6318 index 128cd648ba99c..d6f6afb67bcc6 100644
6319 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
6320 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
6321 @@ -921,6 +921,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
6322 if ((phy_id & 0xfffffff0) != 0x03625d10)
6323 return false;
6324
6325 + /* Reset PHY - wait for self-clearing reset bit to clear */
6326 + genphy_soft_reset(phy_data->phydev);
6327 +
6328 /* Disable RGMII mode */
6329 phy_write(phy_data->phydev, 0x18, 0x7007);
6330 reg = phy_read(phy_data->phydev, 0x18);
6331 @@ -1948,6 +1951,27 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
6332 xgbe_phy_put_comm_ownership(pdata);
6333 }
6334
6335 +static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
6336 +{
6337 + int reg;
6338 +
6339 + reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_PCS_DIGITAL_STAT,
6340 + XGBE_PCS_PSEQ_STATE_MASK);
6341 + if (reg == XGBE_PCS_PSEQ_STATE_POWER_GOOD) {
6342 + /* Mailbox command timed out; a reset of the RX block is
6343 + * required. This can be done by asserting the reset bit and
6344 + * waiting for its completion.
6345 + */
6346 + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
6347 + XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_ON);
6348 + ndelay(20);
6349 + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
6350 + XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_OFF);
6351 + usleep_range(40, 50);
6352 + netif_err(pdata, link, pdata->netdev, "firmware mailbox reset performed\n");
6353 + }
6354 +}
6355 +
6356 static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
6357 unsigned int cmd, unsigned int sub_cmd)
6358 {
6359 @@ -1955,9 +1979,11 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
6360 unsigned int wait;
6361
6362 /* Log if a previous command did not complete */
6363 - if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
6364 + if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
6365 netif_dbg(pdata, link, pdata->netdev,
6366 "firmware mailbox not ready for command\n");
6367 + xgbe_phy_rx_reset(pdata);
6368 + }
6369
6370 /* Construct the command */
6371 XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
6372 @@ -1979,6 +2005,9 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
6373
6374 netif_dbg(pdata, link, pdata->netdev,
6375 "firmware mailbox command did not complete\n");
6376 +
6377 + /* Reset on error */
6378 + xgbe_phy_rx_reset(pdata);
6379 }
6380
6381 static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
6382 @@ -2575,6 +2604,14 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
6383 if (reg & MDIO_STAT1_LSTATUS)
6384 return 1;
6385
6386 + if (pdata->phy.autoneg == AUTONEG_ENABLE &&
6387 + phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) {
6388 + if (!test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
6389 + netif_carrier_off(pdata->netdev);
6390 + *an_restart = 1;
6391 + }
6392 + }
6393 +
6394 /* No link, attempt a receiver reset cycle */
6395 if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
6396 phy_data->rrc_count = 0;
6397 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
6398 index 7c8187d386756..4ae49d92c1eed 100644
6399 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
6400 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
6401 @@ -8347,9 +8347,10 @@ void bnxt_tx_disable(struct bnxt *bp)
6402 txr->dev_state = BNXT_DEV_STATE_CLOSING;
6403 }
6404 }
6405 + /* Drop carrier first to prevent TX timeout */
6406 + netif_carrier_off(bp->dev);
6407 /* Stop all TX queues */
6408 netif_tx_disable(bp->dev);
6409 - netif_carrier_off(bp->dev);
6410 }
6411
6412 void bnxt_tx_enable(struct bnxt *bp)
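The reordering in this hunk matters because the generic netdev TX watchdog only treats stopped queues as timed out while the carrier is up; dropping carrier before netif_tx_disable() closes that race. A minimal sketch of the gate, paraphrased from the generic watchdog rather than taken from the bnxt driver:

#include <linux/netdevice.h>

/* Simplified form of the check the TX watchdog makes before scanning
 * queues for timeouts: once netif_carrier_off() has run, a stopped
 * queue can no longer be reported as a (false) TX timeout.
 */
static bool tx_watchdog_armed(struct net_device *dev)
{
	return netif_device_present(dev) &&
	       netif_running(dev) &&
	       netif_carrier_ok(dev);
}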
6413 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
6414 index cee582e361341..6b71ec33bf14d 100644
6415 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
6416 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
6417 @@ -44,6 +44,9 @@
6418
6419 #define MAX_ULD_QSETS 16
6420
6421 +/* ulp_mem_io + ulptx_idata + payload + padding */
6422 +#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
6423 +
6424 /* CPL message priority levels */
6425 enum {
6426 CPL_PRIORITY_DATA = 0, /* data messages */
6427 diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
6428 index 049f1bbe27ab3..57bf10b4d80c8 100644
6429 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
6430 +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
6431 @@ -2158,17 +2158,22 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
6432 * @skb: the packet
6433 *
6434 * Returns true if a packet can be sent as an offload WR with immediate
6435 - * data. We currently use the same limit as for Ethernet packets.
6436 + * data.
6437 + * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to its
6438 + * 8-bit length field. FW_ULPTX_WR commands, however, have a
6439 + * 256-byte immediate-payload limit.
6440 */
6441 static inline int is_ofld_imm(const struct sk_buff *skb)
6442 {
6443 struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
6444 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
6445
6446 - if (opcode == FW_CRYPTO_LOOKASIDE_WR)
6447 + if (unlikely(opcode == FW_ULPTX_WR))
6448 + return skb->len <= MAX_IMM_ULPTX_WR_LEN;
6449 + else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
6450 return skb->len <= SGE_MAX_WR_LEN;
6451 else
6452 - return skb->len <= MAX_IMM_TX_PKT_LEN;
6453 + return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
6454 }
6455
6456 /**
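The 304-byte MAX_IMM_ULPTX_WR_LEN cap used by is_ofld_imm() is just the sum the header comment spells out. A hedged compile-time sketch of that arithmetic, with the component sizes taken from the comment rather than re-derived from the firmware headers:

#include <linux/build_bug.h>

#define ULP_MEM_IO_LEN		32	/* ulp_mem_io header, per the comment */
#define ULPTX_IDATA_LEN		8	/* ulptx_idata sub-header */
#define ULPTX_IMM_PAYLOAD	256	/* FW_ULPTX_WR immediate payload cap */
#define ULPTX_PAD_LEN		8	/* trailing padding */

/* 32 + 8 + 256 + 8 = 304 bytes, the MAX_IMM_ULPTX_WR_LEN value above */
static_assert(ULP_MEM_IO_LEN + ULPTX_IDATA_LEN +
	      ULPTX_IMM_PAYLOAD + ULPTX_PAD_LEN == 304);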
6457 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
6458 index 0f35eec967ae8..309cdc5ebc1ff 100644
6459 --- a/drivers/net/ethernet/ibm/ibmvnic.c
6460 +++ b/drivers/net/ethernet/ibm/ibmvnic.c
6461 @@ -202,8 +202,13 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
6462 if (!ltb->buff)
6463 return;
6464
6465 + /* VIOS automatically unmaps the long term buffer at the remote
6466 + * end for the following resets:
6467 + * FAILOVER, MOBILITY, TIMEOUT.
6468 + */
6469 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
6470 - adapter->reset_reason != VNIC_RESET_MOBILITY)
6471 + adapter->reset_reason != VNIC_RESET_MOBILITY &&
6472 + adapter->reset_reason != VNIC_RESET_TIMEOUT)
6473 send_request_unmap(adapter, ltb->map_id);
6474 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
6475 }
6476 @@ -1254,10 +1259,8 @@ static int __ibmvnic_close(struct net_device *netdev)
6477
6478 adapter->state = VNIC_CLOSING;
6479 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
6480 - if (rc)
6481 - return rc;
6482 adapter->state = VNIC_CLOSED;
6483 - return 0;
6484 + return rc;
6485 }
6486
6487 static int ibmvnic_close(struct net_device *netdev)
6488 @@ -1520,6 +1523,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
6489 skb_copy_from_linear_data(skb, dst, skb->len);
6490 }
6491
6492 + /* post changes to long_term_buff *dst before VIOS accesses it */
6493 + dma_wmb();
6494 +
6495 tx_pool->consumer_index =
6496 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
6497
6498 @@ -2342,6 +2348,8 @@ restart_poll:
6499 offset = be16_to_cpu(next->rx_comp.off_frame_data);
6500 flags = next->rx_comp.flags;
6501 skb = rx_buff->skb;
6502 + /* load long_term_buff before copying to skb */
6503 + dma_rmb();
6504 skb_copy_to_linear_data(skb, rx_buff->data + offset,
6505 length);
6506
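The dma_wmb()/dma_rmb() pair added in these two hunks follows the usual producer/consumer discipline for memory shared with a device. A minimal sketch of that pairing, with illustrative names rather than the driver's own:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/barrier.h>

/* Producer: fill the shared long-term buffer, then publish it. */
static void publish_buf(void *ltb, const void *src, size_t len, u32 *ready)
{
	memcpy(ltb, src, len);
	dma_wmb();	/* data must be visible before the ready flag */
	*ready = 1;	/* the device (VIOS here) may now read ltb */
}

/* Consumer: observe the completion, then read the data. */
static void consume_buf(void *dst, const void *ltb, size_t len)
{
	dma_rmb();	/* order the completion read before data reads */
	memcpy(dst, ltb, len);
}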
6507 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
6508 index b577e6adf3bff..82c62e4678705 100644
6509 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
6510 +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
6511 @@ -4874,7 +4874,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
6512 enum i40e_admin_queue_err adq_err;
6513 struct i40e_vsi *vsi = np->vsi;
6514 struct i40e_pf *pf = vsi->back;
6515 - bool is_reset_needed;
6516 + u32 reset_needed = 0;
6517 i40e_status status;
6518 u32 i, j;
6519
6520 @@ -4919,9 +4919,11 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
6521 flags_complete:
6522 changed_flags = orig_flags ^ new_flags;
6523
6524 - is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
6525 - I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED |
6526 - I40E_FLAG_DISABLE_FW_LLDP));
6527 + if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
6528 + reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
6529 + if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
6530 + I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
6531 + reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
6532
6533 /* Before we finalize any flag changes, we need to perform some
6534 * checks to ensure that the changes are supported and safe.
6535 @@ -5038,7 +5040,7 @@ flags_complete:
6536 case I40E_AQ_RC_EEXIST:
6537 dev_warn(&pf->pdev->dev,
6538 "FW LLDP agent is already running\n");
6539 - is_reset_needed = false;
6540 + reset_needed = 0;
6541 break;
6542 case I40E_AQ_RC_EPERM:
6543 dev_warn(&pf->pdev->dev,
6544 @@ -5067,8 +5069,8 @@ flags_complete:
6545 /* Issue reset to cause things to take effect, as additional bits
6546 * are added we will need to create a mask of bits requiring reset
6547 */
6548 - if (is_reset_needed)
6549 - i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
6550 + if (reset_needed)
6551 + i40e_do_reset(pf, reset_needed, true);
6552
6553 return 0;
6554 }
6555 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
6556 index c19b45a90fcd2..0604b5aaad86f 100644
6557 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
6558 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
6559 @@ -2603,7 +2603,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
6560 return;
6561 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
6562 return;
6563 - if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
6564 + if (test_bit(__I40E_VF_DISABLE, pf->state)) {
6565 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
6566 return;
6567 }
6568 @@ -2621,7 +2621,6 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
6569 }
6570 }
6571 }
6572 - clear_bit(__I40E_VF_DISABLE, pf->state);
6573 }
6574
6575 /**
6576 @@ -7612,6 +7611,8 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
6577 if (filter->flags >= ARRAY_SIZE(flag_table))
6578 return I40E_ERR_CONFIG;
6579
6580 + memset(&cld_filter, 0, sizeof(cld_filter));
6581 +
6582 /* copy element needed to add cloud filter from filter */
6583 i40e_set_cld_element(filter, &cld_filter);
6584
6585 @@ -7675,10 +7676,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6586 return -EOPNOTSUPP;
6587
6588 /* adding filter using src_port/src_ip is not supported at this stage */
6589 - if (filter->src_port || filter->src_ipv4 ||
6590 + if (filter->src_port ||
6591 + (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
6592 !ipv6_addr_any(&filter->ip.v6.src_ip6))
6593 return -EOPNOTSUPP;
6594
6595 + memset(&cld_filter, 0, sizeof(cld_filter));
6596 +
6597 /* copy element needed to add cloud filter from filter */
6598 i40e_set_cld_element(filter, &cld_filter.element);
6599
6600 @@ -7702,7 +7706,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6601 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
6602 }
6603
6604 - } else if (filter->dst_ipv4 ||
6605 + } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
6606 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
6607 cld_filter.element.flags =
6608 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
6609 @@ -8481,11 +8485,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
6610 dev_dbg(&pf->pdev->dev, "PFR requested\n");
6611 i40e_handle_reset_warning(pf, lock_acquired);
6612
6613 - dev_info(&pf->pdev->dev,
6614 - pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
6615 - "FW LLDP is disabled\n" :
6616 - "FW LLDP is enabled\n");
6617 -
6618 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
6619 /* Request a PF Reset
6620 *
6621 @@ -8493,6 +8492,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
6622 */
6623 i40e_prep_for_reset(pf, lock_acquired);
6624 i40e_reset_and_rebuild(pf, true, lock_acquired);
6625 + dev_info(&pf->pdev->dev,
6626 + pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
6627 + "FW LLDP is disabled\n" :
6628 + "FW LLDP is enabled\n");
6629
6630 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
6631 int v;
6632 @@ -9955,7 +9958,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
6633 int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
6634 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6635 struct i40e_hw *hw = &pf->hw;
6636 - u8 set_fc_aq_fail = 0;
6637 i40e_status ret;
6638 u32 val;
6639 int v;
6640 @@ -10081,13 +10083,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
6641 i40e_stat_str(&pf->hw, ret),
6642 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6643
6644 - /* make sure our flow control settings are restored */
6645 - ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6646 - if (ret)
6647 - dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
6648 - i40e_stat_str(&pf->hw, ret),
6649 - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6650 -
6651 /* Rebuild the VSIs and VEBs that existed before reset.
6652 * They are still in our local switch element arrays, so only
6653 * need to rebuild the switch model in the HW.
6654 @@ -11770,6 +11765,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
6655 struct i40e_aqc_configure_partition_bw_data bw_data;
6656 i40e_status status;
6657
6658 + memset(&bw_data, 0, sizeof(bw_data));
6659 +
6660 /* Set the valid bit for this PF */
6661 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
6662 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
6663 @@ -14768,7 +14765,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6664 int err;
6665 u32 val;
6666 u32 i;
6667 - u8 set_fc_aq_fail;
6668
6669 err = pci_enable_device_mem(pdev);
6670 if (err)
6671 @@ -15090,24 +15086,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6672 }
6673 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
6674
6675 - /* Make sure flow control is set according to current settings */
6676 - err = i40e_set_fc(hw, &set_fc_aq_fail, true);
6677 - if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
6678 - dev_dbg(&pf->pdev->dev,
6679 - "Set fc with err %s aq_err %s on get_phy_cap\n",
6680 - i40e_stat_str(hw, err),
6681 - i40e_aq_str(hw, hw->aq.asq_last_status));
6682 - if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
6683 - dev_dbg(&pf->pdev->dev,
6684 - "Set fc with err %s aq_err %s on set_phy_config\n",
6685 - i40e_stat_str(hw, err),
6686 - i40e_aq_str(hw, hw->aq.asq_last_status));
6687 - if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
6688 - dev_dbg(&pf->pdev->dev,
6689 - "Set fc with err %s aq_err %s on get_link_info\n",
6690 - i40e_stat_str(hw, err),
6691 - i40e_aq_str(hw, hw->aq.asq_last_status));
6692 -
6693 /* if FDIR VSI was set up, start it now */
6694 for (i = 0; i < pf->num_alloc_vsi; i++) {
6695 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6696 diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
6697 index f47841f3a69d5..218aada8949d9 100644
6698 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
6699 +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
6700 @@ -3093,13 +3093,16 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
6701
6702 l4_proto = ip.v4->protocol;
6703 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
6704 + int ret;
6705 +
6706 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
6707
6708 exthdr = ip.hdr + sizeof(*ip.v6);
6709 l4_proto = ip.v6->nexthdr;
6710 - if (l4.hdr != exthdr)
6711 - ipv6_skip_exthdr(skb, exthdr - skb->data,
6712 - &l4_proto, &frag_off);
6713 + ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
6714 + &l4_proto, &frag_off);
6715 + if (ret < 0)
6716 + return -1;
6717 }
6718
6719 /* define outer transport */
6720 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
6721 index 94e3f8b869be4..7b0543056b101 100644
6722 --- a/drivers/net/ethernet/marvell/mvneta.c
6723 +++ b/drivers/net/ethernet/marvell/mvneta.c
6724 @@ -3027,7 +3027,9 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
6725 }
6726
6727 /* Setup XPS mapping */
6728 - if (txq_number > 1)
6729 + if (pp->neta_armada3700)
6730 + cpu = 0;
6731 + else if (txq_number > 1)
6732 cpu = txq->id % num_present_cpus();
6733 else
6734 cpu = pp->rxq_def % num_present_cpus();
6735 @@ -3764,6 +3766,11 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
6736 node_online);
6737 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
6738
6739 + /* Armada 3700's per-cpu interrupt for mvneta is broken: all interrupts
6740 + * are routed to CPU 0, so we don't need the cpu-hotplug support.
6741 + */
6742 + if (pp->neta_armada3700)
6743 + return 0;
6744
6745 spin_lock(&pp->lock);
6746 /*
6747 diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
6748 index 1187ef1375e29..cb341372d5a35 100644
6749 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
6750 +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
6751 @@ -4986,6 +4986,7 @@ static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule
6752
6753 if (!fs_rule->mirr_mbox) {
6754 mlx4_err(dev, "rule mirroring mailbox is null\n");
6755 + mlx4_free_cmd_mailbox(dev, mailbox);
6756 return -EINVAL;
6757 }
6758 memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
6759 diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
6760 index 1e8244ec5b332..131be1fa770cb 100644
6761 --- a/drivers/net/ethernet/realtek/r8169_main.c
6762 +++ b/drivers/net/ethernet/realtek/r8169_main.c
6763 @@ -4077,7 +4077,7 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
6764
6765 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
6766 {
6767 - RTL_W8(tp, MaxTxPacketSize, 0x3f);
6768 + RTL_W8(tp, MaxTxPacketSize, 0x24);
6769 RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
6770 RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
6771 rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
6772 @@ -4085,7 +4085,7 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
6773
6774 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
6775 {
6776 - RTL_W8(tp, MaxTxPacketSize, 0x0c);
6777 + RTL_W8(tp, MaxTxPacketSize, 0x3f);
6778 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
6779 RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
6780 rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
6781 diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
6782 index 8b94d9ad9e2ba..f87e135a8aef9 100644
6783 --- a/drivers/net/ethernet/sun/sunvnet_common.c
6784 +++ b/drivers/net/ethernet/sun/sunvnet_common.c
6785 @@ -1353,27 +1353,12 @@ sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
6786 if (vio_version_after_eq(&port->vio, 1, 3))
6787 localmtu -= VLAN_HLEN;
6788
6789 - if (skb->protocol == htons(ETH_P_IP)) {
6790 - struct flowi4 fl4;
6791 - struct rtable *rt = NULL;
6792 -
6793 - memset(&fl4, 0, sizeof(fl4));
6794 - fl4.flowi4_oif = dev->ifindex;
6795 - fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
6796 - fl4.daddr = ip_hdr(skb)->daddr;
6797 - fl4.saddr = ip_hdr(skb)->saddr;
6798 -
6799 - rt = ip_route_output_key(dev_net(dev), &fl4);
6800 - if (!IS_ERR(rt)) {
6801 - skb_dst_set(skb, &rt->dst);
6802 - icmp_send(skb, ICMP_DEST_UNREACH,
6803 - ICMP_FRAG_NEEDED,
6804 - htonl(localmtu));
6805 - }
6806 - }
6807 + if (skb->protocol == htons(ETH_P_IP))
6808 + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
6809 + htonl(localmtu));
6810 #if IS_ENABLED(CONFIG_IPV6)
6811 else if (skb->protocol == htons(ETH_P_IPV6))
6812 - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
6813 + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
6814 #endif
6815 goto out_dropped;
6816 }
6817 diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
6818 index bb6e52f3bdf9b..f98318d93ce72 100644
6819 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
6820 +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
6821 @@ -1689,6 +1689,18 @@ static int axienet_probe(struct platform_device *pdev)
6822 lp->options = XAE_OPTION_DEFAULTS;
6823 lp->rx_bd_num = RX_BD_NUM_DEFAULT;
6824 lp->tx_bd_num = TX_BD_NUM_DEFAULT;
6825 +
6826 + lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
6827 + if (IS_ERR(lp->clk)) {
6828 + ret = PTR_ERR(lp->clk);
6829 + goto free_netdev;
6830 + }
6831 + ret = clk_prepare_enable(lp->clk);
6832 + if (ret) {
6833 + dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret);
6834 + goto free_netdev;
6835 + }
6836 +
6837 /* Map device registers */
6838 ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6839 lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
6840 @@ -1836,20 +1848,6 @@ static int axienet_probe(struct platform_device *pdev)
6841
6842 lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
6843 if (lp->phy_node) {
6844 - lp->clk = devm_clk_get(&pdev->dev, NULL);
6845 - if (IS_ERR(lp->clk)) {
6846 - dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
6847 - PTR_ERR(lp->clk));
6848 - lp->clk = NULL;
6849 - } else {
6850 - ret = clk_prepare_enable(lp->clk);
6851 - if (ret) {
6852 - dev_err(&pdev->dev, "Unable to enable clock: %d\n",
6853 - ret);
6854 - goto free_netdev;
6855 - }
6856 - }
6857 -
6858 ret = axienet_mdio_setup(lp);
6859 if (ret)
6860 dev_warn(&pdev->dev,
6861 diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
6862 index 4e19c3149848b..d0653babab923 100644
6863 --- a/drivers/net/gtp.c
6864 +++ b/drivers/net/gtp.c
6865 @@ -545,9 +545,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
6866 if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
6867 mtu < ntohs(iph->tot_len)) {
6868 netdev_dbg(dev, "packet too big, fragmentation needed\n");
6869 - memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
6870 - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
6871 - htonl(mtu));
6872 + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
6873 + htonl(mtu));
6874 goto err_rt;
6875 }
6876
6877 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
6878 index 5502e145aa17b..8443df79fabc7 100644
6879 --- a/drivers/net/vxlan.c
6880 +++ b/drivers/net/vxlan.c
6881 @@ -4424,7 +4424,6 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
6882 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
6883 struct vxlan_dev *vxlan, *next;
6884 struct net_device *dev, *aux;
6885 - unsigned int h;
6886
6887 for_each_netdev_safe(net, dev, aux)
6888 if (dev->rtnl_link_ops == &vxlan_link_ops)
6889 @@ -4438,14 +4437,13 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
6890 unregister_netdevice_queue(vxlan->dev, head);
6891 }
6892
6893 - for (h = 0; h < PORT_HASH_SIZE; ++h)
6894 - WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
6895 }
6896
6897 static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
6898 {
6899 struct net *net;
6900 LIST_HEAD(list);
6901 + unsigned int h;
6902
6903 rtnl_lock();
6904 list_for_each_entry(net, net_list, exit_list)
6905 @@ -4453,6 +4451,13 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
6906
6907 unregister_netdevice_many(&list);
6908 rtnl_unlock();
6909 +
6910 + list_for_each_entry(net, net_list, exit_list) {
6911 + struct vxlan_net *vn = net_generic(net, vxlan_net_id);
6912 +
6913 + for (h = 0; h < PORT_HASH_SIZE; ++h)
6914 + WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
6915 + }
6916 }
6917
6918 static struct pernet_operations vxlan_net_ops = {
6919 diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
6920 index 63607c3b8e818..d4589b2ab3b6d 100644
6921 --- a/drivers/net/wireless/ath/ath10k/snoc.c
6922 +++ b/drivers/net/wireless/ath/ath10k/snoc.c
6923 @@ -1039,12 +1039,13 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
6924 ret = ath10k_snoc_init_pipes(ar);
6925 if (ret) {
6926 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
6927 - goto err_wlan_enable;
6928 + goto err_free_rri;
6929 }
6930
6931 return 0;
6932
6933 -err_wlan_enable:
6934 +err_free_rri:
6935 + ath10k_ce_free_rri(ar);
6936 ath10k_snoc_wlan_disable(ar);
6937
6938 return ret;
6939 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
6940 index 26ea51a721564..859a865c59950 100644
6941 --- a/drivers/net/wireless/ath/ath9k/debug.c
6942 +++ b/drivers/net/wireless/ath/ath9k/debug.c
6943 @@ -1223,8 +1223,11 @@ static ssize_t write_file_nf_override(struct file *file,
6944
6945 ah->nf_override = val;
6946
6947 - if (ah->curchan)
6948 + if (ah->curchan) {
6949 + ath9k_ps_wakeup(sc);
6950 ath9k_hw_loadnf(ah, ah->curchan);
6951 + ath9k_ps_restore(sc);
6952 + }
6953
6954 return count;
6955 }
6956 diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
6957 index d3c001fa8eb46..32ce1b42ce08b 100644
6958 --- a/drivers/net/wireless/broadcom/b43/phy_n.c
6959 +++ b/drivers/net/wireless/broadcom/b43/phy_n.c
6960 @@ -5308,7 +5308,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
6961
6962 for (i = 0; i < 4; i++) {
6963 if (dev->phy.rev >= 3)
6964 - table[i] = coef[i];
6965 + coef[i] = table[i];
6966 else
6967 coef[i] = 0;
6968 }
6969 diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
6970 index e889488b84a03..8090895873011 100644
6971 --- a/drivers/net/xen-netback/interface.c
6972 +++ b/drivers/net/xen-netback/interface.c
6973 @@ -162,13 +162,15 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
6974 {
6975 struct xenvif_queue *queue = dev_id;
6976 int old;
6977 + bool has_rx, has_tx;
6978
6979 old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
6980 WARN(old, "Interrupt while EOI pending\n");
6981
6982 - /* Use bitwise or as we need to call both functions. */
6983 - if ((!xenvif_handle_tx_interrupt(queue) |
6984 - !xenvif_handle_rx_interrupt(queue))) {
6985 + has_tx = xenvif_handle_tx_interrupt(queue);
6986 + has_rx = xenvif_handle_rx_interrupt(queue);
6987 +
6988 + if (!has_rx && !has_tx) {
6989 atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
6990 xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
6991 }
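The deleted comment leaned on a subtle trick: bitwise '|' evaluates both operands where '||' would short-circuit the second handler away. A minimal sketch of the difference, with stand-in poll helpers rather than the netback ones:

#include <linux/types.h>

static bool poll_tx(void) { return true; }	/* illustrative handlers */
static bool poll_rx(void) { return false; }

static bool any_work_subtle(void)
{
	/* Bitwise '|' runs both sides; '||' would skip poll_rx()
	 * whenever poll_tx() already reported work.
	 */
	return poll_tx() | poll_rx();
}

static bool any_work_explicit(void)
{
	bool has_tx = poll_tx();	/* both calls always happen, */
	bool has_rx = poll_rx();	/* and now it reads that way */

	return has_tx || has_rx;
}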
6992 diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
6993 index 84f4078216a36..3ba68baeed1db 100644
6994 --- a/drivers/nvmem/core.c
6995 +++ b/drivers/nvmem/core.c
6996 @@ -314,7 +314,9 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
6997
6998 for_each_child_of_node(parent, child) {
6999 addr = of_get_property(child, "reg", &len);
7000 - if (!addr || (len < 2 * sizeof(u32))) {
7001 + if (!addr)
7002 + continue;
7003 + if (len < 2 * sizeof(u32)) {
7004 dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
7005 return -EINVAL;
7006 }
7007 @@ -345,6 +347,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
7008 cell->name, nvmem->stride);
7009 /* Cells already added will be freed later. */
7010 kfree_const(cell->name);
7011 + of_node_put(cell->np);
7012 kfree(cell);
7013 return -EINVAL;
7014 }
7015 diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
7016 index 223d617ecfe17..fc24102e25ce7 100644
7017 --- a/drivers/of/fdt.c
7018 +++ b/drivers/of/fdt.c
7019 @@ -1153,8 +1153,16 @@ int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
7020 int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
7021 phys_addr_t size, bool nomap)
7022 {
7023 - if (nomap)
7024 - return memblock_remove(base, size);
7025 + if (nomap) {
7026 + /*
7027 + * If the memory is already reserved (by another region), we
7028 + * should not allow it to be marked nomap.
7029 + */
7030 + if (memblock_is_region_reserved(base, size))
7031 + return -EBUSY;
7032 +
7033 + return memblock_mark_nomap(base, size);
7034 + }
7035 return memblock_reserve(base, size);
7036 }
7037
7038 diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
7039 index 14196c0287a24..a8eab4e67af10 100644
7040 --- a/drivers/pci/controller/dwc/pcie-qcom.c
7041 +++ b/drivers/pci/controller/dwc/pcie-qcom.c
7042 @@ -402,7 +402,9 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
7043
7044 /* enable external reference clock */
7045 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
7046 - val &= ~PHY_REFCLK_USE_PAD;
7047 + /* USE_PAD is required only for ipq806x */
7048 + if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
7049 + val &= ~PHY_REFCLK_USE_PAD;
7050 val |= PHY_REFCLK_SSP_EN;
7051 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
7052
7053 diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
7054 index d21fa04fa44d2..da5023e27951a 100644
7055 --- a/drivers/pci/setup-res.c
7056 +++ b/drivers/pci/setup-res.c
7057 @@ -409,10 +409,16 @@ EXPORT_SYMBOL(pci_release_resource);
7058 int pci_resize_resource(struct pci_dev *dev, int resno, int size)
7059 {
7060 struct resource *res = dev->resource + resno;
7061 + struct pci_host_bridge *host;
7062 int old, ret;
7063 u32 sizes;
7064 u16 cmd;
7065
7066 + /* Check if we must preserve the firmware's resource assignment */
7067 + host = pci_find_host_bridge(dev->bus);
7068 + if (host->preserve_config)
7069 + return -ENOTSUPP;
7070 +
7071 /* Make sure the resource isn't assigned before resizing it. */
7072 if (!(res->flags & IORESOURCE_UNSET))
7073 return -EBUSY;
7074 diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
7075 index 31e39558d49d8..8b003c890b87b 100644
7076 --- a/drivers/pci/syscall.c
7077 +++ b/drivers/pci/syscall.c
7078 @@ -20,7 +20,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
7079 u16 word;
7080 u32 dword;
7081 long err;
7082 - long cfg_ret;
7083 + int cfg_ret;
7084
7085 if (!capable(CAP_SYS_ADMIN))
7086 return -EPERM;
7087 @@ -46,7 +46,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
7088 }
7089
7090 err = -EIO;
7091 - if (cfg_ret != PCIBIOS_SUCCESSFUL)
7092 + if (cfg_ret)
7093 goto error;
7094
7095 switch (len) {
7096 @@ -105,7 +105,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
7097 if (err)
7098 break;
7099 err = pci_user_write_config_byte(dev, off, byte);
7100 - if (err != PCIBIOS_SUCCESSFUL)
7101 + if (err)
7102 err = -EIO;
7103 break;
7104
7105 @@ -114,7 +114,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
7106 if (err)
7107 break;
7108 err = pci_user_write_config_word(dev, off, word);
7109 - if (err != PCIBIOS_SUCCESSFUL)
7110 + if (err)
7111 err = -EIO;
7112 break;
7113
7114 @@ -123,7 +123,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
7115 if (err)
7116 break;
7117 err = pci_user_write_config_dword(dev, off, dword);
7118 - if (err != PCIBIOS_SUCCESSFUL)
7119 + if (err)
7120 err = -EIO;
7121 break;
7122
7123 diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
7124 index 2dc19ddd120f5..a005fc58bbf02 100644
7125 --- a/drivers/phy/rockchip/phy-rockchip-emmc.c
7126 +++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
7127 @@ -240,15 +240,17 @@ static int rockchip_emmc_phy_init(struct phy *phy)
7128 * - SDHCI driver to get the PHY
7129 * - SDHCI driver to init the PHY
7130 *
7131 - * The clock is optional, so upon any error we just set to NULL.
7132 + * The clock is optional, so use clk_get_optional() to get it and
7133 + * do error processing only when it returns a non-NULL error pointer.
7134 *
7135 * NOTE: we don't do anything special for EPROBE_DEFER here. Given the
7136 * above expected use case, EPROBE_DEFER isn't sensible to expect, so
7137 * it's just like any other error.
7138 */
7139 - rk_phy->emmcclk = clk_get(&phy->dev, "emmcclk");
7140 + rk_phy->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
7141 if (IS_ERR(rk_phy->emmcclk)) {
7142 - dev_dbg(&phy->dev, "Error getting emmcclk: %d\n", ret);
7143 + ret = PTR_ERR(rk_phy->emmcclk);
7144 + dev_err(&phy->dev, "Error getting emmcclk: %d\n", ret);
7145 rk_phy->emmcclk = NULL;
7146 }
7147
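clk_get_optional() returns NULL, not an error pointer, when the clock simply isn't described, so after this change IS_ERR() only trips on genuine failures and a NULL clk remains a harmless no-op everywhere in the clk API. A minimal sketch of that contract (the devm variant and helper name are assumptions for illustration):

#include <linux/clk.h>

static int grab_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(dev, "emmcclk");

	if (IS_ERR(clk))	/* a real failure, e.g. -EPROBE_DEFER */
		return PTR_ERR(clk);

	*out = clk;	/* may be NULL: the clk API treats a NULL
			 * clock as a valid no-op */
	return 0;
}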
7148 diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
7149 index e341cc5c0ea6f..c84df27cd5482 100644
7150 --- a/drivers/power/reset/at91-sama5d2_shdwc.c
7151 +++ b/drivers/power/reset/at91-sama5d2_shdwc.c
7152 @@ -37,7 +37,7 @@
7153
7154 #define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */
7155 #define AT91_SHDW_WKUPDBC_SHIFT 24
7156 -#define AT91_SHDW_WKUPDBC_MASK GENMASK(31, 16)
7157 +#define AT91_SHDW_WKUPDBC_MASK GENMASK(26, 24)
7158 #define AT91_SHDW_WKUPDBC(x) (((x) << AT91_SHDW_WKUPDBC_SHIFT) \
7159 & AT91_SHDW_WKUPDBC_MASK)
7160
7161 diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
7162 index 73352e6fbccbf..6ad6aad215cf1 100644
7163 --- a/drivers/pwm/pwm-rockchip.c
7164 +++ b/drivers/pwm/pwm-rockchip.c
7165 @@ -361,7 +361,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
7166
7167 ret = pwmchip_add(&pc->chip);
7168 if (ret < 0) {
7169 - clk_unprepare(pc->clk);
7170 dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
7171 goto err_pclk;
7172 }
7173 diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
7174 index aefc351bfed59..86a3c2dd05848 100644
7175 --- a/drivers/regulator/axp20x-regulator.c
7176 +++ b/drivers/regulator/axp20x-regulator.c
7177 @@ -1072,7 +1072,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
7178 static int axp20x_regulator_parse_dt(struct platform_device *pdev)
7179 {
7180 struct device_node *np, *regulators;
7181 - int ret;
7182 + int ret = 0;
7183 u32 dcdcfreq = 0;
7184
7185 np = of_node_get(pdev->dev.parent->of_node);
7186 @@ -1087,13 +1087,12 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
7187 ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
7188 if (ret < 0) {
7189 dev_err(&pdev->dev, "Error setting dcdc frequency: %d\n", ret);
7190 - return ret;
7191 }
7192 -
7193 of_node_put(regulators);
7194 }
7195
7196 - return 0;
7197 + of_node_put(np);
7198 + return ret;
7199 }
7200
7201 static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode)
7202 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
7203 index 5b9d570df85cc..a31b6ae92a84e 100644
7204 --- a/drivers/regulator/core.c
7205 +++ b/drivers/regulator/core.c
7206 @@ -1576,7 +1576,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
7207 const char *supply_name)
7208 {
7209 struct regulator *regulator;
7210 - int err;
7211 + int err = 0;
7212
7213 if (dev) {
7214 char buf[REG_STR_SIZE];
7215 @@ -1622,8 +1622,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
7216 }
7217 }
7218
7219 - regulator->debugfs = debugfs_create_dir(supply_name,
7220 - rdev->debugfs);
7221 + if (err != -EEXIST)
7222 + regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
7223 if (!regulator->debugfs) {
7224 rdev_dbg(rdev, "Failed to create debugfs directory\n");
7225 } else {
7226 diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
7227 index a47f87b8373df..68d22acdb037a 100644
7228 --- a/drivers/regulator/qcom-rpmh-regulator.c
7229 +++ b/drivers/regulator/qcom-rpmh-regulator.c
7230 @@ -874,7 +874,7 @@ static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
7231 RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
7232 RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
7233 RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
7234 - RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
7235 + RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
7236 {},
7237 };
7238
7239 diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
7240 index 6ca27e9d5ef7d..5276f8442f3c6 100644
7241 --- a/drivers/regulator/s5m8767.c
7242 +++ b/drivers/regulator/s5m8767.c
7243 @@ -544,14 +544,18 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
7244 rdata = devm_kcalloc(&pdev->dev,
7245 pdata->num_regulators, sizeof(*rdata),
7246 GFP_KERNEL);
7247 - if (!rdata)
7248 + if (!rdata) {
7249 + of_node_put(regulators_np);
7250 return -ENOMEM;
7251 + }
7252
7253 rmode = devm_kcalloc(&pdev->dev,
7254 pdata->num_regulators, sizeof(*rmode),
7255 GFP_KERNEL);
7256 - if (!rmode)
7257 + if (!rmode) {
7258 + of_node_put(regulators_np);
7259 return -ENOMEM;
7260 + }
7261
7262 pdata->regulators = rdata;
7263 pdata->opmode = rmode;
7264 @@ -574,10 +578,13 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
7265 0,
7266 GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
7267 "s5m8767");
7268 - if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT)
7269 + if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT) {
7270 rdata->ext_control_gpiod = NULL;
7271 - else if (IS_ERR(rdata->ext_control_gpiod))
7272 + } else if (IS_ERR(rdata->ext_control_gpiod)) {
7273 + of_node_put(reg_np);
7274 + of_node_put(regulators_np);
7275 return PTR_ERR(rdata->ext_control_gpiod);
7276 + }
7277
7278 rdata->id = i;
7279 rdata->initdata = of_get_regulator_init_data(
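All three hunks fix the same leak class: references taken on device_node objects must be dropped on every early exit, including the child reference a for_each_child_of_node() loop still holds when it is broken out of. A minimal sketch of the pattern, with a hypothetical handle_child() step standing in for the per-regulator parsing:

#include <linux/of.h>

static int handle_child(struct device_node *child) { return 0; }	/* stub */

static int parse_regulators(struct device_node *parent)
{
	struct device_node *np, *child;
	int ret;

	np = of_get_child_by_name(parent, "regulators");	/* takes a ref */
	if (!np)
		return -ENODEV;

	for_each_child_of_node(np, child) {
		ret = handle_child(child);
		if (ret) {
			of_node_put(child);	/* ref held by the iterator */
			of_node_put(np);	/* drop before bailing out */
			return ret;
		}
	}

	of_node_put(np);
	return 0;
}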
7280 diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
7281 index c5b9804140860..9ae7ce3f50696 100644
7282 --- a/drivers/rtc/Kconfig
7283 +++ b/drivers/rtc/Kconfig
7284 @@ -683,6 +683,7 @@ config RTC_DRV_S5M
7285 tristate "Samsung S2M/S5M series"
7286 depends on MFD_SEC_CORE || COMPILE_TEST
7287 select REGMAP_IRQ
7288 + select REGMAP_I2C
7289 help
7290 If you say yes here you will get support for the
7291 RTC of Samsung S2MPS14 and S5M PMIC series.
7292 diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
7293 index 957889a42d2ea..f6f03a349c3f0 100644
7294 --- a/drivers/s390/virtio/virtio_ccw.c
7295 +++ b/drivers/s390/virtio/virtio_ccw.c
7296 @@ -117,7 +117,7 @@ struct virtio_rev_info {
7297 };
7298
7299 /* the highest virtio-ccw revision we support */
7300 -#define VIRTIO_CCW_REV_MAX 1
7301 +#define VIRTIO_CCW_REV_MAX 2
7302
7303 struct virtio_ccw_vq_info {
7304 struct virtqueue *vq;
7305 @@ -952,7 +952,7 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
7306 u8 old_status = vcdev->dma_area->status;
7307 struct ccw1 *ccw;
7308
7309 - if (vcdev->revision < 1)
7310 + if (vcdev->revision < 2)
7311 return vcdev->dma_area->status;
7312
7313 ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
7314 diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
7315 index e0ccb48ec9617..40e9c9dc04bd8 100644
7316 --- a/drivers/scsi/bnx2fc/Kconfig
7317 +++ b/drivers/scsi/bnx2fc/Kconfig
7318 @@ -5,6 +5,7 @@ config SCSI_BNX2X_FCOE
7319 depends on (IPV6 || IPV6=n)
7320 depends on LIBFC
7321 depends on LIBFCOE
7322 + depends on MMU
7323 select NETDEVICES
7324 select ETHERNET
7325 select NET_VENDOR_BROADCOM
7326 diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
7327 index f3d8d53ab84de..dbe5325a324d5 100644
7328 --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
7329 +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
7330 @@ -11,6 +11,7 @@
7331 */
7332
7333 #include <linux/bitops.h>
7334 +#include <linux/clk.h>
7335 #include <linux/interrupt.h>
7336 #include <linux/fs.h>
7337 #include <linux/kfifo.h>
7338 @@ -67,6 +68,7 @@ struct aspeed_lpc_snoop_channel {
7339 struct aspeed_lpc_snoop {
7340 struct regmap *regmap;
7341 int irq;
7342 + struct clk *clk;
7343 struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS];
7344 };
7345
7346 @@ -282,22 +284,42 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
7347 return -ENODEV;
7348 }
7349
7350 + lpc_snoop->clk = devm_clk_get(dev, NULL);
7351 + if (IS_ERR(lpc_snoop->clk)) {
7352 + rc = PTR_ERR(lpc_snoop->clk);
7353 + if (rc != -EPROBE_DEFER)
7354 + dev_err(dev, "couldn't get clock\n");
7355 + return rc;
7356 + }
7357 + rc = clk_prepare_enable(lpc_snoop->clk);
7358 + if (rc) {
7359 + dev_err(dev, "couldn't enable clock\n");
7360 + return rc;
7361 + }
7362 +
7363 rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev);
7364 if (rc)
7365 - return rc;
7366 + goto err;
7367
7368 rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port);
7369 if (rc)
7370 - return rc;
7371 + goto err;
7372
7373 /* Configuration of 2nd snoop channel port is optional */
7374 if (of_property_read_u32_index(dev->of_node, "snoop-ports",
7375 1, &port) == 0) {
7376 rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port);
7377 - if (rc)
7378 + if (rc) {
7379 aspeed_lpc_disable_snoop(lpc_snoop, 0);
7380 + goto err;
7381 + }
7382 }
7383
7384 + return 0;
7385 +
7386 +err:
7387 + clk_disable_unprepare(lpc_snoop->clk);
7388 +
7389 return rc;
7390 }
7391
7392 @@ -309,6 +331,8 @@ static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
7393 aspeed_lpc_disable_snoop(lpc_snoop, 0);
7394 aspeed_lpc_disable_snoop(lpc_snoop, 1);
7395
7396 + clk_disable_unprepare(lpc_snoop->clk);
7397 +
7398 return 0;
7399 }
7400
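The reworked probe above funnels every post-enable failure through one label so the freshly enabled clock is always disabled again. A minimal sketch of that goto-unwind idiom, generic rather than this driver's, with a hypothetical setup_hw() step:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int setup_hw(struct platform_device *pdev) { return 0; }	/* stub */

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int rc;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* nothing to undo yet */

	rc = clk_prepare_enable(clk);
	if (rc)
		return rc;

	rc = setup_hw(pdev);
	if (rc)
		goto err;	/* every later failure unwinds here */

	return 0;

err:
	clk_disable_unprepare(clk);	/* undo in reverse order */
	return rc;
}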
7401 diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
7402 index e3d06330d1258..f7d0f63921dc2 100644
7403 --- a/drivers/soundwire/cadence_master.c
7404 +++ b/drivers/soundwire/cadence_master.c
7405 @@ -368,10 +368,10 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
7406 if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
7407 no_ack = 1;
7408 dev_dbg_ratelimited(cdns->dev, "Msg Ack not received\n");
7409 - if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
7410 - nack = 1;
7411 - dev_err_ratelimited(cdns->dev, "Msg NACK received\n");
7412 - }
7413 + }
7414 + if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
7415 + nack = 1;
7416 + dev_err_ratelimited(cdns->dev, "Msg NACK received\n");
7417 }
7418 }
7419
7420 diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
7421 index abbc1582f457e..d9711ea5b01d3 100644
7422 --- a/drivers/spi/spi-atmel.c
7423 +++ b/drivers/spi/spi-atmel.c
7424 @@ -1569,7 +1569,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
7425 if (ret == 0) {
7426 as->use_dma = true;
7427 } else if (ret == -EPROBE_DEFER) {
7428 - return ret;
7429 + goto out_unmap_regs;
7430 }
7431 } else if (as->caps.has_pdc_support) {
7432 as->use_pdc = true;
7433 diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
7434 index f236e3034cf85..aafac128bb5f1 100644
7435 --- a/drivers/spi/spi-pxa2xx-pci.c
7436 +++ b/drivers/spi/spi-pxa2xx-pci.c
7437 @@ -21,7 +21,8 @@ enum {
7438 PORT_BSW1,
7439 PORT_BSW2,
7440 PORT_CE4100,
7441 - PORT_LPT,
7442 + PORT_LPT0,
7443 + PORT_LPT1,
7444 };
7445
7446 struct pxa_spi_info {
7447 @@ -57,8 +58,10 @@ static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 };
7448 static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
7449 static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
7450
7451 -static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 };
7452 -static struct dw_dma_slave lpt_rx_param = { .src_id = 1 };
7453 +static struct dw_dma_slave lpt1_tx_param = { .dst_id = 0 };
7454 +static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 };
7455 +static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 };
7456 +static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 };
7457
7458 static bool lpss_dma_filter(struct dma_chan *chan, void *param)
7459 {
7460 @@ -185,12 +188,19 @@ static struct pxa_spi_info spi_info_configs[] = {
7461 .num_chipselect = 1,
7462 .max_clk_rate = 50000000,
7463 },
7464 - [PORT_LPT] = {
7465 + [PORT_LPT0] = {
7466 .type = LPSS_LPT_SSP,
7467 .port_id = 0,
7468 .setup = lpss_spi_setup,
7469 - .tx_param = &lpt_tx_param,
7470 - .rx_param = &lpt_rx_param,
7471 + .tx_param = &lpt0_tx_param,
7472 + .rx_param = &lpt0_rx_param,
7473 + },
7474 + [PORT_LPT1] = {
7475 + .type = LPSS_LPT_SSP,
7476 + .port_id = 1,
7477 + .setup = lpss_spi_setup,
7478 + .tx_param = &lpt1_tx_param,
7479 + .rx_param = &lpt1_rx_param,
7480 },
7481 };
7482
7483 @@ -285,8 +295,9 @@ static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
7484 { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
7485 { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
7486 { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
7487 - { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT },
7488 - { },
7489 + { PCI_VDEVICE(INTEL, 0x9ce5), PORT_LPT0 },
7490 + { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT1 },
7491 + { }
7492 };
7493 MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
7494
7495 diff --git a/drivers/spi/spi-s3c24xx-fiq.S b/drivers/spi/spi-s3c24xx-fiq.S
7496 index e95d6282109e7..68ea12bead227 100644
7497 --- a/drivers/spi/spi-s3c24xx-fiq.S
7498 +++ b/drivers/spi/spi-s3c24xx-fiq.S
7499 @@ -33,7 +33,6 @@
7500 @ and an offset to the irq acknowledgment word
7501
7502 ENTRY(s3c24xx_spi_fiq_rx)
7503 -s3c24xx_spi_fix_rx:
7504 .word fiq_rx_end - fiq_rx_start
7505 .word fiq_rx_irq_ack - fiq_rx_start
7506 fiq_rx_start:
7507 @@ -47,7 +46,7 @@ fiq_rx_start:
7508 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
7509
7510 subs fiq_rcount, fiq_rcount, #1
7511 - subnes pc, lr, #4 @@ return, still have work to do
7512 + subsne pc, lr, #4 @@ return, still have work to do
7513
7514 @@ set IRQ controller so that next op will trigger IRQ
7515 mov fiq_rtmp, #0
7516 @@ -59,7 +58,6 @@ fiq_rx_irq_ack:
7517 fiq_rx_end:
7518
7519 ENTRY(s3c24xx_spi_fiq_txrx)
7520 -s3c24xx_spi_fiq_txrx:
7521 .word fiq_txrx_end - fiq_txrx_start
7522 .word fiq_txrx_irq_ack - fiq_txrx_start
7523 fiq_txrx_start:
7524 @@ -74,7 +72,7 @@ fiq_txrx_start:
7525 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
7526
7527 subs fiq_rcount, fiq_rcount, #1
7528 - subnes pc, lr, #4 @@ return, still have work to do
7529 + subsne pc, lr, #4 @@ return, still have work to do
7530
7531 mov fiq_rtmp, #0
7532 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
7533 @@ -86,7 +84,6 @@ fiq_txrx_irq_ack:
7534 fiq_txrx_end:
7535
7536 ENTRY(s3c24xx_spi_fiq_tx)
7537 -s3c24xx_spi_fix_tx:
7538 .word fiq_tx_end - fiq_tx_start
7539 .word fiq_tx_irq_ack - fiq_tx_start
7540 fiq_tx_start:
7541 @@ -99,7 +96,7 @@ fiq_tx_start:
7542 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
7543
7544 subs fiq_rcount, fiq_rcount, #1
7545 - subnes pc, lr, #4 @@ return, still have work to do
7546 + subsne pc, lr, #4 @@ return, still have work to do
7547
7548 mov fiq_rtmp, #0
7549 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
7550 diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
7551 index 77ddf23b65d65..8622cf9d3f640 100644
7552 --- a/drivers/spi/spi-stm32.c
7553 +++ b/drivers/spi/spi-stm32.c
7554 @@ -1668,6 +1668,10 @@ static int stm32_spi_transfer_one(struct spi_master *master,
7555 struct stm32_spi *spi = spi_master_get_devdata(master);
7556 int ret;
7557
7558 + /* Don't do anything on 0-byte transfers */
7559 + if (transfer->len == 0)
7560 + return 0;
7561 +
7562 spi->tx_buf = transfer->tx_buf;
7563 spi->rx_buf = transfer->rx_buf;
7564 spi->tx_len = spi->tx_buf ? transfer->len : 0;
7565 diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
7566 index 5ab5119e2f1b0..785e7c4451233 100644
7567 --- a/drivers/spi/spi-synquacer.c
7568 +++ b/drivers/spi/spi-synquacer.c
7569 @@ -490,6 +490,10 @@ static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
7570 val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
7571 SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
7572 val |= spi->chip_select << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;
7573 +
7574 + if (!enable)
7575 + val |= SYNQUACER_HSSPI_DMSTOP_STOP;
7576 +
7577 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
7578 }
7579
7580 diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
7581 index de844b4121107..bbbd311eda030 100644
7582 --- a/drivers/spmi/spmi-pmic-arb.c
7583 +++ b/drivers/spmi/spmi-pmic-arb.c
7584 @@ -1,6 +1,6 @@
7585 // SPDX-License-Identifier: GPL-2.0-only
7586 /*
7587 - * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
7588 + * Copyright (c) 2012-2015, 2017, 2021, The Linux Foundation. All rights reserved.
7589 */
7590 #include <linux/bitmap.h>
7591 #include <linux/delay.h>
7592 @@ -505,8 +505,7 @@ static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
7593 static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
7594 {
7595 unsigned int irq;
7596 - u32 status;
7597 - int id;
7598 + u32 status, id;
7599 u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF;
7600 u8 per = pmic_arb->apid_data[apid].ppid & 0xFF;
7601
7602 diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
7603 index dc4da66c3695b..54bdb64f52e88 100644
7604 --- a/drivers/staging/gdm724x/gdm_usb.c
7605 +++ b/drivers/staging/gdm724x/gdm_usb.c
7606 @@ -56,20 +56,24 @@ static int gdm_usb_recv(void *priv_dev,
7607
7608 static int request_mac_address(struct lte_udev *udev)
7609 {
7610 - u8 buf[16] = {0,};
7611 - struct hci_packet *hci = (struct hci_packet *)buf;
7612 + struct hci_packet *hci;
7613 struct usb_device *usbdev = udev->usbdev;
7614 int actual;
7615 int ret = -1;
7616
7617 + hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL);
7618 + if (!hci)
7619 + return -ENOMEM;
7620 +
7621 hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION);
7622 hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1);
7623 hci->data[0] = MAC_ADDRESS;
7624
7625 - ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
7626 + ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5,
7627 &actual, 1000);
7628
7629 udev->request_mac_addr = 1;
7630 + kfree(hci);
7631
7632 return ret;
7633 }
7634 diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
7635 index 2b635ebf62d6a..a15d970adb983 100644
7636 --- a/drivers/staging/media/imx/imx-media-csc-scaler.c
7637 +++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
7638 @@ -866,11 +866,7 @@ void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev)
7639 struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
7640 struct video_device *vfd = priv->vdev.vfd;
7641
7642 - mutex_lock(&priv->mutex);
7643 -
7644 video_unregister_device(vfd);
7645 -
7646 - mutex_unlock(&priv->mutex);
7647 }
7648
7649 struct imx_media_video_dev *
7650 diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
7651 index 2c3c2adca6832..e16408af92d9c 100644
7652 --- a/drivers/staging/media/imx/imx-media-dev.c
7653 +++ b/drivers/staging/media/imx/imx-media-dev.c
7654 @@ -53,6 +53,7 @@ static int imx6_media_probe_complete(struct v4l2_async_notifier *notifier)
7655 imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
7656 if (IS_ERR(imxmd->m2m_vdev)) {
7657 ret = PTR_ERR(imxmd->m2m_vdev);
7658 + imxmd->m2m_vdev = NULL;
7659 goto unlock;
7660 }
7661
7662 @@ -107,10 +108,14 @@ static int imx_media_remove(struct platform_device *pdev)
7663
7664 v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
7665
7666 + if (imxmd->m2m_vdev) {
7667 + imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
7668 + imxmd->m2m_vdev = NULL;
7669 + }
7670 +
7671 v4l2_async_notifier_unregister(&imxmd->notifier);
7672 imx_media_unregister_ipu_internal_subdevs(imxmd);
7673 v4l2_async_notifier_cleanup(&imxmd->notifier);
7674 - imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
7675 media_device_unregister(&imxmd->md);
7676 v4l2_device_unregister(&imxmd->v4l2_dev);
7677 media_device_cleanup(&imxmd->md);
7678 diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile
7679 index 66da1bf10c32e..23256d1286f3e 100644
7680 --- a/drivers/staging/mt7621-dma/Makefile
7681 +++ b/drivers/staging/mt7621-dma/Makefile
7682 @@ -1,4 +1,4 @@
7683 # SPDX-License-Identifier: GPL-2.0
7684 -obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
7685 +obj-$(CONFIG_MTK_HSDMA) += hsdma-mt7621.o
7686
7687 ccflags-y += -I$(srctree)/drivers/dma
7688 diff --git a/drivers/staging/mt7621-dma/hsdma-mt7621.c b/drivers/staging/mt7621-dma/hsdma-mt7621.c
7689 new file mode 100644
7690 index 0000000000000..803b66d8ee6b5
7691 --- /dev/null
7692 +++ b/drivers/staging/mt7621-dma/hsdma-mt7621.c
7693 @@ -0,0 +1,762 @@
7694 +// SPDX-License-Identifier: GPL-2.0+
7695 +/*
7696 + * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
7697 + * MTK HSDMA support
7698 + */
7699 +
7700 +#include <linux/dmaengine.h>
7701 +#include <linux/dma-mapping.h>
7702 +#include <linux/err.h>
7703 +#include <linux/init.h>
7704 +#include <linux/list.h>
7705 +#include <linux/module.h>
7706 +#include <linux/platform_device.h>
7707 +#include <linux/slab.h>
7708 +#include <linux/spinlock.h>
7709 +#include <linux/irq.h>
7710 +#include <linux/of_dma.h>
7711 +#include <linux/reset.h>
7712 +#include <linux/of_device.h>
7713 +
7714 +#include "virt-dma.h"
7715 +
7716 +#define HSDMA_BASE_OFFSET 0x800
7717 +
7718 +#define HSDMA_REG_TX_BASE 0x00
7719 +#define HSDMA_REG_TX_CNT 0x04
7720 +#define HSDMA_REG_TX_CTX 0x08
7721 +#define HSDMA_REG_TX_DTX 0x0c
7722 +#define HSDMA_REG_RX_BASE 0x100
7723 +#define HSDMA_REG_RX_CNT 0x104
7724 +#define HSDMA_REG_RX_CRX 0x108
7725 +#define HSDMA_REG_RX_DRX 0x10c
7726 +#define HSDMA_REG_INFO 0x200
7727 +#define HSDMA_REG_GLO_CFG 0x204
7728 +#define HSDMA_REG_RST_CFG 0x208
7729 +#define HSDMA_REG_DELAY_INT 0x20c
7730 +#define HSDMA_REG_FREEQ_THRES 0x210
7731 +#define HSDMA_REG_INT_STATUS 0x220
7732 +#define HSDMA_REG_INT_MASK 0x228
7733 +#define HSDMA_REG_SCH_Q01 0x280
7734 +#define HSDMA_REG_SCH_Q23 0x284
7735 +
7736 +#define HSDMA_DESCS_MAX 0xfff
7737 +#define HSDMA_DESCS_NUM 8
7738 +#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
7739 +#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
7740 +
7741 +/* HSDMA_REG_INFO */
7742 +#define HSDMA_INFO_INDEX_MASK 0xf
7743 +#define HSDMA_INFO_INDEX_SHIFT 24
7744 +#define HSDMA_INFO_BASE_MASK 0xff
7745 +#define HSDMA_INFO_BASE_SHIFT 16
7746 +#define HSDMA_INFO_RX_MASK 0xff
7747 +#define HSDMA_INFO_RX_SHIFT 8
7748 +#define HSDMA_INFO_TX_MASK 0xff
7749 +#define HSDMA_INFO_TX_SHIFT 0
7750 +
7751 +/* HSDMA_REG_GLO_CFG */
7752 +#define HSDMA_GLO_TX_2B_OFFSET BIT(31)
7753 +#define HSDMA_GLO_CLK_GATE BIT(30)
7754 +#define HSDMA_GLO_BYTE_SWAP BIT(29)
7755 +#define HSDMA_GLO_MULTI_DMA BIT(10)
7756 +#define HSDMA_GLO_TWO_BUF BIT(9)
7757 +#define HSDMA_GLO_32B_DESC BIT(8)
7758 +#define HSDMA_GLO_BIG_ENDIAN BIT(7)
7759 +#define HSDMA_GLO_TX_DONE BIT(6)
7760 +#define HSDMA_GLO_BT_MASK 0x3
7761 +#define HSDMA_GLO_BT_SHIFT 4
7762 +#define HSDMA_GLO_RX_BUSY BIT(3)
7763 +#define HSDMA_GLO_RX_DMA BIT(2)
7764 +#define HSDMA_GLO_TX_BUSY BIT(1)
7765 +#define HSDMA_GLO_TX_DMA BIT(0)
7766 +
7767 +#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
7768 +#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
7769 +#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
7770 +#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
7771 +
7772 +#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
7773 + HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
7774 +
7775 +/* HSDMA_REG_RST_CFG */
7776 +#define HSDMA_RST_RX_SHIFT 16
7777 +#define HSDMA_RST_TX_SHIFT 0
7778 +
7779 +/* HSDMA_REG_DELAY_INT */
7780 +#define HSDMA_DELAY_INT_EN BIT(15)
7781 +#define HSDMA_DELAY_PEND_OFFSET 8
7782 +#define HSDMA_DELAY_TIME_OFFSET 0
7783 +#define HSDMA_DELAY_TX_OFFSET 16
7784 +#define HSDMA_DELAY_RX_OFFSET 0
7785 +
7786 +#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
7787 + ((x) << HSDMA_DELAY_PEND_OFFSET))
7788 +#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
7789 + HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
7790 +
7791 +/* HSDMA_REG_INT_STATUS */
7792 +#define HSDMA_INT_DELAY_RX_COH BIT(31)
7793 +#define HSDMA_INT_DELAY_RX_INT BIT(30)
7794 +#define HSDMA_INT_DELAY_TX_COH BIT(29)
7795 +#define HSDMA_INT_DELAY_TX_INT BIT(28)
7796 +#define HSDMA_INT_RX_MASK 0x3
7797 +#define HSDMA_INT_RX_SHIFT 16
7798 +#define HSDMA_INT_RX_Q0 BIT(16)
7799 +#define HSDMA_INT_TX_MASK 0xf
7800 +#define HSDMA_INT_TX_SHIFT 0
7801 +#define HSDMA_INT_TX_Q0 BIT(0)
7802 +
7803 +/* tx/rx dma desc flags */
7804 +#define HSDMA_PLEN_MASK 0x3fff
7805 +#define HSDMA_DESC_DONE BIT(31)
7806 +#define HSDMA_DESC_LS0 BIT(30)
7807 +#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
7808 +#define HSDMA_DESC_TAG BIT(15)
7809 +#define HSDMA_DESC_LS1 BIT(14)
7810 +#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
7811 +
7812 +/* align 4 bytes */
7813 +#define HSDMA_ALIGN_SIZE 3
7814 +/* align size 128bytes */
7815 +#define HSDMA_MAX_PLEN 0x3f80
7816 +
7817 +struct hsdma_desc {
7818 + u32 addr0;
7819 + u32 flags;
7820 + u32 addr1;
7821 + u32 unused;
7822 +};
7823 +
7824 +struct mtk_hsdma_sg {
7825 + dma_addr_t src_addr;
7826 + dma_addr_t dst_addr;
7827 + u32 len;
7828 +};
7829 +
7830 +struct mtk_hsdma_desc {
7831 + struct virt_dma_desc vdesc;
7832 + unsigned int num_sgs;
7833 + struct mtk_hsdma_sg sg[1];
7834 +};
7835 +
7836 +struct mtk_hsdma_chan {
7837 + struct virt_dma_chan vchan;
7838 + unsigned int id;
7839 + dma_addr_t desc_addr;
7840 + int tx_idx;
7841 + int rx_idx;
7842 + struct hsdma_desc *tx_ring;
7843 + struct hsdma_desc *rx_ring;
7844 + struct mtk_hsdma_desc *desc;
7845 + unsigned int next_sg;
7846 +};
7847 +
7848 +struct mtk_hsdam_engine {
7849 + struct dma_device ddev;
7850 + struct device_dma_parameters dma_parms;
7851 + void __iomem *base;
7852 + struct tasklet_struct task;
7853 + volatile unsigned long chan_issued;
7854 +
7855 + struct mtk_hsdma_chan chan[1];
7856 +};
7857 +
7858 +static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
7859 + struct mtk_hsdma_chan *chan)
7860 +{
7861 + return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
7862 + ddev);
7863 +}
7864 +
7865 +static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
7866 +{
7867 + return container_of(c, struct mtk_hsdma_chan, vchan.chan);
7868 +}
7869 +
7870 +static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
7871 + struct virt_dma_desc *vdesc)
7872 +{
7873 + return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
7874 +}
7875 +
7876 +static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
7877 +{
7878 + return readl(hsdma->base + reg);
7879 +}
7880 +
7881 +static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
7882 + unsigned int reg, u32 val)
7883 +{
7884 + writel(val, hsdma->base + reg);
7885 +}
7886 +
7887 +static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
7888 + struct mtk_hsdma_chan *chan)
7889 +{
7890 + chan->tx_idx = 0;
7891 + chan->rx_idx = HSDMA_DESCS_NUM - 1;
7892 +
7893 + mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
7894 + mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
7895 +
7896 + mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
7897 + 0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
7898 + mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
7899 + 0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
7900 +}
7901 +
7902 +static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
7903 +{
7904 + dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \
7905 + "tctx %08x, tdtx: %08x, rbase %08x, " \
7906 + "rcnt %08x, rctx %08x, rdtx %08x\n",
7907 + mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
7908 + mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
7909 + mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
7910 + mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
7911 + mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
7912 + mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
7913 + mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
7914 + mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
7915 +
7916 + dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \
7917 + "intr_stat %08x, intr_mask %08x\n",
7918 + mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
7919 + mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
7920 + mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
7921 + mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
7922 + mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
7923 +}
7924 +
7925 +static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
7926 + struct mtk_hsdma_chan *chan)
7927 +{
7928 + struct hsdma_desc *tx_desc;
7929 + struct hsdma_desc *rx_desc;
7930 + int i;
7931 +
7932 + dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
7933 + chan->tx_idx, chan->rx_idx);
7934 +
7935 + for (i = 0; i < HSDMA_DESCS_NUM; i++) {
7936 + tx_desc = &chan->tx_ring[i];
7937 + rx_desc = &chan->rx_ring[i];
7938 +
7939 + dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \
7940 + "tx addr1: %08x, rx addr0 %08x, flags %08x\n",
7941 + i, tx_desc->addr0, tx_desc->flags, \
7942 + tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
7943 + }
7944 +}
7945 +
7946 +static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
7947 + struct mtk_hsdma_chan *chan)
7948 +{
7949 + int i;
7950 +
7951 + /* disable dma */
7952 + mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
7953 +
7954 + /* disable intr */
7955 + mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
7956 +
7957 + /* init desc value */
7958 + for (i = 0; i < HSDMA_DESCS_NUM; i++) {
7959 + chan->tx_ring[i].addr0 = 0;
7960 + chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
7961 + }
7962 + for (i = 0; i < HSDMA_DESCS_NUM; i++) {
7963 + chan->rx_ring[i].addr0 = 0;
7964 + chan->rx_ring[i].flags = 0;
7965 + }
7966 +
7967 + /* reset */
7968 + mtk_hsdma_reset_chan(hsdma, chan);
7969 +
7970 + /* enable intr */
7971 + mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
7972 +
7973 + /* enable dma */
7974 + mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
7975 +}
7976 +
7977 +static int mtk_hsdma_terminate_all(struct dma_chan *c)
7978 +{
7979 + struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
7980 + struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
7981 + unsigned long timeout;
7982 + LIST_HEAD(head);
7983 +
7984 + spin_lock_bh(&chan->vchan.lock);
7985 + chan->desc = NULL;
7986 + clear_bit(chan->id, &hsdma->chan_issued);
7987 + vchan_get_all_descriptors(&chan->vchan, &head);
7988 + spin_unlock_bh(&chan->vchan.lock);
7989 +
7990 + vchan_dma_desc_free_list(&chan->vchan, &head);
7991 +
7992 + /* wait dma transfer complete */
7993 + timeout = jiffies + msecs_to_jiffies(2000);
7994 + while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
7995 + (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
7996 + if (time_after_eq(jiffies, timeout)) {
7997 + hsdma_dump_desc(hsdma, chan);
7998 + mtk_hsdma_reset(hsdma, chan);
7999 + dev_err(hsdma->ddev.dev, "timeout, reset it\n");
8000 + break;
8001 + }
8002 + cpu_relax();
8003 + }
8004 +
8005 + return 0;
8006 +}
8007 +
8008 +static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
8009 + struct mtk_hsdma_chan *chan)
8010 +{
8011 + dma_addr_t src, dst;
8012 + size_t len, tlen;
8013 + struct hsdma_desc *tx_desc, *rx_desc;
8014 + struct mtk_hsdma_sg *sg;
8015 + unsigned int i;
8016 + int rx_idx;
8017 +
8018 + sg = &chan->desc->sg[0];
8019 + len = sg->len;
8020 + chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
8021 +
8022 + /* tx desc */
8023 + src = sg->src_addr;
8024 + for (i = 0; i < chan->desc->num_sgs; i++) {
8025 + tx_desc = &chan->tx_ring[chan->tx_idx];
8026 +
8027 + if (len > HSDMA_MAX_PLEN)
8028 + tlen = HSDMA_MAX_PLEN;
8029 + else
8030 + tlen = len;
8031 +
8032 + if (i & 0x1) {
8033 + tx_desc->addr1 = src;
8034 + tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
8035 + } else {
8036 + tx_desc->addr0 = src;
8037 + tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
8038 +
8039 + /* update index */
8040 + chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
8041 + }
8042 +
8043 + src += tlen;
8044 + len -= tlen;
8045 + }
8046 + if (i & 0x1)
8047 + tx_desc->flags |= HSDMA_DESC_LS0;
8048 + else
8049 + tx_desc->flags |= HSDMA_DESC_LS1;
8050 +
8051 + /* rx desc */
8052 + rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
8053 + len = sg->len;
8054 + dst = sg->dst_addr;
8055 + for (i = 0; i < chan->desc->num_sgs; i++) {
8056 + rx_desc = &chan->rx_ring[rx_idx];
8057 + if (len > HSDMA_MAX_PLEN)
8058 + tlen = HSDMA_MAX_PLEN;
8059 + else
8060 + tlen = len;
8061 +
8062 + rx_desc->addr0 = dst;
8063 + rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
8064 +
8065 + dst += tlen;
8066 + len -= tlen;
8067 +
8068 + /* update index */
8069 + rx_idx = HSDMA_NEXT_DESC(rx_idx);
8070 + }
8071 +
8072 + /* make sure desc and index all up to date */
8073 + wmb();
8074 + mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
8075 +
8076 + return 0;
8077 +}
8078 +
8079 +static int gdma_next_desc(struct mtk_hsdma_chan *chan)
8080 +{
8081 + struct virt_dma_desc *vdesc;
8082 +
8083 + vdesc = vchan_next_desc(&chan->vchan);
8084 + if (!vdesc) {
8085 + chan->desc = NULL;
8086 + return 0;
8087 + }
8088 + chan->desc = to_mtk_hsdma_desc(vdesc);
8089 + chan->next_sg = 0;
8090 +
8091 + return 1;
8092 +}
8093 +
8094 +static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
8095 + struct mtk_hsdma_chan *chan)
8096 +{
8097 + struct mtk_hsdma_desc *desc;
8098 + int chan_issued;
8099 +
8100 + chan_issued = 0;
8101 + spin_lock_bh(&chan->vchan.lock);
8102 + desc = chan->desc;
8103 + if (likely(desc)) {
8104 + if (chan->next_sg == desc->num_sgs) {
8105 + list_del(&desc->vdesc.node);
8106 + vchan_cookie_complete(&desc->vdesc);
8107 + chan_issued = gdma_next_desc(chan);
8108 + }
8109 + } else {
8110 + dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
8111 + }
8112 +
8113 + if (chan_issued)
8114 + set_bit(chan->id, &hsdma->chan_issued);
8115 + spin_unlock_bh(&chan->vchan.lock);
8116 +}
8117 +
8118 +static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
8119 +{
8120 + struct mtk_hsdam_engine *hsdma = devid;
8121 + u32 status;
8122 +
8123 + status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
8124 + if (unlikely(!status))
8125 + return IRQ_NONE;
8126 +
8127 + if (likely(status & HSDMA_INT_RX_Q0))
8128 + tasklet_schedule(&hsdma->task);
8129 + else
8130 + dev_dbg(hsdma->ddev.dev, "unhandled irq status %08x\n", status);
8131 + /* clean intr bits */
8132 + mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
8133 +
8134 + return IRQ_HANDLED;
8135 +}
8136 +
8137 +static void mtk_hsdma_issue_pending(struct dma_chan *c)
8138 +{
8139 + struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
8140 + struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
8141 +
8142 + spin_lock_bh(&chan->vchan.lock);
8143 + if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
8144 + if (gdma_next_desc(chan)) {
8145 + set_bit(chan->id, &hsdma->chan_issued);
8146 + tasklet_schedule(&hsdma->task);
8147 + } else {
8148 + dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
8149 + }
8150 + }
8151 + spin_unlock_bh(&chan->vchan.lock);
8152 +}
8153 +
8154 +static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
8155 + struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
8156 + size_t len, unsigned long flags)
8157 +{
8158 + struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
8159 + struct mtk_hsdma_desc *desc;
8160 +
8161 + if (len <= 0)
8162 + return NULL;
8163 +
8164 + desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
8165 + if (!desc) {
8166 + dev_err(c->device->dev, "alloc memcpy descs error\n");
8167 + return NULL;
8168 + }
8169 +
8170 + desc->sg[0].src_addr = src;
8171 + desc->sg[0].dst_addr = dest;
8172 + desc->sg[0].len = len;
8173 +
8174 + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
8175 +}
8176 +
8177 +static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
8178 + dma_cookie_t cookie,
8179 + struct dma_tx_state *state)
8180 +{
8181 + return dma_cookie_status(c, cookie, state);
8182 +}
8183 +
8184 +static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
8185 +{
8186 + vchan_free_chan_resources(to_virt_chan(c));
8187 +}
8188 +
8189 +static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
8190 +{
8191 + kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
8192 +}
8193 +
8194 +static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
8195 +{
8196 + struct mtk_hsdma_chan *chan;
8197 +
8198 + if (test_and_clear_bit(0, &hsdma->chan_issued)) {
8199 + chan = &hsdma->chan[0];
8200 + if (chan->desc)
8201 + mtk_hsdma_start_transfer(hsdma, chan);
8202 + else
8203 + dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
8204 + }
8205 +}
8206 +
8207 +static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
8208 +{
8209 + struct mtk_hsdma_chan *chan;
8210 + int next_idx, drx_idx, cnt;
8211 +
8212 + chan = &hsdma->chan[0];
8213 + next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
8214 + drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
8215 +
8216 + cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
8217 + if (!cnt)
8218 + return;
8219 +
8220 + chan->next_sg += cnt;
8221 + chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
8222 +
8223 + /* update rx crx */
8224 + wmb();
8225 + mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
8226 +
8227 + mtk_hsdma_chan_done(hsdma, chan);
8228 +}
8229 +
8230 +static void mtk_hsdma_tasklet(unsigned long arg)
8231 +{
8232 + struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg;
8233 +
8234 + mtk_hsdma_rx(hsdma);
8235 + mtk_hsdma_tx(hsdma);
8236 +}
8237 +
8238 +static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
8239 + struct mtk_hsdma_chan *chan)
8240 +{
8241 + int i;
8242 +
8243 + chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
8244 + 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
8245 + &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
8246 + if (!chan->tx_ring)
8247 + goto no_mem;
8248 +
8249 + chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
8250 +
8251 + /* init tx ring value */
8252 + for (i = 0; i < HSDMA_DESCS_NUM; i++)
8253 + chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
8254 +
8255 + return 0;
8256 +no_mem:
8257 + return -ENOMEM;
8258 +}
8259 +
8260 +static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
8261 + struct mtk_hsdma_chan *chan)
8262 +{
8263 + if (chan->tx_ring) {
8264 + dma_free_coherent(hsdma->ddev.dev,
8265 + 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
8266 + chan->tx_ring, chan->desc_addr);
8267 + chan->tx_ring = NULL;
8268 + chan->rx_ring = NULL;
8269 + }
8270 +}
8271 +
8272 +static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
8273 +{
8274 + struct mtk_hsdma_chan *chan;
8275 + int ret;
8276 + u32 reg;
8277 +
8278 + /* init desc */
8279 + chan = &hsdma->chan[0];
8280 + ret = mtk_hsdam_alloc_desc(hsdma, chan);
8281 + if (ret)
8282 + return ret;
8283 +
8284 + /* tx */
8285 + mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
8286 + mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
8287 + /* rx */
8288 + mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
8289 + (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
8290 + mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
8291 + /* reset */
8292 + mtk_hsdma_reset_chan(hsdma, chan);
8293 +
8294 + /* enable rx intr */
8295 + mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
8296 +
8297 + /* enable dma */
8298 + mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
8299 +
8300 + /* hardware info */
8301 + reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
8302 + dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
8303 + (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
8304 + (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
8305 +
8306 + hsdma_dump_reg(hsdma);
8307 +
8308 + return ret;
8309 +}
8310 +
8311 +static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
8312 +{
8313 + struct mtk_hsdma_chan *chan;
8314 +
8315 + /* disable dma */
8316 + mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
8317 +
8318 + /* disable intr */
8319 + mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
8320 +
8321 + /* free desc */
8322 + chan = &hsdma->chan[0];
8323 + mtk_hsdam_free_desc(hsdma, chan);
8324 +
8325 + /* tx */
8326 + mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
8327 + mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
8328 + /* rx */
8329 + mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
8330 + mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
8331 + /* reset */
8332 + mtk_hsdma_reset_chan(hsdma, chan);
8333 +}
8334 +
8335 +static const struct of_device_id mtk_hsdma_of_match[] = {
8336 + { .compatible = "mediatek,mt7621-hsdma" },
8337 + { },
8338 +};
8339 +
8340 +static int mtk_hsdma_probe(struct platform_device *pdev)
8341 +{
8342 + const struct of_device_id *match;
8343 + struct mtk_hsdma_chan *chan;
8344 + struct mtk_hsdam_engine *hsdma;
8345 + struct dma_device *dd;
8346 + struct resource *res;
8347 + int ret;
8348 + int irq;
8349 + void __iomem *base;
8350 +
8351 + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8352 + if (ret)
8353 + return ret;
8354 +
8355 + match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
8356 + if (!match)
8357 + return -EINVAL;
8358 +
8359 + hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
8360 + if (!hsdma)
8361 + return -EINVAL;
8362 +
8363 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
8364 + base = devm_ioremap_resource(&pdev->dev, res);
8365 + if (IS_ERR(base))
8366 + return PTR_ERR(base);
8367 + hsdma->base = base + HSDMA_BASE_OFFSET;
8368 + tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);
8369 +
8370 + irq = platform_get_irq(pdev, 0);
8371 + if (irq < 0)
8372 + return -EINVAL;
8373 + ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
8374 + 0, dev_name(&pdev->dev), hsdma);
8375 + if (ret) {
8376 + dev_err(&pdev->dev, "failed to request irq\n");
8377 + return ret;
8378 + }
8379 +
8380 + device_reset(&pdev->dev);
8381 +
8382 + dd = &hsdma->ddev;
8383 + dma_cap_set(DMA_MEMCPY, dd->cap_mask);
8384 + dd->copy_align = HSDMA_ALIGN_SIZE;
8385 + dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
8386 + dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
8387 + dd->device_terminate_all = mtk_hsdma_terminate_all;
8388 + dd->device_tx_status = mtk_hsdma_tx_status;
8389 + dd->device_issue_pending = mtk_hsdma_issue_pending;
8390 + dd->dev = &pdev->dev;
8391 + dd->dev->dma_parms = &hsdma->dma_parms;
8392 + dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
8393 + INIT_LIST_HEAD(&dd->channels);
8394 +
8395 + chan = &hsdma->chan[0];
8396 + chan->id = 0;
8397 + chan->vchan.desc_free = mtk_hsdma_desc_free;
8398 + vchan_init(&chan->vchan, dd);
8399 +
8400 + /* init hardware */
8401 + ret = mtk_hsdma_init(hsdma);
8402 + if (ret) {
8403 + dev_err(&pdev->dev, "failed to alloc ring descs\n");
8404 + return ret;
8405 + }
8406 +
8407 + ret = dma_async_device_register(dd);
8408 + if (ret) {
8409 + dev_err(&pdev->dev, "failed to register dma device\n");
8410 + goto err_uninit_hsdma;
8411 + }
8412 +
8413 + ret = of_dma_controller_register(pdev->dev.of_node,
8414 + of_dma_xlate_by_chan_id, hsdma);
8415 + if (ret) {
8416 + dev_err(&pdev->dev, "failed to register of dma controller\n");
8417 + goto err_unregister;
8418 + }
8419 +
8420 + platform_set_drvdata(pdev, hsdma);
8421 +
8422 + return 0;
8423 +
8424 +err_unregister:
8425 + dma_async_device_unregister(dd);
8426 +err_uninit_hsdma:
8427 + mtk_hsdma_uninit(hsdma);
8428 + return ret;
8429 +}
8430 +
8431 +static int mtk_hsdma_remove(struct platform_device *pdev)
8432 +{
8433 + struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
8434 +
8435 + mtk_hsdma_uninit(hsdma);
8436 +
8437 + of_dma_controller_free(pdev->dev.of_node);
8438 + dma_async_device_unregister(&hsdma->ddev);
8439 +
8440 + return 0;
8441 +}
8442 +
8443 +static struct platform_driver mtk_hsdma_driver = {
8444 + .probe = mtk_hsdma_probe,
8445 + .remove = mtk_hsdma_remove,
8446 + .driver = {
8447 + .name = KBUILD_MODNAME,
8448 + .of_match_table = mtk_hsdma_of_match,
8449 + },
8450 +};
8451 +module_platform_driver(mtk_hsdma_driver);
8452 +
8453 +MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
8454 +MODULE_DESCRIPTION("MTK HSDMA driver");
8455 +MODULE_LICENSE("GPL v2");
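Note on the ring arithmetic used throughout the driver above: HSDMA_NEXT_DESC() and the pending count in mtk_hsdma_rx() both rely on HSDMA_DESCS_NUM being a power of two, so that AND-ing with HSDMA_DESCS_MASK implements the modulo. A minimal standalone sketch of that arithmetic (illustrative values, not part of the patch):

#include <assert.h>
#include <stdio.h>

#define DESCS_NUM	8			/* must be a power of two */
#define DESCS_MASK	(DESCS_NUM - 1)
#define NEXT_DESC(x)	(((x) + 1) & DESCS_MASK)

int main(void)
{
	int idx = DESCS_NUM - 1;	/* rx_idx after a channel reset */

	idx = NEXT_DESC(idx);		/* masking wraps 7 -> 0 */
	assert(idx == 0);

	/* pending-descriptor count, as in mtk_hsdma_rx(): ring distance */
	int next = 1, hw = 5;
	printf("pending: %d\n", (hw - next) & DESCS_MASK);	/* 4 */
	return 0;
}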
8456 diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
8457 deleted file mode 100644
8458 index bf2772af1045f..0000000000000
8459 --- a/drivers/staging/mt7621-dma/mtk-hsdma.c
8460 +++ /dev/null
8461 @@ -1,762 +0,0 @@
8462 -// SPDX-License-Identifier: GPL-2.0+
8463 -/*
8464 - * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
8465 - * MTK HSDMA support
8466 - */
8467 -
8468 -#include <linux/dmaengine.h>
8469 -#include <linux/dma-mapping.h>
8470 -#include <linux/err.h>
8471 -#include <linux/init.h>
8472 -#include <linux/list.h>
8473 -#include <linux/module.h>
8474 -#include <linux/platform_device.h>
8475 -#include <linux/slab.h>
8476 -#include <linux/spinlock.h>
8477 -#include <linux/irq.h>
8478 -#include <linux/of_dma.h>
8479 -#include <linux/reset.h>
8480 -#include <linux/of_device.h>
8481 -
8482 -#include "virt-dma.h"
8483 -
8484 -#define HSDMA_BASE_OFFSET 0x800
8485 -
8486 -#define HSDMA_REG_TX_BASE 0x00
8487 -#define HSDMA_REG_TX_CNT 0x04
8488 -#define HSDMA_REG_TX_CTX 0x08
8489 -#define HSDMA_REG_TX_DTX 0x0c
8490 -#define HSDMA_REG_RX_BASE 0x100
8491 -#define HSDMA_REG_RX_CNT 0x104
8492 -#define HSDMA_REG_RX_CRX 0x108
8493 -#define HSDMA_REG_RX_DRX 0x10c
8494 -#define HSDMA_REG_INFO 0x200
8495 -#define HSDMA_REG_GLO_CFG 0x204
8496 -#define HSDMA_REG_RST_CFG 0x208
8497 -#define HSDMA_REG_DELAY_INT 0x20c
8498 -#define HSDMA_REG_FREEQ_THRES 0x210
8499 -#define HSDMA_REG_INT_STATUS 0x220
8500 -#define HSDMA_REG_INT_MASK 0x228
8501 -#define HSDMA_REG_SCH_Q01 0x280
8502 -#define HSDMA_REG_SCH_Q23 0x284
8503 -
8504 -#define HSDMA_DESCS_MAX 0xfff
8505 -#define HSDMA_DESCS_NUM 8
8506 -#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
8507 -#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
8508 -
8509 -/* HSDMA_REG_INFO */
8510 -#define HSDMA_INFO_INDEX_MASK 0xf
8511 -#define HSDMA_INFO_INDEX_SHIFT 24
8512 -#define HSDMA_INFO_BASE_MASK 0xff
8513 -#define HSDMA_INFO_BASE_SHIFT 16
8514 -#define HSDMA_INFO_RX_MASK 0xff
8515 -#define HSDMA_INFO_RX_SHIFT 8
8516 -#define HSDMA_INFO_TX_MASK 0xff
8517 -#define HSDMA_INFO_TX_SHIFT 0
8518 -
8519 -/* HSDMA_REG_GLO_CFG */
8520 -#define HSDMA_GLO_TX_2B_OFFSET BIT(31)
8521 -#define HSDMA_GLO_CLK_GATE BIT(30)
8522 -#define HSDMA_GLO_BYTE_SWAP BIT(29)
8523 -#define HSDMA_GLO_MULTI_DMA BIT(10)
8524 -#define HSDMA_GLO_TWO_BUF BIT(9)
8525 -#define HSDMA_GLO_32B_DESC BIT(8)
8526 -#define HSDMA_GLO_BIG_ENDIAN BIT(7)
8527 -#define HSDMA_GLO_TX_DONE BIT(6)
8528 -#define HSDMA_GLO_BT_MASK 0x3
8529 -#define HSDMA_GLO_BT_SHIFT 4
8530 -#define HSDMA_GLO_RX_BUSY BIT(3)
8531 -#define HSDMA_GLO_RX_DMA BIT(2)
8532 -#define HSDMA_GLO_TX_BUSY BIT(1)
8533 -#define HSDMA_GLO_TX_DMA BIT(0)
8534 -
8535 -#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
8536 -#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
8537 -#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
8538 -#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
8539 -
8540 -#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
8541 - HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
8542 -
8543 -/* HSDMA_REG_RST_CFG */
8544 -#define HSDMA_RST_RX_SHIFT 16
8545 -#define HSDMA_RST_TX_SHIFT 0
8546 -
8547 -/* HSDMA_REG_DELAY_INT */
8548 -#define HSDMA_DELAY_INT_EN BIT(15)
8549 -#define HSDMA_DELAY_PEND_OFFSET 8
8550 -#define HSDMA_DELAY_TIME_OFFSET 0
8551 -#define HSDMA_DELAY_TX_OFFSET 16
8552 -#define HSDMA_DELAY_RX_OFFSET 0
8553 -
8554 -#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
8555 - ((x) << HSDMA_DELAY_PEND_OFFSET))
8556 -#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
8557 - HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
8558 -
8559 -/* HSDMA_REG_INT_STATUS */
8560 -#define HSDMA_INT_DELAY_RX_COH BIT(31)
8561 -#define HSDMA_INT_DELAY_RX_INT BIT(30)
8562 -#define HSDMA_INT_DELAY_TX_COH BIT(29)
8563 -#define HSDMA_INT_DELAY_TX_INT BIT(28)
8564 -#define HSDMA_INT_RX_MASK 0x3
8565 -#define HSDMA_INT_RX_SHIFT 16
8566 -#define HSDMA_INT_RX_Q0 BIT(16)
8567 -#define HSDMA_INT_TX_MASK 0xf
8568 -#define HSDMA_INT_TX_SHIFT 0
8569 -#define HSDMA_INT_TX_Q0 BIT(0)
8570 -
8571 -/* tx/rx dma desc flags */
8572 -#define HSDMA_PLEN_MASK 0x3fff
8573 -#define HSDMA_DESC_DONE BIT(31)
8574 -#define HSDMA_DESC_LS0 BIT(30)
8575 -#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
8576 -#define HSDMA_DESC_TAG BIT(15)
8577 -#define HSDMA_DESC_LS1 BIT(14)
8578 -#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
8579 -
8580 -/* align 4 bytes */
8581 -#define HSDMA_ALIGN_SIZE 3
8582 -/* align size 128bytes */
8583 -#define HSDMA_MAX_PLEN 0x3f80
8584 -
8585 -struct hsdma_desc {
8586 - u32 addr0;
8587 - u32 flags;
8588 - u32 addr1;
8589 - u32 unused;
8590 -};
8591 -
8592 -struct mtk_hsdma_sg {
8593 - dma_addr_t src_addr;
8594 - dma_addr_t dst_addr;
8595 - u32 len;
8596 -};
8597 -
8598 -struct mtk_hsdma_desc {
8599 - struct virt_dma_desc vdesc;
8600 - unsigned int num_sgs;
8601 - struct mtk_hsdma_sg sg[1];
8602 -};
8603 -
8604 -struct mtk_hsdma_chan {
8605 - struct virt_dma_chan vchan;
8606 - unsigned int id;
8607 - dma_addr_t desc_addr;
8608 - int tx_idx;
8609 - int rx_idx;
8610 - struct hsdma_desc *tx_ring;
8611 - struct hsdma_desc *rx_ring;
8612 - struct mtk_hsdma_desc *desc;
8613 - unsigned int next_sg;
8614 -};
8615 -
8616 -struct mtk_hsdam_engine {
8617 - struct dma_device ddev;
8618 - struct device_dma_parameters dma_parms;
8619 - void __iomem *base;
8620 - struct tasklet_struct task;
8621 - volatile unsigned long chan_issued;
8622 -
8623 - struct mtk_hsdma_chan chan[1];
8624 -};
8625 -
8626 -static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
8627 - struct mtk_hsdma_chan *chan)
8628 -{
8629 - return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
8630 - ddev);
8631 -}
8632 -
8633 -static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
8634 -{
8635 - return container_of(c, struct mtk_hsdma_chan, vchan.chan);
8636 -}
8637 -
8638 -static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
8639 - struct virt_dma_desc *vdesc)
8640 -{
8641 - return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
8642 -}
8643 -
8644 -static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
8645 -{
8646 - return readl(hsdma->base + reg);
8647 -}
8648 -
8649 -static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
8650 - unsigned int reg, u32 val)
8651 -{
8652 - writel(val, hsdma->base + reg);
8653 -}
8654 -
8655 -static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
8656 - struct mtk_hsdma_chan *chan)
8657 -{
8658 - chan->tx_idx = 0;
8659 - chan->rx_idx = HSDMA_DESCS_NUM - 1;
8660 -
8661 - mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
8662 - mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
8663 -
8664 - mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
8665 - 0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
8666 - mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
8667 - 0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
8668 -}
8669 -
8670 -static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
8671 -{
8672 - dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \
8673 - "tctx %08x, tdtx: %08x, rbase %08x, " \
8674 - "rcnt %08x, rctx %08x, rdtx %08x\n",
8675 - mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
8676 - mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
8677 - mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
8678 - mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
8679 - mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
8680 - mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
8681 - mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
8682 - mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
8683 -
8684 - dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \
8685 - "intr_stat %08x, intr_mask %08x\n",
8686 - mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
8687 - mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
8688 - mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
8689 - mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
8690 - mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
8691 -}
8692 -
8693 -static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
8694 - struct mtk_hsdma_chan *chan)
8695 -{
8696 - struct hsdma_desc *tx_desc;
8697 - struct hsdma_desc *rx_desc;
8698 - int i;
8699 -
8700 - dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
8701 - chan->tx_idx, chan->rx_idx);
8702 -
8703 - for (i = 0; i < HSDMA_DESCS_NUM; i++) {
8704 - tx_desc = &chan->tx_ring[i];
8705 - rx_desc = &chan->rx_ring[i];
8706 -
8707 - dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \
8708 - "tx addr1: %08x, rx addr0 %08x, flags %08x\n",
8709 - i, tx_desc->addr0, tx_desc->flags, \
8710 - tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
8711 - }
8712 -}
8713 -
8714 -static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
8715 - struct mtk_hsdma_chan *chan)
8716 -{
8717 - int i;
8718 -
8719 - /* disable dma */
8720 - mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
8721 -
8722 - /* disable intr */
8723 - mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
8724 -
8725 - /* init desc value */
8726 - for (i = 0; i < HSDMA_DESCS_NUM; i++) {
8727 - chan->tx_ring[i].addr0 = 0;
8728 - chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
8729 - }
8730 - for (i = 0; i < HSDMA_DESCS_NUM; i++) {
8731 - chan->rx_ring[i].addr0 = 0;
8732 - chan->rx_ring[i].flags = 0;
8733 - }
8734 -
8735 - /* reset */
8736 - mtk_hsdma_reset_chan(hsdma, chan);
8737 -
8738 - /* enable intr */
8739 - mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
8740 -
8741 - /* enable dma */
8742 - mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
8743 -}
8744 -
8745 -static int mtk_hsdma_terminate_all(struct dma_chan *c)
8746 -{
8747 - struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
8748 - struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
8749 - unsigned long timeout;
8750 - LIST_HEAD(head);
8751 -
8752 - spin_lock_bh(&chan->vchan.lock);
8753 - chan->desc = NULL;
8754 - clear_bit(chan->id, &hsdma->chan_issued);
8755 - vchan_get_all_descriptors(&chan->vchan, &head);
8756 - spin_unlock_bh(&chan->vchan.lock);
8757 -
8758 - vchan_dma_desc_free_list(&chan->vchan, &head);
8759 -
8760 - /* wait dma transfer complete */
8761 - timeout = jiffies + msecs_to_jiffies(2000);
8762 - while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
8763 - (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
8764 - if (time_after_eq(jiffies, timeout)) {
8765 - hsdma_dump_desc(hsdma, chan);
8766 - mtk_hsdma_reset(hsdma, chan);
8767 - dev_err(hsdma->ddev.dev, "timeout, reset it\n");
8768 - break;
8769 - }
8770 - cpu_relax();
8771 - }
8772 -
8773 - return 0;
8774 -}
8775 -
8776 -static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
8777 - struct mtk_hsdma_chan *chan)
8778 -{
8779 - dma_addr_t src, dst;
8780 - size_t len, tlen;
8781 - struct hsdma_desc *tx_desc, *rx_desc;
8782 - struct mtk_hsdma_sg *sg;
8783 - unsigned int i;
8784 - int rx_idx;
8785 -
8786 - sg = &chan->desc->sg[0];
8787 - len = sg->len;
8788 - chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
8789 -
8790 - /* tx desc */
8791 - src = sg->src_addr;
8792 - for (i = 0; i < chan->desc->num_sgs; i++) {
8793 - tx_desc = &chan->tx_ring[chan->tx_idx];
8794 -
8795 - if (len > HSDMA_MAX_PLEN)
8796 - tlen = HSDMA_MAX_PLEN;
8797 - else
8798 - tlen = len;
8799 -
8800 - if (i & 0x1) {
8801 - tx_desc->addr1 = src;
8802 - tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
8803 - } else {
8804 - tx_desc->addr0 = src;
8805 - tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
8806 -
8807 - /* update index */
8808 - chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
8809 - }
8810 -
8811 - src += tlen;
8812 - len -= tlen;
8813 - }
8814 - if (i & 0x1)
8815 - tx_desc->flags |= HSDMA_DESC_LS0;
8816 - else
8817 - tx_desc->flags |= HSDMA_DESC_LS1;
8818 -
8819 - /* rx desc */
8820 - rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
8821 - len = sg->len;
8822 - dst = sg->dst_addr;
8823 - for (i = 0; i < chan->desc->num_sgs; i++) {
8824 - rx_desc = &chan->rx_ring[rx_idx];
8825 - if (len > HSDMA_MAX_PLEN)
8826 - tlen = HSDMA_MAX_PLEN;
8827 - else
8828 - tlen = len;
8829 -
8830 - rx_desc->addr0 = dst;
8831 - rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
8832 -
8833 - dst += tlen;
8834 - len -= tlen;
8835 -
8836 - /* update index */
8837 - rx_idx = HSDMA_NEXT_DESC(rx_idx);
8838 - }
8839 -
8840 - /* make sure desc and index all up to date */
8841 - wmb();
8842 - mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
8843 -
8844 - return 0;
8845 -}
8846 -
8847 -static int gdma_next_desc(struct mtk_hsdma_chan *chan)
8848 -{
8849 - struct virt_dma_desc *vdesc;
8850 -
8851 - vdesc = vchan_next_desc(&chan->vchan);
8852 - if (!vdesc) {
8853 - chan->desc = NULL;
8854 - return 0;
8855 - }
8856 - chan->desc = to_mtk_hsdma_desc(vdesc);
8857 - chan->next_sg = 0;
8858 -
8859 - return 1;
8860 -}
8861 -
8862 -static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
8863 - struct mtk_hsdma_chan *chan)
8864 -{
8865 - struct mtk_hsdma_desc *desc;
8866 - int chan_issued;
8867 -
8868 - chan_issued = 0;
8869 - spin_lock_bh(&chan->vchan.lock);
8870 - desc = chan->desc;
8871 - if (likely(desc)) {
8872 - if (chan->next_sg == desc->num_sgs) {
8873 - list_del(&desc->vdesc.node);
8874 - vchan_cookie_complete(&desc->vdesc);
8875 - chan_issued = gdma_next_desc(chan);
8876 - }
8877 - } else {
8878 - dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
8879 - }
8880 -
8881 - if (chan_issued)
8882 - set_bit(chan->id, &hsdma->chan_issued);
8883 - spin_unlock_bh(&chan->vchan.lock);
8884 -}
8885 -
8886 -static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
8887 -{
8888 - struct mtk_hsdam_engine *hsdma = devid;
8889 - u32 status;
8890 -
8891 - status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
8892 - if (unlikely(!status))
8893 - return IRQ_NONE;
8894 -
8895 - if (likely(status & HSDMA_INT_RX_Q0))
8896 - tasklet_schedule(&hsdma->task);
8897 - else
8898 - dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", status);
8899 - /* clean intr bits */
8900 - mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
8901 -
8902 - return IRQ_HANDLED;
8903 -}
8904 -
8905 -static void mtk_hsdma_issue_pending(struct dma_chan *c)
8906 -{
8907 - struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
8908 - struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
8909 -
8910 - spin_lock_bh(&chan->vchan.lock);
8911 - if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
8912 - if (gdma_next_desc(chan)) {
8913 - set_bit(chan->id, &hsdma->chan_issued);
8914 - tasklet_schedule(&hsdma->task);
8915 - } else {
8916 - dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
8917 - }
8918 - }
8919 - spin_unlock_bh(&chan->vchan.lock);
8920 -}
8921 -
8922 -static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
8923 - struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
8924 - size_t len, unsigned long flags)
8925 -{
8926 - struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
8927 - struct mtk_hsdma_desc *desc;
8928 -
8929 - if (len <= 0)
8930 - return NULL;
8931 -
8932 - desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
8933 - if (!desc) {
8934 - dev_err(c->device->dev, "alloc memcpy decs error\n");
8935 - return NULL;
8936 - }
8937 -
8938 - desc->sg[0].src_addr = src;
8939 - desc->sg[0].dst_addr = dest;
8940 - desc->sg[0].len = len;
8941 -
8942 - return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
8943 -}
8944 -
8945 -static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
8946 - dma_cookie_t cookie,
8947 - struct dma_tx_state *state)
8948 -{
8949 - return dma_cookie_status(c, cookie, state);
8950 -}
8951 -
8952 -static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
8953 -{
8954 - vchan_free_chan_resources(to_virt_chan(c));
8955 -}
8956 -
8957 -static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
8958 -{
8959 - kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
8960 -}
8961 -
8962 -static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
8963 -{
8964 - struct mtk_hsdma_chan *chan;
8965 -
8966 - if (test_and_clear_bit(0, &hsdma->chan_issued)) {
8967 - chan = &hsdma->chan[0];
8968 - if (chan->desc)
8969 - mtk_hsdma_start_transfer(hsdma, chan);
8970 - else
8971 - dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
8972 - }
8973 -}
8974 -
8975 -static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
8976 -{
8977 - struct mtk_hsdma_chan *chan;
8978 - int next_idx, drx_idx, cnt;
8979 -
8980 - chan = &hsdma->chan[0];
8981 - next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
8982 - drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
8983 -
8984 - cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
8985 - if (!cnt)
8986 - return;
8987 -
8988 - chan->next_sg += cnt;
8989 - chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
8990 -
8991 - /* update rx crx */
8992 - wmb();
8993 - mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
8994 -
8995 - mtk_hsdma_chan_done(hsdma, chan);
8996 -}
8997 -
8998 -static void mtk_hsdma_tasklet(unsigned long arg)
8999 -{
9000 - struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg;
9001 -
9002 - mtk_hsdma_rx(hsdma);
9003 - mtk_hsdma_tx(hsdma);
9004 -}
9005 -
9006 -static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
9007 - struct mtk_hsdma_chan *chan)
9008 -{
9009 - int i;
9010 -
9011 - chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
9012 - 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
9013 - &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
9014 - if (!chan->tx_ring)
9015 - goto no_mem;
9016 -
9017 - chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
9018 -
9019 - /* init tx ring value */
9020 - for (i = 0; i < HSDMA_DESCS_NUM; i++)
9021 - chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
9022 -
9023 - return 0;
9024 -no_mem:
9025 - return -ENOMEM;
9026 -}
9027 -
9028 -static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
9029 - struct mtk_hsdma_chan *chan)
9030 -{
9031 - if (chan->tx_ring) {
9032 - dma_free_coherent(hsdma->ddev.dev,
9033 - 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
9034 - chan->tx_ring, chan->desc_addr);
9035 - chan->tx_ring = NULL;
9036 - chan->rx_ring = NULL;
9037 - }
9038 -}
9039 -
9040 -static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
9041 -{
9042 - struct mtk_hsdma_chan *chan;
9043 - int ret;
9044 - u32 reg;
9045 -
9046 - /* init desc */
9047 - chan = &hsdma->chan[0];
9048 - ret = mtk_hsdam_alloc_desc(hsdma, chan);
9049 - if (ret)
9050 - return ret;
9051 -
9052 - /* tx */
9053 - mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
9054 - mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
9055 - /* rx */
9056 - mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
9057 - (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
9058 - mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
9059 - /* reset */
9060 - mtk_hsdma_reset_chan(hsdma, chan);
9061 -
9062 - /* enable rx intr */
9063 - mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
9064 -
9065 - /* enable dma */
9066 - mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
9067 -
9068 - /* hardware info */
9069 - reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
9070 - dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
9071 - (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
9072 - (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
9073 -
9074 - hsdma_dump_reg(hsdma);
9075 -
9076 - return ret;
9077 -}
9078 -
9079 -static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
9080 -{
9081 - struct mtk_hsdma_chan *chan;
9082 -
9083 - /* disable dma */
9084 - mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
9085 -
9086 - /* disable intr */
9087 - mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
9088 -
9089 - /* free desc */
9090 - chan = &hsdma->chan[0];
9091 - mtk_hsdam_free_desc(hsdma, chan);
9092 -
9093 - /* tx */
9094 - mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
9095 - mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
9096 - /* rx */
9097 - mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
9098 - mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
9099 - /* reset */
9100 - mtk_hsdma_reset_chan(hsdma, chan);
9101 -}
9102 -
9103 -static const struct of_device_id mtk_hsdma_of_match[] = {
9104 - { .compatible = "mediatek,mt7621-hsdma" },
9105 - { },
9106 -};
9107 -
9108 -static int mtk_hsdma_probe(struct platform_device *pdev)
9109 -{
9110 - const struct of_device_id *match;
9111 - struct mtk_hsdma_chan *chan;
9112 - struct mtk_hsdam_engine *hsdma;
9113 - struct dma_device *dd;
9114 - struct resource *res;
9115 - int ret;
9116 - int irq;
9117 - void __iomem *base;
9118 -
9119 - ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9120 - if (ret)
9121 - return ret;
9122 -
9123 - match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
9124 - if (!match)
9125 - return -EINVAL;
9126 -
9127 - hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
9128 - if (!hsdma)
9129 - return -EINVAL;
9130 -
9131 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
9132 - base = devm_ioremap_resource(&pdev->dev, res);
9133 - if (IS_ERR(base))
9134 - return PTR_ERR(base);
9135 - hsdma->base = base + HSDMA_BASE_OFFSET;
9136 - tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);
9137 -
9138 - irq = platform_get_irq(pdev, 0);
9139 - if (irq < 0)
9140 - return -EINVAL;
9141 - ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
9142 - 0, dev_name(&pdev->dev), hsdma);
9143 - if (ret) {
9144 - dev_err(&pdev->dev, "failed to request irq\n");
9145 - return ret;
9146 - }
9147 -
9148 - device_reset(&pdev->dev);
9149 -
9150 - dd = &hsdma->ddev;
9151 - dma_cap_set(DMA_MEMCPY, dd->cap_mask);
9152 - dd->copy_align = HSDMA_ALIGN_SIZE;
9153 - dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
9154 - dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
9155 - dd->device_terminate_all = mtk_hsdma_terminate_all;
9156 - dd->device_tx_status = mtk_hsdma_tx_status;
9157 - dd->device_issue_pending = mtk_hsdma_issue_pending;
9158 - dd->dev = &pdev->dev;
9159 - dd->dev->dma_parms = &hsdma->dma_parms;
9160 - dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
9161 - INIT_LIST_HEAD(&dd->channels);
9162 -
9163 - chan = &hsdma->chan[0];
9164 - chan->id = 0;
9165 - chan->vchan.desc_free = mtk_hsdma_desc_free;
9166 - vchan_init(&chan->vchan, dd);
9167 -
9168 - /* init hardware */
9169 - ret = mtk_hsdma_init(hsdma);
9170 - if (ret) {
9171 - dev_err(&pdev->dev, "failed to alloc ring descs\n");
9172 - return ret;
9173 - }
9174 -
9175 - ret = dma_async_device_register(dd);
9176 - if (ret) {
9177 - dev_err(&pdev->dev, "failed to register dma device\n");
9178 - goto err_uninit_hsdma;
9179 - }
9180 -
9181 - ret = of_dma_controller_register(pdev->dev.of_node,
9182 - of_dma_xlate_by_chan_id, hsdma);
9183 - if (ret) {
9184 - dev_err(&pdev->dev, "failed to register of dma controller\n");
9185 - goto err_unregister;
9186 - }
9187 -
9188 - platform_set_drvdata(pdev, hsdma);
9189 -
9190 - return 0;
9191 -
9192 -err_unregister:
9193 - dma_async_device_unregister(dd);
9194 -err_uninit_hsdma:
9195 - mtk_hsdma_uninit(hsdma);
9196 - return ret;
9197 -}
9198 -
9199 -static int mtk_hsdma_remove(struct platform_device *pdev)
9200 -{
9201 - struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
9202 -
9203 - mtk_hsdma_uninit(hsdma);
9204 -
9205 - of_dma_controller_free(pdev->dev.of_node);
9206 - dma_async_device_unregister(&hsdma->ddev);
9207 -
9208 - return 0;
9209 -}
9210 -
9211 -static struct platform_driver mtk_hsdma_driver = {
9212 - .probe = mtk_hsdma_probe,
9213 - .remove = mtk_hsdma_remove,
9214 - .driver = {
9215 - .name = "hsdma-mt7621",
9216 - .of_match_table = mtk_hsdma_of_match,
9217 - },
9218 -};
9219 -module_platform_driver(mtk_hsdma_driver);
9220 -
9221 -MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
9222 -MODULE_DESCRIPTION("MTK HSDMA driver");
9223 -MODULE_LICENSE("GPL v2");
9224 diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
9225 index f7f09c0d273f5..5b103e829ee7f 100644
9226 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
9227 +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
9228 @@ -41,6 +41,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
9229 {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
9230 {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
9231 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
9232 + {USB_DEVICE(0x7392, 0xb811)}, /* Edimax EW-7811UN V2 */
9233 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
9234 {} /* Terminating entry */
9235 };
9236 diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
9237 index 578b9f734231e..65592bf84f380 100644
9238 --- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
9239 +++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
9240 @@ -34,7 +34,7 @@
9241 NL80211_RRF_PASSIVE_SCAN)
9242
9243 static const struct ieee80211_regdomain rtw_regdom_rd = {
9244 - .n_reg_rules = 3,
9245 + .n_reg_rules = 2,
9246 .alpha2 = "99",
9247 .reg_rules = {
9248 RTW_2GHZ_CH01_11,
9249 diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
9250 index fcdc4211e3c27..45a1bfa2f7351 100644
9251 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
9252 +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
9253 @@ -86,8 +86,7 @@ static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
9254 if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
9255 length += sizeof(struct cpl_tx_data_iso);
9256
9257 -#define MAX_IMM_TX_PKT_LEN 256
9258 - return length <= MAX_IMM_TX_PKT_LEN;
9259 + return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
9260 }
9261
9262 /*
9263 diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
9264 index 81afe553aa666..a91f2aa24118a 100644
9265 --- a/drivers/usb/dwc2/hcd.c
9266 +++ b/drivers/usb/dwc2/hcd.c
9267 @@ -1313,19 +1313,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
9268 if (num_packets > max_hc_pkt_count) {
9269 num_packets = max_hc_pkt_count;
9270 chan->xfer_len = num_packets * chan->max_packet;
9271 + } else if (chan->ep_is_in) {
9272 + /*
9273 + * Always program an integral # of max packets
9274 + * for IN transfers.
9275 + * Note: This assumes that the input buffer is
9276 + * aligned and sized accordingly.
9277 + */
9278 + chan->xfer_len = num_packets * chan->max_packet;
9279 }
9280 } else {
9281 /* Need 1 packet for transfer length of 0 */
9282 num_packets = 1;
9283 }
9284
9285 - if (chan->ep_is_in)
9286 - /*
9287 - * Always program an integral # of max packets for IN
9288 - * transfers
9289 - */
9290 - chan->xfer_len = num_packets * chan->max_packet;
9291 -
9292 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
9293 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
9294 /*
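The hcd.c change above moves the "round IN transfers up to whole packets" rule inside the nonzero-length branch, so a zero-length IN transfer keeps xfer_len == 0 instead of being inflated to one max packet. A standalone sketch of the resulting sizing for IN endpoints (illustrative helper; only the names mirror the driver):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int size_in_xfer(unsigned int xfer_len,
				 unsigned int max_packet,
				 unsigned int max_hc_pkt_count)
{
	if (xfer_len > 0) {
		unsigned int num_packets = DIV_ROUND_UP(xfer_len, max_packet);

		if (num_packets > max_hc_pkt_count)
			num_packets = max_hc_pkt_count;
		/* IN transfers are programmed as whole packets */
		xfer_len = num_packets * max_packet;
	}
	/* a transfer length of 0 still needs one (empty) packet */
	return xfer_len;
}

int main(void)
{
	printf("%u\n", size_in_xfer(100, 64, 256));	/* 128 */
	printf("%u\n", size_in_xfer(0, 64, 256));	/* 0, no longer 64 */
	return 0;
}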
9295 diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
9296 index a052d39b4375e..d5f4ec1b73b15 100644
9297 --- a/drivers/usb/dwc2/hcd_intr.c
9298 +++ b/drivers/usb/dwc2/hcd_intr.c
9299 @@ -500,7 +500,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
9300 &short_read);
9301
9302 if (urb->actual_length + xfer_length > urb->length) {
9303 - dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
9304 + dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
9305 xfer_length = urb->length - urb->actual_length;
9306 }
9307
9308 @@ -1977,6 +1977,18 @@ error:
9309 qtd->error_count++;
9310 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
9311 qtd, DWC2_HC_XFER_XACT_ERR);
9312 + /*
9313 + * We can get here after a completed transaction
9314 + * (urb->actual_length >= urb->length) which was not reported
9315 + * as completed. If that is the case, and we do not abort
9316 + * the transfer, a transfer of size 0 will be enqueued
9317 + * subsequently. If urb->actual_length is not DMA-aligned,
9318 + * the buffer will then point to an unaligned address, and
9319 + * the resulting behavior is undefined. Bail out in that
9320 + * situation.
9321 + */
9322 + if (qtd->urb->actual_length >= qtd->urb->length)
9323 + qtd->error_count = 3;
9324 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
9325 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
9326 }
9327 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
9328 index 9269cda4c1831..904b0043011cf 100644
9329 --- a/drivers/usb/dwc3/gadget.c
9330 +++ b/drivers/usb/dwc3/gadget.c
9331 @@ -593,8 +593,23 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
9332 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
9333
9334 if (desc->bInterval) {
9335 - params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
9336 - dep->interval = 1 << (desc->bInterval - 1);
9337 + u8 bInterval_m1;
9338 +
9339 + /*
9340 + * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
9341 + * must be set to 0 when the controller operates in full-speed.
9342 + */
9343 + bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
9344 + if (dwc->gadget.speed == USB_SPEED_FULL)
9345 + bInterval_m1 = 0;
9346 +
9347 + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
9348 + dwc->gadget.speed == USB_SPEED_FULL)
9349 + dep->interval = desc->bInterval;
9350 + else
9351 + dep->interval = 1 << (desc->bInterval - 1);
9352 +
9353 + params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
9354 }
9355
9356 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
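The dwc3 change above writes bInterval - 1 into DEPCFG.bInterval_m1, clamped to the register's valid range 0..13 and forced to 0 when the controller runs at full speed. A standalone sketch of that computation (illustrative helper; assumes bInterval != 0, which the enclosing if () guarantees):

#include <stdio.h>
#include <stdbool.h>

static unsigned char binterval_m1(unsigned char bInterval, bool full_speed)
{
	unsigned char m1 = bInterval - 1;

	if (m1 > 13)			/* DEPCFG.bInterval_m1 is 0..13 */
		m1 = 13;
	if (full_speed)			/* must be 0 in full-speed mode */
		m1 = 0;
	return m1;
}

int main(void)
{
	printf("m1 = %u\n", binterval_m1(16, true));	/* 0 */
	printf("m1 = %u\n", binterval_m1(16, false));	/* 13, clamped */
	return 0;
}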
9357 diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
9358 index 56906d15fb551..223029fa84459 100644
9359 --- a/drivers/usb/gadget/function/u_audio.c
9360 +++ b/drivers/usb/gadget/function/u_audio.c
9361 @@ -89,7 +89,12 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
9362 struct snd_uac_chip *uac = prm->uac;
9363
9364 /* i/f shutting down */
9365 - if (!prm->ep_enabled || req->status == -ESHUTDOWN)
9366 + if (!prm->ep_enabled) {
9367 + usb_ep_free_request(ep, req);
9368 + return;
9369 + }
9370 +
9371 + if (req->status == -ESHUTDOWN)
9372 return;
9373
9374 /*
9375 @@ -351,8 +356,14 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
9376
9377 for (i = 0; i < params->req_number; i++) {
9378 if (prm->ureq[i].req) {
9379 - usb_ep_dequeue(ep, prm->ureq[i].req);
9380 - usb_ep_free_request(ep, prm->ureq[i].req);
9381 + if (usb_ep_dequeue(ep, prm->ureq[i].req))
9382 + usb_ep_free_request(ep, prm->ureq[i].req);
9383 + /*
9384 + * If usb_ep_dequeue() cannot successfully dequeue the
9385 + * request, the request will be freed by the completion
9386 + * callback.
9387 + */
9388 +
9389 prm->ureq[i].req = NULL;
9390 }
9391 }
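The u_audio change above fixes a double free by giving each request exactly one owner: when usb_ep_dequeue() succeeds the caller frees the request, and when it fails the completion callback still runs and the new !prm->ep_enabled branch frees it there. A toy userspace model of that ownership rule (not kernel code):

#include <stdlib.h>

struct toy_req { int ep_enabled; };

static void toy_complete(struct toy_req *r)
{
	if (!r->ep_enabled) {		/* interface gone: callback frees */
		free(r);
		return;
	}
	/* the normal path would requeue the request here */
}

static int toy_dequeue(struct toy_req *r)
{
	(void)r;
	return -1;	/* pretend it already completed; cannot be pulled */
}

int main(void)
{
	struct toy_req *r = calloc(1, sizeof(*r));

	if (toy_dequeue(r) == 0)
		free(r);		/* dequeued: caller owns and frees */
	else
		toy_complete(r);	/* completion path owns and frees */
	return 0;
}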
9392 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
9393 index 0fbf9adef34be..9fcff4e94484e 100644
9394 --- a/drivers/usb/musb/musb_core.c
9395 +++ b/drivers/usb/musb/musb_core.c
9396 @@ -2102,32 +2102,35 @@ int musb_queue_resume_work(struct musb *musb,
9397 {
9398 struct musb_pending_work *w;
9399 unsigned long flags;
9400 + bool is_suspended;
9401 int error;
9402
9403 if (WARN_ON(!callback))
9404 return -EINVAL;
9405
9406 - if (pm_runtime_active(musb->controller))
9407 - return callback(musb, data);
9408 + spin_lock_irqsave(&musb->list_lock, flags);
9409 + is_suspended = musb->is_runtime_suspended;
9410 +
9411 + if (is_suspended) {
9412 + w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
9413 + if (!w) {
9414 + error = -ENOMEM;
9415 + goto out_unlock;
9416 + }
9417
9418 - w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
9419 - if (!w)
9420 - return -ENOMEM;
9421 + w->callback = callback;
9422 + w->data = data;
9423
9424 - w->callback = callback;
9425 - w->data = data;
9426 - spin_lock_irqsave(&musb->list_lock, flags);
9427 - if (musb->is_runtime_suspended) {
9428 list_add_tail(&w->node, &musb->pending_list);
9429 error = 0;
9430 - } else {
9431 - dev_err(musb->controller, "could not add resume work %p\n",
9432 - callback);
9433 - devm_kfree(musb->controller, w);
9434 - error = -EINPROGRESS;
9435 }
9436 +
9437 +out_unlock:
9438 spin_unlock_irqrestore(&musb->list_lock, flags);
9439
9440 + if (!is_suspended)
9441 + error = callback(musb, data);
9442 +
9443 return error;
9444 }
9445 EXPORT_SYMBOL_GPL(musb_queue_resume_work);
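The musb change above samples is_runtime_suspended and queues deferred work under the list lock, but invokes the callback only after the lock is dropped, so the callback may sleep and the check no longer races with runtime suspend. The shape of that pattern, with a pthread mutex standing in for the spinlock (illustrative sketch, not driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool is_runtime_suspended;

static int queue_or_call(int (*cb)(void *), void *data)
{
	bool suspended;
	int error = 0;

	pthread_mutex_lock(&lock);
	suspended = is_runtime_suspended;
	if (suspended) {
		/* a real implementation would list_add_tail() here */
	}
	pthread_mutex_unlock(&lock);

	if (!suspended)
		error = cb(data);	/* called outside the lock */
	return error;
}

static int cb(void *data) { (void)data; return 0; }

int main(void)
{
	return queue_or_call(cb, NULL);
}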
9446 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
9447 index 01a98d071c7c7..c00e4177651a8 100644
9448 --- a/drivers/usb/serial/ftdi_sio.c
9449 +++ b/drivers/usb/serial/ftdi_sio.c
9450 @@ -1386,8 +1386,9 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
9451 index_value = get_ftdi_divisor(tty, port);
9452 value = (u16)index_value;
9453 index = (u16)(index_value >> 16);
9454 - if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) ||
9455 - (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) {
9456 + if (priv->chip_type == FT2232C || priv->chip_type == FT2232H ||
9457 + priv->chip_type == FT4232H || priv->chip_type == FT232H ||
9458 + priv->chip_type == FTX) {
9459 /* Probably the BM type needs the MSB of the encoded fractional
9460 * divider also moved like for the chips above. Any infos? */
9461 index = (u16)((index << 8) | priv->interface);
9462 diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
9463 index 55b2879f27bdc..aefc1b58d9563 100644
9464 --- a/drivers/usb/serial/mos7720.c
9465 +++ b/drivers/usb/serial/mos7720.c
9466 @@ -1250,8 +1250,10 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
9467 if (urb->transfer_buffer == NULL) {
9468 urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
9469 GFP_ATOMIC);
9470 - if (!urb->transfer_buffer)
9471 + if (!urb->transfer_buffer) {
9472 + bytes_sent = -ENOMEM;
9473 goto exit;
9474 + }
9475 }
9476 transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
9477
9478 diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
9479 index ab4bf8d6d7df0..2b8a0d4b66fce 100644
9480 --- a/drivers/usb/serial/mos7840.c
9481 +++ b/drivers/usb/serial/mos7840.c
9482 @@ -1330,8 +1330,10 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
9483 if (urb->transfer_buffer == NULL) {
9484 urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
9485 GFP_ATOMIC);
9486 - if (!urb->transfer_buffer)
9487 + if (!urb->transfer_buffer) {
9488 + bytes_sent = -ENOMEM;
9489 goto exit;
9490 + }
9491 }
9492 transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
9493
9494 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
9495 index f49eae18500cc..5c167bc089a08 100644
9496 --- a/drivers/usb/serial/option.c
9497 +++ b/drivers/usb/serial/option.c
9498 @@ -1569,7 +1569,8 @@ static const struct usb_device_id option_ids[] = {
9499 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
9500 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
9501 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
9502 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
9503 + { USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */
9504 + .driver_info = RSVD(3) | RSVD(4) },
9505 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
9506 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
9507 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
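The RSVD() bits in driver_info mark interface numbers the option driver must skip at probe time, leaving interfaces 3 and 4 of the ZTE P685M to be claimed elsewhere. An outline of how such a mask is consumed (illustrative; the in-tree check lives in option_probe()):

#include <stdio.h>

#define RSVD(ifnum)	(1U << (ifnum))

static int should_bind(unsigned long driver_info, unsigned int ifnum)
{
	return !(driver_info & RSVD(ifnum));
}

int main(void)
{
	unsigned long info = RSVD(3) | RSVD(4);	/* as in the entry above */

	printf("interface 0: %d\n", should_bind(info, 0));	/* 1, bind */
	printf("interface 3: %d\n", should_bind(info, 3));	/* 0, skip */
	return 0;
}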
9508 diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
9509 index bc6ba41686fa3..6b1e8cba17984 100644
9510 --- a/drivers/vfio/vfio_iommu_type1.c
9511 +++ b/drivers/vfio/vfio_iommu_type1.c
9512 @@ -24,6 +24,7 @@
9513 #include <linux/compat.h>
9514 #include <linux/device.h>
9515 #include <linux/fs.h>
9516 +#include <linux/highmem.h>
9517 #include <linux/iommu.h>
9518 #include <linux/module.h>
9519 #include <linux/mm.h>
9520 @@ -339,9 +340,11 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
9521 unsigned long vaddr, unsigned long *pfn,
9522 bool write_fault)
9523 {
9524 + pte_t *ptep;
9525 + spinlock_t *ptl;
9526 int ret;
9527
9528 - ret = follow_pfn(vma, vaddr, pfn);
9529 + ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
9530 if (ret) {
9531 bool unlocked = false;
9532
9533 @@ -355,9 +358,17 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
9534 if (ret)
9535 return ret;
9536
9537 - ret = follow_pfn(vma, vaddr, pfn);
9538 + ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
9539 + if (ret)
9540 + return ret;
9541 }
9542
9543 + if (write_fault && !pte_write(*ptep))
9544 + ret = -EFAULT;
9545 + else
9546 + *pfn = pte_pfn(*ptep);
9547 +
9548 + pte_unmap_unlock(ptep, ptl);
9549 return ret;
9550 }
9551
9552 @@ -866,6 +877,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
9553
9554 static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
9555 {
9556 + WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list));
9557 vfio_unmap_unpin(iommu, dma, true);
9558 vfio_unlink_dma(iommu, dma);
9559 put_task_struct(dma->task);
9560 @@ -1974,23 +1986,6 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
9561 }
9562 }
9563
9564 -static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
9565 -{
9566 - struct rb_node *n;
9567 -
9568 - n = rb_first(&iommu->dma_list);
9569 - for (; n; n = rb_next(n)) {
9570 - struct vfio_dma *dma;
9571 -
9572 - dma = rb_entry(n, struct vfio_dma, node);
9573 -
9574 - if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
9575 - break;
9576 - }
9577 - /* mdev vendor driver must unregister notifier */
9578 - WARN_ON(iommu->notifier.head);
9579 -}
9580 -
9581 /*
9582 * Called when a domain is removed in detach. It is possible that
9583 * the removed domain decided the iova aperture window. Modify the
9584 @@ -2088,10 +2083,10 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
9585 kfree(group);
9586
9587 if (list_empty(&iommu->external_domain->group_list)) {
9588 - vfio_sanity_check_pfn_list(iommu);
9589 -
9590 - if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
9591 + if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
9592 + WARN_ON(iommu->notifier.head);
9593 vfio_iommu_unmap_unpin_all(iommu);
9594 + }
9595
9596 kfree(iommu->external_domain);
9597 iommu->external_domain = NULL;
9598 @@ -2124,10 +2119,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
9599 */
9600 if (list_empty(&domain->group_list)) {
9601 if (list_is_singular(&iommu->domain_list)) {
9602 - if (!iommu->external_domain)
9603 + if (!iommu->external_domain) {
9604 + WARN_ON(iommu->notifier.head);
9605 vfio_iommu_unmap_unpin_all(iommu);
9606 - else
9607 + } else {
9608 vfio_iommu_unmap_unpin_reaccount(iommu);
9609 + }
9610 }
9611 iommu_domain_free(domain->domain);
9612 list_del(&domain->next);
9613 @@ -2201,7 +2198,6 @@ static void vfio_iommu_type1_release(void *iommu_data)
9614
9615 if (iommu->external_domain) {
9616 vfio_release_domain(iommu->external_domain, true);
9617 - vfio_sanity_check_pfn_list(iommu);
9618 kfree(iommu->external_domain);
9619 }
9620
9621 diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
9622 index 1e70e838530ee..a7e5f12687b70 100644
9623 --- a/drivers/video/fbdev/Kconfig
9624 +++ b/drivers/video/fbdev/Kconfig
9625 @@ -1269,6 +1269,7 @@ config FB_ATY
9626 select FB_CFB_IMAGEBLIT
9627 select FB_BACKLIGHT if FB_ATY_BACKLIGHT
9628 select FB_MACMODES if PPC
9629 + select FB_ATY_CT if SPARC64 && PCI
9630 help
9631 This driver supports graphics boards with the ATI Mach64 chips.
9632 Say Y if you have such a graphics board.
9633 @@ -1279,7 +1280,6 @@ config FB_ATY
9634 config FB_ATY_CT
9635 bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support"
9636 depends on PCI && FB_ATY
9637 - default y if SPARC64 && PCI
9638 help
9639 Say Y here to support use of ATI's 64-bit Rage boards (or other
9640 boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
9641 diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
9642 index 43c391626a000..bf2945c25ca8f 100644
9643 --- a/drivers/virt/vboxguest/vboxguest_utils.c
9644 +++ b/drivers/virt/vboxguest/vboxguest_utils.c
9645 @@ -466,7 +466,7 @@ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
9646 * Cancellation fun.
9647 */
9648 static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
9649 - u32 timeout_ms, bool *leak_it)
9650 + u32 timeout_ms, bool interruptible, bool *leak_it)
9651 {
9652 int rc, cancel_rc, ret;
9653 long timeout;
9654 @@ -493,10 +493,15 @@ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
9655 else
9656 timeout = msecs_to_jiffies(timeout_ms);
9657
9658 - timeout = wait_event_interruptible_timeout(
9659 - gdev->hgcm_wq,
9660 - hgcm_req_done(gdev, &call->header),
9661 - timeout);
9662 + if (interruptible) {
9663 + timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
9664 + hgcm_req_done(gdev, &call->header),
9665 + timeout);
9666 + } else {
9667 + timeout = wait_event_timeout(gdev->hgcm_wq,
9668 + hgcm_req_done(gdev, &call->header),
9669 + timeout);
9670 + }
9671
9672 /* timeout > 0 means hgcm_req_done has returned true, so success */
9673 if (timeout > 0)
9674 @@ -629,7 +634,8 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
9675 hgcm_call_init_call(call, client_id, function, parms, parm_count,
9676 bounce_bufs);
9677
9678 - ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
9679 + ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
9680 + requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
9681 if (ret == 0) {
9682 *vbox_status = call->header.result;
9683 ret = hgcm_call_copy_back_result(call, parms, parm_count,
9684 diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
9685 index 5391bf3e6b11d..c5967d8b4256a 100644
9686 --- a/drivers/watchdog/mei_wdt.c
9687 +++ b/drivers/watchdog/mei_wdt.c
9688 @@ -382,6 +382,7 @@ static int mei_wdt_register(struct mei_wdt *wdt)
9689
9690 watchdog_set_drvdata(&wdt->wdd, wdt);
9691 watchdog_stop_on_reboot(&wdt->wdd);
9692 + watchdog_stop_on_unregister(&wdt->wdd);
9693
9694 ret = watchdog_register_device(&wdt->wdd);
9695 if (ret)
9696 diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
9697 index ea8a6abd64ecb..094f096aee0f2 100644
9698 --- a/drivers/watchdog/qcom-wdt.c
9699 +++ b/drivers/watchdog/qcom-wdt.c
9700 @@ -22,7 +22,6 @@ enum wdt_reg {
9701 };
9702
9703 #define QCOM_WDT_ENABLE BIT(0)
9704 -#define QCOM_WDT_ENABLE_IRQ BIT(1)
9705
9706 static const u32 reg_offset_data_apcs_tmr[] = {
9707 [WDT_RST] = 0x38,
9708 @@ -58,16 +57,6 @@ struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd)
9709 return container_of(wdd, struct qcom_wdt, wdd);
9710 }
9711
9712 -static inline int qcom_get_enable(struct watchdog_device *wdd)
9713 -{
9714 - int enable = QCOM_WDT_ENABLE;
9715 -
9716 - if (wdd->pretimeout)
9717 - enable |= QCOM_WDT_ENABLE_IRQ;
9718 -
9719 - return enable;
9720 -}
9721 -
9722 static irqreturn_t qcom_wdt_isr(int irq, void *arg)
9723 {
9724 struct watchdog_device *wdd = arg;
9725 @@ -86,7 +75,7 @@ static int qcom_wdt_start(struct watchdog_device *wdd)
9726 writel(1, wdt_addr(wdt, WDT_RST));
9727 writel(bark * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME));
9728 writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BITE_TIME));
9729 - writel(qcom_get_enable(wdd), wdt_addr(wdt, WDT_EN));
9730 + writel(QCOM_WDT_ENABLE, wdt_addr(wdt, WDT_EN));
9731 return 0;
9732 }
9733
9734 diff --git a/fs/affs/namei.c b/fs/affs/namei.c
9735 index 41c5749f4db78..5400a876d73fb 100644
9736 --- a/fs/affs/namei.c
9737 +++ b/fs/affs/namei.c
9738 @@ -460,8 +460,10 @@ affs_xrename(struct inode *old_dir, struct dentry *old_dentry,
9739 return -EIO;
9740
9741 bh_new = affs_bread(sb, d_inode(new_dentry)->i_ino);
9742 - if (!bh_new)
9743 + if (!bh_new) {
9744 + affs_brelse(bh_old);
9745 return -EIO;
9746 + }
9747
9748 /* Remove old header from its parent directory. */
9749 affs_lock_dir(old_dir);
9750 diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
9751 index a32f23981f60f..08ca9441270d2 100644
9752 --- a/fs/btrfs/block-group.c
9753 +++ b/fs/btrfs/block-group.c
9754 @@ -2391,8 +2391,10 @@ again:
9755
9756 if (!path) {
9757 path = btrfs_alloc_path();
9758 - if (!path)
9759 - return -ENOMEM;
9760 + if (!path) {
9761 + ret = -ENOMEM;
9762 + goto out;
9763 + }
9764 }
9765
9766 /*
9767 @@ -2487,16 +2489,14 @@ again:
9768 btrfs_put_block_group(cache);
9769 if (drop_reserve)
9770 btrfs_delayed_refs_rsv_release(fs_info, 1);
9771 -
9772 - if (ret)
9773 - break;
9774 -
9775 /*
9776 * Avoid blocking other tasks for too long. It might even save
9777 * us from writing caches for block groups that are going to be
9778 * removed.
9779 */
9780 mutex_unlock(&trans->transaction->cache_write_mutex);
9781 + if (ret)
9782 + goto out;
9783 mutex_lock(&trans->transaction->cache_write_mutex);
9784 }
9785 mutex_unlock(&trans->transaction->cache_write_mutex);
9786 @@ -2520,7 +2520,12 @@ again:
9787 goto again;
9788 }
9789 spin_unlock(&cur_trans->dirty_bgs_lock);
9790 - } else if (ret < 0) {
9791 + }
9792 +out:
9793 + if (ret < 0) {
9794 + spin_lock(&cur_trans->dirty_bgs_lock);
9795 + list_splice_init(&dirty, &cur_trans->dirty_bgs);
9796 + spin_unlock(&cur_trans->dirty_bgs_lock);
9797 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
9798 }
9799
9800 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
9801 index e25133a9e9dfe..1af73367087df 100644
9802 --- a/fs/btrfs/ctree.c
9803 +++ b/fs/btrfs/ctree.c
9804 @@ -260,9 +260,12 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
9805 ret = btrfs_inc_ref(trans, root, cow, 1);
9806 else
9807 ret = btrfs_inc_ref(trans, root, cow, 0);
9808 -
9809 - if (ret)
9810 + if (ret) {
9811 + btrfs_tree_unlock(cow);
9812 + free_extent_buffer(cow);
9813 + btrfs_abort_transaction(trans, ret);
9814 return ret;
9815 + }
9816
9817 btrfs_mark_buffer_dirty(cow);
9818 *cow_ret = cow;
9819 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
9820 index 6e6be922b937d..23f59d463e24e 100644
9821 --- a/fs/btrfs/free-space-cache.c
9822 +++ b/fs/btrfs/free-space-cache.c
9823 @@ -744,8 +744,10 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
9824 while (num_entries) {
9825 e = kmem_cache_zalloc(btrfs_free_space_cachep,
9826 GFP_NOFS);
9827 - if (!e)
9828 + if (!e) {
9829 + ret = -ENOMEM;
9830 goto free_cache;
9831 + }
9832
9833 ret = io_ctl_read_entry(&io_ctl, e, &type);
9834 if (ret) {
9835 @@ -754,6 +756,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
9836 }
9837
9838 if (!e->bytes) {
9839 + ret = -1;
9840 kmem_cache_free(btrfs_free_space_cachep, e);
9841 goto free_cache;
9842 }
9843 @@ -774,6 +777,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
9844 e->bitmap = kmem_cache_zalloc(
9845 btrfs_free_space_bitmap_cachep, GFP_NOFS);
9846 if (!e->bitmap) {
9847 + ret = -ENOMEM;
9848 kmem_cache_free(
9849 btrfs_free_space_cachep, e);
9850 goto free_cache;
9851 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
9852 index 001f13cf9ab8f..05b3e27b21d44 100644
9853 --- a/fs/btrfs/relocation.c
9854 +++ b/fs/btrfs/relocation.c
9855 @@ -1336,9 +1336,7 @@ static void __del_reloc_root(struct btrfs_root *root)
9856 RB_CLEAR_NODE(&node->rb_node);
9857 }
9858 spin_unlock(&rc->reloc_root_tree.lock);
9859 - if (!node)
9860 - return;
9861 - BUG_ON((struct btrfs_root *)node->data != root);
9862 + ASSERT(!node || (struct btrfs_root *)node->data == root);
9863 }
9864
9865 spin_lock(&fs_info->trans_lock);
9866 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
9867 index 7b975dbb2bb49..1c74a7cbf5b19 100644
9868 --- a/fs/debugfs/inode.c
9869 +++ b/fs/debugfs/inode.c
9870 @@ -293,7 +293,7 @@ struct dentry *debugfs_lookup(const char *name, struct dentry *parent)
9871 {
9872 struct dentry *dentry;
9873
9874 - if (IS_ERR(parent))
9875 + if (!debugfs_initialized() || IS_ERR_OR_NULL(name) || IS_ERR(parent))
9876 return NULL;
9877
9878 if (!parent)
9879 @@ -315,6 +315,9 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
9880 struct dentry *dentry;
9881 int error;
9882
9883 + if (!debugfs_initialized())
9884 + return ERR_PTR(-ENOENT);
9885 +
9886 pr_debug("creating file '%s'\n", name);
9887
9888 if (IS_ERR(parent))
9889 diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
9890 index 503bea20cde26..f78496776e764 100644
9891 --- a/fs/erofs/xattr.c
9892 +++ b/fs/erofs/xattr.c
9893 @@ -48,8 +48,14 @@ static int init_inode_xattrs(struct inode *inode)
9894 int ret = 0;
9895
9896 /* the most case is that xattrs of this inode are initialized. */
9897 - if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
9898 + if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
9899 + /*
9900 + * paired with smp_mb() at the end of the function to ensure
9901 + * fields will only be observed after the bit is set.
9902 + */
9903 + smp_mb();
9904 return 0;
9905 + }
9906
9907 if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
9908 return -ERESTARTSYS;
9909 @@ -137,6 +143,8 @@ static int init_inode_xattrs(struct inode *inode)
9910 }
9911 xattr_iter_end(&it, atomic_map);
9912
9913 + /* paired with smp_mb() at the beginning of the function. */
9914 + smp_mb();
9915 set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
9916
9917 out_unlock:
9918 diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
9919 index 6a26c293ae2d9..fff5741007214 100644
9920 --- a/fs/erofs/zmap.c
9921 +++ b/fs/erofs/zmap.c
9922 @@ -36,8 +36,14 @@ static int fill_inode_lazy(struct inode *inode)
9923 void *kaddr;
9924 struct z_erofs_map_header *h;
9925
9926 - if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
9927 + if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
9928 + /*
9929 + * paired with smp_mb() at the end of the function to ensure
9930 + * fields will only be observed after the bit is set.
9931 + */
9932 + smp_mb();
9933 return 0;
9934 + }
9935
9936 if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
9937 return -ERESTARTSYS;
9938 @@ -83,6 +89,8 @@ static int fill_inode_lazy(struct inode *inode)
9939
9940 vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
9941 ((h->h_clusterbits >> 5) & 7);
9942 + /* paired with smp_mb() at the beginning of the function */
9943 + smp_mb();
9944 set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
9945 unmap_done:
9946 kunmap_atomic(kaddr);
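[Note: both erofs hunks above add the same publish/observe barrier pairing: the initializer fills the inode fields, issues smp_mb(), then set_bit(); a reader that sees the bit already set issues smp_mb() before touching the fields, so it cannot observe the flag without also observing the initialized data. A rough userspace analogue using C11 fences is sketched below; the flag and payload names are made up, and release/acquire fences are weaker than the kernel's full smp_mb() but suffice for this publication pattern.]

    #include <stdatomic.h>
    #include <stdbool.h>

    static int payload;                  /* the "inode fields" */
    static atomic_bool inited;           /* the EROFS_I_*_INITED bit */

    static void publish(int value)
    {
        payload = value;
        atomic_thread_fence(memory_order_release);  /* smp_mb() before set_bit() */
        atomic_store_explicit(&inited, true, memory_order_relaxed);
    }

    static bool try_read(int *out)
    {
        if (!atomic_load_explicit(&inited, memory_order_relaxed))
            return false;                /* slow path: take the init lock */
        atomic_thread_fence(memory_order_acquire);  /* smp_mb() after test_bit() */
        *out = payload;
        return true;
    }

    int main(void)
    {
        int v;
        publish(42);
        return (try_read(&v) && v == 42) ? 0 : 1;
    }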
9947 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
9948 index f05ec9bfbf4fd..7f22487d502b5 100644
9949 --- a/fs/ext4/namei.c
9950 +++ b/fs/ext4/namei.c
9951 @@ -2405,11 +2405,10 @@ again:
9952 (frame - 1)->bh);
9953 if (err)
9954 goto journal_error;
9955 - if (restart) {
9956 - err = ext4_handle_dirty_dx_node(handle, dir,
9957 - frame->bh);
9958 + err = ext4_handle_dirty_dx_node(handle, dir,
9959 + frame->bh);
9960 + if (err)
9961 goto journal_error;
9962 - }
9963 } else {
9964 struct dx_root *dxroot;
9965 memcpy((char *) entries2, (char *) entries,
9966 diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
9967 index 68be334afc286..64ee2a064e339 100644
9968 --- a/fs/f2fs/data.c
9969 +++ b/fs/f2fs/data.c
9970 @@ -318,7 +318,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
9971 if (test_opt(sbi, LFS) && current->plug)
9972 blk_finish_plug(current->plug);
9973
9974 - if (F2FS_IO_ALIGNED(sbi))
9975 + if (!F2FS_IO_ALIGNED(sbi))
9976 goto submit_io;
9977
9978 start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
9979 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
9980 index 5d94abe467a4f..6e58b2e62b189 100644
9981 --- a/fs/f2fs/file.c
9982 +++ b/fs/f2fs/file.c
9983 @@ -686,6 +686,10 @@ int f2fs_truncate(struct inode *inode)
9984 return -EIO;
9985 }
9986
9987 + err = dquot_initialize(inode);
9988 + if (err)
9989 + return err;
9990 +
9991 /* we should check inline_data size */
9992 if (!f2fs_may_inline_data(inode)) {
9993 err = f2fs_convert_inline_inode(inode);
9994 @@ -761,7 +765,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
9995 if (ia_valid & ATTR_MODE) {
9996 umode_t mode = attr->ia_mode;
9997
9998 - if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
9999 + if (!in_group_p(inode->i_gid) &&
10000 + !capable_wrt_inode_uidgid(inode, CAP_FSETID))
10001 mode &= ~S_ISGID;
10002 set_acl_inode(inode, mode);
10003 }
10004 diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
10005 index 183388393c6a8..cbd17e4ff920c 100644
10006 --- a/fs/f2fs/inline.c
10007 +++ b/fs/f2fs/inline.c
10008 @@ -189,6 +189,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
10009 if (!f2fs_has_inline_data(inode))
10010 return 0;
10011
10012 + err = dquot_initialize(inode);
10013 + if (err)
10014 + return err;
10015 +
10016 page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
10017 if (!page)
10018 return -ENOMEM;
10019 diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
10020 index 50fa3e08c02f3..aaec3c5b02028 100644
10021 --- a/fs/gfs2/bmap.c
10022 +++ b/fs/gfs2/bmap.c
10023 @@ -1228,6 +1228,9 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
10024
10025 gfs2_inplace_release(ip);
10026
10027 + if (ip->i_qadata && ip->i_qadata->qa_qd_num)
10028 + gfs2_quota_unlock(ip);
10029 +
10030 if (length != written && (iomap->flags & IOMAP_F_NEW)) {
10031 /* Deallocate blocks that were just allocated. */
10032 loff_t blockmask = i_blocksize(inode) - 1;
10033 @@ -1240,9 +1243,6 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
10034 }
10035 }
10036
10037 - if (ip->i_qadata && ip->i_qadata->qa_qd_num)
10038 - gfs2_quota_unlock(ip);
10039 -
10040 if (unlikely(!written))
10041 goto out_unlock;
10042
10043 diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
10044 index 7c7197343ee2b..72dec177b3494 100644
10045 --- a/fs/gfs2/lock_dlm.c
10046 +++ b/fs/gfs2/lock_dlm.c
10047 @@ -280,7 +280,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
10048 {
10049 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
10050 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
10051 - int lvb_needs_unlock = 0;
10052 int error;
10053
10054 if (gl->gl_lksb.sb_lkid == 0) {
10055 @@ -293,13 +292,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
10056 gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
10057 gfs2_update_request_times(gl);
10058
10059 - /* don't want to skip dlm_unlock writing the lvb when lock is ex */
10060 -
10061 - if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
10062 - lvb_needs_unlock = 1;
10063 + /* don't want to skip dlm_unlock writing the lvb when lock has one */
10064
10065 if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
10066 - !lvb_needs_unlock) {
10067 + !gl->gl_lksb.sb_lvbptr) {
10068 gfs2_glock_free(gl);
10069 return;
10070 }
10071 diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
10072 index f0fe641893a5e..b9e6a7ec78be4 100644
10073 --- a/fs/isofs/dir.c
10074 +++ b/fs/isofs/dir.c
10075 @@ -152,6 +152,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
10076 printk(KERN_NOTICE "iso9660: Corrupted directory entry"
10077 " in block %lu of inode %lu\n", block,
10078 inode->i_ino);
10079 + brelse(bh);
10080 return -EIO;
10081 }
10082
10083 diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
10084 index cac468f04820e..558e7c51ce0d4 100644
10085 --- a/fs/isofs/namei.c
10086 +++ b/fs/isofs/namei.c
10087 @@ -102,6 +102,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
10088 printk(KERN_NOTICE "iso9660: Corrupted directory entry"
10089 " in block %lu of inode %lu\n", block,
10090 dir->i_ino);
10091 + brelse(bh);
10092 return 0;
10093 }
10094
10095 diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
10096 index be7c8a6a57480..4fe64519870f1 100644
10097 --- a/fs/jffs2/summary.c
10098 +++ b/fs/jffs2/summary.c
10099 @@ -783,6 +783,8 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
10100 dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
10101 je16_to_cpu(temp->u.nodetype));
10102 jffs2_sum_disable_collecting(c->summary);
10103 + /* The above call removes the list, nothing more to do */
10104 + goto bail_rwcompat;
10105 } else {
10106 BUG(); /* unknown node in summary information */
10107 }
10108 @@ -794,6 +796,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
10109
10110 c->summary->sum_num--;
10111 }
10112 + bail_rwcompat:
10113
10114 jffs2_sum_reset_collected(c->summary);
10115
10116 diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
10117 index caade185e568d..6fe82ce8663ef 100644
10118 --- a/fs/jfs/jfs_dmap.c
10119 +++ b/fs/jfs/jfs_dmap.c
10120 @@ -1656,7 +1656,7 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
10121 } else if (rc == -ENOSPC) {
10122 /* search for next smaller log2 block */
10123 l2nb = BLKSTOL2(nblocks) - 1;
10124 - nblocks = 1 << l2nb;
10125 + nblocks = 1LL << l2nb;
10126 } else {
10127 /* Trim any already allocated blocks */
10128 jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n");
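[Note: the jfs change above is a classic integer-promotion fix. "1 << l2nb" is computed in 32-bit int, so once l2nb reaches 32 the shift is undefined and the s64 nblocks receives garbage; "1LL << l2nb" keeps the whole shift in 64 bits. A self-contained demonstration, emulating the 32-bit loss via explicit truncation since the undefined shift itself cannot be executed portably:]

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* l2nb can exceed 31 when nblocks is a large s64 block count. */
        int l2nb = 33;

        int64_t good = 1LL << l2nb;   /* the fix: shift performed in 64 bits */
        /* "1 << l2nb" itself is undefined for l2nb >= 32; emulate the loss
         * of the high bits that a 32-bit int computation suffers: */
        int64_t bad = (int64_t)(uint32_t)(1ULL << l2nb);

        printf("good=%lld bad=%lld\n", (long long)good, (long long)bad);
        return 0;   /* good=8589934592 bad=0 */
    }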
10129 diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
10130 index be418fccc9d86..7f39d6091dfa0 100644
10131 --- a/fs/nfsd/nfsctl.c
10132 +++ b/fs/nfsd/nfsctl.c
10133 @@ -1523,12 +1523,9 @@ static int __init init_nfsd(void)
10134 int retval;
10135 printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
10136
10137 - retval = register_pernet_subsys(&nfsd_net_ops);
10138 - if (retval < 0)
10139 - return retval;
10140 retval = register_cld_notifier();
10141 if (retval)
10142 - goto out_unregister_pernet;
10143 + return retval;
10144 retval = nfsd4_init_slabs();
10145 if (retval)
10146 goto out_unregister_notifier;
10147 @@ -1546,9 +1543,14 @@ static int __init init_nfsd(void)
10148 goto out_free_lockd;
10149 retval = register_filesystem(&nfsd_fs_type);
10150 if (retval)
10151 + goto out_free_exports;
10152 + retval = register_pernet_subsys(&nfsd_net_ops);
10153 + if (retval < 0)
10154 goto out_free_all;
10155 return 0;
10156 out_free_all:
10157 + unregister_pernet_subsys(&nfsd_net_ops);
10158 +out_free_exports:
10159 remove_proc_entry("fs/nfs/exports", NULL);
10160 remove_proc_entry("fs/nfs", NULL);
10161 out_free_lockd:
10162 @@ -1562,13 +1564,12 @@ out_free_slabs:
10163 nfsd4_free_slabs();
10164 out_unregister_notifier:
10165 unregister_cld_notifier();
10166 -out_unregister_pernet:
10167 - unregister_pernet_subsys(&nfsd_net_ops);
10168 return retval;
10169 }
10170
10171 static void __exit exit_nfsd(void)
10172 {
10173 + unregister_pernet_subsys(&nfsd_net_ops);
10174 nfsd_drc_slab_free();
10175 remove_proc_entry("fs/nfs/exports", NULL);
10176 remove_proc_entry("fs/nfs", NULL);
10177 @@ -1579,7 +1580,6 @@ static void __exit exit_nfsd(void)
10178 nfsd_fault_inject_cleanup();
10179 unregister_filesystem(&nfsd_fs_type);
10180 unregister_cld_notifier();
10181 - unregister_pernet_subsys(&nfsd_net_ops);
10182 }
10183
10184 MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
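[Note: the nfsd reshuffle above restores the usual rule that module exit, and the error unwind inside init, tear resources down in exactly the reverse order of setup; registering the pernet subsystem last means it is unregistered first, before the facilities its callbacks depend on disappear. The idiomatic goto-unwind shape, sketched in plain C with dummy steps:]

    #include <stdio.h>

    static int step(const char *what, int fail) { printf("init %s\n", what); return fail; }
    static void undo(const char *what)          { printf("undo %s\n", what); }

    static int init_all(void)
    {
        int err;

        if ((err = step("A", 0))) return err;
        if ((err = step("B", 0))) goto out_a;
        if ((err = step("C", 1))) goto out_b;   /* simulate failure in C */
        return 0;
        /* unwind strictly in reverse order of setup */
    out_b:
        undo("B");
    out_a:
        undo("A");
        return err;
    }

    int main(void) { return init_all() ? 1 : 0; }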
10185 diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
10186 index a368350d4c279..c843bc318382b 100644
10187 --- a/fs/ocfs2/cluster/heartbeat.c
10188 +++ b/fs/ocfs2/cluster/heartbeat.c
10189 @@ -2052,7 +2052,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
10190 o2hb_nego_timeout_handler,
10191 reg, NULL, &reg->hr_handler_list);
10192 if (ret)
10193 - goto free;
10194 + goto remove_item;
10195
10196 ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key,
10197 sizeof(struct o2hb_nego_msg),
10198 @@ -2067,6 +2067,12 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
10199
10200 unregister_handler:
10201 o2net_unregister_handler_list(&reg->hr_handler_list);
10202 +remove_item:
10203 + spin_lock(&o2hb_live_lock);
10204 + list_del(&reg->hr_all_item);
10205 + if (o2hb_global_heartbeat_active())
10206 + clear_bit(reg->hr_region_num, o2hb_region_bitmap);
10207 + spin_unlock(&o2hb_live_lock);
10208 free:
10209 kfree(reg);
10210 return ERR_PTR(ret);
10211 diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
10212 index 74a60bae2b237..705b79bb9b241 100644
10213 --- a/fs/pstore/platform.c
10214 +++ b/fs/pstore/platform.c
10215 @@ -275,7 +275,7 @@ static int pstore_compress(const void *in, void *out,
10216 {
10217 int ret;
10218
10219 - if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION))
10220 + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
10221 return -EINVAL;
10222
10223 ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
10224 @@ -664,7 +664,7 @@ static void decompress_record(struct pstore_record *record)
10225 int unzipped_len;
10226 char *unzipped, *workspace;
10227
10228 - if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed)
10229 + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
10230 return;
10231
10232 /* Only PSTORE_TYPE_DMESG support compression. */
10233 diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
10234 index 36dce17b01016..56aedf4ba8864 100644
10235 --- a/fs/quota/quota_v2.c
10236 +++ b/fs/quota/quota_v2.c
10237 @@ -166,19 +166,24 @@ static int v2_read_file_info(struct super_block *sb, int type)
10238 quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).",
10239 (loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits,
10240 i_size_read(sb_dqopt(sb)->files[type]));
10241 - goto out;
10242 + goto out_free;
10243 }
10244 if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
10245 quota_error(sb, "Free block number too big (%u >= %u).",
10246 qinfo->dqi_free_blk, qinfo->dqi_blocks);
10247 - goto out;
10248 + goto out_free;
10249 }
10250 if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
10251 quota_error(sb, "Block with free entry too big (%u >= %u).",
10252 qinfo->dqi_free_entry, qinfo->dqi_blocks);
10253 - goto out;
10254 + goto out_free;
10255 }
10256 ret = 0;
10257 +out_free:
10258 + if (ret) {
10259 + kfree(info->dqi_priv);
10260 + info->dqi_priv = NULL;
10261 + }
10262 out:
10263 up_read(&dqopt->dqio_sem);
10264 return ret;
10265 diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
10266 index b10418b5fb719..8be17a7731961 100644
10267 --- a/fs/ubifs/auth.c
10268 +++ b/fs/ubifs/auth.c
10269 @@ -342,7 +342,7 @@ int ubifs_init_authentication(struct ubifs_info *c)
10270 ubifs_err(c, "hmac %s is bigger than maximum allowed hmac size (%d > %d)",
10271 hmac_name, c->hmac_desc_len, UBIFS_HMAC_ARR_SZ);
10272 err = -EINVAL;
10273 - goto out_free_hash;
10274 + goto out_free_hmac;
10275 }
10276
10277 err = crypto_shash_setkey(c->hmac_tfm, ukp->data, ukp->datalen);
10278 diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
10279 index e49bd69dfc1c8..701f15ba61352 100644
10280 --- a/fs/ubifs/super.c
10281 +++ b/fs/ubifs/super.c
10282 @@ -820,8 +820,10 @@ static int alloc_wbufs(struct ubifs_info *c)
10283 c->jheads[i].wbuf.jhead = i;
10284 c->jheads[i].grouped = 1;
10285 c->jheads[i].log_hash = ubifs_hash_get_desc(c);
10286 - if (IS_ERR(c->jheads[i].log_hash))
10287 + if (IS_ERR(c->jheads[i].log_hash)) {
10288 + err = PTR_ERR(c->jheads[i].log_hash);
10289 goto out;
10290 + }
10291 }
10292
10293 /*
10294 diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
10295 index 233a72f169bb7..e1de0fe281644 100644
10296 --- a/include/acpi/acexcep.h
10297 +++ b/include/acpi/acexcep.h
10298 @@ -59,11 +59,11 @@ struct acpi_exception_info {
10299
10300 #define AE_OK (acpi_status) 0x0000
10301
10302 -#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL)
10303 -#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML)
10304 -#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER)
10305 -#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES)
10306 -#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL)
10307 +#define ACPI_ENV_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL)
10308 +#define ACPI_AML_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_AML)
10309 +#define ACPI_PROG_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER)
10310 +#define ACPI_TABLE_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES)
10311 +#define ACPI_CNTL_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_CONTROL)
10312
10313 /*
10314 * Environmental exceptions
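[Note: the old ACPI macros above tested "status & AE_CODE_xxx" for truth, which misclassifies any status whose class bits overlap the class being tested, and, since the environmental class appears to be code 0x0000, ACPI_ENV_EXCEPTION() could seemingly never be true at all. The fix masks out the class field and compares for equality. A compact illustration with class values in the same style (treat the constants as illustrative):]

    #include <stdio.h>

    #define CODE_MASK   0xF000
    #define CODE_ENV    0x0000
    #define CODE_PROG   0x1000
    #define CODE_AML    0x3000   /* note: 0x3000 & 0x1000 != 0 */

    #define OLD_IS_PROG(s) ((s) & CODE_PROG)                 /* buggy */
    #define NEW_IS_PROG(s) (((s) & CODE_MASK) == CODE_PROG)  /* fixed */

    int main(void)
    {
        unsigned aml_status = CODE_AML | 0x0005;  /* an AML-class error */

        printf("old: %d (wrongly claims PROG)\n", !!OLD_IS_PROG(aml_status)); /* 1 */
        printf("new: %d\n",                       !!NEW_IS_PROG(aml_status)); /* 0 */
        return 0;
    }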
10315 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
10316 index 2267b7c763c64..8fe1912e3eeb9 100644
10317 --- a/include/asm-generic/vmlinux.lds.h
10318 +++ b/include/asm-generic/vmlinux.lds.h
10319 @@ -756,8 +756,13 @@
10320 /* DWARF 4 */ \
10321 .debug_types 0 : { *(.debug_types) } \
10322 /* DWARF 5 */ \
10323 + .debug_addr 0 : { *(.debug_addr) } \
10324 + .debug_line_str 0 : { *(.debug_line_str) } \
10325 + .debug_loclists 0 : { *(.debug_loclists) } \
10326 .debug_macro 0 : { *(.debug_macro) } \
10327 - .debug_addr 0 : { *(.debug_addr) }
10328 + .debug_names 0 : { *(.debug_names) } \
10329 + .debug_rnglists 0 : { *(.debug_rnglists) } \
10330 + .debug_str_offsets 0 : { *(.debug_str_offsets) }
10331
10332 /* Stabs debugging sections. */
10333 #define STABS_DEBUG \
10334 diff --git a/include/linux/bpf.h b/include/linux/bpf.h
10335 index 7aa0d8b5aaf0c..007147f643908 100644
10336 --- a/include/linux/bpf.h
10337 +++ b/include/linux/bpf.h
10338 @@ -711,7 +711,10 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
10339 /* verify correctness of eBPF program */
10340 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
10341 union bpf_attr __user *uattr);
10342 +
10343 +#ifndef CONFIG_BPF_JIT_ALWAYS_ON
10344 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
10345 +#endif
10346
10347 /* Map specifics */
10348 struct xdp_buff;
10349 diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
10350 index e4e1f5c1f4929..a53d7d2c2d95c 100644
10351 --- a/include/linux/device-mapper.h
10352 +++ b/include/linux/device-mapper.h
10353 @@ -316,6 +316,11 @@ struct dm_target {
10354 * whether or not its underlying devices have support.
10355 */
10356 bool discards_supported:1;
10357 +
10358 + /*
10359 + * Set if we need to limit the number of in-flight bios when swapping.
10360 + */
10361 + bool limit_swap_bios:1;
10362 };
10363
10364 /* Each target can link one of these into the table */
10365 diff --git a/include/linux/filter.h b/include/linux/filter.h
10366 index 79830bc9e45cf..c53e2fe3c8f7f 100644
10367 --- a/include/linux/filter.h
10368 +++ b/include/linux/filter.h
10369 @@ -846,7 +846,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
10370 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
10371 #define __bpf_call_base_args \
10372 ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
10373 - __bpf_call_base)
10374 + (void *)__bpf_call_base)
10375
10376 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
10377 void bpf_jit_compile(struct bpf_prog *prog);
10378 diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
10379 index a8f8889761378..0be0d68fbb009 100644
10380 --- a/include/linux/icmpv6.h
10381 +++ b/include/linux/icmpv6.h
10382 @@ -3,6 +3,7 @@
10383 #define _LINUX_ICMPV6_H
10384
10385 #include <linux/skbuff.h>
10386 +#include <linux/ipv6.h>
10387 #include <uapi/linux/icmpv6.h>
10388
10389 static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
10390 @@ -13,21 +14,64 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
10391 #include <linux/netdevice.h>
10392
10393 #if IS_ENABLED(CONFIG_IPV6)
10394 -extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
10395
10396 typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
10397 - const struct in6_addr *force_saddr);
10398 + const struct in6_addr *force_saddr,
10399 + const struct inet6_skb_parm *parm);
10400 +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
10401 + const struct in6_addr *force_saddr,
10402 + const struct inet6_skb_parm *parm);
10403 +#if IS_BUILTIN(CONFIG_IPV6)
10404 +static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
10405 + const struct inet6_skb_parm *parm)
10406 +{
10407 + icmp6_send(skb, type, code, info, NULL, parm);
10408 +}
10409 +static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
10410 +{
10411 + BUILD_BUG_ON(fn != icmp6_send);
10412 + return 0;
10413 +}
10414 +static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
10415 +{
10416 + BUILD_BUG_ON(fn != icmp6_send);
10417 + return 0;
10418 +}
10419 +#else
10420 +extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
10421 + const struct inet6_skb_parm *parm);
10422 extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
10423 extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
10424 +#endif
10425 +
10426 +static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
10427 +{
10428 + __icmpv6_send(skb, type, code, info, IP6CB(skb));
10429 +}
10430 +
10431 int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
10432 unsigned int data_len);
10433
10434 +#if IS_ENABLED(CONFIG_NF_NAT)
10435 +void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
10436 +#else
10437 +static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
10438 +{
10439 + struct inet6_skb_parm parm = { 0 };
10440 + __icmpv6_send(skb_in, type, code, info, &parm);
10441 +}
10442 +#endif
10443 +
10444 #else
10445
10446 static inline void icmpv6_send(struct sk_buff *skb,
10447 u8 type, u8 code, __u32 info)
10448 {
10449 +}
10450
10451 +static inline void icmpv6_ndo_send(struct sk_buff *skb,
10452 + u8 type, u8 code, __u32 info)
10453 +{
10454 }
10455 #endif
10456
10457 diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
10458 index ea7c7906591eb..bbe297bbbca52 100644
10459 --- a/include/linux/ipv6.h
10460 +++ b/include/linux/ipv6.h
10461 @@ -83,7 +83,6 @@ struct ipv6_params {
10462 __s32 autoconf;
10463 };
10464 extern struct ipv6_params ipv6_defaults;
10465 -#include <linux/icmpv6.h>
10466 #include <linux/tcp.h>
10467 #include <linux/udp.h>
10468
10469 diff --git a/include/linux/kexec.h b/include/linux/kexec.h
10470 index 1776eb2e43a44..a1cffce3de8cd 100644
10471 --- a/include/linux/kexec.h
10472 +++ b/include/linux/kexec.h
10473 @@ -293,6 +293,11 @@ struct kimage {
10474 /* Information for loading purgatory */
10475 struct purgatory_info purgatory_info;
10476 #endif
10477 +
10478 +#ifdef CONFIG_IMA_KEXEC
10479 + /* Virtual address of IMA measurement buffer for kexec syscall */
10480 + void *ima_buffer;
10481 +#endif
10482 };
10483
10484 /* kexec interface functions */
10485 diff --git a/include/linux/key.h b/include/linux/key.h
10486 index 6cf8e71cf8b7c..9c26cc9b802a0 100644
10487 --- a/include/linux/key.h
10488 +++ b/include/linux/key.h
10489 @@ -269,6 +269,7 @@ extern struct key *key_alloc(struct key_type *type,
10490 #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
10491 #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
10492 #define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
10493 +#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */
10494
10495 extern void key_revoke(struct key *key);
10496 extern void key_invalidate(struct key *key);
10497 diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
10498 index 75a2eded7aa2c..09e6ac4b669b2 100644
10499 --- a/include/linux/rcupdate.h
10500 +++ b/include/linux/rcupdate.h
10501 @@ -96,8 +96,10 @@ static inline void rcu_user_exit(void) { }
10502
10503 #ifdef CONFIG_RCU_NOCB_CPU
10504 void rcu_init_nohz(void);
10505 +void rcu_nocb_flush_deferred_wakeup(void);
10506 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
10507 static inline void rcu_init_nohz(void) { }
10508 +static inline void rcu_nocb_flush_deferred_wakeup(void) { }
10509 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
10510
10511 /**
10512 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
10513 index 988d176472df7..d7d6d4eb17949 100644
10514 --- a/include/linux/rmap.h
10515 +++ b/include/linux/rmap.h
10516 @@ -214,7 +214,8 @@ struct page_vma_mapped_walk {
10517
10518 static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
10519 {
10520 - if (pvmw->pte)
10521 + /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
10522 + if (pvmw->pte && !PageHuge(pvmw->page))
10523 pte_unmap(pvmw->pte);
10524 if (pvmw->ptl)
10525 spin_unlock(pvmw->ptl);
10526 diff --git a/include/net/act_api.h b/include/net/act_api.h
10527 index 05b568b92e59d..4dabe4730f00f 100644
10528 --- a/include/net/act_api.h
10529 +++ b/include/net/act_api.h
10530 @@ -156,6 +156,7 @@ int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
10531 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
10532 struct tc_action **a, const struct tc_action_ops *ops,
10533 int bind, bool cpustats);
10534 +void tcf_idr_insert_many(struct tc_action *actions[]);
10535 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
10536 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
10537 struct tc_action **a, int bind);
10538 diff --git a/include/net/icmp.h b/include/net/icmp.h
10539 index 5d4bfdba9adf0..fd84adc479633 100644
10540 --- a/include/net/icmp.h
10541 +++ b/include/net/icmp.h
10542 @@ -43,6 +43,16 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32
10543 __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
10544 }
10545
10546 +#if IS_ENABLED(CONFIG_NF_NAT)
10547 +void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
10548 +#else
10549 +static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
10550 +{
10551 + struct ip_options opts = { 0 };
10552 + __icmp_send(skb_in, type, code, info, &opts);
10553 +}
10554 +#endif
10555 +
10556 int icmp_rcv(struct sk_buff *skb);
10557 int icmp_err(struct sk_buff *skb, u32 info);
10558 int icmp_init(void);
10559 diff --git a/include/net/tcp.h b/include/net/tcp.h
10560 index 37b51456784f8..b914959cd2c67 100644
10561 --- a/include/net/tcp.h
10562 +++ b/include/net/tcp.h
10563 @@ -1409,8 +1409,13 @@ static inline int tcp_full_space(const struct sock *sk)
10564 */
10565 static inline bool tcp_rmem_pressure(const struct sock *sk)
10566 {
10567 - int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
10568 - int threshold = rcvbuf - (rcvbuf >> 3);
10569 + int rcvbuf, threshold;
10570 +
10571 + if (tcp_under_memory_pressure(sk))
10572 + return true;
10573 +
10574 + rcvbuf = READ_ONCE(sk->sk_rcvbuf);
10575 + threshold = rcvbuf - (rcvbuf >> 3);
10576
10577 return atomic_read(&sk->sk_rmem_alloc) > threshold;
10578 }
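[Note: the tcp_rmem_pressure() change above makes the helper report pressure whenever the TCP stack as a whole is under memory pressure, not only when this socket's receive allocation crosses the local threshold of 7/8 of sk_rcvbuf, i.e. rcvbuf - (rcvbuf >> 3). Quick arithmetic check of that threshold:]

    #include <stdio.h>

    int main(void)
    {
        int rcvbuf = 87380;                     /* a common default rmem value */
        int threshold = rcvbuf - (rcvbuf >> 3); /* 7/8 of rcvbuf, shift floors
                                                 * the subtracted eighth */

        printf("rcvbuf=%d threshold=%d (%.1f%%)\n",
               rcvbuf, threshold, 100.0 * threshold / rcvbuf);
        return 0;   /* rcvbuf=87380 threshold=76458 (87.5%) */
    }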
10579 diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
10580 index 1b6b9349cb857..d99e89f113c43 100644
10581 --- a/kernel/bpf/bpf_lru_list.c
10582 +++ b/kernel/bpf/bpf_lru_list.c
10583 @@ -502,13 +502,14 @@ struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
10584 static void bpf_common_lru_push_free(struct bpf_lru *lru,
10585 struct bpf_lru_node *node)
10586 {
10587 + u8 node_type = READ_ONCE(node->type);
10588 unsigned long flags;
10589
10590 - if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) ||
10591 - WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE))
10592 + if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) ||
10593 + WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE))
10594 return;
10595
10596 - if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) {
10597 + if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) {
10598 struct bpf_lru_locallist *loc_l;
10599
10600 loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
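[Note: reading node->type once into a local with READ_ONCE(), as the bpf_lru hunk above does, keeps the WARN_ON checks and the subsequent branch consistent: without it the compiler may reload the field between tests, and a concurrent writer could make the code warn on one value and branch on another. A userspace sketch of the same snapshot-shared-state-once idiom using C11 atomics; the types are invented.]

    #include <stdatomic.h>
    #include <stdio.h>

    enum { T_FREE, T_PENDING, T_ACTIVE };
    static _Atomic int node_type = T_PENDING;  /* racy field, set by other CPUs */

    static void push_free(void)
    {
        /* One load; every later test sees the same snapshot. */
        int type = atomic_load_explicit(&node_type, memory_order_relaxed);

        if (type == T_FREE) {
            fprintf(stderr, "double free?\n");
            return;
        }
        if (type == T_PENDING)
            puts("remove from local pending list");
        else
            puts("move to global free list");
    }

    int main(void) { push_free(); return 0; }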
10601 diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
10602 index 2118d8258b7c9..ad53b19734e9b 100644
10603 --- a/kernel/debug/kdb/kdb_private.h
10604 +++ b/kernel/debug/kdb/kdb_private.h
10605 @@ -233,7 +233,7 @@ extern struct task_struct *kdb_curr_task(int);
10606 #define kdb_do_each_thread(g, p) do_each_thread(g, p)
10607 #define kdb_while_each_thread(g, p) while_each_thread(g, p)
10608
10609 -#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
10610 +#define GFP_KDB (in_dbg_master() ? GFP_ATOMIC : GFP_KERNEL)
10611
10612 extern void *debug_kmalloc(size_t size, gfp_t flags);
10613 extern void debug_kfree(void *);
10614 diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
10615 index 79f252af7dee3..4e74db89bd23f 100644
10616 --- a/kernel/kexec_file.c
10617 +++ b/kernel/kexec_file.c
10618 @@ -165,6 +165,11 @@ void kimage_file_post_load_cleanup(struct kimage *image)
10619 vfree(pi->sechdrs);
10620 pi->sechdrs = NULL;
10621
10622 +#ifdef CONFIG_IMA_KEXEC
10623 + vfree(image->ima_buffer);
10624 + image->ima_buffer = NULL;
10625 +#endif /* CONFIG_IMA_KEXEC */
10626 +
10627 /* See if architecture has anything to cleanup post load */
10628 arch_kimage_file_post_load_cleanup(image);
10629
10630 diff --git a/kernel/module.c b/kernel/module.c
10631 index 9e9af40698ffe..ab1f97cfe18dc 100644
10632 --- a/kernel/module.c
10633 +++ b/kernel/module.c
10634 @@ -2310,6 +2310,21 @@ static int verify_exported_symbols(struct module *mod)
10635 return 0;
10636 }
10637
10638 +static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
10639 +{
10640 + /*
10641 + * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as
10642 + * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
10643 + * i386 has a similar problem but may not deserve a fix.
10644 + *
10645 + * If we ever have to ignore many symbols, consider refactoring the code to
10646 + * only warn if referenced by a relocation.
10647 + */
10648 + if (emachine == EM_386 || emachine == EM_X86_64)
10649 + return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");
10650 + return false;
10651 +}
10652 +
10653 /* Change all symbols so that st_value encodes the pointer directly. */
10654 static int simplify_symbols(struct module *mod, const struct load_info *info)
10655 {
10656 @@ -2355,8 +2370,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
10657 break;
10658 }
10659
10660 - /* Ok if weak. */
10661 - if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
10662 + /* Ok if weak or ignored. */
10663 + if (!ksym &&
10664 + (ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
10665 + ignore_undef_symbol(info->hdr->e_machine, name)))
10666 break;
10667
10668 ret = PTR_ERR(ksym) ?: -ENOENT;
10669 diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
10670 index d9a659a686f31..6cfc5a00c67d6 100644
10671 --- a/kernel/printk/printk_safe.c
10672 +++ b/kernel/printk/printk_safe.c
10673 @@ -43,6 +43,8 @@ struct printk_safe_seq_buf {
10674 static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
10675 static DEFINE_PER_CPU(int, printk_context);
10676
10677 +static DEFINE_RAW_SPINLOCK(safe_read_lock);
10678 +
10679 #ifdef CONFIG_PRINTK_NMI
10680 static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
10681 #endif
10682 @@ -178,8 +180,6 @@ static void report_message_lost(struct printk_safe_seq_buf *s)
10683 */
10684 static void __printk_safe_flush(struct irq_work *work)
10685 {
10686 - static raw_spinlock_t read_lock =
10687 - __RAW_SPIN_LOCK_INITIALIZER(read_lock);
10688 struct printk_safe_seq_buf *s =
10689 container_of(work, struct printk_safe_seq_buf, work);
10690 unsigned long flags;
10691 @@ -193,7 +193,7 @@ static void __printk_safe_flush(struct irq_work *work)
10692 * different CPUs. This is especially important when printing
10693 * a backtrace.
10694 */
10695 - raw_spin_lock_irqsave(&read_lock, flags);
10696 + raw_spin_lock_irqsave(&safe_read_lock, flags);
10697
10698 i = 0;
10699 more:
10700 @@ -230,7 +230,7 @@ more:
10701
10702 out:
10703 report_message_lost(s);
10704 - raw_spin_unlock_irqrestore(&read_lock, flags);
10705 + raw_spin_unlock_irqrestore(&safe_read_lock, flags);
10706 }
10707
10708 /**
10709 @@ -276,6 +276,14 @@ void printk_safe_flush_on_panic(void)
10710 raw_spin_lock_init(&logbuf_lock);
10711 }
10712
10713 + if (raw_spin_is_locked(&safe_read_lock)) {
10714 + if (num_online_cpus() > 1)
10715 + return;
10716 +
10717 + debug_locks_off();
10718 + raw_spin_lock_init(&safe_read_lock);
10719 + }
10720 +
10721 printk_safe_flush();
10722 }
10723
10724 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
10725 index 1b1d2b09efa9b..4dfa9dd47223a 100644
10726 --- a/kernel/rcu/tree.c
10727 +++ b/kernel/rcu/tree.c
10728 @@ -579,7 +579,6 @@ static void rcu_eqs_enter(bool user)
10729 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
10730 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
10731 rdp = this_cpu_ptr(&rcu_data);
10732 - do_nocb_deferred_wakeup(rdp);
10733 rcu_prepare_for_idle();
10734 rcu_preempt_deferred_qs(current);
10735 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
10736 @@ -618,7 +617,14 @@ void rcu_idle_enter(void)
10737 */
10738 void rcu_user_enter(void)
10739 {
10740 + struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
10741 +
10742 lockdep_assert_irqs_disabled();
10743 +
10744 + instrumentation_begin();
10745 + do_nocb_deferred_wakeup(rdp);
10746 + instrumentation_end();
10747 +
10748 rcu_eqs_enter(true);
10749 }
10750 #endif /* CONFIG_NO_HZ_FULL */
10751 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
10752 index f7118842a2b88..a71a4a272515d 100644
10753 --- a/kernel/rcu/tree_plugin.h
10754 +++ b/kernel/rcu/tree_plugin.h
10755 @@ -2190,6 +2190,11 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
10756 do_nocb_deferred_wakeup_common(rdp);
10757 }
10758
10759 +void rcu_nocb_flush_deferred_wakeup(void)
10760 +{
10761 + do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
10762 +}
10763 +
10764 void __init rcu_init_nohz(void)
10765 {
10766 int cpu;
10767 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
10768 index 3dd7c10d6a582..611adca1e6d0c 100644
10769 --- a/kernel/sched/fair.c
10770 +++ b/kernel/sched/fair.c
10771 @@ -3814,7 +3814,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
10772 if (!static_branch_unlikely(&sched_asym_cpucapacity))
10773 return;
10774
10775 - if (!p) {
10776 + if (!p || p->nr_cpus_allowed == 1) {
10777 rq->misfit_task_load = 0;
10778 return;
10779 }
10780 diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
10781 index 131e7c86cf06e..3f8c7867c14c1 100644
10782 --- a/kernel/sched/idle.c
10783 +++ b/kernel/sched/idle.c
10784 @@ -249,6 +249,7 @@ static void do_idle(void)
10785 }
10786
10787 arch_cpu_idle_enter();
10788 + rcu_nocb_flush_deferred_wakeup();
10789
10790 /*
10791 * In poll mode we reenable interrupts and spin. Also if we
10792 diff --git a/kernel/seccomp.c b/kernel/seccomp.c
10793 index 4221a4383cfc5..1d62fa2b6b918 100644
10794 --- a/kernel/seccomp.c
10795 +++ b/kernel/seccomp.c
10796 @@ -921,6 +921,8 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
10797 const bool recheck_after_trace)
10798 {
10799 BUG();
10800 +
10801 + return -1;
10802 }
10803 #endif
10804
10805 diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
10806 index 73956eaff8a9c..be51df4508cbe 100644
10807 --- a/kernel/tracepoint.c
10808 +++ b/kernel/tracepoint.c
10809 @@ -53,6 +53,12 @@ struct tp_probes {
10810 struct tracepoint_func probes[0];
10811 };
10812
10813 +/* Called in removal of a func but failed to allocate a new tp_funcs */
10814 +static void tp_stub_func(void)
10815 +{
10816 + return;
10817 +}
10818 +
10819 static inline void *allocate_probes(int count)
10820 {
10821 struct tp_probes *p = kmalloc(struct_size(p, probes, count),
10822 @@ -131,6 +137,7 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
10823 {
10824 struct tracepoint_func *old, *new;
10825 int nr_probes = 0;
10826 + int stub_funcs = 0;
10827 int pos = -1;
10828
10829 if (WARN_ON(!tp_func->func))
10830 @@ -147,14 +154,34 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
10831 if (old[nr_probes].func == tp_func->func &&
10832 old[nr_probes].data == tp_func->data)
10833 return ERR_PTR(-EEXIST);
10834 + if (old[nr_probes].func == tp_stub_func)
10835 + stub_funcs++;
10836 }
10837 }
10838 - /* + 2 : one for new probe, one for NULL func */
10839 - new = allocate_probes(nr_probes + 2);
10840 + /* + 2 : one for new probe, one for NULL func - stub functions */
10841 + new = allocate_probes(nr_probes + 2 - stub_funcs);
10842 if (new == NULL)
10843 return ERR_PTR(-ENOMEM);
10844 if (old) {
10845 - if (pos < 0) {
10846 + if (stub_funcs) {
10847 + /* Need to copy one at a time to remove stubs */
10848 + int probes = 0;
10849 +
10850 + pos = -1;
10851 + for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
10852 + if (old[nr_probes].func == tp_stub_func)
10853 + continue;
10854 + if (pos < 0 && old[nr_probes].prio < prio)
10855 + pos = probes++;
10856 + new[probes++] = old[nr_probes];
10857 + }
10858 + nr_probes = probes;
10859 + if (pos < 0)
10860 + pos = probes;
10861 + else
10862 + nr_probes--; /* Account for insertion */
10863 +
10864 + } else if (pos < 0) {
10865 pos = nr_probes;
10866 memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
10867 } else {
10868 @@ -188,8 +215,9 @@ static void *func_remove(struct tracepoint_func **funcs,
10869 /* (N -> M), (N > 1, M >= 0) probes */
10870 if (tp_func->func) {
10871 for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
10872 - if (old[nr_probes].func == tp_func->func &&
10873 - old[nr_probes].data == tp_func->data)
10874 + if ((old[nr_probes].func == tp_func->func &&
10875 + old[nr_probes].data == tp_func->data) ||
10876 + old[nr_probes].func == tp_stub_func)
10877 nr_del++;
10878 }
10879 }
10880 @@ -208,14 +236,32 @@ static void *func_remove(struct tracepoint_func **funcs,
10881 /* N -> M, (N > 1, M > 0) */
10882 /* + 1 for NULL */
10883 new = allocate_probes(nr_probes - nr_del + 1);
10884 - if (new == NULL)
10885 - return ERR_PTR(-ENOMEM);
10886 - for (i = 0; old[i].func; i++)
10887 - if (old[i].func != tp_func->func
10888 - || old[i].data != tp_func->data)
10889 - new[j++] = old[i];
10890 - new[nr_probes - nr_del].func = NULL;
10891 - *funcs = new;
10892 + if (new) {
10893 + for (i = 0; old[i].func; i++)
10894 + if ((old[i].func != tp_func->func
10895 + || old[i].data != tp_func->data)
10896 + && old[i].func != tp_stub_func)
10897 + new[j++] = old[i];
10898 + new[nr_probes - nr_del].func = NULL;
10899 + *funcs = new;
10900 + } else {
10901 + /*
10902 + * Failed to allocate, replace the old function
10903 + * with calls to tp_stub_func.
10904 + */
10905 + for (i = 0; old[i].func; i++)
10906 + if (old[i].func == tp_func->func &&
10907 + old[i].data == tp_func->data) {
10908 + old[i].func = tp_stub_func;
10909 + /* Set the prio to the next event. */
10910 + if (old[i + 1].func)
10911 + old[i].prio =
10912 + old[i + 1].prio;
10913 + else
10914 + old[i].prio = -1;
10915 + }
10916 + *funcs = old;
10917 + }
10918 }
10919 debug_print_probes(*funcs);
10920 return old;
10921 @@ -271,10 +317,12 @@ static int tracepoint_remove_func(struct tracepoint *tp,
10922 tp_funcs = rcu_dereference_protected(tp->funcs,
10923 lockdep_is_held(&tracepoints_mutex));
10924 old = func_remove(&tp_funcs, func);
10925 - if (IS_ERR(old)) {
10926 - WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
10927 + if (WARN_ON_ONCE(IS_ERR(old)))
10928 return PTR_ERR(old);
10929 - }
10930 +
10931 + if (tp_funcs == old)
10932 + /* Failed allocating new tp_funcs, replaced func with stub */
10933 + return 0;
10934
10935 if (!tp_funcs) {
10936 /* Removed last function */
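[Note: the tracepoint rework above has to cope with func_remove() failing to allocate the shrunken array: rather than leave the removed probe callable, it overwrites the entry in place with tp_stub_func, a do-nothing callback, and func_add() later compacts the stubs away. A toy version of degrade-to-a-stub-on-allocation-failure; the data structure is invented for illustration.]

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef void (*probe_fn)(void);

    static void stub(void) { }              /* tp_stub_func: safe no-op */
    static void probe_a(void) { puts("A"); }
    static void probe_b(void) { puts("B"); }

    static probe_fn probes[] = { probe_a, probe_b, NULL };

    static void remove_probe(probe_fn *vec, probe_fn victim)
    {
        size_t n, i, j = 0;

        for (n = 0; vec[n]; n++)
            ;
        probe_fn *copy = malloc((n + 1) * sizeof(*copy));
        if (!copy) {
            /* Cannot shrink the array: neutralize the entry instead, so
             * callers iterating concurrently hit a harmless stub. */
            for (i = 0; vec[i]; i++)
                if (vec[i] == victim)
                    vec[i] = stub;
            return;
        }
        for (i = 0; vec[i]; i++)
            if (vec[i] != victim && vec[i] != stub)
                copy[j++] = vec[i];         /* also compacts old stubs */
        copy[j] = NULL;
        memcpy(vec, copy, (j + 1) * sizeof(*copy));  /* toy: reuse storage */
        free(copy);
    }

    int main(void)
    {
        remove_probe(probes, probe_a);
        for (probe_fn *p = probes; *p; p++)
            (*p)();                          /* prints only "B" */
        return 0;
    }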
10937 diff --git a/mm/compaction.c b/mm/compaction.c
10938 index 88c3f6bad1aba..d686887856fee 100644
10939 --- a/mm/compaction.c
10940 +++ b/mm/compaction.c
10941 @@ -1630,6 +1630,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
10942 unsigned long pfn = cc->migrate_pfn;
10943 unsigned long high_pfn;
10944 int order;
10945 + bool found_block = false;
10946
10947 /* Skip hints are relied on to avoid repeats on the fast search */
10948 if (cc->ignore_skip_hint)
10949 @@ -1672,7 +1673,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
10950 high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
10951
10952 for (order = cc->order - 1;
10953 - order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
10954 + order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
10955 order--) {
10956 struct free_area *area = &cc->zone->free_area[order];
10957 struct list_head *freelist;
10958 @@ -1687,7 +1688,11 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
10959 list_for_each_entry(freepage, freelist, lru) {
10960 unsigned long free_pfn;
10961
10962 - nr_scanned++;
10963 + if (nr_scanned++ >= limit) {
10964 + move_freelist_tail(freelist, freepage);
10965 + break;
10966 + }
10967 +
10968 free_pfn = page_to_pfn(freepage);
10969 if (free_pfn < high_pfn) {
10970 /*
10971 @@ -1696,12 +1701,8 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
10972 * the list assumes an entry is deleted, not
10973 * reordered.
10974 */
10975 - if (get_pageblock_skip(freepage)) {
10976 - if (list_is_last(freelist, &freepage->lru))
10977 - break;
10978 -
10979 + if (get_pageblock_skip(freepage))
10980 continue;
10981 - }
10982
10983 /* Reorder to so a future search skips recent pages */
10984 move_freelist_tail(freelist, freepage);
10985 @@ -1709,15 +1710,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
10986 update_fast_start_pfn(cc, free_pfn);
10987 pfn = pageblock_start_pfn(free_pfn);
10988 cc->fast_search_fail = 0;
10989 + found_block = true;
10990 set_pageblock_skip(freepage);
10991 break;
10992 }
10993 -
10994 - if (nr_scanned >= limit) {
10995 - cc->fast_search_fail++;
10996 - move_freelist_tail(freelist, freepage);
10997 - break;
10998 - }
10999 }
11000 spin_unlock_irqrestore(&cc->zone->lock, flags);
11001 }
11002 @@ -1728,9 +1724,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
11003 * If fast scanning failed then use a cached entry for a page block
11004 * that had free pages as the basis for starting a linear scan.
11005 */
11006 - if (pfn == cc->migrate_pfn)
11007 + if (!found_block) {
11008 + cc->fast_search_fail++;
11009 pfn = reinit_migrate_pfn(cc);
11010 -
11011 + }
11012 return pfn;
11013 }
11014
11015 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
11016 index d5b03b9262d4f..2cd4c7f43dcd9 100644
11017 --- a/mm/hugetlb.c
11018 +++ b/mm/hugetlb.c
11019 @@ -1192,14 +1192,16 @@ static inline void destroy_compound_gigantic_page(struct page *page,
11020 static void update_and_free_page(struct hstate *h, struct page *page)
11021 {
11022 int i;
11023 + struct page *subpage = page;
11024
11025 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
11026 return;
11027
11028 h->nr_huge_pages--;
11029 h->nr_huge_pages_node[page_to_nid(page)]--;
11030 - for (i = 0; i < pages_per_huge_page(h); i++) {
11031 - page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
11032 + for (i = 0; i < pages_per_huge_page(h);
11033 + i++, subpage = mem_map_next(subpage, page, i)) {
11034 + subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
11035 1 << PG_referenced | 1 << PG_dirty |
11036 1 << PG_active | 1 << PG_private |
11037 1 << PG_writeback);
11038 @@ -2812,8 +2814,10 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
11039 return -ENOMEM;
11040
11041 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
11042 - if (retval)
11043 + if (retval) {
11044 kobject_put(hstate_kobjs[hi]);
11045 + hstate_kobjs[hi] = NULL;
11046 + }
11047
11048 return retval;
11049 }
11050 diff --git a/mm/memory.c b/mm/memory.c
11051 index b23831132933a..c432e7c764451 100644
11052 --- a/mm/memory.c
11053 +++ b/mm/memory.c
11054 @@ -1804,11 +1804,11 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
11055 unsigned long addr, unsigned long end,
11056 unsigned long pfn, pgprot_t prot)
11057 {
11058 - pte_t *pte;
11059 + pte_t *pte, *mapped_pte;
11060 spinlock_t *ptl;
11061 int err = 0;
11062
11063 - pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
11064 + mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
11065 if (!pte)
11066 return -ENOMEM;
11067 arch_enter_lazy_mmu_mode();
11068 @@ -1822,7 +1822,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
11069 pfn++;
11070 } while (pte++, addr += PAGE_SIZE, addr != end);
11071 arch_leave_lazy_mmu_mode();
11072 - pte_unmap_unlock(pte - 1, ptl);
11073 + pte_unmap_unlock(mapped_pte, ptl);
11074 return err;
11075 }
11076
11077 @@ -4718,17 +4718,19 @@ long copy_huge_page_from_user(struct page *dst_page,
11078 void *page_kaddr;
11079 unsigned long i, rc = 0;
11080 unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
11081 + struct page *subpage = dst_page;
11082
11083 - for (i = 0; i < pages_per_huge_page; i++) {
11084 + for (i = 0; i < pages_per_huge_page;
11085 + i++, subpage = mem_map_next(subpage, dst_page, i)) {
11086 if (allow_pagefault)
11087 - page_kaddr = kmap(dst_page + i);
11088 + page_kaddr = kmap(subpage);
11089 else
11090 - page_kaddr = kmap_atomic(dst_page + i);
11091 + page_kaddr = kmap_atomic(subpage);
11092 rc = copy_from_user(page_kaddr,
11093 (const void __user *)(src + i * PAGE_SIZE),
11094 PAGE_SIZE);
11095 if (allow_pagefault)
11096 - kunmap(dst_page + i);
11097 + kunmap(subpage);
11098 else
11099 kunmap_atomic(page_kaddr);
11100
11101 diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
11102 index da7fd7c8c2dc0..463bad58478b2 100644
11103 --- a/net/bluetooth/a2mp.c
11104 +++ b/net/bluetooth/a2mp.c
11105 @@ -381,9 +381,9 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
11106 hdev = hci_dev_get(req->id);
11107 if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
11108 struct a2mp_amp_assoc_rsp rsp;
11109 - rsp.id = req->id;
11110
11111 memset(&rsp, 0, sizeof(rsp));
11112 + rsp.id = req->id;
11113
11114 if (tmp) {
11115 rsp.status = A2MP_STATUS_COLLISION_OCCURED;
11116 @@ -512,6 +512,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
11117 assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
11118 if (!assoc) {
11119 amp_ctrl_put(ctrl);
11120 + hci_dev_put(hdev);
11121 return -ENOMEM;
11122 }
11123
11124 diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
11125 index 9e19d5a3aac87..83b324419ad3d 100644
11126 --- a/net/bluetooth/hci_core.c
11127 +++ b/net/bluetooth/hci_core.c
11128 @@ -1317,8 +1317,10 @@ int hci_inquiry(void __user *arg)
11129 * cleared). If it is interrupted by a signal, return -EINTR.
11130 */
11131 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
11132 - TASK_INTERRUPTIBLE))
11133 - return -EINTR;
11134 + TASK_INTERRUPTIBLE)) {
11135 + err = -EINTR;
11136 + goto done;
11137 + }
11138 }
11139
11140 /* for unlimited number of responses we will use buffer with
11141 diff --git a/net/core/filter.c b/net/core/filter.c
11142 index 2fa10fdcf6b1d..524f3364f8a05 100644
11143 --- a/net/core/filter.c
11144 +++ b/net/core/filter.c
11145 @@ -4880,6 +4880,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
11146 {
11147 struct net *net = dev_net(skb->dev);
11148 int rc = -EAFNOSUPPORT;
11149 + bool check_mtu = false;
11150
11151 if (plen < sizeof(*params))
11152 return -EINVAL;
11153 @@ -4887,22 +4888,28 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
11154 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
11155 return -EINVAL;
11156
11157 + if (params->tot_len)
11158 + check_mtu = true;
11159 +
11160 switch (params->family) {
11161 #if IS_ENABLED(CONFIG_INET)
11162 case AF_INET:
11163 - rc = bpf_ipv4_fib_lookup(net, params, flags, false);
11164 + rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu);
11165 break;
11166 #endif
11167 #if IS_ENABLED(CONFIG_IPV6)
11168 case AF_INET6:
11169 - rc = bpf_ipv6_fib_lookup(net, params, flags, false);
11170 + rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu);
11171 break;
11172 #endif
11173 }
11174
11175 - if (!rc) {
11176 + if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) {
11177 struct net_device *dev;
11178
11179 + /* When tot_len isn't provided by user, check skb against
11180 + * the MTU of the net_device found by the FIB lookup
11181 + */
11182 dev = dev_get_by_index_rcu(net, params->ifindex);
11183 if (!is_skb_forwardable(dev, skb))
11184 rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
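
The helper change above is opt-in: a non-zero params->tot_len now pushes the MTU check down into bpf_ipv{4,6}_fib_lookup(), and the old skb-vs-egress-device check runs only when tot_len is unset. A minimal sketch of a TC classifier opting in (the section name, the hardcoded AF_INET value, and the omitted address fields are illustrative, not from the patch):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int fib_mtu_check(struct __sk_buff *skb)
{
	struct bpf_fib_lookup params = {};

	params.family  = 2;		/* AF_INET */
	params.ifindex = skb->ifindex;
	params.tot_len = skb->len;	/* non-zero: lookup checks MTU */
	/* address/port fields omitted for brevity */

	if (bpf_fib_lookup(skb, &params, sizeof(params), 0) ==
	    BPF_FIB_LKUP_RET_FRAG_NEEDED)
		return TC_ACT_SHOT;	/* would exceed the route MTU */

	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";
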
11185 diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
11186 index d00533aea1f05..dd8fae89be723 100644
11187 --- a/net/ipv4/icmp.c
11188 +++ b/net/ipv4/icmp.c
11189 @@ -750,6 +750,40 @@ out:;
11190 }
11191 EXPORT_SYMBOL(__icmp_send);
11192
11193 +#if IS_ENABLED(CONFIG_NF_NAT)
11194 +#include <net/netfilter/nf_conntrack.h>
11195 +void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
11196 +{
11197 + struct sk_buff *cloned_skb = NULL;
11198 + struct ip_options opts = { 0 };
11199 + enum ip_conntrack_info ctinfo;
11200 + struct nf_conn *ct;
11201 + __be32 orig_ip;
11202 +
11203 + ct = nf_ct_get(skb_in, &ctinfo);
11204 + if (!ct || !(ct->status & IPS_SRC_NAT)) {
11205 + __icmp_send(skb_in, type, code, info, &opts);
11206 + return;
11207 + }
11208 +
11209 + if (skb_shared(skb_in))
11210 + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);
11211 +
11212 + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
11213 + (skb_network_header(skb_in) + sizeof(struct iphdr)) >
11214 + skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
11215 + skb_network_offset(skb_in) + sizeof(struct iphdr))))
11216 + goto out;
11217 +
11218 + orig_ip = ip_hdr(skb_in)->saddr;
11219 + ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
11220 + __icmp_send(skb_in, type, code, info, &opts);
11221 + ip_hdr(skb_in)->saddr = orig_ip;
11222 +out:
11223 + consume_skb(cloned_skb);
11224 +}
11225 +EXPORT_SYMBOL(icmp_ndo_send);
11226 +#endif
11227
11228 static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
11229 {
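
icmp_ndo_send() added above exists for device (ndo) transmit paths: if conntrack has already source-NATed the skb, a plain __icmp_send() would quote the post-NAT source address and the error could never route back to the original sender, so the helper temporarily restores the pre-NAT source from the conntrack tuple. The xfrm_interface hunk later in this patch is an in-tree caller; as a hedged sketch, a hypothetical tunnel driver would use it like this (mytun_xmit and the MTU handling are illustrative, and the declaration is assumed to come from this patch's header changes, which are not shown in this excerpt):

#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <net/icmp.h>

static netdev_tx_t mytun_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int mtu = dev->mtu;

	if (skb->len > mtu && !skb_is_gso(skb)) {
		/* NAT-aware: the pre-NAT source is restored while the
		 * ICMP error is generated, then put back. */
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			      htonl(mtu));
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	/* ... hand the packet to the real transmit path ... */
	return NETDEV_TX_OK;
}
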
11230 diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
11231 index e9bb89131e02a..3db10cae7b178 100644
11232 --- a/net/ipv6/icmp.c
11233 +++ b/net/ipv6/icmp.c
11234 @@ -312,10 +312,9 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
11235 }
11236
11237 #if IS_ENABLED(CONFIG_IPV6_MIP6)
11238 -static void mip6_addr_swap(struct sk_buff *skb)
11239 +static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt)
11240 {
11241 struct ipv6hdr *iph = ipv6_hdr(skb);
11242 - struct inet6_skb_parm *opt = IP6CB(skb);
11243 struct ipv6_destopt_hao *hao;
11244 struct in6_addr tmp;
11245 int off;
11246 @@ -332,7 +331,7 @@ static void mip6_addr_swap(struct sk_buff *skb)
11247 }
11248 }
11249 #else
11250 -static inline void mip6_addr_swap(struct sk_buff *skb) {}
11251 +static inline void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) {}
11252 #endif
11253
11254 static struct dst_entry *icmpv6_route_lookup(struct net *net,
11255 @@ -426,8 +425,9 @@ static int icmp6_iif(const struct sk_buff *skb)
11256 /*
11257 * Send an ICMP message in response to a packet in error
11258 */
11259 -static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
11260 - const struct in6_addr *force_saddr)
11261 +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
11262 + const struct in6_addr *force_saddr,
11263 + const struct inet6_skb_parm *parm)
11264 {
11265 struct inet6_dev *idev = NULL;
11266 struct ipv6hdr *hdr = ipv6_hdr(skb);
11267 @@ -520,7 +520,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
11268 if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type))
11269 goto out_bh_enable;
11270
11271 - mip6_addr_swap(skb);
11272 + mip6_addr_swap(skb, parm);
11273
11274 memset(&fl6, 0, sizeof(fl6));
11275 fl6.flowi6_proto = IPPROTO_ICMPV6;
11276 @@ -600,12 +600,13 @@ out:
11277 out_bh_enable:
11278 local_bh_enable();
11279 }
11280 +EXPORT_SYMBOL(icmp6_send);
11281
11282 /* Slightly more convenient version of icmp6_send.
11283 */
11284 void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
11285 {
11286 - icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL);
11287 + icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb));
11288 kfree_skb(skb);
11289 }
11290
11291 @@ -662,10 +663,10 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
11292 }
11293 if (type == ICMP_TIME_EXCEEDED)
11294 icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
11295 - info, &temp_saddr);
11296 + info, &temp_saddr, IP6CB(skb2));
11297 else
11298 icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH,
11299 - info, &temp_saddr);
11300 + info, &temp_saddr, IP6CB(skb2));
11301 if (rt)
11302 ip6_rt_put(rt);
11303
11304 diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c
11305 index 02045494c24cc..9e3574880cb03 100644
11306 --- a/net/ipv6/ip6_icmp.c
11307 +++ b/net/ipv6/ip6_icmp.c
11308 @@ -9,6 +9,8 @@
11309
11310 #if IS_ENABLED(CONFIG_IPV6)
11311
11312 +#if !IS_BUILTIN(CONFIG_IPV6)
11313 +
11314 static ip6_icmp_send_t __rcu *ip6_icmp_send;
11315
11316 int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
11317 @@ -31,18 +33,52 @@ int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
11318 }
11319 EXPORT_SYMBOL(inet6_unregister_icmp_sender);
11320
11321 -void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
11322 +void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
11323 + const struct inet6_skb_parm *parm)
11324 {
11325 ip6_icmp_send_t *send;
11326
11327 rcu_read_lock();
11328 send = rcu_dereference(ip6_icmp_send);
11329 + if (send)
11330 + send(skb, type, code, info, NULL, parm);
11331 + rcu_read_unlock();
11332 +}
11333 +EXPORT_SYMBOL(__icmpv6_send);
11334 +#endif
11335 +
11336 +#if IS_ENABLED(CONFIG_NF_NAT)
11337 +#include <net/netfilter/nf_conntrack.h>
11338 +void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
11339 +{
11340 + struct inet6_skb_parm parm = { 0 };
11341 + struct sk_buff *cloned_skb = NULL;
11342 + enum ip_conntrack_info ctinfo;
11343 + struct in6_addr orig_ip;
11344 + struct nf_conn *ct;
11345
11346 - if (!send)
11347 + ct = nf_ct_get(skb_in, &ctinfo);
11348 + if (!ct || !(ct->status & IPS_SRC_NAT)) {
11349 + __icmpv6_send(skb_in, type, code, info, &parm);
11350 + return;
11351 + }
11352 +
11353 + if (skb_shared(skb_in))
11354 + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);
11355 +
11356 + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
11357 + (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) >
11358 + skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
11359 + skb_network_offset(skb_in) + sizeof(struct ipv6hdr))))
11360 goto out;
11361 - send(skb, type, code, info, NULL);
11362 +
11363 + orig_ip = ipv6_hdr(skb_in)->saddr;
11364 + ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6;
11365 + __icmpv6_send(skb_in, type, code, info, &parm);
11366 + ipv6_hdr(skb_in)->saddr = orig_ip;
11367 out:
11368 - rcu_read_unlock();
11369 + consume_skb(cloned_skb);
11370 }
11371 -EXPORT_SYMBOL(icmpv6_send);
11372 +EXPORT_SYMBOL(icmpv6_ndo_send);
11373 +#endif
11374 #endif
11375 diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
11376 index aa5150929996d..b5b728a71ab53 100644
11377 --- a/net/mac80211/mesh_hwmp.c
11378 +++ b/net/mac80211/mesh_hwmp.c
11379 @@ -356,7 +356,7 @@ u32 airtime_link_metric_get(struct ieee80211_local *local,
11380 */
11381 tx_time = (device_constant + 10 * test_frame_len / rate);
11382 estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
11383 - result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
11384 + result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
11385 return (u32)result;
11386 }
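
The mesh metric fix above is a plain 32-bit overflow: tx_time and estimated_retx are both 32-bit, so their product is computed in 32 bits and can wrap before the right shift; casting one operand to u64 keeps the full product. A standalone illustration with made-up magnitudes (the 16-bit shift stands in for 2 * ARITH_SHIFT):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tx_time = 90000;	 /* hypothetical airtime value */
	uint32_t estimated_retx = 80000; /* hypothetical fixed-point retx */

	/* 90000 * 80000 = 7.2e9, which wraps past 2^32 (~4.29e9) */
	uint32_t wrapped = (tx_time * estimated_retx) >> 16;
	uint32_t correct = ((uint64_t)tx_time * estimated_retx) >> 16;

	printf("32-bit product: %u, 64-bit product: %u\n", wrapped, correct);
	return 0;
}
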
11387
11388 diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c
11389 index 997af345ce374..cb425e216d461 100644
11390 --- a/net/qrtr/tun.c
11391 +++ b/net/qrtr/tun.c
11392 @@ -31,6 +31,7 @@ static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
11393 static int qrtr_tun_open(struct inode *inode, struct file *filp)
11394 {
11395 struct qrtr_tun *tun;
11396 + int ret;
11397
11398 tun = kzalloc(sizeof(*tun), GFP_KERNEL);
11399 if (!tun)
11400 @@ -43,7 +44,16 @@ static int qrtr_tun_open(struct inode *inode, struct file *filp)
11401
11402 filp->private_data = tun;
11403
11404 - return qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
11405 + ret = qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
11406 + if (ret)
11407 + goto out;
11408 +
11409 + return 0;
11410 +
11411 +out:
11412 + filp->private_data = NULL;
11413 + kfree(tun);
11414 + return ret;
11415 }
11416
11417 static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct iov_iter *to)
11418 diff --git a/net/sched/act_api.c b/net/sched/act_api.c
11419 index 1dc642b11443c..43c10a85e8813 100644
11420 --- a/net/sched/act_api.c
11421 +++ b/net/sched/act_api.c
11422 @@ -823,7 +823,7 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
11423 [TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
11424 };
11425
11426 -static void tcf_idr_insert_many(struct tc_action *actions[])
11427 +void tcf_idr_insert_many(struct tc_action *actions[])
11428 {
11429 int i;
11430
11431 diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
11432 index d7604417367d3..83e5a8aa2fb11 100644
11433 --- a/net/sched/cls_api.c
11434 +++ b/net/sched/cls_api.c
11435 @@ -3026,6 +3026,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
11436 act->type = exts->type = TCA_OLD_COMPAT;
11437 exts->actions[0] = act;
11438 exts->nr_actions = 1;
11439 + tcf_idr_insert_many(exts->actions);
11440 } else if (exts->action && tb[exts->action]) {
11441 int err;
11442
11443 diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
11444 index 00af31d3e7744..01c65f96d2832 100644
11445 --- a/net/xfrm/xfrm_interface.c
11446 +++ b/net/xfrm/xfrm_interface.c
11447 @@ -300,10 +300,10 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
11448 if (mtu < IPV6_MIN_MTU)
11449 mtu = IPV6_MIN_MTU;
11450
11451 - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
11452 + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
11453 } else {
11454 - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
11455 - htonl(mtu));
11456 + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
11457 + htonl(mtu));
11458 }
11459
11460 dst_release(dst);
11461 diff --git a/security/commoncap.c b/security/commoncap.c
11462 index 28a6939bcc4e5..ed89a6dd4f83d 100644
11463 --- a/security/commoncap.c
11464 +++ b/security/commoncap.c
11465 @@ -500,7 +500,8 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
11466 __u32 magic, nsmagic;
11467 struct inode *inode = d_backing_inode(dentry);
11468 struct user_namespace *task_ns = current_user_ns(),
11469 - *fs_ns = inode->i_sb->s_user_ns;
11470 + *fs_ns = inode->i_sb->s_user_ns,
11471 + *ancestor;
11472 kuid_t rootid;
11473 size_t newsize;
11474
11475 @@ -523,6 +524,15 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
11476 if (nsrootid == -1)
11477 return -EINVAL;
11478
11479 + /*
11480 + * Do not allow adding a v3 filesystem capability xattr
11481 + * if the rootid field is ambiguous.
11482 + */
11483 + for (ancestor = task_ns->parent; ancestor; ancestor = ancestor->parent) {
11484 + if (from_kuid(ancestor, rootid) == 0)
11485 + return -EINVAL;
11486 + }
11487 +
11488 newsize = sizeof(struct vfs_ns_cap_data);
11489 nscap = kmalloc(newsize, GFP_ATOMIC);
11490 if (!nscap)
11491 diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
11492 index ee6bd945f3d6a..25dac691491b1 100644
11493 --- a/security/integrity/evm/evm_crypto.c
11494 +++ b/security/integrity/evm/evm_crypto.c
11495 @@ -75,7 +75,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo)
11496 {
11497 long rc;
11498 const char *algo;
11499 - struct crypto_shash **tfm, *tmp_tfm;
11500 + struct crypto_shash **tfm, *tmp_tfm = NULL;
11501 struct shash_desc *desc;
11502
11503 if (type == EVM_XATTR_HMAC) {
11504 @@ -120,13 +120,16 @@ unlock:
11505 alloc:
11506 desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
11507 GFP_KERNEL);
11508 - if (!desc)
11509 + if (!desc) {
11510 + crypto_free_shash(tmp_tfm);
11511 return ERR_PTR(-ENOMEM);
11512 + }
11513
11514 desc->tfm = *tfm;
11515
11516 rc = crypto_shash_init(desc);
11517 if (rc) {
11518 + crypto_free_shash(tmp_tfm);
11519 kfree(desc);
11520 return ERR_PTR(rc);
11521 }
11522 diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
11523 index 9e94eca48b898..955e4b4d09e21 100644
11524 --- a/security/integrity/ima/ima_kexec.c
11525 +++ b/security/integrity/ima/ima_kexec.c
11526 @@ -120,6 +120,7 @@ void ima_add_kexec_buffer(struct kimage *image)
11527 ret = kexec_add_buffer(&kbuf);
11528 if (ret) {
11529 pr_err("Error passing over kexec measurement buffer.\n");
11530 + vfree(kexec_buffer);
11531 return;
11532 }
11533
11534 @@ -129,6 +130,8 @@ void ima_add_kexec_buffer(struct kimage *image)
11535 return;
11536 }
11537
11538 + image->ima_buffer = kexec_buffer;
11539 +
11540 pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
11541 kbuf.mem);
11542 }
11543 diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
11544 index 36cadadbfba47..1e5c019161738 100644
11545 --- a/security/integrity/ima/ima_mok.c
11546 +++ b/security/integrity/ima/ima_mok.c
11547 @@ -38,13 +38,12 @@ __init int ima_mok_init(void)
11548 (KEY_POS_ALL & ~KEY_POS_SETATTR) |
11549 KEY_USR_VIEW | KEY_USR_READ |
11550 KEY_USR_WRITE | KEY_USR_SEARCH,
11551 - KEY_ALLOC_NOT_IN_QUOTA,
11552 + KEY_ALLOC_NOT_IN_QUOTA |
11553 + KEY_ALLOC_SET_KEEP,
11554 restriction, NULL);
11555
11556 if (IS_ERR(ima_blacklist_keyring))
11557 panic("Can't allocate IMA blacklist keyring.");
11558 -
11559 - set_bit(KEY_FLAG_KEEP, &ima_blacklist_keyring->flags);
11560 return 0;
11561 }
11562 device_initcall(ima_mok_init);
11563 diff --git a/security/keys/key.c b/security/keys/key.c
11564 index e9845d0d8d349..623fcb4094dd4 100644
11565 --- a/security/keys/key.c
11566 +++ b/security/keys/key.c
11567 @@ -302,6 +302,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
11568 key->flags |= 1 << KEY_FLAG_BUILTIN;
11569 if (flags & KEY_ALLOC_UID_KEYRING)
11570 key->flags |= 1 << KEY_FLAG_UID_KEYRING;
11571 + if (flags & KEY_ALLOC_SET_KEEP)
11572 + key->flags |= 1 << KEY_FLAG_KEEP;
11573
11574 #ifdef KEY_DEBUGGING
11575 key->magic = KEY_DEBUG_MAGIC;
11576 diff --git a/security/keys/trusted.c b/security/keys/trusted.c
11577 index 36afc29aecc3b..92a14ab82f72f 100644
11578 --- a/security/keys/trusted.c
11579 +++ b/security/keys/trusted.c
11580 @@ -805,7 +805,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
11581 case Opt_migratable:
11582 if (*args[0].from == '0')
11583 pay->migratable = 0;
11584 - else
11585 + else if (*args[0].from != '1')
11586 return -EINVAL;
11587 break;
11588 case Opt_pcrlock:
11589 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
11590 index b3667a5efdc1f..7f9f6bbca5489 100644
11591 --- a/sound/pci/hda/hda_intel.c
11592 +++ b/sound/pci/hda/hda_intel.c
11593 @@ -2447,6 +2447,8 @@ static const struct pci_device_id azx_ids[] = {
11594 /* CometLake-H */
11595 { PCI_DEVICE(0x8086, 0x06C8),
11596 .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
11597 + { PCI_DEVICE(0x8086, 0xf1c8),
11598 + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
11599 /* CometLake-S */
11600 { PCI_DEVICE(0x8086, 0xa3f0),
11601 .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
11602 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
11603 index f548bd48bf729..a132fe4537a55 100644
11604 --- a/sound/pci/hda/patch_realtek.c
11605 +++ b/sound/pci/hda/patch_realtek.c
11606 @@ -1895,6 +1895,7 @@ enum {
11607 ALC889_FIXUP_FRONT_HP_NO_PRESENCE,
11608 ALC889_FIXUP_VAIO_TT,
11609 ALC888_FIXUP_EEE1601,
11610 + ALC886_FIXUP_EAPD,
11611 ALC882_FIXUP_EAPD,
11612 ALC883_FIXUP_EAPD,
11613 ALC883_FIXUP_ACER_EAPD,
11614 @@ -2228,6 +2229,15 @@ static const struct hda_fixup alc882_fixups[] = {
11615 { }
11616 }
11617 },
11618 + [ALC886_FIXUP_EAPD] = {
11619 + .type = HDA_FIXUP_VERBS,
11620 + .v.verbs = (const struct hda_verb[]) {
11621 + /* change to EAPD mode */
11622 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
11623 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0068 },
11624 + { }
11625 + }
11626 + },
11627 [ALC882_FIXUP_EAPD] = {
11628 .type = HDA_FIXUP_VERBS,
11629 .v.verbs = (const struct hda_verb[]) {
11630 @@ -2500,6 +2510,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
11631 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
11632
11633 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
11634 + SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
11635 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
11636 SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
11637 SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
11638 diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c
11639 index d7f05b384f1fb..1902689c5ea2c 100644
11640 --- a/sound/soc/codecs/cpcap.c
11641 +++ b/sound/soc/codecs/cpcap.c
11642 @@ -1263,12 +1263,12 @@ static int cpcap_voice_hw_params(struct snd_pcm_substream *substream,
11643
11644 if (direction == SNDRV_PCM_STREAM_CAPTURE) {
11645 mask = 0x0000;
11646 - mask |= CPCAP_BIT_MIC1_RX_TIMESLOT0;
11647 - mask |= CPCAP_BIT_MIC1_RX_TIMESLOT1;
11648 - mask |= CPCAP_BIT_MIC1_RX_TIMESLOT2;
11649 - mask |= CPCAP_BIT_MIC2_TIMESLOT0;
11650 - mask |= CPCAP_BIT_MIC2_TIMESLOT1;
11651 - mask |= CPCAP_BIT_MIC2_TIMESLOT2;
11652 + mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0);
11653 + mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT1);
11654 + mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT2);
11655 + mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT0);
11656 + mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT1);
11657 + mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT2);
11658 val = 0x0000;
11659 if (channels >= 2)
11660 val = BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0);
11661 diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
11662 index ac569ab3d30f4..51d7a87ab4c3b 100644
11663 --- a/sound/soc/codecs/cs42l56.c
11664 +++ b/sound/soc/codecs/cs42l56.c
11665 @@ -1248,6 +1248,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
11666 dev_err(&i2c_client->dev,
11667 "CS42L56 Device ID (%X). Expected %X\n",
11668 devid, CS42L56_DEVID);
11669 + ret = -EINVAL;
11670 goto err_enable;
11671 }
11672 alpha_rev = reg & CS42L56_AREV_MASK;
11673 @@ -1305,7 +1306,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
11674 ret = devm_snd_soc_register_component(&i2c_client->dev,
11675 &soc_component_dev_cs42l56, &cs42l56_dai, 1);
11676 if (ret < 0)
11677 - return ret;
11678 + goto err_enable;
11679
11680 return 0;
11681
11682 diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
11683 index 9b794775df537..edad6721251f4 100644
11684 --- a/sound/soc/generic/simple-card-utils.c
11685 +++ b/sound/soc/generic/simple-card-utils.c
11686 @@ -172,16 +172,15 @@ int asoc_simple_parse_clk(struct device *dev,
11687 * or device's module clock.
11688 */
11689 clk = devm_get_clk_from_child(dev, node, NULL);
11690 - if (!IS_ERR(clk)) {
11691 - simple_dai->sysclk = clk_get_rate(clk);
11692 + if (IS_ERR(clk))
11693 + clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
11694
11695 + if (!IS_ERR(clk)) {
11696 simple_dai->clk = clk;
11697 - } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
11698 + simple_dai->sysclk = clk_get_rate(clk);
11699 + } else if (!of_property_read_u32(node, "system-clock-frequency",
11700 + &val)) {
11701 simple_dai->sysclk = val;
11702 - } else {
11703 - clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
11704 - if (!IS_ERR(clk))
11705 - simple_dai->sysclk = clk_get_rate(clk);
11706 }
11707
11708 if (of_property_read_bool(node, "system-clock-direction-out"))
11709 diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
11710 index 5529e8eeca462..08726034ff090 100644
11711 --- a/sound/soc/sof/debug.c
11712 +++ b/sound/soc/sof/debug.c
11713 @@ -135,7 +135,7 @@ static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer,
11714 char *string;
11715 int ret;
11716
11717 - string = kzalloc(count, GFP_KERNEL);
11718 + string = kzalloc(count+1, GFP_KERNEL);
11719 if (!string)
11720 return -ENOMEM;
11721
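
The SOF debugfs fix above allocates count + 1 bytes so that the user-supplied buffer stays NUL-terminated after copy_from_user() fills the first count bytes (kzalloc() zeroes the extra byte). The general pattern, as a hedged standalone helper (copy_user_string is a made-up name; the kernel's own memdup_user_nul() serves the same purpose):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static char *copy_user_string(const char __user *buffer, size_t count)
{
	char *s = kzalloc(count + 1, GFP_KERNEL); /* +1 for the NUL */

	if (!s)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(s, buffer, count)) {
		kfree(s);
		return ERR_PTR(-EFAULT);
	}
	return s; /* s[count] is already zero, so parsing is safe */
}
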
11722 diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
11723 index 1a5e555002b2b..75218539fb107 100644
11724 --- a/sound/usb/pcm.c
11725 +++ b/sound/usb/pcm.c
11726 @@ -1885,7 +1885,7 @@ void snd_usb_preallocate_buffer(struct snd_usb_substream *subs)
11727 {
11728 struct snd_pcm *pcm = subs->stream->pcm;
11729 struct snd_pcm_substream *s = pcm->streams[subs->direction].substream;
11730 - struct device *dev = subs->dev->bus->controller;
11731 + struct device *dev = subs->dev->bus->sysdev;
11732
11733 if (!snd_usb_use_vmalloc)
11734 snd_pcm_lib_preallocate_pages(s, SNDRV_DMA_TYPE_DEV_SG,
11735 diff --git a/tools/objtool/check.c b/tools/objtool/check.c
11736 index 1b7e748170e54..06aaf04e629c2 100644
11737 --- a/tools/objtool/check.c
11738 +++ b/tools/objtool/check.c
11739 @@ -626,8 +626,8 @@ static int add_jump_destinations(struct objtool_file *file)
11740 * case where the parent function's only reference to a
11741 * subfunction is through a jump table.
11742 */
11743 - if (!strstr(insn->func->name, ".cold.") &&
11744 - strstr(insn->jump_dest->func->name, ".cold.")) {
11745 + if (!strstr(insn->func->name, ".cold") &&
11746 + strstr(insn->jump_dest->func->name, ".cold")) {
11747 insn->func->cfunc = insn->jump_dest->func;
11748 insn->jump_dest->func->pfunc = insn->func;
11749
11750 @@ -2192,15 +2192,19 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
11751 break;
11752
11753 case INSN_STD:
11754 - if (state.df)
11755 + if (state.df) {
11756 WARN_FUNC("recursive STD", sec, insn->offset);
11757 + return 1;
11758 + }
11759
11760 state.df = true;
11761 break;
11762
11763 case INSN_CLD:
11764 - if (!state.df && func)
11765 + if (!state.df && func) {
11766 WARN_FUNC("redundant CLD", sec, insn->offset);
11767 + return 1;
11768 + }
11769
11770 state.df = false;
11771 break;
11772 diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
11773 index df9201434cb6a..b0a10a219b50d 100644
11774 --- a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
11775 +++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
11776 @@ -114,7 +114,7 @@
11777 "PublicDescription": "Level 2 access to instruciton TLB that caused a page table walk. This event counts on any instruciton access which causes L2I_TLB_REFILL to count",
11778 "EventCode": "0x35",
11779 "EventName": "L2I_TLB_ACCESS",
11780 - "BriefDescription": "L2D TLB access"
11781 + "BriefDescription": "L2I TLB access"
11782 },
11783 {
11784 "PublicDescription": "Branch target buffer misprediction",
11785 diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
11786 index 3a02426db9a63..2f76d4a9de860 100644
11787 --- a/tools/perf/tests/sample-parsing.c
11788 +++ b/tools/perf/tests/sample-parsing.c
11789 @@ -180,7 +180,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
11790 .data = {1, 211, 212, 213},
11791 };
11792 u64 regs[64];
11793 - const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
11794 + const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 };
11795 const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
11796 struct perf_sample sample = {
11797 .ip = 101,
11798 diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
11799 index fc1e5a991008d..bfaa9afdb8b4c 100644
11800 --- a/tools/perf/util/event.c
11801 +++ b/tools/perf/util/event.c
11802 @@ -597,6 +597,8 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
11803 }
11804
11805 al->sym = map__find_symbol(al->map, al->addr);
11806 + } else if (symbol_conf.dso_list) {
11807 + al->filtered |= (1 << HIST_FILTER__DSO);
11808 }
11809
11810 if (symbol_conf.sym_list &&
11811 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
11812 index 7ffcbd6fcd1ae..7f53b63088b2c 100644
11813 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
11814 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
11815 @@ -1745,6 +1745,9 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
11816 break;
11817
11818 case INTEL_PT_CYC:
11819 + intel_pt_calc_cyc_timestamp(decoder);
11820 + break;
11821 +
11822 case INTEL_PT_VMCS:
11823 case INTEL_PT_MNT:
11824 case INTEL_PT_PAD:
11825 @@ -2634,9 +2637,18 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
11826 }
11827 if (intel_pt_sample_time(decoder->pkt_state)) {
11828 intel_pt_update_sample_time(decoder);
11829 - if (decoder->sample_cyc)
11830 + if (decoder->sample_cyc) {
11831 decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
11832 + decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
11833 + decoder->sample_cyc = false;
11834 + }
11835 }
11836 + /*
11837 + * When using only TSC/MTC to compute cycles, IPC can be
11838 + * sampled as soon as the cycle count changes.
11839 + */
11840 + if (!decoder->have_cyc)
11841 + decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
11842 }
11843
11844 decoder->state.timestamp = decoder->sample_timestamp;
11845 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
11846 index e289e463d635e..7396da0fa3a7c 100644
11847 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
11848 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
11849 @@ -17,6 +17,7 @@
11850 #define INTEL_PT_ABORT_TX (1 << 1)
11851 #define INTEL_PT_ASYNC (1 << 2)
11852 #define INTEL_PT_FUP_IP (1 << 3)
11853 +#define INTEL_PT_SAMPLE_IPC (1 << 4)
11854
11855 enum intel_pt_sample_type {
11856 INTEL_PT_BRANCH = 1 << 0,
11857 diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
11858 index 8aeaeba48a41f..d0e0ce11faf58 100644
11859 --- a/tools/perf/util/intel-pt.c
11860 +++ b/tools/perf/util/intel-pt.c
11861 @@ -1304,7 +1304,8 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
11862 sample.branch_stack = (struct branch_stack *)&dummy_bs;
11863 }
11864
11865 - sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
11866 + if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
11867 + sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
11868 if (sample.cyc_cnt) {
11869 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
11870 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
11871 @@ -1366,7 +1367,8 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
11872 sample.stream_id = ptq->pt->instructions_id;
11873 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
11874
11875 - sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
11876 + if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
11877 + sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
11878 if (sample.cyc_cnt) {
11879 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
11880 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
11881 @@ -1901,14 +1903,8 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
11882
11883 ptq->have_sample = false;
11884
11885 - if (ptq->state->tot_cyc_cnt > ptq->ipc_cyc_cnt) {
11886 - /*
11887 - * Cycle count and instruction count only go together to create
11888 - * a valid IPC ratio when the cycle count changes.
11889 - */
11890 - ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
11891 - ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
11892 - }
11893 + ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
11894 + ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
11895
11896 /*
11897 * Do PEBS first to allow for the possibility that the PEBS timestamp
11898 diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
11899 index cf001a2c69420..7c2cb04569dab 100755
11900 --- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
11901 +++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
11902 @@ -81,5 +81,5 @@ echo "$failed devices failed to recover ($dev_count tested)"
11903 lspci | diff -u $pre_lspci -
11904 rm -f $pre_lspci
11905
11906 -test "$failed" == 0
11907 +test "$failed" -eq 0
11908 exit $?