Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0121-5.4.22-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3502 - (show annotations) (download)
Mon May 11 14:36:23 2020 UTC (3 years, 11 months ago) by niro
File size: 430900 byte(s)
-linux-5.4.22
1 diff --git a/Documentation/fb/fbcon.rst b/Documentation/fb/fbcon.rst
2 index ebca41785abe..65ba40255137 100644
3 --- a/Documentation/fb/fbcon.rst
4 +++ b/Documentation/fb/fbcon.rst
5 @@ -127,7 +127,7 @@ C. Boot options
6 is typically located on the same video card. Thus, the consoles that
7 are controlled by the VGA console will be garbled.
8
9 -4. fbcon=rotate:<n>
10 +5. fbcon=rotate:<n>
11
12 This option changes the orientation angle of the console display. The
13 value 'n' accepts the following:
14 @@ -152,21 +152,21 @@ C. Boot options
15 Actually, the underlying fb driver is totally ignorant of console
16 rotation.
17
18 -5. fbcon=margin:<color>
19 +6. fbcon=margin:<color>
20
21 This option specifies the color of the margins. The margins are the
22 leftover area at the right and the bottom of the screen that are not
23 used by text. By default, this area will be black. The 'color' value
24 is an integer number that depends on the framebuffer driver being used.
25
26 -6. fbcon=nodefer
27 +7. fbcon=nodefer
28
29 If the kernel is compiled with deferred fbcon takeover support, normally
30 the framebuffer contents, left in place by the firmware/bootloader, will
31 be preserved until there actually is some text is output to the console.
32 This option causes fbcon to bind immediately to the fbdev device.
33
34 -7. fbcon=logo-pos:<location>
35 +8. fbcon=logo-pos:<location>
36
37 The only possible 'location' is 'center' (without quotes), and when
38 given, the bootup logo is moved from the default top-left corner
39 diff --git a/Makefile b/Makefile
40 index adfc88f00f07..9428ec3b611a 100644
41 --- a/Makefile
42 +++ b/Makefile
43 @@ -1,7 +1,7 @@
44 # SPDX-License-Identifier: GPL-2.0
45 VERSION = 5
46 PATCHLEVEL = 4
47 -SUBLEVEL = 21
48 +SUBLEVEL = 22
49 EXTRAVERSION =
50 NAME = Kleptomaniac Octopus
51
52 diff --git a/arch/Kconfig b/arch/Kconfig
53 index 43102756304c..238dccfa7691 100644
54 --- a/arch/Kconfig
55 +++ b/arch/Kconfig
56 @@ -399,6 +399,9 @@ config HAVE_RCU_TABLE_FREE
57 config HAVE_MMU_GATHER_PAGE_SIZE
58 bool
59
60 +config MMU_GATHER_NO_RANGE
61 + bool
62 +
63 config HAVE_MMU_GATHER_NO_GATHER
64 bool
65
66 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
67 index 39002d769d95..05c9bbfe444d 100644
68 --- a/arch/arm/Kconfig
69 +++ b/arch/arm/Kconfig
70 @@ -75,7 +75,7 @@ config ARM
71 select HAVE_CONTEXT_TRACKING
72 select HAVE_COPY_THREAD_TLS
73 select HAVE_C_RECORDMCOUNT
74 - select HAVE_DEBUG_KMEMLEAK
75 + select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL
76 select HAVE_DMA_CONTIGUOUS if MMU
77 select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
78 select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
79 @@ -1907,7 +1907,7 @@ config XIP_DEFLATED_DATA
80 config KEXEC
81 bool "Kexec system call (EXPERIMENTAL)"
82 depends on (!SMP || PM_SLEEP_SMP)
83 - depends on !CPU_V7M
84 + depends on MMU
85 select KEXEC_CORE
86 help
87 kexec is a system call that implements the ability to shutdown your
88 diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
89 index 93be00a60c88..a66c4fac6baf 100644
90 --- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
91 +++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
92 @@ -627,7 +627,7 @@
93 pinctrl-0 = <&pinctrl_usdhc2>;
94 bus-width = <4>;
95 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
96 - wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
97 + disable-wp;
98 vmmc-supply = <&reg_3p3v_sd>;
99 vqmmc-supply = <&reg_3p3v>;
100 no-1-8-v;
101 @@ -640,7 +640,7 @@
102 pinctrl-0 = <&pinctrl_usdhc3>;
103 bus-width = <4>;
104 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
105 - wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
106 + disable-wp;
107 vmmc-supply = <&reg_3p3v_sd>;
108 vqmmc-supply = <&reg_3p3v>;
109 no-1-8-v;
110 @@ -774,6 +774,7 @@
111 &usbh1 {
112 vbus-supply = <&reg_5p0v_main>;
113 disable-over-current;
114 + maximum-speed = "full-speed";
115 status = "okay";
116 };
117
118 @@ -1055,7 +1056,6 @@
119 MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
120 MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
121 MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
122 - MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x40010040
123 MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x40010040
124 >;
125 };
126 @@ -1068,7 +1068,6 @@
127 MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
128 MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
129 MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
130 - MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x40010040
131 MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x40010040
132
133 >;
134 diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
135 index ebf5b7cfe215..63341635bddf 100644
136 --- a/arch/arm/boot/dts/r8a7779.dtsi
137 +++ b/arch/arm/boot/dts/r8a7779.dtsi
138 @@ -68,6 +68,14 @@
139 <0xf0000100 0x100>;
140 };
141
142 + timer@f0000200 {
143 + compatible = "arm,cortex-a9-global-timer";
144 + reg = <0xf0000200 0x100>;
145 + interrupts = <GIC_PPI 11
146 + (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
147 + clocks = <&cpg_clocks R8A7779_CLK_ZS>;
148 + };
149 +
150 timer@f0000600 {
151 compatible = "arm,cortex-a9-twd-timer";
152 reg = <0xf0000600 0x20>;
153 diff --git a/arch/arm/boot/dts/rk3188-bqedison2qc.dts b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
154 index c8b62bbd6a4a..ad1afd403052 100644
155 --- a/arch/arm/boot/dts/rk3188-bqedison2qc.dts
156 +++ b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
157 @@ -466,9 +466,12 @@
158 pinctrl-names = "default";
159 pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_bus4>;
160 vmmcq-supply = <&vccio_wl>;
161 + #address-cells = <1>;
162 + #size-cells = <0>;
163 status = "okay";
164
165 brcmf: wifi@1 {
166 + reg = <1>;
167 compatible = "brcm,bcm4329-fmac";
168 interrupt-parent = <&gpio3>;
169 interrupts = <RK_PD2 GPIO_ACTIVE_HIGH>;
170 diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts
171 index a3ff04940aec..c6dc6d1a051b 100644
172 --- a/arch/arm/boot/dts/stm32f469-disco.dts
173 +++ b/arch/arm/boot/dts/stm32f469-disco.dts
174 @@ -76,6 +76,13 @@
175 regulator-max-microvolt = <3300000>;
176 };
177
178 + vdd_dsi: vdd-dsi {
179 + compatible = "regulator-fixed";
180 + regulator-name = "vdd_dsi";
181 + regulator-min-microvolt = <3300000>;
182 + regulator-max-microvolt = <3300000>;
183 + };
184 +
185 soc {
186 dma-ranges = <0xc0000000 0x0 0x10000000>;
187 };
188 @@ -155,6 +162,7 @@
189 compatible = "orisetech,otm8009a";
190 reg = <0>; /* dsi virtual channel (0..3) */
191 reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>;
192 + power-supply = <&vdd_dsi>;
193 status = "okay";
194
195 port {
196 diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
197 index e37c30e811d3..6056f206c9e3 100644
198 --- a/arch/arm/boot/dts/sun8i-h3.dtsi
199 +++ b/arch/arm/boot/dts/sun8i-h3.dtsi
200 @@ -80,7 +80,7 @@
201 #cooling-cells = <2>;
202 };
203
204 - cpu@1 {
205 + cpu1: cpu@1 {
206 compatible = "arm,cortex-a7";
207 device_type = "cpu";
208 reg = <1>;
209 @@ -90,7 +90,7 @@
210 #cooling-cells = <2>;
211 };
212
213 - cpu@2 {
214 + cpu2: cpu@2 {
215 compatible = "arm,cortex-a7";
216 device_type = "cpu";
217 reg = <2>;
218 @@ -100,7 +100,7 @@
219 #cooling-cells = <2>;
220 };
221
222 - cpu@3 {
223 + cpu3: cpu@3 {
224 compatible = "arm,cortex-a7";
225 device_type = "cpu";
226 reg = <3>;
227 @@ -111,6 +111,15 @@
228 };
229 };
230
231 + pmu {
232 + compatible = "arm,cortex-a7-pmu";
233 + interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
234 + <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
235 + <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
236 + <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
237 + interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
238 + };
239 +
240 timer {
241 compatible = "arm,armv7-timer";
242 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
243 diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
244 index 736ed7a7bcf8..34d4acbcee34 100644
245 --- a/arch/arm/configs/exynos_defconfig
246 +++ b/arch/arm/configs/exynos_defconfig
247 @@ -38,6 +38,7 @@ CONFIG_CRYPTO_SHA256_ARM=m
248 CONFIG_CRYPTO_SHA512_ARM=m
249 CONFIG_CRYPTO_AES_ARM_BS=m
250 CONFIG_CRYPTO_CHACHA20_NEON=m
251 +CONFIG_KALLSYMS_ALL=y
252 CONFIG_MODULES=y
253 CONFIG_MODULE_UNLOAD=y
254 CONFIG_PARTITION_ADVANCED=y
255 @@ -92,6 +93,7 @@ CONFIG_BLK_DEV_LOOP=y
256 CONFIG_BLK_DEV_CRYPTOLOOP=y
257 CONFIG_BLK_DEV_RAM=y
258 CONFIG_BLK_DEV_RAM_SIZE=8192
259 +CONFIG_SCSI=y
260 CONFIG_BLK_DEV_SD=y
261 CONFIG_CHR_DEV_SG=y
262 CONFIG_ATA=y
263 @@ -290,6 +292,7 @@ CONFIG_CROS_EC_SPI=y
264 CONFIG_COMMON_CLK_MAX77686=y
265 CONFIG_COMMON_CLK_S2MPS11=y
266 CONFIG_EXYNOS_IOMMU=y
267 +CONFIG_PM_DEVFREQ=y
268 CONFIG_DEVFREQ_GOV_PERFORMANCE=y
269 CONFIG_DEVFREQ_GOV_POWERSAVE=y
270 CONFIG_DEVFREQ_GOV_USERSPACE=y
271 @@ -354,4 +357,7 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
272 # CONFIG_DETECT_HUNG_TASK is not set
273 CONFIG_PROVE_LOCKING=y
274 CONFIG_DEBUG_ATOMIC_SLEEP=y
275 +CONFIG_DEBUG_RT_MUTEXES=y
276 +CONFIG_DEBUG_SPINLOCK=y
277 +CONFIG_DEBUG_MUTEXES=y
278 CONFIG_DEBUG_USER=y
279 diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
280 index f002a496d7cb..1d34e3eefda3 100644
281 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
282 +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
283 @@ -54,21 +54,21 @@
284 enable-method = "psci";
285 };
286
287 - cpu@1 {
288 + cpu1: cpu@1 {
289 compatible = "arm,cortex-a53";
290 device_type = "cpu";
291 reg = <1>;
292 enable-method = "psci";
293 };
294
295 - cpu@2 {
296 + cpu2: cpu@2 {
297 compatible = "arm,cortex-a53";
298 device_type = "cpu";
299 reg = <2>;
300 enable-method = "psci";
301 };
302
303 - cpu@3 {
304 + cpu3: cpu@3 {
305 compatible = "arm,cortex-a53";
306 device_type = "cpu";
307 reg = <3>;
308 @@ -76,6 +76,16 @@
309 };
310 };
311
312 + pmu {
313 + compatible = "arm,cortex-a53-pmu",
314 + "arm,armv8-pmuv3";
315 + interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
316 + <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
317 + <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
318 + <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
319 + interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
320 + };
321 +
322 psci {
323 compatible = "arm,psci-0.2";
324 method = "smc";
325 diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
326 index 0d5ea19336a1..d19253891672 100644
327 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
328 +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
329 @@ -70,6 +70,16 @@
330 clock-output-names = "ext_osc32k";
331 };
332
333 + pmu {
334 + compatible = "arm,cortex-a53-pmu",
335 + "arm,armv8-pmuv3";
336 + interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
337 + <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
338 + <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
339 + <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
340 + interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
341 + };
342 +
343 psci {
344 compatible = "arm,psci-0.2";
345 method = "smc";
346 diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
347 index 87f4d9c1b0d4..fbb8ce78f95b 100644
348 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
349 +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
350 @@ -1598,6 +1598,8 @@
351 interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
352 phys = <&hsusb_phy2>;
353 phy-names = "usb2-phy";
354 + snps,dis_u2_susphy_quirk;
355 + snps,dis_enblslpm_quirk;
356 };
357 };
358
359 @@ -1628,6 +1630,8 @@
360 interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
361 phys = <&hsusb_phy1>, <&ssusb_phy_0>;
362 phy-names = "usb2-phy", "usb3-phy";
363 + snps,dis_u2_susphy_quirk;
364 + snps,dis_enblslpm_quirk;
365 };
366 };
367
368 diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
369 index f5a85caff1a3..751651a6cd81 100644
370 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
371 +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
372 @@ -517,6 +517,8 @@
373 vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
374 vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
375 vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
376 +
377 + qcom,snoc-host-cap-8bit-quirk;
378 };
379
380 /* PINCTRL - additions to nodes defined in sdm845.dtsi */
381 diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
382 index eb992d60e6ba..9e09909a510a 100644
383 --- a/arch/arm64/boot/dts/rockchip/px30.dtsi
384 +++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
385 @@ -768,7 +768,7 @@
386 interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
387 clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
388 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
389 - clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
390 + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
391 fifo-depth = <0x100>;
392 max-frequency = <150000000>;
393 pinctrl-names = "default";
394 @@ -783,7 +783,7 @@
395 interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
396 clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
397 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
398 - clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
399 + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
400 fifo-depth = <0x100>;
401 max-frequency = <150000000>;
402 pinctrl-names = "default";
403 @@ -798,7 +798,7 @@
404 interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
405 clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
406 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
407 - clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
408 + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
409 fifo-depth = <0x100>;
410 max-frequency = <150000000>;
411 power-domains = <&power PX30_PD_MMC_NAND>;
412 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
413 index c706db0ee9ec..76f5db696009 100644
414 --- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
415 +++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
416 @@ -669,9 +669,12 @@
417 vqmmc-supply = &vcc1v8_s3; /* IO line */
418 vmmc-supply = &vcc_sdio; /* card's power */
419
420 + #address-cells = <1>;
421 + #size-cells = <0>;
422 status = "okay";
423
424 brcmf: wifi@1 {
425 + reg = <1>;
426 compatible = "brcm,bcm4329-fmac";
427 interrupt-parent = <&gpio0>;
428 interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
429 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
430 index 4944d78a0a1c..e87a04477440 100644
431 --- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
432 +++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
433 @@ -654,9 +654,12 @@
434 sd-uhs-sdr104;
435 vqmmc-supply = <&vcc1v8_s3>;
436 vmmc-supply = <&vccio_sd>;
437 + #address-cells = <1>;
438 + #size-cells = <0>;
439 status = "okay";
440
441 brcmf: wifi@1 {
442 + reg = <1>;
443 compatible = "brcm,bcm4329-fmac";
444 interrupt-parent = <&gpio0>;
445 interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
446 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
447 index 2a127985ab17..d3ed8e5e770f 100644
448 --- a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
449 +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
450 @@ -94,33 +94,6 @@
451 };
452 };
453
454 -&gpu_thermal {
455 - trips {
456 - gpu_warm: gpu_warm {
457 - temperature = <55000>;
458 - hysteresis = <2000>;
459 - type = "active";
460 - };
461 -
462 - gpu_hot: gpu_hot {
463 - temperature = <65000>;
464 - hysteresis = <2000>;
465 - type = "active";
466 - };
467 - };
468 - cooling-maps {
469 - map1 {
470 - trip = <&gpu_warm>;
471 - cooling-device = <&fan THERMAL_NO_LIMIT 1>;
472 - };
473 -
474 - map2 {
475 - trip = <&gpu_hot>;
476 - cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
477 - };
478 - };
479 -};
480 -
481 &pinctrl {
482 ir {
483 ir_rx: ir-rx {
484 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
485 index 0541dfce924d..9c659f3115c8 100644
486 --- a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
487 +++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
488 @@ -648,9 +648,12 @@
489 pinctrl-names = "default";
490 pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
491 sd-uhs-sdr104;
492 + #address-cells = <1>;
493 + #size-cells = <0>;
494 status = "okay";
495
496 brcmf: wifi@1 {
497 + reg = <1>;
498 compatible = "brcm,bcm4329-fmac";
499 interrupt-parent = <&gpio0>;
500 interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
501 diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
502 index 698ef9a1d5b7..96445111e398 100644
503 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
504 +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
505 @@ -43,6 +43,7 @@
506 smmu0: smmu@36600000 {
507 compatible = "arm,smmu-v3";
508 reg = <0x0 0x36600000 0x0 0x100000>;
509 + power-domains = <&k3_pds 229 TI_SCI_PD_EXCLUSIVE>;
510 interrupt-parent = <&gic500>;
511 interrupts = <GIC_SPI 772 IRQ_TYPE_EDGE_RISING>,
512 <GIC_SPI 768 IRQ_TYPE_EDGE_RISING>;
513 diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
514 index b9f8d787eea9..324e7d5ab37e 100644
515 --- a/arch/arm64/include/asm/alternative.h
516 +++ b/arch/arm64/include/asm/alternative.h
517 @@ -35,13 +35,16 @@ void apply_alternatives_module(void *start, size_t length);
518 static inline void apply_alternatives_module(void *start, size_t length) { }
519 #endif
520
521 -#define ALTINSTR_ENTRY(feature,cb) \
522 +#define ALTINSTR_ENTRY(feature) \
523 " .word 661b - .\n" /* label */ \
524 - " .if " __stringify(cb) " == 0\n" \
525 " .word 663f - .\n" /* new instruction */ \
526 - " .else\n" \
527 + " .hword " __stringify(feature) "\n" /* feature bit */ \
528 + " .byte 662b-661b\n" /* source len */ \
529 + " .byte 664f-663f\n" /* replacement len */
530 +
531 +#define ALTINSTR_ENTRY_CB(feature, cb) \
532 + " .word 661b - .\n" /* label */ \
533 " .word " __stringify(cb) "- .\n" /* callback */ \
534 - " .endif\n" \
535 " .hword " __stringify(feature) "\n" /* feature bit */ \
536 " .byte 662b-661b\n" /* source len */ \
537 " .byte 664f-663f\n" /* replacement len */
538 @@ -62,15 +65,14 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
539 *
540 * Alternatives with callbacks do not generate replacement instructions.
541 */
542 -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \
543 +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
544 ".if "__stringify(cfg_enabled)" == 1\n" \
545 "661:\n\t" \
546 oldinstr "\n" \
547 "662:\n" \
548 ".pushsection .altinstructions,\"a\"\n" \
549 - ALTINSTR_ENTRY(feature,cb) \
550 + ALTINSTR_ENTRY(feature) \
551 ".popsection\n" \
552 - " .if " __stringify(cb) " == 0\n" \
553 ".pushsection .altinstr_replacement, \"a\"\n" \
554 "663:\n\t" \
555 newinstr "\n" \
556 @@ -78,17 +80,25 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
557 ".popsection\n\t" \
558 ".org . - (664b-663b) + (662b-661b)\n\t" \
559 ".org . - (662b-661b) + (664b-663b)\n" \
560 - ".else\n\t" \
561 + ".endif\n"
562 +
563 +#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
564 + ".if "__stringify(cfg_enabled)" == 1\n" \
565 + "661:\n\t" \
566 + oldinstr "\n" \
567 + "662:\n" \
568 + ".pushsection .altinstructions,\"a\"\n" \
569 + ALTINSTR_ENTRY_CB(feature, cb) \
570 + ".popsection\n" \
571 "663:\n\t" \
572 "664:\n\t" \
573 - ".endif\n" \
574 ".endif\n"
575
576 #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
577 - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
578 + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
579
580 #define ALTERNATIVE_CB(oldinstr, cb) \
581 - __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
582 + __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
583 #else
584
585 #include <asm/assembler.h>
586 diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
587 index 574808b9df4c..da3280f639cd 100644
588 --- a/arch/arm64/include/asm/atomic_lse.h
589 +++ b/arch/arm64/include/asm/atomic_lse.h
590 @@ -14,6 +14,7 @@
591 static inline void __lse_atomic_##op(int i, atomic_t *v) \
592 { \
593 asm volatile( \
594 + __LSE_PREAMBLE \
595 " " #asm_op " %w[i], %[v]\n" \
596 : [i] "+r" (i), [v] "+Q" (v->counter) \
597 : "r" (v)); \
598 @@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd)
599 static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
600 { \
601 asm volatile( \
602 + __LSE_PREAMBLE \
603 " " #asm_op #mb " %w[i], %w[i], %[v]" \
604 : [i] "+r" (i), [v] "+Q" (v->counter) \
605 : "r" (v) \
606 @@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
607 u32 tmp; \
608 \
609 asm volatile( \
610 + __LSE_PREAMBLE \
611 " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
612 " add %w[i], %w[i], %w[tmp]" \
613 : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
614 @@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory")
615 static inline void __lse_atomic_and(int i, atomic_t *v)
616 {
617 asm volatile(
618 + __LSE_PREAMBLE
619 " mvn %w[i], %w[i]\n"
620 " stclr %w[i], %[v]"
621 : [i] "+&r" (i), [v] "+Q" (v->counter)
622 @@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v)
623 static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \
624 { \
625 asm volatile( \
626 + __LSE_PREAMBLE \
627 " mvn %w[i], %w[i]\n" \
628 " ldclr" #mb " %w[i], %w[i], %[v]" \
629 : [i] "+&r" (i), [v] "+Q" (v->counter) \
630 @@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
631 static inline void __lse_atomic_sub(int i, atomic_t *v)
632 {
633 asm volatile(
634 + __LSE_PREAMBLE
635 " neg %w[i], %w[i]\n"
636 " stadd %w[i], %[v]"
637 : [i] "+&r" (i), [v] "+Q" (v->counter)
638 @@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
639 u32 tmp; \
640 \
641 asm volatile( \
642 + __LSE_PREAMBLE \
643 " neg %w[i], %w[i]\n" \
644 " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
645 " add %w[i], %w[i], %w[tmp]" \
646 @@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
647 static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
648 { \
649 asm volatile( \
650 + __LSE_PREAMBLE \
651 " neg %w[i], %w[i]\n" \
652 " ldadd" #mb " %w[i], %w[i], %[v]" \
653 : [i] "+&r" (i), [v] "+Q" (v->counter) \
654 @@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")
655 static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
656 { \
657 asm volatile( \
658 + __LSE_PREAMBLE \
659 " " #asm_op " %[i], %[v]\n" \
660 : [i] "+r" (i), [v] "+Q" (v->counter) \
661 : "r" (v)); \
662 @@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd)
663 static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
664 { \
665 asm volatile( \
666 + __LSE_PREAMBLE \
667 " " #asm_op #mb " %[i], %[i], %[v]" \
668 : [i] "+r" (i), [v] "+Q" (v->counter) \
669 : "r" (v) \
670 @@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
671 unsigned long tmp; \
672 \
673 asm volatile( \
674 + __LSE_PREAMBLE \
675 " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
676 " add %[i], %[i], %x[tmp]" \
677 : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
678 @@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")
679 static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
680 {
681 asm volatile(
682 + __LSE_PREAMBLE
683 " mvn %[i], %[i]\n"
684 " stclr %[i], %[v]"
685 : [i] "+&r" (i), [v] "+Q" (v->counter)
686 @@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
687 static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
688 { \
689 asm volatile( \
690 + __LSE_PREAMBLE \
691 " mvn %[i], %[i]\n" \
692 " ldclr" #mb " %[i], %[i], %[v]" \
693 : [i] "+&r" (i), [v] "+Q" (v->counter) \
694 @@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
695 static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
696 {
697 asm volatile(
698 + __LSE_PREAMBLE
699 " neg %[i], %[i]\n"
700 " stadd %[i], %[v]"
701 : [i] "+&r" (i), [v] "+Q" (v->counter)
702 @@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
703 unsigned long tmp; \
704 \
705 asm volatile( \
706 + __LSE_PREAMBLE \
707 " neg %[i], %[i]\n" \
708 " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
709 " add %[i], %[i], %x[tmp]" \
710 @@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
711 static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
712 { \
713 asm volatile( \
714 + __LSE_PREAMBLE \
715 " neg %[i], %[i]\n" \
716 " ldadd" #mb " %[i], %[i], %[v]" \
717 : [i] "+&r" (i), [v] "+Q" (v->counter) \
718 @@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
719 unsigned long tmp;
720
721 asm volatile(
722 + __LSE_PREAMBLE
723 "1: ldr %x[tmp], %[v]\n"
724 " subs %[ret], %x[tmp], #1\n"
725 " b.lt 2f\n"
726 @@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
727 unsigned long tmp; \
728 \
729 asm volatile( \
730 + __LSE_PREAMBLE \
731 " mov %" #w "[tmp], %" #w "[old]\n" \
732 " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
733 " mov %" #w "[ret], %" #w "[tmp]" \
734 @@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \
735 register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
736 \
737 asm volatile( \
738 + __LSE_PREAMBLE \
739 " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
740 " eor %[old1], %[old1], %[oldval1]\n" \
741 " eor %[old2], %[old2], %[oldval2]\n" \
742 diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
743 index 80b388278149..73834996c4b6 100644
744 --- a/arch/arm64/include/asm/lse.h
745 +++ b/arch/arm64/include/asm/lse.h
746 @@ -6,6 +6,8 @@
747
748 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
749
750 +#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
751 +
752 #include <linux/compiler_types.h>
753 #include <linux/export.h>
754 #include <linux/jump_label.h>
755 @@ -14,8 +16,6 @@
756 #include <asm/atomic_lse.h>
757 #include <asm/cpucaps.h>
758
759 -__asm__(".arch_extension lse");
760 -
761 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
762 extern struct static_key_false arm64_const_caps_ready;
763
764 @@ -34,7 +34,7 @@ static inline bool system_uses_lse_atomics(void)
765
766 /* In-line patching at runtime */
767 #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
768 - ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
769 + ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
770
771 #else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
772
773 diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
774 index 0bde47e4fa69..dcba53803fa5 100644
775 --- a/arch/microblaze/kernel/cpu/cache.c
776 +++ b/arch/microblaze/kernel/cpu/cache.c
777 @@ -92,7 +92,8 @@ static inline void __disable_dcache_nomsr(void)
778 #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
779 do { \
780 int align = ~(cache_line_length - 1); \
781 - end = min(start + cache_size, end); \
782 + if (start < UINT_MAX - cache_size) \
783 + end = min(start + cache_size, end); \
784 start &= align; \
785 } while (0)
786
787 diff --git a/arch/mips/loongson64/loongson-3/platform.c b/arch/mips/loongson64/loongson-3/platform.c
788 index 13f3404f0030..9674ae1361a8 100644
789 --- a/arch/mips/loongson64/loongson-3/platform.c
790 +++ b/arch/mips/loongson64/loongson-3/platform.c
791 @@ -27,6 +27,9 @@ static int __init loongson3_platform_init(void)
792 continue;
793
794 pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
795 + if (!pdev)
796 + return -ENOMEM;
797 +
798 pdev->name = loongson_sysconf.sensors[i].name;
799 pdev->id = loongson_sysconf.sensors[i].id;
800 pdev->dev.platform_data = &loongson_sysconf.sensors[i];
801 diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink
802 index 134f12f89b92..2268396ff4bb 100644
803 --- a/arch/powerpc/Makefile.postlink
804 +++ b/arch/powerpc/Makefile.postlink
805 @@ -17,11 +17,11 @@ quiet_cmd_head_check = CHKHEAD $@
806 quiet_cmd_relocs_check = CHKREL $@
807 ifdef CONFIG_PPC_BOOK3S_64
808 cmd_relocs_check = \
809 - $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@" ; \
810 + $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@" ; \
811 $(BASH) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$@"
812 else
813 cmd_relocs_check = \
814 - $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@"
815 + $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@"
816 endif
817
818 # `@true` prevents complaint when there is nothing to be done
819 diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
820 index c031be8d41ff..2fb166928e91 100644
821 --- a/arch/powerpc/kernel/eeh_driver.c
822 +++ b/arch/powerpc/kernel/eeh_driver.c
823 @@ -541,12 +541,6 @@ static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
824
825 pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
826 edev->pdev = NULL;
827 -
828 - /*
829 - * We have to set the VF PE number to invalid one, which is
830 - * required to plug the VF successfully.
831 - */
832 - pdn->pe_number = IODA_INVALID_PE;
833 #endif
834 if (rmv_data)
835 list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
836 diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
837 index 9524009ca1ae..d876eda92609 100644
838 --- a/arch/powerpc/kernel/pci_dn.c
839 +++ b/arch/powerpc/kernel/pci_dn.c
840 @@ -244,9 +244,22 @@ void remove_dev_pci_data(struct pci_dev *pdev)
841 continue;
842
843 #ifdef CONFIG_EEH
844 - /* Release EEH device for the VF */
845 + /*
846 + * Release EEH state for this VF. The PCI core
847 + * has already torn down the pci_dev for this VF, but
848 + * we're responsible to removing the eeh_dev since it
849 + * has the same lifetime as the pci_dn that spawned it.
850 + */
851 edev = pdn_to_eeh_dev(pdn);
852 if (edev) {
853 + /*
854 + * We allocate pci_dn's for the totalvfs count,
855 + * but only only the vfs that were activated
856 + * have a configured PE.
857 + */
858 + if (edev->pe)
859 + eeh_rmv_from_parent_pe(edev);
860 +
861 pdn->edev = NULL;
862 kfree(edev);
863 }
864 diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
865 index 2e496eb86e94..1139bc56e004 100644
866 --- a/arch/powerpc/kvm/emulate_loadstore.c
867 +++ b/arch/powerpc/kvm/emulate_loadstore.c
868 @@ -73,7 +73,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
869 {
870 struct kvm_run *run = vcpu->run;
871 u32 inst;
872 - int ra, rs, rt;
873 enum emulation_result emulated = EMULATE_FAIL;
874 int advance = 1;
875 struct instruction_op op;
876 @@ -85,10 +84,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
877 if (emulated != EMULATE_DONE)
878 return emulated;
879
880 - ra = get_ra(inst);
881 - rs = get_rs(inst);
882 - rt = get_rt(inst);
883 -
884 vcpu->arch.mmio_vsx_copy_nums = 0;
885 vcpu->arch.mmio_vsx_offset = 0;
886 vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
887 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
888 index 9298905cfe74..881a026a603a 100644
889 --- a/arch/powerpc/mm/fault.c
890 +++ b/arch/powerpc/mm/fault.c
891 @@ -354,6 +354,9 @@ static void sanity_check_fault(bool is_write, bool is_user,
892 * Userspace trying to access kernel address, we get PROTFAULT for that.
893 */
894 if (is_user && address >= TASK_SIZE) {
895 + if ((long)address == -1)
896 + return;
897 +
898 pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
899 current->comm, current->pid, address,
900 from_kuid(&init_user_ns, current_uid()));
901 diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
902 index c28d0d9b7ee0..058223233088 100644
903 --- a/arch/powerpc/platforms/powernv/pci-ioda.c
904 +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
905 @@ -1558,6 +1558,10 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
906
907 /* Reserve PE for each VF */
908 for (vf_index = 0; vf_index < num_vfs; vf_index++) {
909 + int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
910 + int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
911 + struct pci_dn *vf_pdn;
912 +
913 if (pdn->m64_single_mode)
914 pe_num = pdn->pe_num_map[vf_index];
915 else
916 @@ -1570,13 +1574,11 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
917 pe->pbus = NULL;
918 pe->parent_dev = pdev;
919 pe->mve_number = -1;
920 - pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
921 - pci_iov_virtfn_devfn(pdev, vf_index);
922 + pe->rid = (vf_bus << 8) | vf_devfn;
923
924 pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
925 hose->global_number, pdev->bus->number,
926 - PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
927 - PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
928 + PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);
929
930 if (pnv_ioda_configure_pe(phb, pe)) {
931 /* XXX What do we do here ? */
932 @@ -1590,6 +1592,15 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
933 list_add_tail(&pe->list, &phb->ioda.pe_list);
934 mutex_unlock(&phb->ioda.pe_list_mutex);
935
936 + /* associate this pe to it's pdn */
937 + list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
938 + if (vf_pdn->busno == vf_bus &&
939 + vf_pdn->devfn == vf_devfn) {
940 + vf_pdn->pe_number = pe_num;
941 + break;
942 + }
943 + }
944 +
945 pnv_pci_ioda2_setup_dma_pe(phb, pe);
946 #ifdef CONFIG_IOMMU_API
947 iommu_register_group(&pe->table_group,
948 @@ -2889,9 +2900,6 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
949 struct pci_dn *pdn;
950 int mul, total_vfs;
951
952 - if (!pdev->is_physfn || pci_dev_is_added(pdev))
953 - return;
954 -
955 pdn = pci_get_pdn(pdev);
956 pdn->vfs_expanded = 0;
957 pdn->m64_single_mode = false;
958 @@ -2966,6 +2974,30 @@ truncate_iov:
959 res->end = res->start - 1;
960 }
961 }
962 +
963 +static void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
964 +{
965 + if (WARN_ON(pci_dev_is_added(pdev)))
966 + return;
967 +
968 + if (pdev->is_virtfn) {
969 + struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);
970 +
971 + /*
972 + * VF PEs are single-device PEs so their pdev pointer needs to
973 + * be set. The pdev doesn't exist when the PE is allocated (in
974 + * (pcibios_sriov_enable()) so we fix it up here.
975 + */
976 + pe->pdev = pdev;
977 + WARN_ON(!(pe->flags & PNV_IODA_PE_VF));
978 + } else if (pdev->is_physfn) {
979 + /*
980 + * For PFs adjust their allocated IOV resources to match what
981 + * the PHB can support using it's M64 BAR table.
982 + */
983 + pnv_pci_ioda_fixup_iov_resources(pdev);
984 + }
985 +}
986 #endif /* CONFIG_PCI_IOV */
987
988 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
989 @@ -3862,7 +3894,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
990 ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;
991
992 #ifdef CONFIG_PCI_IOV
993 - ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
994 + ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov;
995 ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
996 ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable;
997 ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable;
998 diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
999 index c0bea75ac27b..8307e1f4086c 100644
1000 --- a/arch/powerpc/platforms/powernv/pci.c
1001 +++ b/arch/powerpc/platforms/powernv/pci.c
1002 @@ -814,24 +814,6 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
1003 {
1004 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
1005 struct pnv_phb *phb = hose->private_data;
1006 -#ifdef CONFIG_PCI_IOV
1007 - struct pnv_ioda_pe *pe;
1008 - struct pci_dn *pdn;
1009 -
1010 - /* Fix the VF pdn PE number */
1011 - if (pdev->is_virtfn) {
1012 - pdn = pci_get_pdn(pdev);
1013 - WARN_ON(pdn->pe_number != IODA_INVALID_PE);
1014 - list_for_each_entry(pe, &phb->ioda.pe_list, list) {
1015 - if (pe->rid == ((pdev->bus->number << 8) |
1016 - (pdev->devfn & 0xff))) {
1017 - pdn->pe_number = pe->pe_number;
1018 - pe->pdev = pdev;
1019 - break;
1020 - }
1021 - }
1022 - }
1023 -#endif /* CONFIG_PCI_IOV */
1024
1025 if (phb && phb->dma_dev_setup)
1026 phb->dma_dev_setup(phb, pdev);
1027 diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
1028 index e33e8bc4b69b..38c306551f76 100644
1029 --- a/arch/powerpc/platforms/pseries/lparcfg.c
1030 +++ b/arch/powerpc/platforms/pseries/lparcfg.c
1031 @@ -435,10 +435,10 @@ static void maxmem_data(struct seq_file *m)
1032 {
1033 unsigned long maxmem = 0;
1034
1035 - maxmem += drmem_info->n_lmbs * drmem_info->lmb_size;
1036 + maxmem += (unsigned long)drmem_info->n_lmbs * drmem_info->lmb_size;
1037 maxmem += hugetlb_total_pages() * PAGE_SIZE;
1038
1039 - seq_printf(m, "MaxMem=%ld\n", maxmem);
1040 + seq_printf(m, "MaxMem=%lu\n", maxmem);
1041 }
1042
1043 static int pseries_lparcfg_data(struct seq_file *m, void *v)
1044 diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
1045 index 7b9fe0a567cf..014e00e74d2b 100755
1046 --- a/arch/powerpc/tools/relocs_check.sh
1047 +++ b/arch/powerpc/tools/relocs_check.sh
1048 @@ -10,14 +10,21 @@
1049 # based on relocs_check.pl
1050 # Copyright © 2009 IBM Corporation
1051
1052 -if [ $# -lt 2 ]; then
1053 - echo "$0 [path to objdump] [path to vmlinux]" 1>&2
1054 +if [ $# -lt 3 ]; then
1055 + echo "$0 [path to objdump] [path to nm] [path to vmlinux]" 1>&2
1056 exit 1
1057 fi
1058
1059 -# Have Kbuild supply the path to objdump so we handle cross compilation.
1060 +# Have Kbuild supply the path to objdump and nm so we handle cross compilation.
1061 objdump="$1"
1062 -vmlinux="$2"
1063 +nm="$2"
1064 +vmlinux="$3"
1065 +
1066 +# Remove from the bad relocations those that match an undefined weak symbol
1067 +# which will result in an absolute relocation to 0.
1068 +# Weak unresolved symbols are of that form in nm output:
1069 +# " w _binary__btf_vmlinux_bin_end"
1070 +undef_weak_symbols=$($nm "$vmlinux" | awk '$1 ~ /w/ { print $2 }')
1071
1072 bad_relocs=$(
1073 $objdump -R "$vmlinux" |
1074 @@ -26,8 +33,6 @@ $objdump -R "$vmlinux" |
1075 # These relocations are okay
1076 # On PPC64:
1077 # R_PPC64_RELATIVE, R_PPC64_NONE
1078 - # R_PPC64_ADDR64 mach_<name>
1079 - # R_PPC64_ADDR64 __crc_<name>
1080 # On PPC:
1081 # R_PPC_RELATIVE, R_PPC_ADDR16_HI,
1082 # R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
1083 @@ -39,8 +44,7 @@ R_PPC_ADDR16_HI
1084 R_PPC_ADDR16_HA
1085 R_PPC_RELATIVE
1086 R_PPC_NONE' |
1087 - grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_' |
1088 - grep -E -v '\<R_PPC64_ADDR64[[:space:]]+__crc_'
1089 + ([ "$undef_weak_symbols" ] && grep -F -w -v "$undef_weak_symbols" || cat)
1090 )
1091
1092 if [ -z "$bad_relocs" ]; then
1093 diff --git a/arch/s390/Makefile b/arch/s390/Makefile
1094 index 478b645b20dd..9ce1baeac2b2 100644
1095 --- a/arch/s390/Makefile
1096 +++ b/arch/s390/Makefile
1097 @@ -69,7 +69,7 @@ cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
1098 #
1099 cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
1100
1101 -ifeq ($(call cc-option-yn,-mpacked-stack),y)
1102 +ifeq ($(call cc-option-yn,-mpacked-stack -mbackchain -msoft-float),y)
1103 cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
1104 aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
1105 endif
1106 diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
1107 index a2399eff84ca..6087a4e9b2bf 100644
1108 --- a/arch/s390/include/asm/pci.h
1109 +++ b/arch/s390/include/asm/pci.h
1110 @@ -183,7 +183,7 @@ void zpci_remove_reserved_devices(void);
1111 /* CLP */
1112 int clp_scan_pci_devices(void);
1113 int clp_rescan_pci_devices(void);
1114 -int clp_rescan_pci_devices_simple(void);
1115 +int clp_rescan_pci_devices_simple(u32 *fid);
1116 int clp_add_pci_device(u32, u32, int);
1117 int clp_enable_fh(struct zpci_dev *, u8);
1118 int clp_disable_fh(struct zpci_dev *);
1119 diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
1120 index 9e1660a6b9db..3431b2d5e334 100644
1121 --- a/arch/s390/kernel/mcount.S
1122 +++ b/arch/s390/kernel/mcount.S
1123 @@ -26,6 +26,12 @@ ENDPROC(ftrace_stub)
1124 #define STACK_PTREGS (STACK_FRAME_OVERHEAD)
1125 #define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
1126 #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
1127 +#ifdef __PACK_STACK
1128 +/* allocate just enough for r14, r15 and backchain */
1129 +#define TRACED_FUNC_FRAME_SIZE 24
1130 +#else
1131 +#define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD
1132 +#endif
1133
1134 ENTRY(_mcount)
1135 BR_EX %r14
1136 @@ -39,9 +45,16 @@ ENTRY(ftrace_caller)
1137 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
1138 aghi %r0,MCOUNT_RETURN_FIXUP
1139 #endif
1140 - aghi %r15,-STACK_FRAME_SIZE
1141 + # allocate stack frame for ftrace_caller to contain traced function
1142 + aghi %r15,-TRACED_FUNC_FRAME_SIZE
1143 stg %r1,__SF_BACKCHAIN(%r15)
1144 + stg %r0,(__SF_GPRS+8*8)(%r15)
1145 + stg %r15,(__SF_GPRS+9*8)(%r15)
1146 + # allocate pt_regs and stack frame for ftrace_trace_function
1147 + aghi %r15,-STACK_FRAME_SIZE
1148 stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
1149 + aghi %r1,-TRACED_FUNC_FRAME_SIZE
1150 + stg %r1,__SF_BACKCHAIN(%r15)
1151 stg %r0,(STACK_PTREGS_PSW+8)(%r15)
1152 stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
1153 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
1154 diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
1155 index d1ccc168c071..62388a678b91 100644
1156 --- a/arch/s390/kvm/interrupt.c
1157 +++ b/arch/s390/kvm/interrupt.c
1158 @@ -2191,7 +2191,7 @@ static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
1159 return -EINVAL;
1160
1161 if (!test_kvm_facility(kvm, 72))
1162 - return -ENOTSUPP;
1163 + return -EOPNOTSUPP;
1164
1165 mutex_lock(&fi->ais_lock);
1166 ais.simm = fi->simm;
1167 @@ -2500,7 +2500,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
1168 int ret = 0;
1169
1170 if (!test_kvm_facility(kvm, 72))
1171 - return -ENOTSUPP;
1172 + return -EOPNOTSUPP;
1173
1174 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
1175 return -EFAULT;
1176 @@ -2580,7 +2580,7 @@ static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
1177 struct kvm_s390_ais_all ais;
1178
1179 if (!test_kvm_facility(kvm, 72))
1180 - return -ENOTSUPP;
1181 + return -EOPNOTSUPP;
1182
1183 if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
1184 return -EFAULT;
1185 diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
1186 index c7fea9bea8cb..5b24fcc9c361 100644
1187 --- a/arch/s390/pci/pci.c
1188 +++ b/arch/s390/pci/pci.c
1189 @@ -934,5 +934,5 @@ subsys_initcall_sync(pci_base_init);
1190 void zpci_rescan(void)
1191 {
1192 if (zpci_is_enabled())
1193 - clp_rescan_pci_devices_simple();
1194 + clp_rescan_pci_devices_simple(NULL);
1195 }
1196 diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
1197 index e585a62d6530..281e0dd4c614 100644
1198 --- a/arch/s390/pci/pci_clp.c
1199 +++ b/arch/s390/pci/pci_clp.c
1200 @@ -240,12 +240,14 @@ error:
1201 }
1202
1203 /*
1204 - * Enable/Disable a given PCI function defined by its function handle.
1205 + * Enable/Disable a given PCI function and update its function handle if
1206 + * necessary
1207 */
1208 -static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
1209 +static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
1210 {
1211 struct clp_req_rsp_set_pci *rrb;
1212 int rc, retries = 100;
1213 + u32 fid = zdev->fid;
1214
1215 rrb = clp_alloc_block(GFP_KERNEL);
1216 if (!rrb)
1217 @@ -256,7 +258,7 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
1218 rrb->request.hdr.len = sizeof(rrb->request);
1219 rrb->request.hdr.cmd = CLP_SET_PCI_FN;
1220 rrb->response.hdr.len = sizeof(rrb->response);
1221 - rrb->request.fh = *fh;
1222 + rrb->request.fh = zdev->fh;
1223 rrb->request.oc = command;
1224 rrb->request.ndas = nr_dma_as;
1225
1226 @@ -269,12 +271,17 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
1227 }
1228 } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
1229
1230 - if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
1231 - *fh = rrb->response.fh;
1232 - else {
1233 + if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
1234 zpci_err("Set PCI FN:\n");
1235 zpci_err_clp(rrb->response.hdr.rsp, rc);
1236 - rc = -EIO;
1237 + }
1238 +
1239 + if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
1240 + zdev->fh = rrb->response.fh;
1241 + } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
1242 + rrb->response.fh == 0) {
1243 + /* Function is already in desired state - update handle */
1244 + rc = clp_rescan_pci_devices_simple(&fid);
1245 }
1246 clp_free_block(rrb);
1247 return rc;
1248 @@ -282,18 +289,17 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
1249
1250 int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
1251 {
1252 - u32 fh = zdev->fh;
1253 int rc;
1254
1255 - rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
1256 - zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
1257 + rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
1258 + zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
1259 if (rc)
1260 goto out;
1261
1262 - zdev->fh = fh;
1263 if (zpci_use_mio(zdev)) {
1264 - rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_MIO);
1265 - zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
1266 + rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
1267 + zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
1268 + zdev->fid, zdev->fh, rc);
1269 if (rc)
1270 clp_disable_fh(zdev);
1271 }
1272 @@ -309,11 +315,8 @@ int clp_disable_fh(struct zpci_dev *zdev)
1273 if (!zdev_enabled(zdev))
1274 return 0;
1275
1276 - rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
1277 + rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
1278 zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
1279 - if (!rc)
1280 - zdev->fh = fh;
1281 -
1282 return rc;
1283 }
1284
1285 @@ -370,10 +373,14 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
1286 static void __clp_update(struct clp_fh_list_entry *entry, void *data)
1287 {
1288 struct zpci_dev *zdev;
1289 + u32 *fid = data;
1290
1291 if (!entry->vendor_id)
1292 return;
1293
1294 + if (fid && *fid != entry->fid)
1295 + return;
1296 +
1297 zdev = get_zdev_by_fid(entry->fid);
1298 if (!zdev)
1299 return;
1300 @@ -413,7 +420,10 @@ int clp_rescan_pci_devices(void)
1301 return rc;
1302 }
1303
1304 -int clp_rescan_pci_devices_simple(void)
1305 +/* Rescan PCI functions and refresh function handles. If fid is non-NULL only
1306 + * refresh the handle of the function matching @fid
1307 + */
1308 +int clp_rescan_pci_devices_simple(u32 *fid)
1309 {
1310 struct clp_req_rsp_list_pci *rrb;
1311 int rc;
1312 @@ -422,7 +432,7 @@ int clp_rescan_pci_devices_simple(void)
1313 if (!rrb)
1314 return -ENOMEM;
1315
1316 - rc = clp_list_pci(rrb, NULL, __clp_update);
1317 + rc = clp_list_pci(rrb, fid, __clp_update);
1318
1319 clp_free_block(rrb);
1320 return rc;
1321 diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
1322 index a433ba01a317..215f17437a4f 100644
1323 --- a/arch/s390/pci/pci_sysfs.c
1324 +++ b/arch/s390/pci/pci_sysfs.c
1325 @@ -13,6 +13,8 @@
1326 #include <linux/stat.h>
1327 #include <linux/pci.h>
1328
1329 +#include "../../../drivers/pci/pci.h"
1330 +
1331 #include <asm/sclp.h>
1332
1333 #define zpci_attr(name, fmt, member) \
1334 @@ -49,31 +51,50 @@ static DEVICE_ATTR_RO(mio_enabled);
1335 static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
1336 const char *buf, size_t count)
1337 {
1338 + struct kernfs_node *kn;
1339 struct pci_dev *pdev = to_pci_dev(dev);
1340 struct zpci_dev *zdev = to_zpci(pdev);
1341 - int ret;
1342 -
1343 - if (!device_remove_file_self(dev, attr))
1344 - return count;
1345 -
1346 + int ret = 0;
1347 +
1348 + /* Can't use device_remove_self() here as that would lead us to lock
1349 + * the pci_rescan_remove_lock while holding the device' kernfs lock.
1350 + * This would create a possible deadlock with disable_slot() which is
1351 + * not directly protected by the device' kernfs lock but takes it
1352 + * during the device removal which happens under
1353 + * pci_rescan_remove_lock.
1354 + *
1355 + * This is analogous to sdev_store_delete() in
1356 + * drivers/scsi/scsi_sysfs.c
1357 + */
1358 + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
1359 + WARN_ON_ONCE(!kn);
1360 + /* device_remove_file() serializes concurrent calls ignoring all but
1361 + * the first
1362 + */
1363 + device_remove_file(dev, attr);
1364 +
1365 + /* A concurrent call to recover_store() may slip between
1366 + * sysfs_break_active_protection() and the sysfs file removal.
1367 + * Once it unblocks from pci_lock_rescan_remove() the original pdev
1368 + * will already be removed.
1369 + */
1370 pci_lock_rescan_remove();
1371 - pci_stop_and_remove_bus_device(pdev);
1372 - ret = zpci_disable_device(zdev);
1373 - if (ret)
1374 - goto error;
1375 -
1376 - ret = zpci_enable_device(zdev);
1377 - if (ret)
1378 - goto error;
1379 -
1380 - pci_rescan_bus(zdev->bus);
1381 + if (pci_dev_is_added(pdev)) {
1382 + pci_stop_and_remove_bus_device(pdev);
1383 + ret = zpci_disable_device(zdev);
1384 + if (ret)
1385 + goto out;
1386 +
1387 + ret = zpci_enable_device(zdev);
1388 + if (ret)
1389 + goto out;
1390 + pci_rescan_bus(zdev->bus);
1391 + }
1392 +out:
1393 pci_unlock_rescan_remove();
1394 -
1395 - return count;
1396 -
1397 -error:
1398 - pci_unlock_rescan_remove();
1399 - return ret;
1400 + if (kn)
1401 + sysfs_unbreak_active_protection(kn);
1402 + return ret ? ret : count;
1403 }
1404 static DEVICE_ATTR_WO(recover);
1405
1406 diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
1407 index d516e5d48818..b887cc402b71 100644
1408 --- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h
1409 +++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
1410 @@ -78,8 +78,15 @@ enum {
1411 GPIO_FN_WDTOVF,
1412
1413 /* CAN */
1414 - GPIO_FN_CTX1, GPIO_FN_CRX1, GPIO_FN_CTX0, GPIO_FN_CTX0_CTX1,
1415 - GPIO_FN_CRX0, GPIO_FN_CRX0_CRX1, GPIO_FN_CRX0_CRX1_CRX2,
1416 + GPIO_FN_CTX2, GPIO_FN_CRX2,
1417 + GPIO_FN_CTX1, GPIO_FN_CRX1,
1418 + GPIO_FN_CTX0, GPIO_FN_CRX0,
1419 + GPIO_FN_CTX0_CTX1, GPIO_FN_CRX0_CRX1,
1420 + GPIO_FN_CTX0_CTX1_CTX2, GPIO_FN_CRX0_CRX1_CRX2,
1421 + GPIO_FN_CTX2_PJ21, GPIO_FN_CRX2_PJ20,
1422 + GPIO_FN_CTX1_PJ23, GPIO_FN_CRX1_PJ22,
1423 + GPIO_FN_CTX0_CTX1_PJ23, GPIO_FN_CRX0_CRX1_PJ22,
1424 + GPIO_FN_CTX0_CTX1_CTX2_PJ21, GPIO_FN_CRX0_CRX1_CRX2_PJ20,
1425
1426 /* DMAC */
1427 GPIO_FN_TEND0, GPIO_FN_DACK0, GPIO_FN_DREQ0,
1428 diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
1429 index 61afd787bd0c..59b6df13ddea 100644
1430 --- a/arch/sparc/kernel/vmlinux.lds.S
1431 +++ b/arch/sparc/kernel/vmlinux.lds.S
1432 @@ -172,12 +172,14 @@ SECTIONS
1433 }
1434 PERCPU_SECTION(SMP_CACHE_BYTES)
1435
1436 -#ifdef CONFIG_JUMP_LABEL
1437 . = ALIGN(PAGE_SIZE);
1438 .exit.text : {
1439 EXIT_TEXT
1440 }
1441 -#endif
1442 +
1443 + .exit.data : {
1444 + EXIT_DATA
1445 + }
1446
1447 . = ALIGN(PAGE_SIZE);
1448 __init_end = .;
1449 diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
1450 index 240626e7f55a..43842fade8fa 100644
1451 --- a/arch/x86/entry/vdso/vdso32-setup.c
1452 +++ b/arch/x86/entry/vdso/vdso32-setup.c
1453 @@ -11,6 +11,7 @@
1454 #include <linux/smp.h>
1455 #include <linux/kernel.h>
1456 #include <linux/mm_types.h>
1457 +#include <linux/elf.h>
1458
1459 #include <asm/processor.h>
1460 #include <asm/vdso.h>
1461 diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
1462 index beffafd7dcc3..3ea8056148d8 100644
1463 --- a/arch/x86/events/amd/core.c
1464 +++ b/arch/x86/events/amd/core.c
1465 @@ -302,6 +302,25 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
1466 return offset;
1467 }
1468
1469 +/*
1470 + * AMD64 events are detected based on their event codes.
1471 + */
1472 +static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
1473 +{
1474 + return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
1475 +}
1476 +
1477 +static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
1478 +{
1479 + if (!(x86_pmu.flags & PMU_FL_PAIR))
1480 + return false;
1481 +
1482 + switch (amd_get_event_code(hwc)) {
1483 + case 0x003: return true; /* Retired SSE/AVX FLOPs */
1484 + default: return false;
1485 + }
1486 +}
1487 +
1488 static int amd_core_hw_config(struct perf_event *event)
1489 {
1490 if (event->attr.exclude_host && event->attr.exclude_guest)
1491 @@ -320,14 +339,6 @@ static int amd_core_hw_config(struct perf_event *event)
1492 return 0;
1493 }
1494
1495 -/*
1496 - * AMD64 events are detected based on their event codes.
1497 - */
1498 -static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
1499 -{
1500 - return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
1501 -}
1502 -
1503 static inline int amd_is_nb_event(struct hw_perf_event *hwc)
1504 {
1505 return (hwc->config & 0xe0) == 0xe0;
1506 @@ -865,6 +876,20 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
1507 }
1508 }
1509
1510 +static struct event_constraint pair_constraint;
1511 +
1512 +static struct event_constraint *
1513 +amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
1514 + struct perf_event *event)
1515 +{
1516 + struct hw_perf_event *hwc = &event->hw;
1517 +
1518 + if (amd_is_pair_event_code(hwc))
1519 + return &pair_constraint;
1520 +
1521 + return &unconstrained;
1522 +}
1523 +
1524 static ssize_t amd_event_sysfs_show(char *page, u64 config)
1525 {
1526 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
1527 @@ -908,33 +933,15 @@ static __initconst const struct x86_pmu amd_pmu = {
1528
1529 static int __init amd_core_pmu_init(void)
1530 {
1531 + u64 even_ctr_mask = 0ULL;
1532 + int i;
1533 +
1534 if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
1535 return 0;
1536
1537 - /* Avoid calulating the value each time in the NMI handler */
1538 + /* Avoid calculating the value each time in the NMI handler */
1539 perf_nmi_window = msecs_to_jiffies(100);
1540
1541 - switch (boot_cpu_data.x86) {
1542 - case 0x15:
1543 - pr_cont("Fam15h ");
1544 - x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
1545 - break;
1546 - case 0x17:
1547 - pr_cont("Fam17h ");
1548 - /*
1549 - * In family 17h, there are no event constraints in the PMC hardware.
1550 - * We fallback to using default amd_get_event_constraints.
1551 - */
1552 - break;
1553 - case 0x18:
1554 - pr_cont("Fam18h ");
1555 - /* Using default amd_get_event_constraints. */
1556 - break;
1557 - default:
1558 - pr_err("core perfctr but no constraints; unknown hardware!\n");
1559 - return -ENODEV;
1560 - }
1561 -
1562 /*
1563 * If core performance counter extensions exists, we must use
1564 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
1565 @@ -949,6 +956,30 @@ static int __init amd_core_pmu_init(void)
1566 */
1567 x86_pmu.amd_nb_constraints = 0;
1568
1569 + if (boot_cpu_data.x86 == 0x15) {
1570 + pr_cont("Fam15h ");
1571 + x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
1572 + }
1573 + if (boot_cpu_data.x86 >= 0x17) {
1574 + pr_cont("Fam17h+ ");
1575 + /*
1576 + * Family 17h and compatibles have constraints for Large
1577 + * Increment per Cycle events: they may only be assigned an
1578 + * even numbered counter that has a consecutive adjacent odd
1579 + * numbered counter following it.
1580 + */
1581 + for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
1582 + even_ctr_mask |= 1 << i;
1583 +
1584 + pair_constraint = (struct event_constraint)
1585 + __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
1586 + x86_pmu.num_counters / 2, 0,
1587 + PERF_X86_EVENT_PAIR);
1588 +
1589 + x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
1590 + x86_pmu.flags |= PMU_FL_PAIR;
1591 + }
1592 +
1593 pr_cont("core perfctr, ");
1594 return 0;
1595 }
1596 diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
1597 index ecacfbf4ebc1..0ed910237c4d 100644
1598 --- a/arch/x86/events/perf_event.h
1599 +++ b/arch/x86/events/perf_event.h
1600 @@ -77,6 +77,7 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
1601 #define PERF_X86_EVENT_AUTO_RELOAD 0x0200 /* use PEBS auto-reload */
1602 #define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */
1603 #define PERF_X86_EVENT_PEBS_VIA_PT 0x0800 /* use PT buffer for PEBS */
1604 +#define PERF_X86_EVENT_PAIR 0x1000 /* Large Increment per Cycle */
1605
1606 struct amd_nb {
1607 int nb_id; /* NorthBridge id */
1608 @@ -735,6 +736,7 @@ do { \
1609 #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
1610 #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
1611 #define PMU_FL_TFA 0x20 /* deal with TSX force abort */
1612 +#define PMU_FL_PAIR 0x40 /* merge counters for large incr. events */
1613
1614 #define EVENT_VAR(_id) event_attr_##_id
1615 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
1616 diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
1617 index 75ded1d13d98..9d5d949e662e 100644
1618 --- a/arch/x86/include/asm/nmi.h
1619 +++ b/arch/x86/include/asm/nmi.h
1620 @@ -41,7 +41,6 @@ struct nmiaction {
1621 struct list_head list;
1622 nmi_handler_t handler;
1623 u64 max_duration;
1624 - struct irq_work irq_work;
1625 unsigned long flags;
1626 const char *name;
1627 };
1628 diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
1629 index 0071b794ed19..400a05e1c1c5 100644
1630 --- a/arch/x86/kernel/fpu/signal.c
1631 +++ b/arch/x86/kernel/fpu/signal.c
1632 @@ -352,6 +352,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
1633 fpregs_unlock();
1634 return 0;
1635 }
1636 + fpregs_deactivate(fpu);
1637 fpregs_unlock();
1638 }
1639
1640 @@ -403,6 +404,8 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
1641 }
1642 if (!ret)
1643 fpregs_mark_activate();
1644 + else
1645 + fpregs_deactivate(fpu);
1646 fpregs_unlock();
1647
1648 err_out:
1649 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
1650 index e676a9916c49..54c21d6abd5a 100644
1651 --- a/arch/x86/kernel/nmi.c
1652 +++ b/arch/x86/kernel/nmi.c
1653 @@ -104,18 +104,22 @@ static int __init nmi_warning_debugfs(void)
1654 }
1655 fs_initcall(nmi_warning_debugfs);
1656
1657 -static void nmi_max_handler(struct irq_work *w)
1658 +static void nmi_check_duration(struct nmiaction *action, u64 duration)
1659 {
1660 - struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
1661 + u64 whole_msecs = READ_ONCE(action->max_duration);
1662 int remainder_ns, decimal_msecs;
1663 - u64 whole_msecs = READ_ONCE(a->max_duration);
1664 +
1665 + if (duration < nmi_longest_ns || duration < action->max_duration)
1666 + return;
1667 +
1668 + action->max_duration = duration;
1669
1670 remainder_ns = do_div(whole_msecs, (1000 * 1000));
1671 decimal_msecs = remainder_ns / 1000;
1672
1673 printk_ratelimited(KERN_INFO
1674 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
1675 - a->handler, whole_msecs, decimal_msecs);
1676 + action->handler, whole_msecs, decimal_msecs);
1677 }
1678
1679 static int nmi_handle(unsigned int type, struct pt_regs *regs)
1680 @@ -142,11 +146,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs)
1681 delta = sched_clock() - delta;
1682 trace_nmi_handler(a->handler, (int)delta, thishandled);
1683
1684 - if (delta < nmi_longest_ns || delta < a->max_duration)
1685 - continue;
1686 -
1687 - a->max_duration = delta;
1688 - irq_work_queue(&a->irq_work);
1689 + nmi_check_duration(a, delta);
1690 }
1691
1692 rcu_read_unlock();
1693 @@ -164,8 +164,6 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
1694 if (!action->handler)
1695 return -EINVAL;
1696
1697 - init_irq_work(&action->irq_work, nmi_max_handler);
1698 -
1699 raw_spin_lock_irqsave(&desc->lock, flags);
1700
1701 /*
1702 diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
1703 index 01f0e2263b86..298fc1edd9c9 100644
1704 --- a/arch/x86/kernel/sysfb_simplefb.c
1705 +++ b/arch/x86/kernel/sysfb_simplefb.c
1706 @@ -90,11 +90,11 @@ __init int create_simplefb(const struct screen_info *si,
1707 if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
1708 size <<= 16;
1709 length = mode->height * mode->stride;
1710 - length = PAGE_ALIGN(length);
1711 if (length > size) {
1712 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
1713 return -EINVAL;
1714 }
1715 + length = PAGE_ALIGN(length);
1716
1717 /* setup IORESOURCE_MEM as framebuffer memory */
1718 memset(&res, 0, sizeof(res));
1719 diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
1720 index 0a0e9112f284..5cb9f009f2be 100644
1721 --- a/arch/x86/lib/x86-opcode-map.txt
1722 +++ b/arch/x86/lib/x86-opcode-map.txt
1723 @@ -909,7 +909,7 @@ EndTable
1724
1725 GrpTable: Grp3_2
1726 0: TEST Ev,Iz
1727 -1:
1728 +1: TEST Ev,Iz
1729 2: NOT Ev
1730 3: NEG Ev
1731 4: MUL rAX,Ev
1732 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
1733 index 0d09cc5aad61..a19a71b4d185 100644
1734 --- a/arch/x86/mm/pageattr.c
1735 +++ b/arch/x86/mm/pageattr.c
1736 @@ -2215,7 +2215,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
1737 .pgd = pgd,
1738 .numpages = numpages,
1739 .mask_set = __pgprot(0),
1740 - .mask_clr = __pgprot(0),
1741 + .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
1742 .flags = 0,
1743 };
1744
1745 @@ -2224,12 +2224,6 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
1746 if (!(__supported_pte_mask & _PAGE_NX))
1747 goto out;
1748
1749 - if (!(page_flags & _PAGE_NX))
1750 - cpa.mask_clr = __pgprot(_PAGE_NX);
1751 -
1752 - if (!(page_flags & _PAGE_RW))
1753 - cpa.mask_clr = __pgprot(_PAGE_RW);
1754 -
1755 if (!(page_flags & _PAGE_ENC))
1756 cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
1757
1758 diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
1759 index 425e025341db..01d7ca492741 100644
1760 --- a/arch/x86/platform/efi/efi.c
1761 +++ b/arch/x86/platform/efi/efi.c
1762 @@ -504,7 +504,6 @@ void __init efi_init(void)
1763 efi_char16_t *c16;
1764 char vendor[100] = "unknown";
1765 int i = 0;
1766 - void *tmp;
1767
1768 #ifdef CONFIG_X86_32
1769 if (boot_params.efi_info.efi_systab_hi ||
1770 @@ -529,14 +528,16 @@ void __init efi_init(void)
1771 /*
1772 * Show what we know for posterity
1773 */
1774 - c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
1775 + c16 = early_memremap_ro(efi.systab->fw_vendor,
1776 + sizeof(vendor) * sizeof(efi_char16_t));
1777 if (c16) {
1778 - for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
1779 - vendor[i] = *c16++;
1780 + for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
1781 + vendor[i] = c16[i];
1782 vendor[i] = '\0';
1783 - } else
1784 + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
1785 + } else {
1786 pr_err("Could not map the firmware vendor!\n");
1787 - early_memunmap(tmp, 2);
1788 + }
1789
1790 pr_info("EFI v%u.%.02u by %s\n",
1791 efi.systab->hdr.revision >> 16,
1792 @@ -953,16 +954,14 @@ static void __init __efi_enter_virtual_mode(void)
1793
1794 if (efi_alloc_page_tables()) {
1795 pr_err("Failed to allocate EFI page tables\n");
1796 - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
1797 - return;
1798 + goto err;
1799 }
1800
1801 efi_merge_regions();
1802 new_memmap = efi_map_regions(&count, &pg_shift);
1803 if (!new_memmap) {
1804 pr_err("Error reallocating memory, EFI runtime non-functional!\n");
1805 - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
1806 - return;
1807 + goto err;
1808 }
1809
1810 pa = __pa(new_memmap);
1811 @@ -976,8 +975,7 @@ static void __init __efi_enter_virtual_mode(void)
1812
1813 if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
1814 pr_err("Failed to remap late EFI memory map\n");
1815 - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
1816 - return;
1817 + goto err;
1818 }
1819
1820 if (efi_enabled(EFI_DBG)) {
1821 @@ -985,12 +983,11 @@ static void __init __efi_enter_virtual_mode(void)
1822 efi_print_memmap();
1823 }
1824
1825 - BUG_ON(!efi.systab);
1826 + if (WARN_ON(!efi.systab))
1827 + goto err;
1828
1829 - if (efi_setup_page_tables(pa, 1 << pg_shift)) {
1830 - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
1831 - return;
1832 - }
1833 + if (efi_setup_page_tables(pa, 1 << pg_shift))
1834 + goto err;
1835
1836 efi_sync_low_kernel_mappings();
1837
1838 @@ -1010,9 +1007,9 @@ static void __init __efi_enter_virtual_mode(void)
1839 }
1840
1841 if (status != EFI_SUCCESS) {
1842 - pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
1843 - status);
1844 - panic("EFI call to SetVirtualAddressMap() failed!");
1845 + pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
1846 + status);
1847 + goto err;
1848 }
1849
1850 efi_free_boot_services();
1851 @@ -1041,6 +1038,10 @@ static void __init __efi_enter_virtual_mode(void)
1852
1853 /* clean DUMMY object */
1854 efi_delete_dummy_variable();
1855 + return;
1856 +
1857 +err:
1858 + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
1859 }
1860
1861 void __init efi_enter_virtual_mode(void)
1862 diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
1863 index 08ce8177c3af..52a1e5192fa8 100644
1864 --- a/arch/x86/platform/efi/efi_64.c
1865 +++ b/arch/x86/platform/efi/efi_64.c
1866 @@ -392,11 +392,12 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
1867 return 0;
1868
1869 page = alloc_page(GFP_KERNEL|__GFP_DMA32);
1870 - if (!page)
1871 - panic("Unable to allocate EFI runtime stack < 4GB\n");
1872 + if (!page) {
1873 + pr_err("Unable to allocate EFI runtime stack < 4GB\n");
1874 + return 1;
1875 + }
1876
1877 - efi_scratch.phys_stack = virt_to_phys(page_address(page));
1878 - efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */
1879 + efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */
1880
1881 npages = (_etext - _text) >> PAGE_SHIFT;
1882 text = __pa(_text);
1883 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
1884 index 0c6214497fcc..5498d05b873d 100644
1885 --- a/block/bfq-iosched.c
1886 +++ b/block/bfq-iosched.c
1887 @@ -3444,6 +3444,10 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
1888 static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
1889 struct bfq_queue *bfqq)
1890 {
1891 + /* No point in idling for bfqq if it won't get requests any longer */
1892 + if (unlikely(!bfqq_process_refs(bfqq)))
1893 + return false;
1894 +
1895 return (bfqq->wr_coeff > 1 &&
1896 (bfqd->wr_busy_queues <
1897 bfq_tot_busy_queues(bfqd) ||
1898 @@ -4077,6 +4081,10 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
1899 bfqq_sequential_and_IO_bound,
1900 idling_boosts_thr;
1901
1902 + /* No point in idling for bfqq if it won't get requests any longer */
1903 + if (unlikely(!bfqq_process_refs(bfqq)))
1904 + return false;
1905 +
1906 bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
1907 bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
1908
1909 @@ -4170,6 +4178,10 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
1910 struct bfq_data *bfqd = bfqq->bfqd;
1911 bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
1912
1913 + /* No point in idling for bfqq if it won't get requests any longer */
1914 + if (unlikely(!bfqq_process_refs(bfqq)))
1915 + return false;
1916 +
1917 if (unlikely(bfqd->strict_guarantees))
1918 return true;
1919
1920 diff --git a/crypto/Kconfig b/crypto/Kconfig
1921 index 29472fb795f3..b2cc0ad3792a 100644
1922 --- a/crypto/Kconfig
1923 +++ b/crypto/Kconfig
1924 @@ -500,10 +500,10 @@ config CRYPTO_ESSIV
1925 encryption.
1926
1927 This driver implements a crypto API template that can be
1928 - instantiated either as a skcipher or as a aead (depending on the
1929 + instantiated either as an skcipher or as an AEAD (depending on the
1930 type of the first template argument), and which defers encryption
1931 and decryption requests to the encapsulated cipher after applying
1932 - ESSIV to the input IV. Note that in the aead case, it is assumed
1933 + ESSIV to the input IV. Note that in the AEAD case, it is assumed
1934 that the keys are presented in the same format used by the authenc
1935 template, and that the IV appears at the end of the authenticated
1936 associated data (AAD) region (which is how dm-crypt uses it.)
1937 diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
1938 index cf4e061bb0f0..8438e33aa447 100644
1939 --- a/drivers/acpi/acpica/dsfield.c
1940 +++ b/drivers/acpi/acpica/dsfield.c
1941 @@ -244,7 +244,7 @@ cleanup:
1942 * FUNCTION: acpi_ds_get_field_names
1943 *
1944 * PARAMETERS: info - create_field info structure
1945 - * ` walk_state - Current method state
1946 + * walk_state - Current method state
1947 * arg - First parser arg for the field name list
1948 *
1949 * RETURN: Status
1950 diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
1951 index c88fd31208a5..4bcf15bf03de 100644
1952 --- a/drivers/acpi/acpica/dswload.c
1953 +++ b/drivers/acpi/acpica/dswload.c
1954 @@ -410,6 +410,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
1955 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
1956 walk_state));
1957
1958 + /*
1959 + * Disassembler: handle create field operators here.
1960 + *
1961 + * create_buffer_field is a deferred op that is typically processed in load
1962 + * pass 2. However, disassembly of control method contents walk the parse
1963 + * tree with ACPI_PARSE_LOAD_PASS1 and AML_CREATE operators are processed
1964 + * in a later walk. This is a problem when there is a control method that
1965 + * has the same name as the AML_CREATE object. In this case, any use of the
1966 + * name segment will be detected as a method call rather than a reference
1967 + * to a buffer field.
1968 + *
1969 + * This earlier creation during disassembly solves this issue by inserting
1970 + * the named object in the ACPI namespace so that references to this name
1971 + * would be a name string rather than a method call.
1972 + */
1973 + if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
1974 + (walk_state->op_info->flags & AML_CREATE)) {
1975 + status = acpi_ds_create_buffer_field(op, walk_state);
1976 + return_ACPI_STATUS(status);
1977 + }
1978 +
1979 /* We are only interested in opcodes that have an associated name */
1980
1981 if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
1982 diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
1983 index ce93a355bd1c..985afc62da82 100644
1984 --- a/drivers/acpi/button.c
1985 +++ b/drivers/acpi/button.c
1986 @@ -89,6 +89,17 @@ static const struct dmi_system_id lid_blacklst[] = {
1987 },
1988 .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
1989 },
1990 + {
1991 + /*
1992 + * Razer Blade Stealth 13 late 2019, notification of the LID device
1993 + * only happens on close, not on open and _LID always returns closed.
1994 + */
1995 + .matches = {
1996 + DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
1997 + DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"),
1998 + },
1999 + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
2000 + },
2001 {}
2002 };
2003
2004 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
2005 index f1a500205313..8fbd36eb8941 100644
2006 --- a/drivers/atm/fore200e.c
2007 +++ b/drivers/atm/fore200e.c
2008 @@ -1414,12 +1414,14 @@ fore200e_open(struct atm_vcc *vcc)
2009 static void
2010 fore200e_close(struct atm_vcc* vcc)
2011 {
2012 - struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2013 struct fore200e_vcc* fore200e_vcc;
2014 + struct fore200e* fore200e;
2015 struct fore200e_vc_map* vc_map;
2016 unsigned long flags;
2017
2018 ASSERT(vcc);
2019 + fore200e = FORE200E_DEV(vcc->dev);
2020 +
2021 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
2022 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
2023
2024 @@ -1464,10 +1466,10 @@ fore200e_close(struct atm_vcc* vcc)
2025 static int
2026 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
2027 {
2028 - struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2029 - struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2030 + struct fore200e* fore200e;
2031 + struct fore200e_vcc* fore200e_vcc;
2032 struct fore200e_vc_map* vc_map;
2033 - struct host_txq* txq = &fore200e->host_txq;
2034 + struct host_txq* txq;
2035 struct host_txq_entry* entry;
2036 struct tpd* tpd;
2037 struct tpd_haddr tpd_haddr;
2038 @@ -1480,9 +1482,18 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
2039 unsigned char* data;
2040 unsigned long flags;
2041
2042 - ASSERT(vcc);
2043 - ASSERT(fore200e);
2044 - ASSERT(fore200e_vcc);
2045 + if (!vcc)
2046 + return -EINVAL;
2047 +
2048 + fore200e = FORE200E_DEV(vcc->dev);
2049 + fore200e_vcc = FORE200E_VCC(vcc);
2050 +
2051 + if (!fore200e)
2052 + return -EINVAL;
2053 +
2054 + txq = &fore200e->host_txq;
2055 + if (!fore200e_vcc)
2056 + return -EINVAL;
2057
2058 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2059 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
2060 diff --git a/drivers/base/dd.c b/drivers/base/dd.c
2061 index d811e60610d3..b25bcab2a26b 100644
2062 --- a/drivers/base/dd.c
2063 +++ b/drivers/base/dd.c
2064 @@ -516,7 +516,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
2065 atomic_inc(&probe_count);
2066 pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
2067 drv->bus->name, __func__, drv->name, dev_name(dev));
2068 - WARN_ON(!list_empty(&dev->devres_head));
2069 + if (!list_empty(&dev->devres_head)) {
2070 + dev_crit(dev, "Resources present before probing\n");
2071 + return -EBUSY;
2072 + }
2073
2074 re_probe:
2075 dev->driver = drv;
2076 diff --git a/drivers/base/platform.c b/drivers/base/platform.c
2077 index 3c0cd20925b7..60386a32208f 100644
2078 --- a/drivers/base/platform.c
2079 +++ b/drivers/base/platform.c
2080 @@ -27,6 +27,7 @@
2081 #include <linux/limits.h>
2082 #include <linux/property.h>
2083 #include <linux/kmemleak.h>
2084 +#include <linux/types.h>
2085
2086 #include "base.h"
2087 #include "power/power.h"
2088 @@ -48,7 +49,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
2089 struct resource *platform_get_resource(struct platform_device *dev,
2090 unsigned int type, unsigned int num)
2091 {
2092 - int i;
2093 + u32 i;
2094
2095 for (i = 0; i < dev->num_resources; i++) {
2096 struct resource *r = &dev->resource[i];
2097 @@ -226,7 +227,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
2098 unsigned int type,
2099 const char *name)
2100 {
2101 - int i;
2102 + u32 i;
2103
2104 for (i = 0; i < dev->num_resources; i++) {
2105 struct resource *r = &dev->resource[i];
2106 @@ -473,7 +474,8 @@ EXPORT_SYMBOL_GPL(platform_device_add_properties);
2107 */
2108 int platform_device_add(struct platform_device *pdev)
2109 {
2110 - int i, ret;
2111 + u32 i;
2112 + int ret;
2113
2114 if (!pdev)
2115 return -EINVAL;
2116 @@ -541,7 +543,7 @@ int platform_device_add(struct platform_device *pdev)
2117 pdev->id = PLATFORM_DEVID_AUTO;
2118 }
2119
2120 - while (--i >= 0) {
2121 + while (i--) {
2122 struct resource *r = &pdev->resource[i];
2123 if (r->parent)
2124 release_resource(r);
2125 @@ -562,7 +564,7 @@ EXPORT_SYMBOL_GPL(platform_device_add);
2126 */
2127 void platform_device_del(struct platform_device *pdev)
2128 {
2129 - int i;
2130 + u32 i;
2131
2132 if (!IS_ERR_OR_NULL(pdev)) {
2133 device_del(&pdev->dev);
2134 diff --git a/drivers/block/brd.c b/drivers/block/brd.c
2135 index c548a5a6c1a0..79f18cfa7049 100644
2136 --- a/drivers/block/brd.c
2137 +++ b/drivers/block/brd.c
2138 @@ -470,6 +470,25 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
2139 return kobj;
2140 }
2141
2142 +static inline void brd_check_and_reset_par(void)
2143 +{
2144 + if (unlikely(!max_part))
2145 + max_part = 1;
2146 +
2147 + /*
2148 + * make sure 'max_part' can be divided exactly by (1U << MINORBITS),
2149 + * otherwise, it is possiable to get same dev_t when adding partitions.
2150 + */
2151 + if ((1U << MINORBITS) % max_part != 0)
2152 + max_part = 1UL << fls(max_part);
2153 +
2154 + if (max_part > DISK_MAX_PARTS) {
2155 + pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
2156 + DISK_MAX_PARTS, DISK_MAX_PARTS);
2157 + max_part = DISK_MAX_PARTS;
2158 + }
2159 +}
2160 +
2161 static int __init brd_init(void)
2162 {
2163 struct brd_device *brd, *next;
2164 @@ -493,8 +512,7 @@ static int __init brd_init(void)
2165 if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
2166 return -EIO;
2167
2168 - if (unlikely(!max_part))
2169 - max_part = 1;
2170 + brd_check_and_reset_par();
2171
2172 for (i = 0; i < rd_nr; i++) {
2173 brd = brd_alloc(i);
2174 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
2175 index b4607dd96185..78181908f0df 100644
2176 --- a/drivers/block/nbd.c
2177 +++ b/drivers/block/nbd.c
2178 @@ -1265,6 +1265,16 @@ static int nbd_start_device(struct nbd_device *nbd)
2179 args = kzalloc(sizeof(*args), GFP_KERNEL);
2180 if (!args) {
2181 sock_shutdown(nbd);
2182 + /*
2183 + * If num_connections is m (2 < m),
2184 + * and NO.1 ~ NO.n(1 < n < m) kzallocs are successful.
2185 + * But NO.(n + 1) failed. We still have n recv threads.
2186 + * So, add flush_workqueue here to prevent recv threads
2187 + * dropping the last config_refs and trying to destroy
2188 + * the workqueue from inside the workqueue.
2189 + */
2190 + if (i)
2191 + flush_workqueue(nbd->recv_workq);
2192 return -ENOMEM;
2193 }
2194 sk_set_memalloc(config->socks[i]->sock->sk);
2195 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
2196 index 13527a0b4e44..a67315786db4 100644
2197 --- a/drivers/block/rbd.c
2198 +++ b/drivers/block/rbd.c
2199 @@ -2739,7 +2739,7 @@ static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2200 u64 off, u64 len)
2201 {
2202 struct ceph_file_extent ex = { off, len };
2203 - union rbd_img_fill_iter dummy;
2204 + union rbd_img_fill_iter dummy = {};
2205 struct rbd_img_fill_ctx fctx = {
2206 .pos_type = OBJ_REQUEST_NODATA,
2207 .pos = &dummy,
2208 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
2209 index 4285e75e52c3..1bf4a908a0bd 100644
2210 --- a/drivers/block/zram/zram_drv.c
2211 +++ b/drivers/block/zram/zram_drv.c
2212 @@ -626,7 +626,7 @@ static ssize_t writeback_store(struct device *dev,
2213 struct bio bio;
2214 struct bio_vec bio_vec;
2215 struct page *page;
2216 - ssize_t ret;
2217 + ssize_t ret = len;
2218 int mode;
2219 unsigned long blk_idx = 0;
2220
2221 @@ -762,7 +762,6 @@ next:
2222
2223 if (blk_idx)
2224 free_block_bdev(zram, blk_idx);
2225 - ret = len;
2226 __free_page(page);
2227 release_init_lock:
2228 up_read(&zram->init_lock);
2229 diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
2230 index d9846265a5cd..a0cecb12b6f9 100644
2231 --- a/drivers/bus/ti-sysc.c
2232 +++ b/drivers/bus/ti-sysc.c
2233 @@ -479,7 +479,7 @@ static void sysc_clkdm_deny_idle(struct sysc *ddata)
2234 {
2235 struct ti_sysc_platform_data *pdata;
2236
2237 - if (ddata->legacy_mode)
2238 + if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
2239 return;
2240
2241 pdata = dev_get_platdata(ddata->dev);
2242 @@ -491,7 +491,7 @@ static void sysc_clkdm_allow_idle(struct sysc *ddata)
2243 {
2244 struct ti_sysc_platform_data *pdata;
2245
2246 - if (ddata->legacy_mode)
2247 + if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
2248 return;
2249
2250 pdata = dev_get_platdata(ddata->dev);
2251 @@ -1251,6 +1251,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
2252 /* Quirks that need to be set based on detected module */
2253 SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff,
2254 SYSC_MODULE_QUIRK_AESS),
2255 + SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff,
2256 + SYSC_QUIRK_CLKDM_NOAUTO),
2257 + SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
2258 + SYSC_QUIRK_CLKDM_NOAUTO),
2259 + SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
2260 + SYSC_QUIRK_CLKDM_NOAUTO),
2261 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
2262 SYSC_MODULE_QUIRK_HDQ1W),
2263 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
2264 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
2265 index 9ac6671bb514..f69609b47fef 100644
2266 --- a/drivers/char/hpet.c
2267 +++ b/drivers/char/hpet.c
2268 @@ -855,7 +855,7 @@ int hpet_alloc(struct hpet_data *hdp)
2269 return 0;
2270 }
2271
2272 - hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs - 1),
2273 + hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs),
2274 GFP_KERNEL);
2275
2276 if (!hpetp)
2277 diff --git a/drivers/char/random.c b/drivers/char/random.c
2278 index 01b8868b9bed..a385fc1da1cb 100644
2279 --- a/drivers/char/random.c
2280 +++ b/drivers/char/random.c
2281 @@ -1687,8 +1687,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
2282 print_once = true;
2283 #endif
2284 if (__ratelimit(&unseeded_warning))
2285 - pr_notice("random: %s called from %pS with crng_init=%d\n",
2286 - func_name, caller, crng_init);
2287 + printk_deferred(KERN_NOTICE "random: %s called from %pS "
2288 + "with crng_init=%d\n", func_name, caller,
2289 + crng_init);
2290 }
2291
2292 /*
2293 diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
2294 index 86238d5ecb4d..77398aefeb6d 100644
2295 --- a/drivers/clk/at91/sam9x60.c
2296 +++ b/drivers/clk/at91/sam9x60.c
2297 @@ -47,6 +47,7 @@ static const struct clk_programmable_layout sam9x60_programmable_layout = {
2298 .pres_shift = 8,
2299 .css_mask = 0x1f,
2300 .have_slck_mck = 0,
2301 + .is_pres_direct = 1,
2302 };
2303
2304 static const struct clk_pcr_layout sam9x60_pcr_layout = {
2305 diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
2306 index 67f592fa083a..62d0fc486d3a 100644
2307 --- a/drivers/clk/clk.c
2308 +++ b/drivers/clk/clk.c
2309 @@ -3320,6 +3320,21 @@ static int __clk_core_init(struct clk_core *core)
2310 goto out;
2311 }
2312
2313 + /*
2314 + * optional platform-specific magic
2315 + *
2316 + * The .init callback is not used by any of the basic clock types, but
2317 + * exists for weird hardware that must perform initialization magic.
2318 + * Please consider other ways of solving initialization problems before
2319 + * using this callback, as its use is discouraged.
2320 + *
2321 + * If it exist, this callback should called before any other callback of
2322 + * the clock
2323 + */
2324 + if (core->ops->init)
2325 + core->ops->init(core->hw);
2326 +
2327 +
2328 core->parent = __clk_init_parent(core);
2329
2330 /*
2331 @@ -3344,17 +3359,6 @@ static int __clk_core_init(struct clk_core *core)
2332 core->orphan = true;
2333 }
2334
2335 - /*
2336 - * optional platform-specific magic
2337 - *
2338 - * The .init callback is not used by any of the basic clock types, but
2339 - * exists for weird hardware that must perform initialization magic.
2340 - * Please consider other ways of solving initialization problems before
2341 - * using this callback, as its use is discouraged.
2342 - */
2343 - if (core->ops->init)
2344 - core->ops->init(core->hw);
2345 -
2346 /*
2347 * Set clk's accuracy. The preferred method is to use
2348 * .recalc_accuracy. For simple clocks and lazy developers the default
2349 @@ -3714,6 +3718,28 @@ fail_out:
2350 return ERR_PTR(ret);
2351 }
2352
2353 +/**
2354 + * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
2355 + * @dev: Device to get device node of
2356 + *
2357 + * Return: device node pointer of @dev, or the device node pointer of
2358 + * @dev->parent if dev doesn't have a device node, or NULL if neither
2359 + * @dev or @dev->parent have a device node.
2360 + */
2361 +static struct device_node *dev_or_parent_of_node(struct device *dev)
2362 +{
2363 + struct device_node *np;
2364 +
2365 + if (!dev)
2366 + return NULL;
2367 +
2368 + np = dev_of_node(dev);
2369 + if (!np)
2370 + np = dev_of_node(dev->parent);
2371 +
2372 + return np;
2373 +}
2374 +
2375 /**
2376 * clk_register - allocate a new clock, register it and return an opaque cookie
2377 * @dev: device that is registering this clock
2378 @@ -3729,7 +3755,7 @@ fail_out:
2379 */
2380 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2381 {
2382 - return __clk_register(dev, dev_of_node(dev), hw);
2383 + return __clk_register(dev, dev_or_parent_of_node(dev), hw);
2384 }
2385 EXPORT_SYMBOL_GPL(clk_register);
2386
2387 @@ -3745,7 +3771,8 @@ EXPORT_SYMBOL_GPL(clk_register);
2388 */
2389 int clk_hw_register(struct device *dev, struct clk_hw *hw)
2390 {
2391 - return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw));
2392 + return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
2393 + hw));
2394 }
2395 EXPORT_SYMBOL_GPL(clk_hw_register);
2396
2397 diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
2398 index f7a389a50401..6fe64ff8ffa1 100644
2399 --- a/drivers/clk/imx/clk.h
2400 +++ b/drivers/clk/imx/clk.h
2401 @@ -51,48 +51,48 @@ struct imx_pll14xx_clk {
2402 };
2403
2404 #define imx_clk_cpu(name, parent_name, div, mux, pll, step) \
2405 - imx_clk_hw_cpu(name, parent_name, div, mux, pll, step)->clk
2406 + to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step))
2407
2408 #define clk_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
2409 cgr_val, clk_gate_flags, lock, share_count) \
2410 - clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
2411 - cgr_val, clk_gate_flags, lock, share_count)->clk
2412 + to_clk(clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
2413 + cgr_val, clk_gate_flags, lock, share_count))
2414
2415 #define imx_clk_pllv3(type, name, parent_name, base, div_mask) \
2416 - imx_clk_hw_pllv3(type, name, parent_name, base, div_mask)->clk
2417 + to_clk(imx_clk_hw_pllv3(type, name, parent_name, base, div_mask))
2418
2419 #define imx_clk_pfd(name, parent_name, reg, idx) \
2420 - imx_clk_hw_pfd(name, parent_name, reg, idx)->clk
2421 + to_clk(imx_clk_hw_pfd(name, parent_name, reg, idx))
2422
2423 #define imx_clk_gate_exclusive(name, parent, reg, shift, exclusive_mask) \
2424 - imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask)->clk
2425 + to_clk(imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask))
2426
2427 #define imx_clk_fixed_factor(name, parent, mult, div) \
2428 - imx_clk_hw_fixed_factor(name, parent, mult, div)->clk
2429 + to_clk(imx_clk_hw_fixed_factor(name, parent, mult, div))
2430
2431 #define imx_clk_divider2(name, parent, reg, shift, width) \
2432 - imx_clk_hw_divider2(name, parent, reg, shift, width)->clk
2433 + to_clk(imx_clk_hw_divider2(name, parent, reg, shift, width))
2434
2435 #define imx_clk_gate_dis(name, parent, reg, shift) \
2436 - imx_clk_hw_gate_dis(name, parent, reg, shift)->clk
2437 + to_clk(imx_clk_hw_gate_dis(name, parent, reg, shift))
2438
2439 #define imx_clk_gate2(name, parent, reg, shift) \
2440 - imx_clk_hw_gate2(name, parent, reg, shift)->clk
2441 + to_clk(imx_clk_hw_gate2(name, parent, reg, shift))
2442
2443 #define imx_clk_gate2_flags(name, parent, reg, shift, flags) \
2444 - imx_clk_hw_gate2_flags(name, parent, reg, shift, flags)->clk
2445 + to_clk(imx_clk_hw_gate2_flags(name, parent, reg, shift, flags))
2446
2447 #define imx_clk_gate2_shared2(name, parent, reg, shift, share_count) \
2448 - imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count)->clk
2449 + to_clk(imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count))
2450
2451 #define imx_clk_gate3(name, parent, reg, shift) \
2452 - imx_clk_hw_gate3(name, parent, reg, shift)->clk
2453 + to_clk(imx_clk_hw_gate3(name, parent, reg, shift))
2454
2455 #define imx_clk_gate4(name, parent, reg, shift) \
2456 - imx_clk_hw_gate4(name, parent, reg, shift)->clk
2457 + to_clk(imx_clk_hw_gate4(name, parent, reg, shift))
2458
2459 #define imx_clk_mux(name, reg, shift, width, parents, num_parents) \
2460 - imx_clk_hw_mux(name, reg, shift, width, parents, num_parents)->clk
2461 + to_clk(imx_clk_hw_mux(name, reg, shift, width, parents, num_parents))
2462
2463 struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
2464 void __iomem *base, const struct imx_pll14xx_clk *pll_clk);
2465 @@ -195,6 +195,13 @@ struct clk_hw *imx_clk_hw_fixup_mux(const char *name, void __iomem *reg,
2466 u8 shift, u8 width, const char * const *parents,
2467 int num_parents, void (*fixup)(u32 *val));
2468
2469 +static inline struct clk *to_clk(struct clk_hw *hw)
2470 +{
2471 + if (IS_ERR_OR_NULL(hw))
2472 + return ERR_CAST(hw);
2473 + return hw->clk;
2474 +}
2475 +
2476 static inline struct clk *imx_clk_fixed(const char *name, int rate)
2477 {
2478 return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
2479 diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
2480 index ddb1e5634739..3a5853ca98c6 100644
2481 --- a/drivers/clk/meson/clk-pll.c
2482 +++ b/drivers/clk/meson/clk-pll.c
2483 @@ -77,6 +77,15 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
2484 unsigned int m, n, frac;
2485
2486 n = meson_parm_read(clk->map, &pll->n);
2487 +
2488 + /*
2489 + * On some HW, N is set to zero on init. This value is invalid as
2490 + * it would result in a division by zero. The rate can't be
2491 + * calculated in this case
2492 + */
2493 + if (n == 0)
2494 + return 0;
2495 +
2496 m = meson_parm_read(clk->map, &pll->m);
2497
2498 frac = MESON_PARM_APPLICABLE(&pll->frac) ?
2499 diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
2500 index 67e6691e080c..8856ce476ccf 100644
2501 --- a/drivers/clk/meson/meson8b.c
2502 +++ b/drivers/clk/meson/meson8b.c
2503 @@ -1764,8 +1764,11 @@ static struct clk_regmap meson8b_hdmi_sys = {
2504
2505 /*
2506 * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
2507 - * muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only
2508 - * has mali_0 and no glitch-free mux.
2509 + * muxed by a glitch-free switch on Meson8b and Meson8m2. The CCF can
2510 + * actually manage this glitch-free mux because it does top-to-bottom
2511 + * updates the each clock tree and switches to the "inactive" one when
2512 + * CLK_SET_RATE_GATE is set.
2513 + * Meson8 only has mali_0 and no glitch-free mux.
2514 */
2515 static const struct clk_hw *meson8b_mali_0_1_parent_hws[] = {
2516 &meson8b_xtal.hw,
2517 @@ -1830,7 +1833,7 @@ static struct clk_regmap meson8b_mali_0 = {
2518 &meson8b_mali_0_div.hw
2519 },
2520 .num_parents = 1,
2521 - .flags = CLK_SET_RATE_PARENT,
2522 + .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
2523 },
2524 };
2525
2526 @@ -1885,7 +1888,7 @@ static struct clk_regmap meson8b_mali_1 = {
2527 &meson8b_mali_1_div.hw
2528 },
2529 .num_parents = 1,
2530 - .flags = CLK_SET_RATE_PARENT,
2531 + .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
2532 },
2533 };
2534
2535 diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
2536 index 5a89ed88cc27..a88101480e33 100644
2537 --- a/drivers/clk/qcom/clk-rcg2.c
2538 +++ b/drivers/clk/qcom/clk-rcg2.c
2539 @@ -217,6 +217,9 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
2540
2541 clk_flags = clk_hw_get_flags(hw);
2542 p = clk_hw_get_parent_by_index(hw, index);
2543 + if (!p)
2544 + return -EINVAL;
2545 +
2546 if (clk_flags & CLK_SET_RATE_PARENT) {
2547 rate = f->freq;
2548 if (f->pre_div) {
2549 @@ -952,7 +955,7 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
2550 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
2551 struct clk_hw *p;
2552 unsigned long prate = 0;
2553 - u32 val, mask, cfg, mode;
2554 + u32 val, mask, cfg, mode, src;
2555 int i, num_parents;
2556
2557 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);
2558 @@ -962,12 +965,12 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
2559 if (cfg & mask)
2560 f->pre_div = cfg & mask;
2561
2562 - cfg &= CFG_SRC_SEL_MASK;
2563 - cfg >>= CFG_SRC_SEL_SHIFT;
2564 + src = cfg & CFG_SRC_SEL_MASK;
2565 + src >>= CFG_SRC_SEL_SHIFT;
2566
2567 num_parents = clk_hw_get_num_parents(hw);
2568 for (i = 0; i < num_parents; i++) {
2569 - if (cfg == rcg->parent_map[i].cfg) {
2570 + if (src == rcg->parent_map[i].cfg) {
2571 f->src = rcg->parent_map[i].src;
2572 p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
2573 prate = clk_hw_get_rate(p);
2574 diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
2575 index 930fa4a4c52a..e5c3db11bf26 100644
2576 --- a/drivers/clk/qcom/clk-smd-rpm.c
2577 +++ b/drivers/clk/qcom/clk-smd-rpm.c
2578 @@ -648,6 +648,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
2579 };
2580
2581 /* msm8998 */
2582 +DEFINE_CLK_SMD_RPM(msm8998, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
2583 DEFINE_CLK_SMD_RPM(msm8998, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
2584 DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
2585 DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
2586 @@ -671,6 +672,8 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
2587 DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
2588 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
2589 static struct clk_smd_rpm *msm8998_clks[] = {
2590 + [RPM_SMD_BIMC_CLK] = &msm8998_bimc_clk,
2591 + [RPM_SMD_BIMC_A_CLK] = &msm8998_bimc_a_clk,
2592 [RPM_SMD_PCNOC_CLK] = &msm8998_pcnoc_clk,
2593 [RPM_SMD_PCNOC_A_CLK] = &msm8998_pcnoc_a_clk,
2594 [RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
2595 diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
2596 index d25c8ba00a65..532626946b8d 100644
2597 --- a/drivers/clk/renesas/rcar-gen3-cpg.c
2598 +++ b/drivers/clk/renesas/rcar-gen3-cpg.c
2599 @@ -464,7 +464,8 @@ static struct clk * __init cpg_rpc_clk_register(const char *name,
2600
2601 clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
2602 &rpc->div.hw, &clk_divider_ops,
2603 - &rpc->gate.hw, &clk_gate_ops, 0);
2604 + &rpc->gate.hw, &clk_gate_ops,
2605 + CLK_SET_RATE_PARENT);
2606 if (IS_ERR(clk)) {
2607 kfree(rpc);
2608 return clk;
2609 @@ -500,7 +501,8 @@ static struct clk * __init cpg_rpcd2_clk_register(const char *name,
2610
2611 clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
2612 &rpcd2->fixed.hw, &clk_fixed_factor_ops,
2613 - &rpcd2->gate.hw, &clk_gate_ops, 0);
2614 + &rpcd2->gate.hw, &clk_gate_ops,
2615 + CLK_SET_RATE_PARENT);
2616 if (IS_ERR(clk))
2617 kfree(rpcd2);
2618
2619 diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2620 index 49bd7a4c015c..5f66bf879772 100644
2621 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2622 +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2623 @@ -921,11 +921,26 @@ static const struct sunxi_ccu_desc sun50i_a64_ccu_desc = {
2624 .num_resets = ARRAY_SIZE(sun50i_a64_ccu_resets),
2625 };
2626
2627 +static struct ccu_pll_nb sun50i_a64_pll_cpu_nb = {
2628 + .common = &pll_cpux_clk.common,
2629 + /* copy from pll_cpux_clk */
2630 + .enable = BIT(31),
2631 + .lock = BIT(28),
2632 +};
2633 +
2634 +static struct ccu_mux_nb sun50i_a64_cpu_nb = {
2635 + .common = &cpux_clk.common,
2636 + .cm = &cpux_clk.mux,
2637 + .delay_us = 1, /* > 8 clock cycles at 24 MHz */
2638 + .bypass_index = 1, /* index of 24 MHz oscillator */
2639 +};
2640 +
2641 static int sun50i_a64_ccu_probe(struct platform_device *pdev)
2642 {
2643 struct resource *res;
2644 void __iomem *reg;
2645 u32 val;
2646 + int ret;
2647
2648 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2649 reg = devm_ioremap_resource(&pdev->dev, res);
2650 @@ -939,7 +954,18 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
2651
2652 writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
2653
2654 - return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
2655 + ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
2656 + if (ret)
2657 + return ret;
2658 +
2659 + /* Gate then ungate PLL CPU after any rate changes */
2660 + ccu_pll_notifier_register(&sun50i_a64_pll_cpu_nb);
2661 +
2662 + /* Reparent CPU during PLL CPU rate changes */
2663 + ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
2664 + &sun50i_a64_cpu_nb);
2665 +
2666 + return 0;
2667 }
2668
2669 static const struct of_device_id sun50i_a64_ccu_ids[] = {
2670 diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
2671 index 9dd6185a4b4e..66e4b2b9ec60 100644
2672 --- a/drivers/clk/ti/clk-7xx.c
2673 +++ b/drivers/clk/ti/clk-7xx.c
2674 @@ -405,7 +405,7 @@ static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = {
2675 };
2676
2677 static const struct omap_clkctrl_reg_data dra7_gmac_clkctrl_regs[] __initconst = {
2678 - { DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck" },
2679 + { DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "gmac_main_clk" },
2680 { 0 },
2681 };
2682
2683 diff --git a/drivers/clk/uniphier/clk-uniphier-peri.c b/drivers/clk/uniphier/clk-uniphier-peri.c
2684 index 9caa52944b1c..3e32db9dad81 100644
2685 --- a/drivers/clk/uniphier/clk-uniphier-peri.c
2686 +++ b/drivers/clk/uniphier/clk-uniphier-peri.c
2687 @@ -18,8 +18,8 @@
2688 #define UNIPHIER_PERI_CLK_FI2C(idx, ch) \
2689 UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c", 0x24, 24 + (ch))
2690
2691 -#define UNIPHIER_PERI_CLK_SCSSI(idx) \
2692 - UNIPHIER_CLK_GATE("scssi", (idx), "spi", 0x20, 17)
2693 +#define UNIPHIER_PERI_CLK_SCSSI(idx, ch) \
2694 + UNIPHIER_CLK_GATE("scssi" #ch, (idx), "spi", 0x20, 17 + (ch))
2695
2696 #define UNIPHIER_PERI_CLK_MCSSI(idx) \
2697 UNIPHIER_CLK_GATE("mcssi", (idx), "spi", 0x24, 14)
2698 @@ -35,7 +35,7 @@ const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
2699 UNIPHIER_PERI_CLK_I2C(6, 2),
2700 UNIPHIER_PERI_CLK_I2C(7, 3),
2701 UNIPHIER_PERI_CLK_I2C(8, 4),
2702 - UNIPHIER_PERI_CLK_SCSSI(11),
2703 + UNIPHIER_PERI_CLK_SCSSI(11, 0),
2704 { /* sentinel */ }
2705 };
2706
2707 @@ -51,7 +51,10 @@ const struct uniphier_clk_data uniphier_pro4_peri_clk_data[] = {
2708 UNIPHIER_PERI_CLK_FI2C(8, 4),
2709 UNIPHIER_PERI_CLK_FI2C(9, 5),
2710 UNIPHIER_PERI_CLK_FI2C(10, 6),
2711 - UNIPHIER_PERI_CLK_SCSSI(11),
2712 - UNIPHIER_PERI_CLK_MCSSI(12),
2713 + UNIPHIER_PERI_CLK_SCSSI(11, 0),
2714 + UNIPHIER_PERI_CLK_SCSSI(12, 1),
2715 + UNIPHIER_PERI_CLK_SCSSI(13, 2),
2716 + UNIPHIER_PERI_CLK_SCSSI(14, 3),
2717 + UNIPHIER_PERI_CLK_MCSSI(15),
2718 { /* sentinel */ }
2719 };
2720 diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
2721 index 2b196cbfadb6..b235f446ee50 100644
2722 --- a/drivers/clocksource/bcm2835_timer.c
2723 +++ b/drivers/clocksource/bcm2835_timer.c
2724 @@ -121,7 +121,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
2725 ret = setup_irq(irq, &timer->act);
2726 if (ret) {
2727 pr_err("Can't set up timer IRQ\n");
2728 - goto err_iounmap;
2729 + goto err_timer_free;
2730 }
2731
2732 clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
2733 @@ -130,6 +130,9 @@ static int __init bcm2835_timer_init(struct device_node *node)
2734
2735 return 0;
2736
2737 +err_timer_free:
2738 + kfree(timer);
2739 +
2740 err_iounmap:
2741 iounmap(base);
2742 return ret;
2743 diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
2744 index 62745c962049..e421946a91c5 100644
2745 --- a/drivers/clocksource/timer-davinci.c
2746 +++ b/drivers/clocksource/timer-davinci.c
2747 @@ -302,10 +302,6 @@ int __init davinci_timer_register(struct clk *clk,
2748 return rv;
2749 }
2750
2751 - clockevents_config_and_register(&clockevent->dev, tick_rate,
2752 - DAVINCI_TIMER_MIN_DELTA,
2753 - DAVINCI_TIMER_MAX_DELTA);
2754 -
2755 davinci_clocksource.dev.rating = 300;
2756 davinci_clocksource.dev.read = davinci_clocksource_read;
2757 davinci_clocksource.dev.mask =
2758 @@ -323,6 +319,10 @@ int __init davinci_timer_register(struct clk *clk,
2759 davinci_clocksource_init_tim34(base);
2760 }
2761
2762 + clockevents_config_and_register(&clockevent->dev, tick_rate,
2763 + DAVINCI_TIMER_MIN_DELTA,
2764 + DAVINCI_TIMER_MAX_DELTA);
2765 +
2766 rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
2767 if (rv) {
2768 pr_err("Unable to register clocksource");
2769 diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
2770 index 7316312935c8..06b2b3fa5206 100644
2771 --- a/drivers/crypto/Kconfig
2772 +++ b/drivers/crypto/Kconfig
2773 @@ -739,7 +739,7 @@ source "drivers/crypto/stm32/Kconfig"
2774
2775 config CRYPTO_DEV_SAFEXCEL
2776 tristate "Inside Secure's SafeXcel cryptographic engine driver"
2777 - depends on OF || PCI || COMPILE_TEST
2778 + depends on (OF || PCI || COMPILE_TEST) && HAS_IOMEM
2779 select CRYPTO_LIB_AES
2780 select CRYPTO_AUTHENC
2781 select CRYPTO_BLKCIPHER
2782 diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
2783 index aca75237bbcf..dffa2aa855fd 100644
2784 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c
2785 +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
2786 @@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
2787 return 0;
2788 }
2789
2790 +static void chtls_purge_wr_queue(struct sock *sk)
2791 +{
2792 + struct sk_buff *skb;
2793 +
2794 + while ((skb = dequeue_wr(sk)) != NULL)
2795 + kfree_skb(skb);
2796 +}
2797 +
2798 static void chtls_release_resources(struct sock *sk)
2799 {
2800 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
2801 @@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk)
2802 kfree_skb(csk->txdata_skb_cache);
2803 csk->txdata_skb_cache = NULL;
2804
2805 + if (csk->wr_credits != csk->wr_max_credits) {
2806 + chtls_purge_wr_queue(sk);
2807 + chtls_reset_wr_list(csk);
2808 + }
2809 +
2810 if (csk->l2t_entry) {
2811 cxgb4_l2t_release(csk->l2t_entry);
2812 csk->l2t_entry = NULL;
2813 @@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
2814 else
2815 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2816 }
2817 + kfree_skb(skb);
2818 }
2819
2820 static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
2821 @@ -2062,19 +2076,6 @@ rel_skb:
2822 return 0;
2823 }
2824
2825 -static struct sk_buff *dequeue_wr(struct sock *sk)
2826 -{
2827 - struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
2828 - struct sk_buff *skb = csk->wr_skb_head;
2829 -
2830 - if (likely(skb)) {
2831 - /* Don't bother clearing the tail */
2832 - csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
2833 - WR_SKB_CB(skb)->next_wr = NULL;
2834 - }
2835 - return skb;
2836 -}
2837 -
2838 static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
2839 {
2840 struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
2841 diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
2842 index 129d7ac649a9..3fac0c74a41f 100644
2843 --- a/drivers/crypto/chelsio/chtls/chtls_cm.h
2844 +++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
2845 @@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
2846 kfree_skb(skb);
2847 }
2848
2849 +static inline void chtls_reset_wr_list(struct chtls_sock *csk)
2850 +{
2851 + csk->wr_skb_head = NULL;
2852 + csk->wr_skb_tail = NULL;
2853 +}
2854 +
2855 static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
2856 {
2857 WR_SKB_CB(skb)->next_wr = NULL;
2858 @@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
2859 WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
2860 csk->wr_skb_tail = skb;
2861 }
2862 +
2863 +static inline struct sk_buff *dequeue_wr(struct sock *sk)
2864 +{
2865 + struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
2866 + struct sk_buff *skb = NULL;
2867 +
2868 + skb = csk->wr_skb_head;
2869 +
2870 + if (likely(skb)) {
2871 + /* Don't bother clearing the tail */
2872 + csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
2873 + WR_SKB_CB(skb)->next_wr = NULL;
2874 + }
2875 + return skb;
2876 +}
2877 #endif
2878 diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
2879 index 2a34035d3cfb..a217fe72602d 100644
2880 --- a/drivers/crypto/chelsio/chtls/chtls_hw.c
2881 +++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
2882 @@ -350,6 +350,7 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
2883 kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
2884 kwr->sc_imm.len = cpu_to_be32(klen);
2885
2886 + lock_sock(sk);
2887 /* key info */
2888 kctx = (struct _key_ctx *)(kwr + 1);
2889 ret = chtls_key_info(csk, kctx, keylen, optname);
2890 @@ -388,8 +389,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
2891 csk->tlshws.txkey = keyid;
2892 }
2893
2894 + release_sock(sk);
2895 return ret;
2896 out_notcb:
2897 + release_sock(sk);
2898 free_tls_keyid(sk);
2899 out_nokey:
2900 kfree_skb(skb);
2901 diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
2902 index af4a3ccb96b3..1433f2ba9d3b 100644
2903 --- a/drivers/devfreq/Kconfig
2904 +++ b/drivers/devfreq/Kconfig
2905 @@ -118,7 +118,8 @@ config ARM_TEGRA20_DEVFREQ
2906
2907 config ARM_RK3399_DMC_DEVFREQ
2908 tristate "ARM RK3399 DMC DEVFREQ Driver"
2909 - depends on ARCH_ROCKCHIP
2910 + depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
2911 + (COMPILE_TEST && HAVE_ARM_SMCCC)
2912 select DEVFREQ_EVENT_ROCKCHIP_DFI
2913 select DEVFREQ_GOV_SIMPLE_ONDEMAND
2914 select PM_DEVFREQ_EVENT
2915 diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
2916 index cef2cf5347ca..a53e0a6ffdfe 100644
2917 --- a/drivers/devfreq/event/Kconfig
2918 +++ b/drivers/devfreq/event/Kconfig
2919 @@ -34,7 +34,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU
2920
2921 config DEVFREQ_EVENT_ROCKCHIP_DFI
2922 tristate "ROCKCHIP DFI DEVFREQ event Driver"
2923 - depends on ARCH_ROCKCHIP
2924 + depends on ARCH_ROCKCHIP || COMPILE_TEST
2925 help
2926 This add the devfreq-event driver for Rockchip SoC. It provides DFI
2927 (DDR Monitor Module) driver to count ddr load.
2928 diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
2929 index 87b42055e6bc..c4873bb791f8 100644
2930 --- a/drivers/devfreq/event/exynos-ppmu.c
2931 +++ b/drivers/devfreq/event/exynos-ppmu.c
2932 @@ -101,17 +101,22 @@ static struct __exynos_ppmu_events {
2933 PPMU_EVENT(dmc1_1),
2934 };
2935
2936 -static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
2937 +static int __exynos_ppmu_find_ppmu_id(const char *edev_name)
2938 {
2939 int i;
2940
2941 for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
2942 - if (!strcmp(edev->desc->name, ppmu_events[i].name))
2943 + if (!strcmp(edev_name, ppmu_events[i].name))
2944 return ppmu_events[i].id;
2945
2946 return -EINVAL;
2947 }
2948
2949 +static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
2950 +{
2951 + return __exynos_ppmu_find_ppmu_id(edev->desc->name);
2952 +}
2953 +
2954 /*
2955 * The devfreq-event ops structure for PPMU v1.1
2956 */
2957 @@ -556,13 +561,11 @@ static int of_get_devfreq_events(struct device_node *np,
2958 * use default if not.
2959 */
2960 if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
2961 - struct devfreq_event_dev edev;
2962 int id;
2963 /* Not all registers take the same value for
2964 * read+write data count.
2965 */
2966 - edev.desc = &desc[j];
2967 - id = exynos_ppmu_find_ppmu_id(&edev);
2968 + id = __exynos_ppmu_find_ppmu_id(desc[j].name);
2969
2970 switch (id) {
2971 case PPMU_PMNCNT0:
2972 diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
2973 index 03ac4b96117c..4b604086b1b3 100644
2974 --- a/drivers/dma/dmaengine.c
2975 +++ b/drivers/dma/dmaengine.c
2976 @@ -179,7 +179,7 @@ __dma_device_satisfies_mask(struct dma_device *device,
2977
2978 static struct module *dma_chan_to_owner(struct dma_chan *chan)
2979 {
2980 - return chan->device->dev->driver->owner;
2981 + return chan->device->owner;
2982 }
2983
2984 /**
2985 @@ -919,6 +919,8 @@ int dma_async_device_register(struct dma_device *device)
2986 return -EIO;
2987 }
2988
2989 + device->owner = device->dev->driver->owner;
2990 +
2991 if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
2992 dev_err(device->dev,
2993 "Device claims capability %s, but op is not defined\n",
2994 diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
2995 index 89792083d62c..95cc0256b387 100644
2996 --- a/drivers/dma/fsl-qdma.c
2997 +++ b/drivers/dma/fsl-qdma.c
2998 @@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
2999
3000 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
3001
3002 - if (!fsl_queue->comp_pool && !fsl_queue->comp_pool)
3003 + if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
3004 return;
3005
3006 list_for_each_entry_safe(comp_temp, _comp_temp,
3007 diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
3008 index c27e206a764c..66f1b2ac5cde 100644
3009 --- a/drivers/dma/imx-sdma.c
3010 +++ b/drivers/dma/imx-sdma.c
3011 @@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
3012 return;
3013 }
3014 sdmac->desc = desc = to_sdma_desc(&vd->tx);
3015 - /*
3016 - * Do not delete the node in desc_issued list in cyclic mode, otherwise
3017 - * the desc allocated will never be freed in vchan_dma_desc_free_list
3018 - */
3019 - if (!(sdmac->flags & IMX_DMA_SG_LOOP))
3020 - list_del(&vd->node);
3021 +
3022 + list_del(&vd->node);
3023
3024 sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
3025 sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
3026 @@ -1071,7 +1067,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
3027
3028 spin_lock_irqsave(&sdmac->vc.lock, flags);
3029 vchan_get_all_descriptors(&sdmac->vc, &head);
3030 - sdmac->desc = NULL;
3031 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
3032 vchan_dma_desc_free_list(&sdmac->vc, &head);
3033 sdmac->context_loaded = false;
3034 @@ -1080,11 +1075,19 @@ static void sdma_channel_terminate_work(struct work_struct *work)
3035 static int sdma_disable_channel_async(struct dma_chan *chan)
3036 {
3037 struct sdma_channel *sdmac = to_sdma_chan(chan);
3038 + unsigned long flags;
3039 +
3040 + spin_lock_irqsave(&sdmac->vc.lock, flags);
3041
3042 sdma_disable_channel(chan);
3043
3044 - if (sdmac->desc)
3045 + if (sdmac->desc) {
3046 + vchan_terminate_vdesc(&sdmac->desc->vd);
3047 + sdmac->desc = NULL;
3048 schedule_work(&sdmac->terminate_worker);
3049 + }
3050 +
3051 + spin_unlock_irqrestore(&sdmac->vc.lock, flags);
3052
3053 return 0;
3054 }
3055 diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
3056 index 413cdb4a591d..bb9ceeaf29bf 100644
3057 --- a/drivers/edac/sifive_edac.c
3058 +++ b/drivers/edac/sifive_edac.c
3059 @@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev)
3060 p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
3061 1, 1, NULL, 0,
3062 edac_device_alloc_index());
3063 - if (IS_ERR(p->dci))
3064 - return PTR_ERR(p->dci);
3065 + if (!p->dci)
3066 + return -ENOMEM;
3067
3068 p->dci->dev = &pdev->dev;
3069 p->dci->mod_name = "Sifive ECC Manager";
3070 diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
3071 index 08234e64993a..3224933f4c8f 100644
3072 --- a/drivers/gpio/gpio-grgpio.c
3073 +++ b/drivers/gpio/gpio-grgpio.c
3074 @@ -253,17 +253,16 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
3075 lirq->irq = irq;
3076 uirq = &priv->uirqs[lirq->index];
3077 if (uirq->refcnt == 0) {
3078 + spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
3079 ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
3080 dev_name(priv->dev), priv);
3081 if (ret) {
3082 dev_err(priv->dev,
3083 "Could not request underlying irq %d\n",
3084 uirq->uirq);
3085 -
3086 - spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
3087 -
3088 return ret;
3089 }
3090 + spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
3091 }
3092 uirq->refcnt++;
3093
3094 @@ -309,8 +308,11 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
3095 if (index >= 0) {
3096 uirq = &priv->uirqs[lirq->index];
3097 uirq->refcnt--;
3098 - if (uirq->refcnt == 0)
3099 + if (uirq->refcnt == 0) {
3100 + spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
3101 free_irq(uirq->uirq, priv);
3102 + return;
3103 + }
3104 }
3105
3106 spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
3107 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
3108 index 22506e4614b3..484fa6560adc 100644
3109 --- a/drivers/gpio/gpiolib.c
3110 +++ b/drivers/gpio/gpiolib.c
3111 @@ -1924,6 +1924,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
3112 parent_type);
3113 chip_info(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
3114 irq, parent_hwirq);
3115 + irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
3116 ret = irq_domain_alloc_irqs_parent(d, irq, 1, &parent_fwspec);
3117 if (ret)
3118 chip_err(gc,
3119 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
3120 index 1c9d40f97a9b..f2f40f05fa5c 100644
3121 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
3122 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
3123 @@ -338,17 +338,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
3124 path_size += le16_to_cpu(path->usSize);
3125
3126 if (device_support & le16_to_cpu(path->usDeviceTag)) {
3127 - uint8_t con_obj_id, con_obj_num, con_obj_type;
3128 -
3129 - con_obj_id =
3130 + uint8_t con_obj_id =
3131 (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
3132 >> OBJECT_ID_SHIFT;
3133 - con_obj_num =
3134 - (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
3135 - >> ENUM_ID_SHIFT;
3136 - con_obj_type =
3137 - (le16_to_cpu(path->usConnObjectId) &
3138 - OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
3139
3140 /* Skip TV/CV support */
3141 if ((le16_to_cpu(path->usDeviceTag) ==
3142 @@ -373,14 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
3143 router.ddc_valid = false;
3144 router.cd_valid = false;
3145 for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
3146 - uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
3147 -
3148 - grph_obj_id =
3149 - (le16_to_cpu(path->usGraphicObjIds[j]) &
3150 - OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3151 - grph_obj_num =
3152 - (le16_to_cpu(path->usGraphicObjIds[j]) &
3153 - ENUM_ID_MASK) >> ENUM_ID_SHIFT;
3154 + uint8_t grph_obj_type=
3155 grph_obj_type =
3156 (le16_to_cpu(path->usGraphicObjIds[j]) &
3157 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
3158 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
3159 index 7a6c837c0a85..13694d5eba47 100644
3160 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
3161 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
3162 @@ -3466,8 +3466,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3163 if (r)
3164 return r;
3165
3166 - amdgpu_amdkfd_pre_reset(adev);
3167 -
3168 /* Resume IP prior to SMC */
3169 r = amdgpu_device_ip_reinit_early_sriov(adev);
3170 if (r)
3171 diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
3172 index de9b995b65b1..2d780820ba00 100644
3173 --- a/drivers/gpu/drm/amd/amdgpu/nv.c
3174 +++ b/drivers/gpu/drm/amd/amdgpu/nv.c
3175 @@ -660,6 +660,12 @@ static int nv_common_early_init(void *handle)
3176 adev->pg_flags = AMD_PG_SUPPORT_VCN |
3177 AMD_PG_SUPPORT_VCN_DPG |
3178 AMD_PG_SUPPORT_ATHUB;
3179 + /* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
3180 + * as a consequence, the rev_id and external_rev_id are wrong.
3181 + * workaround it by hardcoding rev_id to 0 (default value).
3182 + */
3183 + if (amdgpu_sriov_vf(adev))
3184 + adev->rev_id = 0;
3185 adev->external_rev_id = adev->rev_id + 0xa;
3186 break;
3187 default:
3188 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
3189 index 839f186e1182..19e870c79896 100644
3190 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
3191 +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
3192 @@ -52,6 +52,7 @@
3193 uint32_t old_ = 0; \
3194 uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
3195 uint32_t loop = adev->usec_timeout; \
3196 + ret = 0; \
3197 while ((tmp_ & (mask)) != (expected_value)) { \
3198 if (old_ != tmp_) { \
3199 loop = adev->usec_timeout; \
3200 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
3201 index 15c523027285..511712c2e382 100644
3202 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
3203 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
3204 @@ -93,7 +93,7 @@ void kfd_debugfs_init(void)
3205 kfd_debugfs_hqds_by_device, &kfd_debugfs_fops);
3206 debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
3207 kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
3208 - debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
3209 + debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
3210 NULL, &kfd_debugfs_hang_hws_fops);
3211 }
3212
3213 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3214 index f335f73919d1..a2ed9c257cb0 100644
3215 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3216 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3217 @@ -1181,16 +1181,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
3218
3219 list_add(&q->list, &qpd->queues_list);
3220 qpd->queue_count++;
3221 +
3222 + if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
3223 + dqm->sdma_queue_count++;
3224 + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
3225 + dqm->xgmi_sdma_queue_count++;
3226 +
3227 if (q->properties.is_active) {
3228 dqm->queue_count++;
3229 retval = execute_queues_cpsch(dqm,
3230 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
3231 }
3232
3233 - if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
3234 - dqm->sdma_queue_count++;
3235 - else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
3236 - dqm->xgmi_sdma_queue_count++;
3237 /*
3238 * Unconditionally increment this counter, regardless of the queue's
3239 * type or whether the queue is active.
3240 diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
3241 deleted file mode 100644
3242 index 45a07eeffbb6..000000000000
3243 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
3244 +++ /dev/null
3245 @@ -1,43 +0,0 @@
3246 -/*
3247 - * Copyright 2017 Advanced Micro Devices, Inc.
3248 - *
3249 - * Permission is hereby granted, free of charge, to any person obtaining a
3250 - * copy of this software and associated documentation files (the "Software"),
3251 - * to deal in the Software without restriction, including without limitation
3252 - * the rights to use, copy, modify, merge, publish, distribute, sublicense,
3253 - * and/or sell copies of the Software, and to permit persons to whom the
3254 - * Software is furnished to do so, subject to the following conditions:
3255 - *
3256 - * The above copyright notice and this permission notice shall be included in
3257 - * all copies or substantial portions of the Software.
3258 - *
3259 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3260 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3261 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
3262 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
3263 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
3264 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
3265 - * OTHER DEALINGS IN THE SOFTWARE.
3266 - *
3267 - * Authors: AMD
3268 - *
3269 - */
3270 -
3271 -#ifndef _DCN_CALC_MATH_H_
3272 -#define _DCN_CALC_MATH_H_
3273 -
3274 -float dcn_bw_mod(const float arg1, const float arg2);
3275 -float dcn_bw_min2(const float arg1, const float arg2);
3276 -unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2);
3277 -float dcn_bw_max2(const float arg1, const float arg2);
3278 -float dcn_bw_floor2(const float arg, const float significance);
3279 -float dcn_bw_floor(const float arg);
3280 -float dcn_bw_ceil2(const float arg, const float significance);
3281 -float dcn_bw_ceil(const float arg);
3282 -float dcn_bw_max3(float v1, float v2, float v3);
3283 -float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5);
3284 -float dcn_bw_pow(float a, float exp);
3285 -float dcn_bw_log(float a, float b);
3286 -double dcn_bw_fabs(double a);
3287 -
3288 -#endif /* _DCN_CALC_MATH_H_ */
3289 diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
3290 index 9b2cb57bf2ba..c9a241fe46cf 100644
3291 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
3292 +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
3293 @@ -1438,6 +1438,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)
3294 struct dc_context *ctx = dc->ctx;
3295 struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
3296 bool res;
3297 + unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
3298
3299 /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
3300 res = dm_pp_get_clock_levels_by_type_with_voltage(
3301 @@ -1449,17 +1450,28 @@ void dcn_bw_update_from_pplib(struct dc *dc)
3302 res = verify_clock_values(&fclks);
3303
3304 if (res) {
3305 - ASSERT(fclks.num_levels >= 3);
3306 - dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0;
3307 - dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
3308 - (fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0)
3309 - * ddr4_dram_factor_single_Channel / 1000.0;
3310 - dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
3311 - (fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0)
3312 - * ddr4_dram_factor_single_Channel / 1000.0;
3313 - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
3314 - (fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0)
3315 - * ddr4_dram_factor_single_Channel / 1000.0;
3316 + ASSERT(fclks.num_levels);
3317 +
3318 + vmin0p65_idx = 0;
3319 + vmid0p72_idx = fclks.num_levels -
3320 + (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
3321 + vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
3322 + vmax0p9_idx = fclks.num_levels - 1;
3323 +
3324 + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
3325 + 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
3326 + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
3327 + dc->dcn_soc->number_of_channels *
3328 + (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
3329 + * ddr4_dram_factor_single_Channel / 1000.0;
3330 + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
3331 + dc->dcn_soc->number_of_channels *
3332 + (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
3333 + * ddr4_dram_factor_single_Channel / 1000.0;
3334 + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
3335 + dc->dcn_soc->number_of_channels *
3336 + (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
3337 + * ddr4_dram_factor_single_Channel / 1000.0;
3338 } else
3339 BREAK_TO_DEBUGGER();
3340
3341 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
3342 index 4b8819c27fcd..4704aac336c2 100644
3343 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
3344 +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
3345 @@ -2267,12 +2267,7 @@ void dc_set_power_state(
3346 enum dc_acpi_cm_power_state power_state)
3347 {
3348 struct kref refcount;
3349 - struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
3350 - GFP_KERNEL);
3351 -
3352 - ASSERT(dml);
3353 - if (!dml)
3354 - return;
3355 + struct display_mode_lib *dml;
3356
3357 switch (power_state) {
3358 case DC_ACPI_CM_POWER_STATE_D0:
3359 @@ -2294,6 +2289,12 @@ void dc_set_power_state(
3360 * clean state, and dc hw programming optimizations will not
3361 * cause any trouble.
3362 */
3363 + dml = kzalloc(sizeof(struct display_mode_lib),
3364 + GFP_KERNEL);
3365 +
3366 + ASSERT(dml);
3367 + if (!dml)
3368 + return;
3369
3370 /* Preserve refcount */
3371 refcount = dc->current_state->refcount;
3372 @@ -2307,10 +2308,10 @@ void dc_set_power_state(
3373 dc->current_state->refcount = refcount;
3374 dc->current_state->bw_ctx.dml = *dml;
3375
3376 + kfree(dml);
3377 +
3378 break;
3379 }
3380 -
3381 - kfree(dml);
3382 }
3383
3384 void dc_resume(struct dc *dc)
3385 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
3386 index c0f1c62c59b4..3aedc724241e 100644
3387 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
3388 +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
3389 @@ -948,8 +948,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
3390 same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
3391
3392 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
3393 - sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
3394 - reason != DETECT_REASON_HPDRX) {
3395 + sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
3396 /*
3397 * TODO debug why Dell 2413 doesn't like
3398 * two link trainings
3399 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
3400 index dda90995ba93..8d5cfd5357c7 100644
3401 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
3402 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
3403 @@ -233,12 +233,13 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
3404 struct dc_crtc_timing *timing)
3405 {
3406 struct optc *optc1 = DCN10TG_FROM_TG(optc);
3407 - /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */
3408 int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
3409 / opp_cnt;
3410 - int memory_mask = mpcc_hactive <= 2560 ? 0x3 : 0xf;
3411 + uint32_t memory_mask;
3412 uint32_t data_fmt = 0;
3413
3414 + ASSERT(opp_cnt == 2);
3415 +
3416 /* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic
3417 * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
3418 * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start
3419 @@ -246,9 +247,17 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
3420 * MASTER_UPDATE_LOCK_DB_X, 160,
3421 * MASTER_UPDATE_LOCK_DB_Y, 240);
3422 */
3423 +
3424 + /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192,
3425 + * however, for ODM combine we can simplify by always using 4.
3426 + * To make sure there's no overlap, each instance "reserves" 2 memories and
3427 + * they are uniquely combined here.
3428 + */
3429 + memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
3430 +
3431 if (REG(OPTC_MEMORY_CONFIG))
3432 REG_SET(OPTC_MEMORY_CONFIG, 0,
3433 - OPTC_MEM_SEL, memory_mask << (optc->inst * 4));
3434 + OPTC_MEM_SEL, memory_mask);
3435
3436 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
3437 data_fmt = 1;
3438 @@ -257,7 +266,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
3439
3440 REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
3441
3442 - ASSERT(opp_cnt == 2);
3443 REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
3444 OPTC_NUM_OF_INPUT_SEGMENT, 1,
3445 OPTC_SEG0_SRC_SEL, opp_id[0],
3446 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
3447 index b953b02a1512..723af0b2dda0 100644
3448 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
3449 +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
3450 @@ -24,7 +24,7 @@
3451 */
3452
3453 #include "dml_common_defs.h"
3454 -#include "../calcs/dcn_calc_math.h"
3455 +#include "dcn_calc_math.h"
3456
3457 #include "dml_inline_defs.h"
3458
3459 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
3460 index eca140da13d8..ded71ea82413 100644
3461 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
3462 +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
3463 @@ -27,7 +27,7 @@
3464 #define __DML_INLINE_DEFS_H__
3465
3466 #include "dml_common_defs.h"
3467 -#include "../calcs/dcn_calc_math.h"
3468 +#include "dcn_calc_math.h"
3469 #include "dml_logger.h"
3470
3471 static inline double dml_min(double a, double b)
3472 diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
3473 new file mode 100644
3474 index 000000000000..45a07eeffbb6
3475 --- /dev/null
3476 +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
3477 @@ -0,0 +1,43 @@
3478 +/*
3479 + * Copyright 2017 Advanced Micro Devices, Inc.
3480 + *
3481 + * Permission is hereby granted, free of charge, to any person obtaining a
3482 + * copy of this software and associated documentation files (the "Software"),
3483 + * to deal in the Software without restriction, including without limitation
3484 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
3485 + * and/or sell copies of the Software, and to permit persons to whom the
3486 + * Software is furnished to do so, subject to the following conditions:
3487 + *
3488 + * The above copyright notice and this permission notice shall be included in
3489 + * all copies or substantial portions of the Software.
3490 + *
3491 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3492 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3493 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
3494 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
3495 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
3496 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
3497 + * OTHER DEALINGS IN THE SOFTWARE.
3498 + *
3499 + * Authors: AMD
3500 + *
3501 + */
3502 +
3503 +#ifndef _DCN_CALC_MATH_H_
3504 +#define _DCN_CALC_MATH_H_
3505 +
3506 +float dcn_bw_mod(const float arg1, const float arg2);
3507 +float dcn_bw_min2(const float arg1, const float arg2);
3508 +unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2);
3509 +float dcn_bw_max2(const float arg1, const float arg2);
3510 +float dcn_bw_floor2(const float arg, const float significance);
3511 +float dcn_bw_floor(const float arg);
3512 +float dcn_bw_ceil2(const float arg, const float significance);
3513 +float dcn_bw_ceil(const float arg);
3514 +float dcn_bw_max3(float v1, float v2, float v3);
3515 +float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5);
3516 +float dcn_bw_pow(float a, float exp);
3517 +float dcn_bw_log(float a, float b);
3518 +double dcn_bw_fabs(double a);
3519 +
3520 +#endif /* _DCN_CALC_MATH_H_ */
3521 diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
3522 index 0978c698f0f8..7d67cb2c61f0 100644
3523 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
3524 +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
3525 @@ -803,6 +803,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
3526 2 * in_out_vrr->min_refresh_in_uhz)
3527 in_out_vrr->btr.btr_enabled = false;
3528
3529 + in_out_vrr->fixed.fixed_active = false;
3530 in_out_vrr->btr.btr_active = false;
3531 in_out_vrr->btr.inserted_duration_in_us = 0;
3532 in_out_vrr->btr.frames_to_insert = 0;
3533 @@ -822,6 +823,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
3534 in_out_vrr->adjust.v_total_max = stream->timing.v_total;
3535 } else if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&
3536 refresh_range >= MIN_REFRESH_RANGE_IN_US) {
3537 +
3538 in_out_vrr->adjust.v_total_min =
3539 calc_v_total_from_refresh(stream,
3540 in_out_vrr->max_refresh_in_uhz);
3541 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
3542 index 1115761982a7..fed3fc4bb57a 100644
3543 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
3544 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
3545 @@ -1026,12 +1026,15 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
3546
3547 clocks->num_levels = 0;
3548 for (i = 0; i < pclk_vol_table->count; i++) {
3549 - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
3550 - clocks->data[i].latency_in_us = latency_required ?
3551 - smu10_get_mem_latency(hwmgr,
3552 - pclk_vol_table->entries[i].clk) :
3553 - 0;
3554 - clocks->num_levels++;
3555 + if (pclk_vol_table->entries[i].clk) {
3556 + clocks->data[clocks->num_levels].clocks_in_khz =
3557 + pclk_vol_table->entries[i].clk * 10;
3558 + clocks->data[clocks->num_levels].latency_in_us = latency_required ?
3559 + smu10_get_mem_latency(hwmgr,
3560 + pclk_vol_table->entries[i].clk) :
3561 + 0;
3562 + clocks->num_levels++;
3563 + }
3564 }
3565
3566 return 0;
3567 @@ -1077,9 +1080,11 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
3568
3569 clocks->num_levels = 0;
3570 for (i = 0; i < pclk_vol_table->count; i++) {
3571 - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
3572 - clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
3573 - clocks->num_levels++;
3574 + if (pclk_vol_table->entries[i].clk) {
3575 + clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
3576 + clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
3577 + clocks->num_levels++;
3578 + }
3579 }
3580
3581 return 0;
3582 diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
3583 index c8922b7cac09..12e748b202d6 100644
3584 --- a/drivers/gpu/drm/drm_client_modeset.c
3585 +++ b/drivers/gpu/drm/drm_client_modeset.c
3586 @@ -114,6 +114,33 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
3587 return NULL;
3588 }
3589
3590 +static struct drm_display_mode *
3591 +drm_connector_get_tiled_mode(struct drm_connector *connector)
3592 +{
3593 + struct drm_display_mode *mode;
3594 +
3595 + list_for_each_entry(mode, &connector->modes, head) {
3596 + if (mode->hdisplay == connector->tile_h_size &&
3597 + mode->vdisplay == connector->tile_v_size)
3598 + return mode;
3599 + }
3600 + return NULL;
3601 +}
3602 +
3603 +static struct drm_display_mode *
3604 +drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
3605 +{
3606 + struct drm_display_mode *mode;
3607 +
3608 + list_for_each_entry(mode, &connector->modes, head) {
3609 + if (mode->hdisplay == connector->tile_h_size &&
3610 + mode->vdisplay == connector->tile_v_size)
3611 + continue;
3612 + return mode;
3613 + }
3614 + return NULL;
3615 +}
3616 +
3617 static struct drm_display_mode *
3618 drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height)
3619 {
3620 @@ -348,8 +375,15 @@ static bool drm_client_target_preferred(struct drm_connector **connectors,
3621 struct drm_connector *connector;
3622 u64 conn_configured = 0;
3623 int tile_pass = 0;
3624 + int num_tiled_conns = 0;
3625 int i;
3626
3627 + for (i = 0; i < connector_count; i++) {
3628 + if (connectors[i]->has_tile &&
3629 + connectors[i]->status == connector_status_connected)
3630 + num_tiled_conns++;
3631 + }
3632 +
3633 retry:
3634 for (i = 0; i < connector_count; i++) {
3635 connector = connectors[i];
3636 @@ -399,6 +433,28 @@ retry:
3637 list_for_each_entry(modes[i], &connector->modes, head)
3638 break;
3639 }
3640 + /*
3641 + * In case of tiled mode if all tiles not present fallback to
3642 + * first available non tiled mode.
3643 + * After all tiles are present, try to find the tiled mode
3644 + * for all and if tiled mode not present due to fbcon size
3645 + * limitations, use first non tiled mode only for
3646 + * tile 0,0 and set to no mode for all other tiles.
3647 + */
3648 + if (connector->has_tile) {
3649 + if (num_tiled_conns <
3650 + connector->num_h_tile * connector->num_v_tile ||
3651 + (connector->tile_h_loc == 0 &&
3652 + connector->tile_v_loc == 0 &&
3653 + !drm_connector_get_tiled_mode(connector))) {
3654 + DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
3655 + connector->base.id);
3656 + modes[i] = drm_connector_fallback_non_tiled_mode(connector);
3657 + } else {
3658 + modes[i] = drm_connector_get_tiled_mode(connector);
3659 + }
3660 + }
3661 +
3662 DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
3663 "none");
3664 conn_configured |= BIT_ULL(i);
3665 @@ -516,6 +572,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
3666 bool fallback = true, ret = true;
3667 int num_connectors_enabled = 0;
3668 int num_connectors_detected = 0;
3669 + int num_tiled_conns = 0;
3670 struct drm_modeset_acquire_ctx ctx;
3671
3672 if (!drm_drv_uses_atomic_modeset(dev))
3673 @@ -533,6 +590,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
3674 memcpy(save_enabled, enabled, count);
3675 mask = GENMASK(count - 1, 0);
3676 conn_configured = 0;
3677 + for (i = 0; i < count; i++) {
3678 + if (connectors[i]->has_tile &&
3679 + connectors[i]->status == connector_status_connected)
3680 + num_tiled_conns++;
3681 + }
3682 retry:
3683 conn_seq = conn_configured;
3684 for (i = 0; i < count; i++) {
3685 @@ -632,6 +694,16 @@ retry:
3686 connector->name);
3687 modes[i] = &connector->state->crtc->mode;
3688 }
3689 + /*
3690 + * In case of tiled modes, if all tiles are not present
3691 + * then fallback to a non tiled mode.
3692 + */
3693 + if (connector->has_tile &&
3694 + num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
3695 + DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
3696 + connector->base.id);
3697 + modes[i] = drm_connector_fallback_non_tiled_mode(connector);
3698 + }
3699 crtcs[i] = new_crtc;
3700
3701 DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
3702 diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
3703 index be1b7ba92ffe..6a626c82e264 100644
3704 --- a/drivers/gpu/drm/drm_debugfs_crc.c
3705 +++ b/drivers/gpu/drm/drm_debugfs_crc.c
3706 @@ -140,8 +140,8 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
3707 if (IS_ERR(source))
3708 return PTR_ERR(source);
3709
3710 - if (source[len] == '\n')
3711 - source[len] = '\0';
3712 + if (source[len - 1] == '\n')
3713 + source[len - 1] = '\0';
3714
3715 ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
3716 if (ret)
3717 diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
3718 index f8154316a3b0..a05e64e3d80b 100644
3719 --- a/drivers/gpu/drm/drm_mipi_dbi.c
3720 +++ b/drivers/gpu/drm/drm_mipi_dbi.c
3721 @@ -367,9 +367,9 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
3722 memset(dbidev->tx_buf, 0, len);
3723
3724 mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
3725 - (width >> 8) & 0xFF, (width - 1) & 0xFF);
3726 + ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF);
3727 mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
3728 - (height >> 8) & 0xFF, (height - 1) & 0xFF);
3729 + ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF);
3730 mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
3731 (u8 *)dbidev->tx_buf, len);
3732
3733 diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
3734 index 218f3bb15276..90237abee088 100644
3735 --- a/drivers/gpu/drm/gma500/framebuffer.c
3736 +++ b/drivers/gpu/drm/gma500/framebuffer.c
3737 @@ -462,6 +462,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
3738 container_of(helper, struct psb_fbdev, psb_fb_helper);
3739 struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
3740 struct drm_psb_private *dev_priv = dev->dev_private;
3741 + unsigned int fb_size;
3742 int bytespp;
3743
3744 bytespp = sizes->surface_bpp / 8;
3745 @@ -471,8 +472,11 @@ static int psbfb_probe(struct drm_fb_helper *helper,
3746 /* If the mode will not fit in 32bit then switch to 16bit to get
3747 a console on full resolution. The X mode setting server will
3748 allocate its own 32bit GEM framebuffer */
3749 - if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
3750 - dev_priv->vram_stolen_size) {
3751 + fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
3752 + sizes->surface_height;
3753 + fb_size = ALIGN(fb_size, PAGE_SIZE);
3754 +
3755 + if (fb_size > dev_priv->vram_stolen_size) {
3756 sizes->surface_bpp = 16;
3757 sizes->surface_depth = 16;
3758 }
3759 diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
3760 index 2e2ed653e9c6..f156f245fdec 100644
3761 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c
3762 +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
3763 @@ -371,14 +371,18 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
3764 struct ingenic_drm *priv = drm_plane_get_priv(plane);
3765 struct drm_plane_state *state = plane->state;
3766 unsigned int width, height, cpp;
3767 + dma_addr_t addr;
3768
3769 - width = state->crtc->state->adjusted_mode.hdisplay;
3770 - height = state->crtc->state->adjusted_mode.vdisplay;
3771 - cpp = state->fb->format->cpp[plane->index];
3772 + if (state && state->fb) {
3773 + addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
3774 + width = state->crtc->state->adjusted_mode.hdisplay;
3775 + height = state->crtc->state->adjusted_mode.vdisplay;
3776 + cpp = state->fb->format->cpp[plane->index];
3777
3778 - priv->dma_hwdesc->addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
3779 - priv->dma_hwdesc->cmd = width * height * cpp / 4;
3780 - priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ;
3781 + priv->dma_hwdesc->addr = addr;
3782 + priv->dma_hwdesc->cmd = width * height * cpp / 4;
3783 + priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ;
3784 + }
3785 }
3786
3787 static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
3788 diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
3789 index 34a731755791..e6c049f4f08b 100644
3790 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
3791 +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
3792 @@ -298,6 +298,7 @@ err_pm_runtime_put:
3793 static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
3794 {
3795 struct drm_device *drm = mtk_crtc->base.dev;
3796 + struct drm_crtc *crtc = &mtk_crtc->base;
3797 int i;
3798
3799 DRM_DEBUG_DRIVER("%s\n", __func__);
3800 @@ -319,6 +320,13 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
3801 mtk_disp_mutex_unprepare(mtk_crtc->mutex);
3802
3803 pm_runtime_put(drm->dev);
3804 +
3805 + if (crtc->state->event && !crtc->state->active) {
3806 + spin_lock_irq(&crtc->dev->event_lock);
3807 + drm_crtc_send_vblank_event(crtc, crtc->state->event);
3808 + crtc->state->event = NULL;
3809 + spin_unlock_irq(&crtc->dev->event_lock);
3810 + }
3811 }
3812
3813 static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
3814 @@ -529,6 +537,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
3815 int pipe = priv->num_pipes;
3816 int ret;
3817 int i;
3818 + uint gamma_lut_size = 0;
3819
3820 if (!path)
3821 return 0;
3822 @@ -579,6 +588,9 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
3823 }
3824
3825 mtk_crtc->ddp_comp[i] = comp;
3826 +
3827 + if (comp->funcs && comp->funcs->gamma_set)
3828 + gamma_lut_size = MTK_LUT_SIZE;
3829 }
3830
3831 mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
3832 @@ -601,8 +613,10 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
3833 NULL, pipe);
3834 if (ret < 0)
3835 return ret;
3836 - drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
3837 - drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
3838 +
3839 + if (gamma_lut_size)
3840 + drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
3841 + drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, gamma_lut_size);
3842 priv->num_pipes++;
3843
3844 return 0;
3845 diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
3846 index e9c55d1d6c04..99cd6e62a971 100644
3847 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
3848 +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
3849 @@ -726,11 +726,18 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
3850 gpu->funcs->flush(gpu, gpu->rb[0]);
3851 if (!a5xx_idle(gpu, gpu->rb[0]))
3852 return -EINVAL;
3853 - } else {
3854 - /* Print a warning so if we die, we know why */
3855 + } else if (ret == -ENODEV) {
3856 + /*
3857 + * This device does not use zap shader (but print a warning
3858 + * just in case someone got their dt wrong.. hopefully they
3859 + * have a debug UART to realize the error of their ways...
3860 + * if you mess this up you are about to crash horribly)
3861 + */
3862 dev_warn_once(gpu->dev->dev,
3863 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
3864 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
3865 + } else {
3866 + return ret;
3867 }
3868
3869 /* Last step - yield the ringbuffer */
3870 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
3871 index dc8ec2c94301..686c34d706b0 100644
3872 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
3873 +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
3874 @@ -537,12 +537,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
3875 a6xx_flush(gpu, gpu->rb[0]);
3876 if (!a6xx_idle(gpu, gpu->rb[0]))
3877 return -EINVAL;
3878 - } else {
3879 - /* Print a warning so if we die, we know why */
3880 + } else if (ret == -ENODEV) {
3881 + /*
3882 + * This device does not use zap shader (but print a warning
3883 + * just in case someone got their dt wrong.. hopefully they
3884 + * have a debug UART to realize the error of their ways...
3885 + * if you mess this up you are about to crash horribly)
3886 + */
3887 dev_warn_once(gpu->dev->dev,
3888 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
3889 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
3890 ret = 0;
3891 + } else {
3892 + return ret;
3893 }
3894
3895 out:
3896 diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
3897 index fa1439941596..0ad5d87b5a8e 100644
3898 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
3899 +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
3900 @@ -635,10 +635,10 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
3901 unsigned long c, i;
3902 int ret = -ENOMEM;
3903
3904 - args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
3905 + args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
3906 if (!args.src)
3907 goto out;
3908 - args.dst = kcalloc(max, sizeof(args.dst), GFP_KERNEL);
3909 + args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
3910 if (!args.dst)
3911 goto out_free_src;
3912
3913 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
3914 index 9118df035b28..70bb6bb97af8 100644
3915 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
3916 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
3917 @@ -156,7 +156,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
3918
3919 fence = list_entry(fctx->pending.next, typeof(*fence), head);
3920 chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
3921 - if (nouveau_fence_update(fence->channel, fctx))
3922 + if (nouveau_fence_update(chan, fctx))
3923 ret = NVIF_NOTIFY_DROP;
3924 }
3925 spin_unlock_irqrestore(&fctx->lock, flags);
3926 diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
3927 index f0daf958e03a..621d28f094bc 100644
3928 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
3929 +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
3930 @@ -63,14 +63,12 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
3931 {
3932 struct nouveau_bo *nvbo = nouveau_bo(bo);
3933 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
3934 - struct nouveau_mem *mem;
3935 int ret;
3936
3937 if (drm->client.device.info.ram_size == 0)
3938 return -ENOMEM;
3939
3940 ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
3941 - mem = nouveau_mem(reg);
3942 if (ret)
3943 return ret;
3944
3945 @@ -103,11 +101,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
3946 {
3947 struct nouveau_bo *nvbo = nouveau_bo(bo);
3948 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
3949 - struct nouveau_mem *mem;
3950 int ret;
3951
3952 ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
3953 - mem = nouveau_mem(reg);
3954 if (ret)
3955 return ret;
3956
3957 diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
3958 index e85a08ecd9da..4cc186262d34 100644
3959 --- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
3960 +++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
3961 @@ -91,8 +91,8 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
3962 }
3963
3964 refcount_set(&tags->refcount, 1);
3965 + *ptags = memory->tags = tags;
3966 mutex_unlock(&fb->subdev.mutex);
3967 - *ptags = tags;
3968 return 0;
3969 }
3970
3971 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
3972 index bcf32d92ee5a..50e3539f33d2 100644
3973 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
3974 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
3975 @@ -74,6 +74,8 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
3976
3977 if (debug > subdev->debug)
3978 return;
3979 + if (!mthd)
3980 + return;
3981
3982 for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
3983 u32 base = chan->head * mthd->addr;
3984 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
3985 index 500cb08dd608..b57ab5cea9a1 100644
3986 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
3987 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
3988 @@ -143,23 +143,24 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
3989
3990 nent = (fuc.size / sizeof(struct gk20a_fw_av));
3991
3992 - pack = vzalloc((sizeof(*pack) * max_classes) +
3993 - (sizeof(*init) * (nent + 1)));
3994 + pack = vzalloc((sizeof(*pack) * (max_classes + 1)) +
3995 + (sizeof(*init) * (nent + max_classes + 1)));
3996 if (!pack) {
3997 ret = -ENOMEM;
3998 goto end;
3999 }
4000
4001 - init = (void *)(pack + max_classes);
4002 + init = (void *)(pack + max_classes + 1);
4003
4004 - for (i = 0; i < nent; i++) {
4005 - struct gf100_gr_init *ent = &init[i];
4006 + for (i = 0; i < nent; i++, init++) {
4007 struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
4008 u32 class = av->addr & 0xffff;
4009 u32 addr = (av->addr & 0xffff0000) >> 14;
4010
4011 if (prevclass != class) {
4012 - pack[classidx].init = ent;
4013 + if (prevclass) /* Add terminator to the method list. */
4014 + init++;
4015 + pack[classidx].init = init;
4016 pack[classidx].type = class;
4017 prevclass = class;
4018 if (++classidx >= max_classes) {
4019 @@ -169,10 +170,10 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
4020 }
4021 }
4022
4023 - ent->addr = addr;
4024 - ent->data = av->data;
4025 - ent->count = 1;
4026 - ent->pitch = 1;
4027 + init->addr = addr;
4028 + init->data = av->data;
4029 + init->count = 1;
4030 + init->pitch = 1;
4031 }
4032
4033 *ppack = pack;
4034 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
4035 index ca251560d3e0..bb4a4266897c 100644
4036 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
4037 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
4038 @@ -146,6 +146,7 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev)
4039 struct nvkm_fault *fault = nvkm_fault(subdev);
4040 int i;
4041
4042 + nvkm_notify_fini(&fault->nrpfb);
4043 nvkm_event_fini(&fault->event);
4044
4045 for (i = 0; i < fault->buffer_nr; i++) {
4046 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
4047 index df8b919dcf09..ace6fefba428 100644
4048 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
4049 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
4050 @@ -108,6 +108,7 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
4051 struct gm200_secboot *gsb;
4052 struct nvkm_acr *acr;
4053
4054 + *psb = NULL;
4055 acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
4056 BIT(NVKM_SECBOOT_FALCON_PMU));
4057 if (IS_ERR(acr))
4058 @@ -116,10 +117,8 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
4059 acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU);
4060
4061 gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
4062 - if (!gsb) {
4063 - psb = NULL;
4064 + if (!gsb)
4065 return -ENOMEM;
4066 - }
4067 *psb = &gsb->base;
4068
4069 ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base);
4070 diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
4071 index 28fa6ba7b767..8abb31f83ffc 100644
4072 --- a/drivers/gpu/drm/panel/panel-simple.c
4073 +++ b/drivers/gpu/drm/panel/panel-simple.c
4074 @@ -2048,6 +2048,40 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
4075 .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
4076 };
4077
4078 +static const struct drm_display_mode logicpd_type_28_mode = {
4079 + .clock = 9000,
4080 + .hdisplay = 480,
4081 + .hsync_start = 480 + 3,
4082 + .hsync_end = 480 + 3 + 42,
4083 + .htotal = 480 + 3 + 42 + 2,
4084 +
4085 + .vdisplay = 272,
4086 + .vsync_start = 272 + 2,
4087 + .vsync_end = 272 + 2 + 11,
4088 + .vtotal = 272 + 2 + 11 + 3,
4089 + .vrefresh = 60,
4090 + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
4091 +};
4092 +
4093 +static const struct panel_desc logicpd_type_28 = {
4094 + .modes = &logicpd_type_28_mode,
4095 + .num_modes = 1,
4096 + .bpc = 8,
4097 + .size = {
4098 + .width = 105,
4099 + .height = 67,
4100 + },
4101 + .delay = {
4102 + .prepare = 200,
4103 + .enable = 200,
4104 + .unprepare = 200,
4105 + .disable = 200,
4106 + },
4107 + .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
4108 + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
4109 + DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
4110 +};
4111 +
4112 static const struct panel_desc mitsubishi_aa070mc01 = {
4113 .modes = &mitsubishi_aa070mc01_mode,
4114 .num_modes = 1,
4115 @@ -3264,6 +3298,9 @@ static const struct of_device_id platform_of_match[] = {
4116 }, {
4117 .compatible = "lg,lp129qe",
4118 .data = &lg_lp129qe,
4119 + }, {
4120 + .compatible = "logicpd,type28",
4121 + .data = &logicpd_type_28,
4122 }, {
4123 .compatible = "mitsubishi,aa070mc01-ca1",
4124 .data = &mitsubishi_aa070mc01,
4125 diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
4126 index 611cbe7aee69..bfc1631093e9 100644
4127 --- a/drivers/gpu/drm/qxl/qxl_kms.c
4128 +++ b/drivers/gpu/drm/qxl/qxl_kms.c
4129 @@ -184,7 +184,7 @@ int qxl_device_init(struct qxl_device *qdev,
4130
4131 if (!qxl_check_device(qdev)) {
4132 r = -ENODEV;
4133 - goto surface_mapping_free;
4134 + goto rom_unmap;
4135 }
4136
4137 r = qxl_bo_init(qdev);
4138 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
4139 index e81b01f8db90..0826efd9b5f5 100644
4140 --- a/drivers/gpu/drm/radeon/radeon_display.c
4141 +++ b/drivers/gpu/drm/radeon/radeon_display.c
4142 @@ -127,6 +127,8 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
4143
4144 DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
4145
4146 + msleep(10);
4147 +
4148 WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
4149 (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
4150 NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
4151 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
4152 index 4ac55fc2bf97..44d858ce4ce7 100644
4153 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
4154 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
4155 @@ -209,8 +209,10 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
4156
4157 cres->hash.key = user_key | (res_type << 24);
4158 ret = drm_ht_insert_item(&man->resources, &cres->hash);
4159 - if (unlikely(ret != 0))
4160 + if (unlikely(ret != 0)) {
4161 + kfree(cres);
4162 goto out_invalid_key;
4163 + }
4164
4165 cres->state = VMW_CMDBUF_RES_ADD;
4166 cres->res = vmw_resource_reference(res);
4167 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
4168 index a1898e11b04e..943bf944bf72 100644
4169 --- a/drivers/ide/cmd64x.c
4170 +++ b/drivers/ide/cmd64x.c
4171 @@ -66,6 +66,9 @@ static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
4172 struct ide_timing t;
4173 u8 arttim = 0;
4174
4175 + if (drive->dn >= ARRAY_SIZE(drwtim_regs))
4176 + return;
4177 +
4178 ide_timing_compute(drive, mode, &t, T, 0);
4179
4180 /*
4181 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
4182 index ac6fc3fffa0d..458e72e034b0 100644
4183 --- a/drivers/ide/serverworks.c
4184 +++ b/drivers/ide/serverworks.c
4185 @@ -115,6 +115,9 @@ static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
4186 struct pci_dev *dev = to_pci_dev(hwif->dev);
4187 const u8 pio = drive->pio_mode - XFER_PIO_0;
4188
4189 + if (drive->dn >= ARRAY_SIZE(drive_pci))
4190 + return;
4191 +
4192 pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
4193
4194 if (svwks_csb_check(dev)) {
4195 @@ -141,6 +144,9 @@ static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
4196
4197 u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0;
4198
4199 + if (drive->dn >= ARRAY_SIZE(drive_pci2))
4200 + return;
4201 +
4202 pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
4203 pci_read_config_byte(dev, 0x54, &ultra_enable);
4204
4205 diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
4206 index 00fb3eacda19..65b10efca2b8 100644
4207 --- a/drivers/infiniband/core/cache.c
4208 +++ b/drivers/infiniband/core/cache.c
4209 @@ -51,9 +51,8 @@ struct ib_pkey_cache {
4210
4211 struct ib_update_work {
4212 struct work_struct work;
4213 - struct ib_device *device;
4214 - u8 port_num;
4215 - bool enforce_security;
4216 + struct ib_event event;
4217 + bool enforce_security;
4218 };
4219
4220 union ib_gid zgid;
4221 @@ -130,7 +129,7 @@ static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
4222 event.element.port_num = port;
4223 event.event = IB_EVENT_GID_CHANGE;
4224
4225 - ib_dispatch_event(&event);
4226 + ib_dispatch_event_clients(&event);
4227 }
4228
4229 static const char * const gid_type_str[] = {
4230 @@ -1387,9 +1386,8 @@ err:
4231 return ret;
4232 }
4233
4234 -static void ib_cache_update(struct ib_device *device,
4235 - u8 port,
4236 - bool enforce_security)
4237 +static int
4238 +ib_cache_update(struct ib_device *device, u8 port, bool enforce_security)
4239 {
4240 struct ib_port_attr *tprops = NULL;
4241 struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
4242 @@ -1397,11 +1395,11 @@ static void ib_cache_update(struct ib_device *device,
4243 int ret;
4244
4245 if (!rdma_is_port_valid(device, port))
4246 - return;
4247 + return -EINVAL;
4248
4249 tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
4250 if (!tprops)
4251 - return;
4252 + return -ENOMEM;
4253
4254 ret = ib_query_port(device, port, tprops);
4255 if (ret) {
4256 @@ -1419,8 +1417,10 @@ static void ib_cache_update(struct ib_device *device,
4257 pkey_cache = kmalloc(struct_size(pkey_cache, table,
4258 tprops->pkey_tbl_len),
4259 GFP_KERNEL);
4260 - if (!pkey_cache)
4261 + if (!pkey_cache) {
4262 + ret = -ENOMEM;
4263 goto err;
4264 + }
4265
4266 pkey_cache->table_len = tprops->pkey_tbl_len;
4267
4268 @@ -1452,50 +1452,84 @@ static void ib_cache_update(struct ib_device *device,
4269
4270 kfree(old_pkey_cache);
4271 kfree(tprops);
4272 - return;
4273 + return 0;
4274
4275 err:
4276 kfree(pkey_cache);
4277 kfree(tprops);
4278 + return ret;
4279 +}
4280 +
4281 +static void ib_cache_event_task(struct work_struct *_work)
4282 +{
4283 + struct ib_update_work *work =
4284 + container_of(_work, struct ib_update_work, work);
4285 + int ret;
4286 +
4287 + /* Before distributing the cache update event, first sync
4288 + * the cache.
4289 + */
4290 + ret = ib_cache_update(work->event.device, work->event.element.port_num,
4291 + work->enforce_security);
4292 +
4293 + /* GID event is notified already for individual GID entries by
4294 + * dispatch_gid_change_event(). Hence, notifiy for rest of the
4295 + * events.
4296 + */
4297 + if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
4298 + ib_dispatch_event_clients(&work->event);
4299 +
4300 + kfree(work);
4301 }
4302
4303 -static void ib_cache_task(struct work_struct *_work)
4304 +static void ib_generic_event_task(struct work_struct *_work)
4305 {
4306 struct ib_update_work *work =
4307 container_of(_work, struct ib_update_work, work);
4308
4309 - ib_cache_update(work->device,
4310 - work->port_num,
4311 - work->enforce_security);
4312 + ib_dispatch_event_clients(&work->event);
4313 kfree(work);
4314 }
4315
4316 -static void ib_cache_event(struct ib_event_handler *handler,
4317 - struct ib_event *event)
4318 +static bool is_cache_update_event(const struct ib_event *event)
4319 +{
4320 + return (event->event == IB_EVENT_PORT_ERR ||
4321 + event->event == IB_EVENT_PORT_ACTIVE ||
4322 + event->event == IB_EVENT_LID_CHANGE ||
4323 + event->event == IB_EVENT_PKEY_CHANGE ||
4324 + event->event == IB_EVENT_CLIENT_REREGISTER ||
4325 + event->event == IB_EVENT_GID_CHANGE);
4326 +}
4327 +
4328 +/**
4329 + * ib_dispatch_event - Dispatch an asynchronous event
4330 + * @event:Event to dispatch
4331 + *
4332 + * Low-level drivers must call ib_dispatch_event() to dispatch the
4333 + * event to all registered event handlers when an asynchronous event
4334 + * occurs.
4335 + */
4336 +void ib_dispatch_event(const struct ib_event *event)
4337 {
4338 struct ib_update_work *work;
4339
4340 - if (event->event == IB_EVENT_PORT_ERR ||
4341 - event->event == IB_EVENT_PORT_ACTIVE ||
4342 - event->event == IB_EVENT_LID_CHANGE ||
4343 - event->event == IB_EVENT_PKEY_CHANGE ||
4344 - event->event == IB_EVENT_CLIENT_REREGISTER ||
4345 - event->event == IB_EVENT_GID_CHANGE) {
4346 - work = kmalloc(sizeof *work, GFP_ATOMIC);
4347 - if (work) {
4348 - INIT_WORK(&work->work, ib_cache_task);
4349 - work->device = event->device;
4350 - work->port_num = event->element.port_num;
4351 - if (event->event == IB_EVENT_PKEY_CHANGE ||
4352 - event->event == IB_EVENT_GID_CHANGE)
4353 - work->enforce_security = true;
4354 - else
4355 - work->enforce_security = false;
4356 -
4357 - queue_work(ib_wq, &work->work);
4358 - }
4359 - }
4360 + work = kzalloc(sizeof(*work), GFP_ATOMIC);
4361 + if (!work)
4362 + return;
4363 +
4364 + if (is_cache_update_event(event))
4365 + INIT_WORK(&work->work, ib_cache_event_task);
4366 + else
4367 + INIT_WORK(&work->work, ib_generic_event_task);
4368 +
4369 + work->event = *event;
4370 + if (event->event == IB_EVENT_PKEY_CHANGE ||
4371 + event->event == IB_EVENT_GID_CHANGE)
4372 + work->enforce_security = true;
4373 +
4374 + queue_work(ib_wq, &work->work);
4375 }
4376 +EXPORT_SYMBOL(ib_dispatch_event);
4377
4378 int ib_cache_setup_one(struct ib_device *device)
4379 {
4380 @@ -1511,9 +1545,6 @@ int ib_cache_setup_one(struct ib_device *device)
4381 rdma_for_each_port (device, p)
4382 ib_cache_update(device, p, true);
4383
4384 - INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
4385 - device, ib_cache_event);
4386 - ib_register_event_handler(&device->cache.event_handler);
4387 return 0;
4388 }
4389
4390 @@ -1535,14 +1566,12 @@ void ib_cache_release_one(struct ib_device *device)
4391
4392 void ib_cache_cleanup_one(struct ib_device *device)
4393 {
4394 - /* The cleanup function unregisters the event handler,
4395 - * waits for all in-progress workqueue elements and cleans
4396 - * up the GID cache. This function should be called after
4397 - * the device was removed from the devices list and all
4398 - * clients were removed, so the cache exists but is
4399 + /* The cleanup function waits for all in-progress workqueue
4400 + * elements and cleans up the GID cache. This function should be
4401 + * called after the device was removed from the devices list and
4402 + * all clients were removed, so the cache exists but is
4403 * non-functional and shouldn't be updated anymore.
4404 */
4405 - ib_unregister_event_handler(&device->cache.event_handler);
4406 flush_workqueue(ib_wq);
4407 gid_table_cleanup_one(device);
4408
4409 diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
4410 index 9d07378b5b42..9b30773f2da0 100644
4411 --- a/drivers/infiniband/core/core_priv.h
4412 +++ b/drivers/infiniband/core/core_priv.h
4413 @@ -149,6 +149,7 @@ unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
4414 int ib_cache_setup_one(struct ib_device *device);
4415 void ib_cache_cleanup_one(struct ib_device *device);
4416 void ib_cache_release_one(struct ib_device *device);
4417 +void ib_dispatch_event_clients(struct ib_event *event);
4418
4419 #ifdef CONFIG_CGROUP_RDMA
4420 void ib_device_register_rdmacg(struct ib_device *device);
4421 diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
4422 index 2b5bd7206fc6..2a770b8dca00 100644
4423 --- a/drivers/infiniband/core/device.c
4424 +++ b/drivers/infiniband/core/device.c
4425 @@ -591,6 +591,7 @@ struct ib_device *_ib_alloc_device(size_t size)
4426
4427 INIT_LIST_HEAD(&device->event_handler_list);
4428 spin_lock_init(&device->event_handler_lock);
4429 + init_rwsem(&device->event_handler_rwsem);
4430 mutex_init(&device->unregistration_lock);
4431 /*
4432 * client_data needs to be alloc because we don't want our mark to be
4433 @@ -1932,17 +1933,15 @@ EXPORT_SYMBOL(ib_set_client_data);
4434 *
4435 * ib_register_event_handler() registers an event handler that will be
4436 * called back when asynchronous IB events occur (as defined in
4437 - * chapter 11 of the InfiniBand Architecture Specification). This
4438 - * callback may occur in interrupt context.
4439 + * chapter 11 of the InfiniBand Architecture Specification). This
4440 + * callback occurs in workqueue context.
4441 */
4442 void ib_register_event_handler(struct ib_event_handler *event_handler)
4443 {
4444 - unsigned long flags;
4445 -
4446 - spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
4447 + down_write(&event_handler->device->event_handler_rwsem);
4448 list_add_tail(&event_handler->list,
4449 &event_handler->device->event_handler_list);
4450 - spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
4451 + up_write(&event_handler->device->event_handler_rwsem);
4452 }
4453 EXPORT_SYMBOL(ib_register_event_handler);
4454
4455 @@ -1955,35 +1954,23 @@ EXPORT_SYMBOL(ib_register_event_handler);
4456 */
4457 void ib_unregister_event_handler(struct ib_event_handler *event_handler)
4458 {
4459 - unsigned long flags;
4460 -
4461 - spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
4462 + down_write(&event_handler->device->event_handler_rwsem);
4463 list_del(&event_handler->list);
4464 - spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
4465 + up_write(&event_handler->device->event_handler_rwsem);
4466 }
4467 EXPORT_SYMBOL(ib_unregister_event_handler);
4468
4469 -/**
4470 - * ib_dispatch_event - Dispatch an asynchronous event
4471 - * @event:Event to dispatch
4472 - *
4473 - * Low-level drivers must call ib_dispatch_event() to dispatch the
4474 - * event to all registered event handlers when an asynchronous event
4475 - * occurs.
4476 - */
4477 -void ib_dispatch_event(struct ib_event *event)
4478 +void ib_dispatch_event_clients(struct ib_event *event)
4479 {
4480 - unsigned long flags;
4481 struct ib_event_handler *handler;
4482
4483 - spin_lock_irqsave(&event->device->event_handler_lock, flags);
4484 + down_read(&event->device->event_handler_rwsem);
4485
4486 list_for_each_entry(handler, &event->device->event_handler_list, list)
4487 handler->handler(handler, event);
4488
4489 - spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
4490 + up_read(&event->device->event_handler_rwsem);
4491 }
4492 -EXPORT_SYMBOL(ib_dispatch_event);
4493
4494 static int iw_query_port(struct ib_device *device,
4495 u8 port_num,
4496 diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
4497 index 9b1fb84a3d45..10924f122072 100644
4498 --- a/drivers/infiniband/hw/hfi1/chip.c
4499 +++ b/drivers/infiniband/hw/hfi1/chip.c
4500 @@ -1685,6 +1685,14 @@ static u64 access_sw_pio_drain(const struct cntr_entry *entry,
4501 return dd->verbs_dev.n_piodrain;
4502 }
4503
4504 +static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
4505 + void *context, int vl, int mode, u64 data)
4506 +{
4507 + struct hfi1_devdata *dd = context;
4508 +
4509 + return dd->ctx0_seq_drop;
4510 +}
4511 +
4512 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
4513 void *context, int vl, int mode, u64 data)
4514 {
4515 @@ -4106,6 +4114,7 @@ def_access_ibp_counter(rc_crwaits);
4516 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4517 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4518 [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
4519 +[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
4520 [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
4521 [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
4522 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4523 @@ -4249,6 +4258,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4524 access_sw_cpu_intr),
4525 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4526 access_sw_cpu_rcv_limit),
4527 +[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
4528 + access_sw_ctx0_seq_drop),
4529 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4530 access_sw_vtx_wait),
4531 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4532 diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
4533 index 4ca5ac8d7e9e..af0061936c66 100644
4534 --- a/drivers/infiniband/hw/hfi1/chip.h
4535 +++ b/drivers/infiniband/hw/hfi1/chip.h
4536 @@ -859,6 +859,7 @@ static inline int idx_from_vl(int vl)
4537 enum {
4538 C_RCV_OVF = 0,
4539 C_RX_LEN_ERR,
4540 + C_RX_SHORT_ERR,
4541 C_RX_ICRC_ERR,
4542 C_RX_EBP,
4543 C_RX_TID_FULL,
4544 @@ -926,6 +927,7 @@ enum {
4545 C_DC_PG_STS_TX_MBE_CNT,
4546 C_SW_CPU_INTR,
4547 C_SW_CPU_RCV_LIM,
4548 + C_SW_CTX0_SEQ_DROP,
4549 C_SW_VTX_WAIT,
4550 C_SW_PIO_WAIT,
4551 C_SW_PIO_DRAIN,
4552 diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
4553 index ab3589d17aee..fb3ec9bff7a2 100644
4554 --- a/drivers/infiniband/hw/hfi1/chip_registers.h
4555 +++ b/drivers/infiniband/hw/hfi1/chip_registers.h
4556 @@ -381,6 +381,7 @@
4557 #define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468)
4558 #define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
4559 #define RCV_LENGTH_ERR_CNT 0
4560 +#define RCV_SHORT_ERR_CNT 2
4561 #define RCV_ICRC_ERR_CNT 6
4562 #define RCV_EBP_CNT 9
4563 #define RCV_BUF_OVFL_CNT 10
4564 diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
4565 index 01aa1f132f55..941b465244ab 100644
4566 --- a/drivers/infiniband/hw/hfi1/driver.c
4567 +++ b/drivers/infiniband/hw/hfi1/driver.c
4568 @@ -734,6 +734,7 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
4569 {
4570 int ret;
4571
4572 + packet->rcd->dd->ctx0_seq_drop++;
4573 /* Set up for the next packet */
4574 packet->rhqoff += packet->rsize;
4575 if (packet->rhqoff >= packet->maxcnt)
4576 diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
4577 index 1af94650bd84..b79931cc74ab 100644
4578 --- a/drivers/infiniband/hw/hfi1/hfi.h
4579 +++ b/drivers/infiniband/hw/hfi1/hfi.h
4580 @@ -1153,6 +1153,8 @@ struct hfi1_devdata {
4581
4582 char *boardname; /* human readable board info */
4583
4584 + u64 ctx0_seq_drop;
4585 +
4586 /* reset value */
4587 u64 z_int_counter;
4588 u64 z_rcv_limit;
4589 diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
4590 index 5f8416ba09a9..702b59f0dab9 100644
4591 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c
4592 +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
4593 @@ -1062,8 +1062,8 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
4594 if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
4595 if (page_addr & ((1 << mtt->page_shift) - 1)) {
4596 dev_err(dev,
4597 - "page_addr 0x%llx is not page_shift %d alignment!\n",
4598 - page_addr, mtt->page_shift);
4599 + "page_addr is not page_shift %d alignment!\n",
4600 + mtt->page_shift);
4601 ret = -EINVAL;
4602 goto out;
4603 }
4604 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
4605 index e1cfbedefcbc..9a918db9e8db 100644
4606 --- a/drivers/infiniband/hw/mlx5/main.c
4607 +++ b/drivers/infiniband/hw/mlx5/main.c
4608 @@ -829,6 +829,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4609 struct ib_device_attr *props,
4610 struct ib_udata *uhw)
4611 {
4612 + size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
4613 struct mlx5_ib_dev *dev = to_mdev(ibdev);
4614 struct mlx5_core_dev *mdev = dev->mdev;
4615 int err = -ENOMEM;
4616 @@ -842,12 +843,12 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4617 u64 max_tso;
4618
4619 resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
4620 - if (uhw->outlen && uhw->outlen < resp_len)
4621 + if (uhw_outlen && uhw_outlen < resp_len)
4622 return -EINVAL;
4623 else
4624 resp.response_length = resp_len;
4625
4626 - if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
4627 + if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
4628 return -EINVAL;
4629
4630 memset(props, 0, sizeof(*props));
4631 @@ -911,7 +912,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4632 props->raw_packet_caps |=
4633 IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
4634
4635 - if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
4636 + if (field_avail(typeof(resp), tso_caps, uhw_outlen)) {
4637 max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
4638 if (max_tso) {
4639 resp.tso_caps.max_tso = 1 << max_tso;
4640 @@ -921,7 +922,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4641 }
4642 }
4643
4644 - if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
4645 + if (field_avail(typeof(resp), rss_caps, uhw_outlen)) {
4646 resp.rss_caps.rx_hash_function =
4647 MLX5_RX_HASH_FUNC_TOEPLITZ;
4648 resp.rss_caps.rx_hash_fields_mask =
4649 @@ -941,9 +942,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4650 resp.response_length += sizeof(resp.rss_caps);
4651 }
4652 } else {
4653 - if (field_avail(typeof(resp), tso_caps, uhw->outlen))
4654 + if (field_avail(typeof(resp), tso_caps, uhw_outlen))
4655 resp.response_length += sizeof(resp.tso_caps);
4656 - if (field_avail(typeof(resp), rss_caps, uhw->outlen))
4657 + if (field_avail(typeof(resp), rss_caps, uhw_outlen))
4658 resp.response_length += sizeof(resp.rss_caps);
4659 }
4660
4661 @@ -1066,7 +1067,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4662 MLX5_MAX_CQ_PERIOD;
4663 }
4664
4665 - if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
4666 + if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) {
4667 resp.response_length += sizeof(resp.cqe_comp_caps);
4668
4669 if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
4670 @@ -1084,7 +1085,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4671 }
4672 }
4673
4674 - if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
4675 + if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) &&
4676 raw_support) {
4677 if (MLX5_CAP_QOS(mdev, packet_pacing) &&
4678 MLX5_CAP_GEN(mdev, qos)) {
4679 @@ -1103,7 +1104,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4680 }
4681
4682 if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
4683 - uhw->outlen)) {
4684 + uhw_outlen)) {
4685 if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
4686 resp.mlx5_ib_support_multi_pkt_send_wqes =
4687 MLX5_IB_ALLOW_MPW;
4688 @@ -1116,7 +1117,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4689 sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
4690 }
4691
4692 - if (field_avail(typeof(resp), flags, uhw->outlen)) {
4693 + if (field_avail(typeof(resp), flags, uhw_outlen)) {
4694 resp.response_length += sizeof(resp.flags);
4695
4696 if (MLX5_CAP_GEN(mdev, cqe_compression_128))
4697 @@ -1132,8 +1133,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4698 resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
4699 }
4700
4701 - if (field_avail(typeof(resp), sw_parsing_caps,
4702 - uhw->outlen)) {
4703 + if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) {
4704 resp.response_length += sizeof(resp.sw_parsing_caps);
4705 if (MLX5_CAP_ETH(mdev, swp)) {
4706 resp.sw_parsing_caps.sw_parsing_offloads |=
4707 @@ -1153,7 +1153,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4708 }
4709 }
4710
4711 - if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
4712 + if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) &&
4713 raw_support) {
4714 resp.response_length += sizeof(resp.striding_rq_caps);
4715 if (MLX5_CAP_GEN(mdev, striding_rq)) {
4716 @@ -1170,8 +1170,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4717 }
4718 }
4719
4720 - if (field_avail(typeof(resp), tunnel_offloads_caps,
4721 - uhw->outlen)) {
4722 + if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) {
4723 resp.response_length += sizeof(resp.tunnel_offloads_caps);
4724 if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
4725 resp.tunnel_offloads_caps |=
4726 @@ -1192,7 +1191,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
4727 MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
4728 }
4729
4730 - if (uhw->outlen) {
4731 + if (uhw_outlen) {
4732 err = ib_copy_to_udata(uhw, &resp, resp.response_length);
4733
4734 if (err)
4735 @@ -4738,7 +4737,6 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4736 struct ib_device_attr *dprops = NULL;
4737 struct ib_port_attr *pprops = NULL;
4738 int err = -ENOMEM;
4739 - struct ib_udata uhw = {.inlen = 0, .outlen = 0};
4740
4741 pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
4742 if (!pprops)
4743 @@ -4748,7 +4746,7 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4744 if (!dprops)
4745 goto out;
4746
4747 - err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
4748 + err = mlx5_ib_query_device(&dev->ib_dev, dprops, NULL);
4749 if (err) {
4750 mlx5_ib_warn(dev, "query_device failed %d\n", err);
4751 goto out;
4752 diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
4753 index 5c4b2239129c..b0a02d4c8b93 100644
4754 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h
4755 +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
4756 @@ -407,7 +407,7 @@ struct rxe_dev {
4757 struct list_head pending_mmaps;
4758
4759 spinlock_t mmap_offset_lock; /* guard mmap_offset */
4760 - int mmap_offset;
4761 + u64 mmap_offset;
4762
4763 atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];
4764
4765 diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
4766 index 5525f1fb1526..240e8de24cd2 100644
4767 --- a/drivers/input/touchscreen/edt-ft5x06.c
4768 +++ b/drivers/input/touchscreen/edt-ft5x06.c
4769 @@ -1041,6 +1041,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
4770 {
4771 const struct edt_i2c_chip_data *chip_data;
4772 struct edt_ft5x06_ts_data *tsdata;
4773 + u8 buf[2] = { 0xfc, 0x00 };
4774 struct input_dev *input;
4775 unsigned long irq_flags;
4776 int error;
4777 @@ -1110,6 +1111,12 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
4778 return error;
4779 }
4780
4781 + /*
4782 + * Dummy read access. EP0700MLP1 returns bogus data on the first
4783 + * register read access and ignores writes.
4784 + */
4785 + edt_ft5x06_ts_readwrite(tsdata->client, 2, buf, 2, buf);
4786 +
4787 edt_ft5x06_ts_set_regs(tsdata);
4788 edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
4789 edt_ft5x06_ts_get_parameters(tsdata);
4790 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
4791 index 454695b372c8..8bd5d608a82c 100644
4792 --- a/drivers/iommu/amd_iommu.c
4793 +++ b/drivers/iommu/amd_iommu.c
4794 @@ -272,11 +272,8 @@ static struct pci_dev *setup_aliases(struct device *dev)
4795 */
4796 ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
4797 if (ivrs_alias != pci_dev_id(pdev) &&
4798 - PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
4799 - pci_add_dma_alias(pdev, ivrs_alias & 0xff);
4800 - pci_info(pdev, "Added PCI DMA alias %02x.%d\n",
4801 - PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias));
4802 - }
4803 + PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
4804 + pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
4805
4806 clone_aliases(pdev);
4807
4808 diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
4809 index 483f7bc379fa..d7cbca8bf2cd 100644
4810 --- a/drivers/iommu/amd_iommu_init.c
4811 +++ b/drivers/iommu/amd_iommu_init.c
4812 @@ -147,7 +147,7 @@ bool amd_iommu_dump;
4813 bool amd_iommu_irq_remap __read_mostly;
4814
4815 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
4816 -static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
4817 +static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
4818
4819 static bool amd_iommu_detected;
4820 static bool __initdata amd_iommu_disabled;
4821 @@ -1523,8 +1523,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
4822 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
4823 if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
4824 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
4825 - if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
4826 - amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
4827 break;
4828 case 0x11:
4829 case 0x40:
4830 @@ -1534,8 +1532,15 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
4831 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
4832 if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
4833 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
4834 - if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
4835 - amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
4836 + /*
4837 + * Note: Since iommu_update_intcapxt() leverages
4838 + * the IOMMU MMIO access to MSI capability block registers
4839 + * for MSI address lo/hi/data, we need to check both
4840 + * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
4841 + */
4842 + if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
4843 + (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
4844 + amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
4845 break;
4846 default:
4847 return -EINVAL;
4848 @@ -1996,8 +2001,8 @@ static int iommu_init_intcapxt(struct amd_iommu *iommu)
4849 struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
4850
4851 /**
4852 - * IntCapXT requires XTSup=1, which can be inferred
4853 - * amd_iommu_xt_mode.
4854 + * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
4855 + * which can be inferred from amd_iommu_xt_mode.
4856 */
4857 if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
4858 return 0;
4859 diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
4860 index fc956479b94e..daeabd98c60e 100644
4861 --- a/drivers/iommu/amd_iommu_types.h
4862 +++ b/drivers/iommu/amd_iommu_types.h
4863 @@ -377,12 +377,12 @@
4864 #define IOMMU_CAP_EFR 27
4865
4866 /* IOMMU Feature Reporting Field (for IVHD type 10h */
4867 -#define IOMMU_FEAT_XTSUP_SHIFT 0
4868 #define IOMMU_FEAT_GASUP_SHIFT 6
4869
4870 /* IOMMU Extended Feature Register (EFR) */
4871 #define IOMMU_EFR_XTSUP_SHIFT 2
4872 #define IOMMU_EFR_GASUP_SHIFT 7
4873 +#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT 46
4874
4875 #define MAX_DOMAIN_ID 65536
4876
4877 diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
4878 index ee8d48d863e1..ef6af714a7e6 100644
4879 --- a/drivers/iommu/arm-smmu-v3.c
4880 +++ b/drivers/iommu/arm-smmu-v3.c
4881 @@ -1643,7 +1643,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
4882 STRTAB_STE_1_EATS_TRANS));
4883
4884 arm_smmu_sync_ste_for_sid(smmu, sid);
4885 - dst[0] = cpu_to_le64(val);
4886 + /* See comment in arm_smmu_write_ctx_desc() */
4887 + WRITE_ONCE(dst[0], cpu_to_le64(val));
4888 arm_smmu_sync_ste_for_sid(smmu, sid);
4889
4890 /* It's likely that we'll want to use the new STE soon */
4891 diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
4892 index eecd6a421667..7196cabafb25 100644
4893 --- a/drivers/iommu/dmar.c
4894 +++ b/drivers/iommu/dmar.c
4895 @@ -1351,7 +1351,6 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
4896 struct qi_desc desc;
4897
4898 if (mask) {
4899 - WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
4900 addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
4901 desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
4902 } else
4903 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
4904 index dd5db856dcaf..760a242d0801 100644
4905 --- a/drivers/iommu/intel-iommu.c
4906 +++ b/drivers/iommu/intel-iommu.c
4907 @@ -3401,7 +3401,8 @@ static unsigned long intel_alloc_iova(struct device *dev,
4908 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
4909 IOVA_PFN(dma_mask), true);
4910 if (unlikely(!iova_pfn)) {
4911 - dev_err(dev, "Allocating %ld-page iova failed", nrpages);
4912 + dev_err_once(dev, "Allocating %ld-page iova failed\n",
4913 + nrpages);
4914 return 0;
4915 }
4916
4917 diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
4918 index 040a445be300..e7cb0b8a7332 100644
4919 --- a/drivers/iommu/intel-pasid.c
4920 +++ b/drivers/iommu/intel-pasid.c
4921 @@ -499,8 +499,16 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
4922 }
4923
4924 #ifdef CONFIG_X86
4925 - if (cpu_feature_enabled(X86_FEATURE_LA57))
4926 - pasid_set_flpm(pte, 1);
4927 + /* Both CPU and IOMMU paging mode need to match */
4928 + if (cpu_feature_enabled(X86_FEATURE_LA57)) {
4929 + if (cap_5lp_support(iommu->cap)) {
4930 + pasid_set_flpm(pte, 1);
4931 + } else {
4932 + pr_err("VT-d has no 5-level paging support for CPU\n");
4933 + pasid_clear_entry(pte);
4934 + return -EINVAL;
4935 + }
4936 + }
4937 #endif /* CONFIG_X86 */
4938
4939 pasid_set_domain_id(pte, did);
4940 diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
4941 index dca88f9fdf29..518d0b2d12af 100644
4942 --- a/drivers/iommu/intel-svm.c
4943 +++ b/drivers/iommu/intel-svm.c
4944 @@ -317,7 +317,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
4945 /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
4946 ret = intel_pasid_alloc_id(svm,
4947 !!cap_caching_mode(iommu->cap),
4948 - pasid_max - 1, GFP_KERNEL);
4949 + pasid_max, GFP_KERNEL);
4950 if (ret < 0) {
4951 kfree(svm);
4952 kfree(sdev);
4953 @@ -654,11 +654,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
4954 if (req->priv_data_present)
4955 memcpy(&resp.qw2, req->priv_data,
4956 sizeof(req->priv_data));
4957 + resp.qw2 = 0;
4958 + resp.qw3 = 0;
4959 + qi_submit_sync(&resp, iommu);
4960 }
4961 - resp.qw2 = 0;
4962 - resp.qw3 = 0;
4963 - qi_submit_sync(&resp, iommu);
4964 -
4965 head = (head + sizeof(*req)) & PRQ_RING_MASK;
4966 }
4967
4968 diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
4969 index c7a914b9bbbc..0e6a9536eca6 100644
4970 --- a/drivers/iommu/iova.c
4971 +++ b/drivers/iommu/iova.c
4972 @@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex);
4973
4974 struct iova *alloc_iova_mem(void)
4975 {
4976 - return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
4977 + return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
4978 }
4979 EXPORT_SYMBOL(alloc_iova_mem);
4980
4981 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
4982 index 787e8eec9a7f..11f3b50dcdcb 100644
4983 --- a/drivers/irqchip/irq-gic-v3-its.c
4984 +++ b/drivers/irqchip/irq-gic-v3-its.c
4985 @@ -571,7 +571,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
4986 struct its_cmd_desc *desc)
4987 {
4988 its_encode_cmd(cmd, GITS_CMD_INVALL);
4989 - its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
4990 + its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
4991
4992 its_fixup_cmd(cmd);
4993
4994 diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
4995 index 1edc99335a94..446603efbc90 100644
4996 --- a/drivers/irqchip/irq-gic-v3.c
4997 +++ b/drivers/irqchip/irq-gic-v3.c
4998 @@ -1801,6 +1801,7 @@ static struct
4999 struct redist_region *redist_regs;
5000 u32 nr_redist_regions;
5001 bool single_redist;
5002 + int enabled_rdists;
5003 u32 maint_irq;
5004 int maint_irq_mode;
5005 phys_addr_t vcpu_base;
5006 @@ -1895,8 +1896,10 @@ static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
5007 * If GICC is enabled and has valid gicr base address, then it means
5008 * GICR base is presented via GICC
5009 */
5010 - if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
5011 + if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
5012 + acpi_data.enabled_rdists++;
5013 return 0;
5014 + }
5015
5016 /*
5017 * It's perfectly valid firmware can pass disabled GICC entry, driver
5018 @@ -1926,8 +1929,10 @@ static int __init gic_acpi_count_gicr_regions(void)
5019
5020 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
5021 gic_acpi_match_gicc, 0);
5022 - if (count > 0)
5023 + if (count > 0) {
5024 acpi_data.single_redist = true;
5025 + count = acpi_data.enabled_rdists;
5026 + }
5027
5028 return count;
5029 }
5030 diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
5031 index 3f09f658e8e2..6b566bba263b 100644
5032 --- a/drivers/irqchip/irq-mbigen.c
5033 +++ b/drivers/irqchip/irq-mbigen.c
5034 @@ -374,6 +374,7 @@ static struct platform_driver mbigen_platform_driver = {
5035 .name = "Hisilicon MBIGEN-V2",
5036 .of_match_table = mbigen_of_match,
5037 .acpi_match_table = ACPI_PTR(mbigen_acpi_match),
5038 + .suppress_bind_attrs = true,
5039 },
5040 .probe = mbigen_device_probe,
5041 };
5042 diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
5043 index 4afc317901a8..66cdc003b8f4 100644
5044 --- a/drivers/leds/leds-pca963x.c
5045 +++ b/drivers/leds/leds-pca963x.c
5046 @@ -40,6 +40,8 @@
5047 #define PCA963X_LED_PWM 0x2 /* Controlled through PWM */
5048 #define PCA963X_LED_GRP_PWM 0x3 /* Controlled through PWM/GRPPWM */
5049
5050 +#define PCA963X_MODE2_OUTDRV 0x04 /* Open-drain or totem pole */
5051 +#define PCA963X_MODE2_INVRT 0x10 /* Normal or inverted direction */
5052 #define PCA963X_MODE2_DMBLNK 0x20 /* Enable blinking */
5053
5054 #define PCA963X_MODE1 0x00
5055 @@ -438,12 +440,12 @@ static int pca963x_probe(struct i2c_client *client,
5056 PCA963X_MODE2);
5057 /* Configure output: open-drain or totem pole (push-pull) */
5058 if (pdata->outdrv == PCA963X_OPEN_DRAIN)
5059 - mode2 |= 0x01;
5060 + mode2 &= ~PCA963X_MODE2_OUTDRV;
5061 else
5062 - mode2 |= 0x05;
5063 + mode2 |= PCA963X_MODE2_OUTDRV;
5064 /* Configure direction: normal or inverted */
5065 if (pdata->dir == PCA963X_INVERTED)
5066 - mode2 |= 0x10;
5067 + mode2 |= PCA963X_MODE2_INVRT;
5068 i2c_smbus_write_byte_data(pca963x->chip->client, PCA963X_MODE2,
5069 mode2);
5070 }
5071 diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
5072 index c71365e7c1fa..a50dcfda656f 100644
5073 --- a/drivers/md/bcache/bset.h
5074 +++ b/drivers/md/bcache/bset.h
5075 @@ -397,7 +397,8 @@ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
5076
5077 /* Bkey utility code */
5078
5079 -#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
5080 +#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, \
5081 + (unsigned int)(i)->keys)
5082
5083 static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
5084 {
5085 diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
5086 index 33ddc5269e8d..6730820780b0 100644
5087 --- a/drivers/md/bcache/journal.c
5088 +++ b/drivers/md/bcache/journal.c
5089 @@ -422,7 +422,8 @@ err:
5090 static void btree_flush_write(struct cache_set *c)
5091 {
5092 struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
5093 - unsigned int i, nr, ref_nr;
5094 + unsigned int i, nr;
5095 + int ref_nr;
5096 atomic_t *fifo_front_p, *now_fifo_front_p;
5097 size_t mask;
5098
5099 diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
5100 index ba1c93791d8d..503aafe188dc 100644
5101 --- a/drivers/md/bcache/stats.c
5102 +++ b/drivers/md/bcache/stats.c
5103 @@ -109,9 +109,13 @@ int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
5104
5105 void bch_cache_accounting_clear(struct cache_accounting *acc)
5106 {
5107 - memset(&acc->total.cache_hits,
5108 - 0,
5109 - sizeof(struct cache_stats));
5110 + acc->total.cache_hits = 0;
5111 + acc->total.cache_misses = 0;
5112 + acc->total.cache_bypass_hits = 0;
5113 + acc->total.cache_bypass_misses = 0;
5114 + acc->total.cache_readaheads = 0;
5115 + acc->total.cache_miss_collisions = 0;
5116 + acc->total.sectors_bypassed = 0;
5117 }
5118
5119 void bch_cache_accounting_destroy(struct cache_accounting *acc)
5120 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
5121 index 64999c7a8033..658b0f4a01f5 100644
5122 --- a/drivers/md/bcache/super.c
5123 +++ b/drivers/md/bcache/super.c
5124 @@ -1274,6 +1274,9 @@ static void cached_dev_free(struct closure *cl)
5125
5126 mutex_unlock(&bch_register_lock);
5127
5128 + if (dc->sb_bio.bi_inline_vecs[0].bv_page)
5129 + put_page(bio_first_page_all(&dc->sb_bio));
5130 +
5131 if (!IS_ERR_OR_NULL(dc->bdev))
5132 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
5133
5134 @@ -2369,29 +2372,35 @@ static bool bch_is_open(struct block_device *bdev)
5135 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
5136 const char *buffer, size_t size)
5137 {
5138 - ssize_t ret = -EINVAL;
5139 - const char *err = "cannot allocate memory";
5140 + const char *err;
5141 char *path = NULL;
5142 - struct cache_sb *sb = NULL;
5143 + struct cache_sb *sb;
5144 struct block_device *bdev = NULL;
5145 - struct page *sb_page = NULL;
5146 + struct page *sb_page;
5147 + ssize_t ret;
5148
5149 + ret = -EBUSY;
5150 + err = "failed to reference bcache module";
5151 if (!try_module_get(THIS_MODULE))
5152 - return -EBUSY;
5153 + goto out;
5154
5155 /* For latest state of bcache_is_reboot */
5156 smp_mb();
5157 + err = "bcache is in reboot";
5158 if (bcache_is_reboot)
5159 - return -EBUSY;
5160 + goto out_module_put;
5161
5162 + ret = -ENOMEM;
5163 + err = "cannot allocate memory";
5164 path = kstrndup(buffer, size, GFP_KERNEL);
5165 if (!path)
5166 - goto err;
5167 + goto out_module_put;
5168
5169 sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
5170 if (!sb)
5171 - goto err;
5172 + goto out_free_path;
5173
5174 + ret = -EINVAL;
5175 err = "failed to open device";
5176 bdev = blkdev_get_by_path(strim(path),
5177 FMODE_READ|FMODE_WRITE|FMODE_EXCL,
5178 @@ -2408,57 +2417,69 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
5179 if (!IS_ERR(bdev))
5180 bdput(bdev);
5181 if (attr == &ksysfs_register_quiet)
5182 - goto quiet_out;
5183 + goto done;
5184 }
5185 - goto err;
5186 + goto out_free_sb;
5187 }
5188
5189 err = "failed to set blocksize";
5190 if (set_blocksize(bdev, 4096))
5191 - goto err_close;
5192 + goto out_blkdev_put;
5193
5194 err = read_super(sb, bdev, &sb_page);
5195 if (err)
5196 - goto err_close;
5197 + goto out_blkdev_put;
5198
5199 err = "failed to register device";
5200 if (SB_IS_BDEV(sb)) {
5201 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
5202
5203 if (!dc)
5204 - goto err_close;
5205 + goto out_put_sb_page;
5206
5207 mutex_lock(&bch_register_lock);
5208 ret = register_bdev(sb, sb_page, bdev, dc);
5209 mutex_unlock(&bch_register_lock);
5210 /* blkdev_put() will be called in cached_dev_free() */
5211 - if (ret < 0)
5212 - goto err;
5213 + if (ret < 0) {
5214 + bdev = NULL;
5215 + goto out_put_sb_page;
5216 + }
5217 } else {
5218 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
5219
5220 if (!ca)
5221 - goto err_close;
5222 + goto out_put_sb_page;
5223
5224 /* blkdev_put() will be called in bch_cache_release() */
5225 - if (register_cache(sb, sb_page, bdev, ca) != 0)
5226 - goto err;
5227 + if (register_cache(sb, sb_page, bdev, ca) != 0) {
5228 + bdev = NULL;
5229 + goto out_put_sb_page;
5230 + }
5231 }
5232 -quiet_out:
5233 - ret = size;
5234 -out:
5235 - if (sb_page)
5236 - put_page(sb_page);
5237 +
5238 + put_page(sb_page);
5239 +done:
5240 + kfree(sb);
5241 + kfree(path);
5242 + module_put(THIS_MODULE);
5243 + return size;
5244 +
5245 +out_put_sb_page:
5246 + put_page(sb_page);
5247 +out_blkdev_put:
5248 + if (bdev)
5249 + blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
5250 +out_free_sb:
5251 kfree(sb);
5252 +out_free_path:
5253 kfree(path);
5254 + path = NULL;
5255 +out_module_put:
5256 module_put(THIS_MODULE);
5257 +out:
5258 + pr_info("error %s: %s", path?path:"", err);
5259 return ret;
5260 -
5261 -err_close:
5262 - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
5263 -err:
5264 - pr_info("error %s: %s", path, err);
5265 - goto out;
5266 }
5267
5268
5269 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
5270 index 69201bdf7f4c..1b2c98b43519 100644
5271 --- a/drivers/md/dm-thin.c
5272 +++ b/drivers/md/dm-thin.c
5273 @@ -231,6 +231,7 @@ struct pool {
5274 struct dm_target *ti; /* Only set if a pool target is bound */
5275
5276 struct mapped_device *pool_md;
5277 + struct block_device *data_dev;
5278 struct block_device *md_dev;
5279 struct dm_pool_metadata *pmd;
5280
5281 @@ -2945,6 +2946,7 @@ static struct kmem_cache *_new_mapping_cache;
5282
5283 static struct pool *pool_create(struct mapped_device *pool_md,
5284 struct block_device *metadata_dev,
5285 + struct block_device *data_dev,
5286 unsigned long block_size,
5287 int read_only, char **error)
5288 {
5289 @@ -3052,6 +3054,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
5290 pool->last_commit_jiffies = jiffies;
5291 pool->pool_md = pool_md;
5292 pool->md_dev = metadata_dev;
5293 + pool->data_dev = data_dev;
5294 __pool_table_insert(pool);
5295
5296 return pool;
5297 @@ -3093,6 +3096,7 @@ static void __pool_dec(struct pool *pool)
5298
5299 static struct pool *__pool_find(struct mapped_device *pool_md,
5300 struct block_device *metadata_dev,
5301 + struct block_device *data_dev,
5302 unsigned long block_size, int read_only,
5303 char **error, int *created)
5304 {
5305 @@ -3103,19 +3107,23 @@ static struct pool *__pool_find(struct mapped_device *pool_md,
5306 *error = "metadata device already in use by a pool";
5307 return ERR_PTR(-EBUSY);
5308 }
5309 + if (pool->data_dev != data_dev) {
5310 + *error = "data device already in use by a pool";
5311 + return ERR_PTR(-EBUSY);
5312 + }
5313 __pool_inc(pool);
5314
5315 } else {
5316 pool = __pool_table_lookup(pool_md);
5317 if (pool) {
5318 - if (pool->md_dev != metadata_dev) {
5319 + if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) {
5320 *error = "different pool cannot replace a pool";
5321 return ERR_PTR(-EINVAL);
5322 }
5323 __pool_inc(pool);
5324
5325 } else {
5326 - pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
5327 + pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error);
5328 *created = 1;
5329 }
5330 }
5331 @@ -3368,7 +3376,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
5332 goto out;
5333 }
5334
5335 - pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
5336 + pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev,
5337 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
5338 if (IS_ERR(pool)) {
5339 r = PTR_ERR(pool);
5340 @@ -4114,7 +4122,7 @@ static struct target_type pool_target = {
5341 .name = "thin-pool",
5342 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
5343 DM_TARGET_IMMUTABLE,
5344 - .version = {1, 21, 0},
5345 + .version = {1, 22, 0},
5346 .module = THIS_MODULE,
5347 .ctr = pool_ctr,
5348 .dtr = pool_dtr,
5349 @@ -4493,7 +4501,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
5350
5351 static struct target_type thin_target = {
5352 .name = "thin",
5353 - .version = {1, 21, 0},
5354 + .version = {1, 22, 0},
5355 .module = THIS_MODULE,
5356 .ctr = thin_ctr,
5357 .dtr = thin_dtr,
5358 diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
5359 index 4b9b98cf6674..5bd3ae82992f 100644
5360 --- a/drivers/media/i2c/mt9v032.c
5361 +++ b/drivers/media/i2c/mt9v032.c
5362 @@ -428,10 +428,12 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
5363 struct v4l2_subdev_pad_config *cfg,
5364 struct v4l2_subdev_mbus_code_enum *code)
5365 {
5366 + struct mt9v032 *mt9v032 = to_mt9v032(subdev);
5367 +
5368 if (code->index > 0)
5369 return -EINVAL;
5370
5371 - code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
5372 + code->code = mt9v032->format.code;
5373 return 0;
5374 }
5375
5376 @@ -439,7 +441,11 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
5377 struct v4l2_subdev_pad_config *cfg,
5378 struct v4l2_subdev_frame_size_enum *fse)
5379 {
5380 - if (fse->index >= 3 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
5381 + struct mt9v032 *mt9v032 = to_mt9v032(subdev);
5382 +
5383 + if (fse->index >= 3)
5384 + return -EINVAL;
5385 + if (mt9v032->format.code != fse->code)
5386 return -EINVAL;
5387
5388 fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index);
5389 diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
5390 index 18dd2d717088..a398ea81e422 100644
5391 --- a/drivers/media/i2c/ov5640.c
5392 +++ b/drivers/media/i2c/ov5640.c
5393 @@ -874,7 +874,7 @@ static unsigned long ov5640_calc_sys_clk(struct ov5640_dev *sensor,
5394 * We have reached the maximum allowed PLL1 output,
5395 * increase sysdiv.
5396 */
5397 - if (!rate)
5398 + if (!_rate)
5399 break;
5400
5401 /*
5402 diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
5403 index 8644205d3cd3..8e5a2c580821 100644
5404 --- a/drivers/media/pci/cx23885/cx23885-cards.c
5405 +++ b/drivers/media/pci/cx23885/cx23885-cards.c
5406 @@ -801,6 +801,25 @@ struct cx23885_board cx23885_boards[] = {
5407 .name = "Hauppauge WinTV-Starburst2",
5408 .portb = CX23885_MPEG_DVB,
5409 },
5410 + [CX23885_BOARD_AVERMEDIA_CE310B] = {
5411 + .name = "AVerMedia CE310B",
5412 + .porta = CX23885_ANALOG_VIDEO,
5413 + .force_bff = 1,
5414 + .input = {{
5415 + .type = CX23885_VMUX_COMPOSITE1,
5416 + .vmux = CX25840_VIN1_CH1 |
5417 + CX25840_NONE_CH2 |
5418 + CX25840_NONE0_CH3,
5419 + .amux = CX25840_AUDIO7,
5420 + }, {
5421 + .type = CX23885_VMUX_SVIDEO,
5422 + .vmux = CX25840_VIN8_CH1 |
5423 + CX25840_NONE_CH2 |
5424 + CX25840_VIN7_CH3 |
5425 + CX25840_SVIDEO_ON,
5426 + .amux = CX25840_AUDIO7,
5427 + } },
5428 + },
5429 };
5430 const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
5431
5432 @@ -1124,6 +1143,10 @@ struct cx23885_subid cx23885_subids[] = {
5433 .subvendor = 0x0070,
5434 .subdevice = 0xf02a,
5435 .card = CX23885_BOARD_HAUPPAUGE_STARBURST2,
5436 + }, {
5437 + .subvendor = 0x1461,
5438 + .subdevice = 0x3100,
5439 + .card = CX23885_BOARD_AVERMEDIA_CE310B,
5440 },
5441 };
5442 const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
5443 @@ -2348,6 +2371,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
5444 case CX23885_BOARD_DVBSKY_T982:
5445 case CX23885_BOARD_VIEWCAST_260E:
5446 case CX23885_BOARD_VIEWCAST_460E:
5447 + case CX23885_BOARD_AVERMEDIA_CE310B:
5448 dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
5449 &dev->i2c_bus[2].i2c_adap,
5450 "cx25840", 0x88 >> 1, NULL);
5451 diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
5452 index 8098b15493de..7fc408ee4934 100644
5453 --- a/drivers/media/pci/cx23885/cx23885-video.c
5454 +++ b/drivers/media/pci/cx23885/cx23885-video.c
5455 @@ -257,7 +257,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
5456 (dev->board == CX23885_BOARD_MYGICA_X8507) ||
5457 (dev->board == CX23885_BOARD_AVERMEDIA_HC81R) ||
5458 (dev->board == CX23885_BOARD_VIEWCAST_260E) ||
5459 - (dev->board == CX23885_BOARD_VIEWCAST_460E)) {
5460 + (dev->board == CX23885_BOARD_VIEWCAST_460E) ||
5461 + (dev->board == CX23885_BOARD_AVERMEDIA_CE310B)) {
5462 /* Configure audio routing */
5463 v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
5464 INPUT(input)->amux, 0, 0);
5465 diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
5466 index a95a2e4c6a0d..c472498e57c4 100644
5467 --- a/drivers/media/pci/cx23885/cx23885.h
5468 +++ b/drivers/media/pci/cx23885/cx23885.h
5469 @@ -101,6 +101,7 @@
5470 #define CX23885_BOARD_HAUPPAUGE_STARBURST2 59
5471 #define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885 60
5472 #define CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885 61
5473 +#define CX23885_BOARD_AVERMEDIA_CE310B 62
5474
5475 #define GPIO_0 0x00000001
5476 #define GPIO_1 0x00000002
5477 diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c
5478 index 4372abbb5950..a74e9fd65238 100644
5479 --- a/drivers/media/platform/sti/bdisp/bdisp-hw.c
5480 +++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c
5481 @@ -14,8 +14,8 @@
5482 #define MAX_SRC_WIDTH 2048
5483
5484 /* Reset & boot poll config */
5485 -#define POLL_RST_MAX 50
5486 -#define POLL_RST_DELAY_MS 20
5487 +#define POLL_RST_MAX 500
5488 +#define POLL_RST_DELAY_MS 2
5489
5490 enum bdisp_target_plan {
5491 BDISP_RGB,
5492 @@ -382,7 +382,7 @@ int bdisp_hw_reset(struct bdisp_dev *bdisp)
5493 for (i = 0; i < POLL_RST_MAX; i++) {
5494 if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE)
5495 break;
5496 - msleep(POLL_RST_DELAY_MS);
5497 + udelay(POLL_RST_DELAY_MS * 1000);
5498 }
5499 if (i == POLL_RST_MAX)
5500 dev_err(bdisp->dev, "Reset timeout\n");
5501 diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
5502 index f36dc6258900..b8b07c1de2a8 100644
5503 --- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
5504 +++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
5505 @@ -11,6 +11,7 @@
5506 #include <linux/module.h>
5507 #include <linux/mutex.h>
5508 #include <linux/of.h>
5509 +#include <linux/of_device.h>
5510 #include <linux/of_graph.h>
5511 #include <linux/platform_device.h>
5512 #include <linux/pm_runtime.h>
5513 @@ -155,6 +156,27 @@ static int sun4i_csi_probe(struct platform_device *pdev)
5514 subdev = &csi->subdev;
5515 vdev = &csi->vdev;
5516
5517 + /*
5518 + * On Allwinner SoCs, some high memory bandwidth devices do DMA
5519 + * directly over the memory bus (called MBUS), instead of the
5520 + * system bus. The memory bus has a different addressing scheme
5521 + * without the DRAM starting offset.
5522 + *
5523 + * In some cases this can be described by an interconnect in
5524 + * the device tree. In other cases where the hardware is not
5525 + * fully understood and the interconnect is left out of the
5526 + * device tree, fall back to a default offset.
5527 + */
5528 + if (of_find_property(csi->dev->of_node, "interconnects", NULL)) {
5529 + ret = of_dma_configure(csi->dev, csi->dev->of_node, true);
5530 + if (ret)
5531 + return ret;
5532 + } else {
5533 +#ifdef PHYS_PFN_OFFSET
5534 + csi->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
5535 +#endif
5536 + }
5537 +
5538 csi->mdev.dev = csi->dev;
5539 strscpy(csi->mdev.model, "Allwinner Video Capture Device",
5540 sizeof(csi->mdev.model));
5541 diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
5542 index 001c8bde006c..88d39b3554c4 100644
5543 --- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
5544 +++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
5545 @@ -22,8 +22,8 @@
5546 #define CSI_CFG_INPUT_FMT(fmt) ((fmt) << 20)
5547 #define CSI_CFG_OUTPUT_FMT(fmt) ((fmt) << 16)
5548 #define CSI_CFG_YUV_DATA_SEQ(seq) ((seq) << 8)
5549 -#define CSI_CFG_VSYNC_POL(pol) ((pol) << 2)
5550 -#define CSI_CFG_HSYNC_POL(pol) ((pol) << 1)
5551 +#define CSI_CFG_VREF_POL(pol) ((pol) << 2)
5552 +#define CSI_CFG_HREF_POL(pol) ((pol) << 1)
5553 #define CSI_CFG_PCLK_POL(pol) ((pol) << 0)
5554
5555 #define CSI_CPT_CTRL_REG 0x08
5556 diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
5557 index d6979e11a67b..78fa1c535ac6 100644
5558 --- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
5559 +++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
5560 @@ -228,7 +228,7 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
5561 struct sun4i_csi *csi = vb2_get_drv_priv(vq);
5562 struct v4l2_fwnode_bus_parallel *bus = &csi->bus;
5563 const struct sun4i_csi_format *csi_fmt;
5564 - unsigned long hsync_pol, pclk_pol, vsync_pol;
5565 + unsigned long href_pol, pclk_pol, vref_pol;
5566 unsigned long flags;
5567 unsigned int i;
5568 int ret;
5569 @@ -278,13 +278,21 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
5570 writel(CSI_WIN_CTRL_H_ACTIVE(csi->fmt.height),
5571 csi->regs + CSI_WIN_CTRL_H_REG);
5572
5573 - hsync_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH);
5574 - pclk_pol = !!(bus->flags & V4L2_MBUS_DATA_ACTIVE_HIGH);
5575 - vsync_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH);
5576 + /*
5577 + * This hardware uses [HV]REF instead of [HV]SYNC. Based on the
5578 + * provided timing diagrams in the manual, positive polarity
5579 + * equals active high [HV]REF.
5580 + *
5581 + * When the back porch is 0, [HV]REF is more or less equivalent
5582 + * to [HV]SYNC inverted.
5583 + */
5584 + href_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
5585 + vref_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
5586 + pclk_pol = !!(bus->flags & V4L2_MBUS_PCLK_SAMPLE_RISING);
5587 writel(CSI_CFG_INPUT_FMT(csi_fmt->input) |
5588 CSI_CFG_OUTPUT_FMT(csi_fmt->output) |
5589 - CSI_CFG_VSYNC_POL(vsync_pol) |
5590 - CSI_CFG_HSYNC_POL(hsync_pol) |
5591 + CSI_CFG_VREF_POL(vref_pol) |
5592 + CSI_CFG_HREF_POL(href_pol) |
5593 CSI_CFG_PCLK_POL(pclk_pol),
5594 csi->regs + CSI_CFG_REG);
5595
5596 diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
5597 index 2b688cc39bb8..99883550375e 100644
5598 --- a/drivers/media/usb/uvc/uvc_driver.c
5599 +++ b/drivers/media/usb/uvc/uvc_driver.c
5600 @@ -497,6 +497,22 @@ static int uvc_parse_format(struct uvc_device *dev,
5601 }
5602 }
5603
5604 + /* Some devices report bpp that doesn't match the format. */
5605 + if (dev->quirks & UVC_QUIRK_FORCE_BPP) {
5606 + const struct v4l2_format_info *info =
5607 + v4l2_format_info(format->fcc);
5608 +
5609 + if (info) {
5610 + unsigned int div = info->hdiv * info->vdiv;
5611 +
5612 + n = info->bpp[0] * div;
5613 + for (i = 1; i < info->comp_planes; i++)
5614 + n += info->bpp[i];
5615 +
5616 + format->bpp = DIV_ROUND_UP(8 * n, div);
5617 + }
5618 + }
5619 +
5620 if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) {
5621 ftype = UVC_VS_FRAME_UNCOMPRESSED;
5622 } else {
5623 @@ -2874,6 +2890,15 @@ static const struct usb_device_id uvc_ids[] = {
5624 .bInterfaceSubClass = 1,
5625 .bInterfaceProtocol = 0,
5626 .driver_info = (kernel_ulong_t)&uvc_quirk_force_y8 },
5627 + /* GEO Semiconductor GC6500 */
5628 + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
5629 + | USB_DEVICE_ID_MATCH_INT_INFO,
5630 + .idVendor = 0x29fe,
5631 + .idProduct = 0x4d53,
5632 + .bInterfaceClass = USB_CLASS_VIDEO,
5633 + .bInterfaceSubClass = 1,
5634 + .bInterfaceProtocol = 0,
5635 + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) },
5636 /* Intel RealSense D4M */
5637 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
5638 | USB_DEVICE_ID_MATCH_INT_INFO,
5639 diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
5640 index c7c1baa90dea..24e3d8c647e7 100644
5641 --- a/drivers/media/usb/uvc/uvcvideo.h
5642 +++ b/drivers/media/usb/uvc/uvcvideo.h
5643 @@ -198,6 +198,7 @@
5644 #define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200
5645 #define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400
5646 #define UVC_QUIRK_FORCE_Y8 0x00000800
5647 +#define UVC_QUIRK_FORCE_BPP 0x00001000
5648
5649 /* Format flags */
5650 #define UVC_FMT_FLAG_COMPRESSED 0x00000001
5651 diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
5652 index 11835969e982..48ba7e02bed7 100644
5653 --- a/drivers/misc/xilinx_sdfec.c
5654 +++ b/drivers/misc/xilinx_sdfec.c
5655 @@ -1025,25 +1025,25 @@ static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
5656 }
5657 #endif
5658
5659 -static unsigned int xsdfec_poll(struct file *file, poll_table *wait)
5660 +static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
5661 {
5662 - unsigned int mask = 0;
5663 + __poll_t mask = 0;
5664 struct xsdfec_dev *xsdfec;
5665
5666 xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
5667
5668 if (!xsdfec)
5669 - return POLLNVAL | POLLHUP;
5670 + return EPOLLNVAL | EPOLLHUP;
5671
5672 poll_wait(file, &xsdfec->waitq, wait);
5673
5674 /* XSDFEC ISR detected an error */
5675 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
5676 if (xsdfec->state_updated)
5677 - mask |= POLLIN | POLLPRI;
5678 + mask |= EPOLLIN | EPOLLPRI;
5679
5680 if (xsdfec->stats_updated)
5681 - mask |= POLLIN | POLLRDNORM;
5682 + mask |= EPOLLIN | EPOLLRDNORM;
5683 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
5684
5685 return mask;
5686 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
5687 index 41297533b4a8..68618891b0e4 100644
5688 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
5689 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
5690 @@ -942,6 +942,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
5691 dma_addr -= bp->rx_dma_offset;
5692 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
5693 DMA_ATTR_WEAK_ORDERING);
5694 + page_pool_release_page(rxr->page_pool, page);
5695
5696 if (unlikely(!payload))
5697 payload = eth_get_headlen(bp->dev, data_ptr, len);
5698 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
5699 index acb2856936d2..6e2ab10ad2e6 100644
5700 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
5701 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
5702 @@ -2013,10 +2013,10 @@ static int enic_stop(struct net_device *netdev)
5703 napi_disable(&enic->napi[i]);
5704
5705 netif_carrier_off(netdev);
5706 - netif_tx_disable(netdev);
5707 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
5708 for (i = 0; i < enic->wq_count; i++)
5709 napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
5710 + netif_tx_disable(netdev);
5711
5712 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
5713 enic_dev_del_station_addr(enic);
5714 diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
5715 index 51ad86417cb1..2580bcd85025 100644
5716 --- a/drivers/net/ethernet/freescale/gianfar.c
5717 +++ b/drivers/net/ethernet/freescale/gianfar.c
5718 @@ -2204,13 +2204,17 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
5719 skb_dirtytx = tx_queue->skb_dirtytx;
5720
5721 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
5722 + bool do_tstamp;
5723 +
5724 + do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5725 + priv->hwts_tx_en;
5726
5727 frags = skb_shinfo(skb)->nr_frags;
5728
5729 /* When time stamping, one additional TxBD must be freed.
5730 * Also, we need to dma_unmap_single() the TxPAL.
5731 */
5732 - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
5733 + if (unlikely(do_tstamp))
5734 nr_txbds = frags + 2;
5735 else
5736 nr_txbds = frags + 1;
5737 @@ -2224,7 +2228,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
5738 (lstatus & BD_LENGTH_MASK))
5739 break;
5740
5741 - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
5742 + if (unlikely(do_tstamp)) {
5743 next = next_txbd(bdp, base, tx_ring_size);
5744 buflen = be16_to_cpu(next->length) +
5745 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
5746 @@ -2234,7 +2238,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
5747 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
5748 buflen, DMA_TO_DEVICE);
5749
5750 - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
5751 + if (unlikely(do_tstamp)) {
5752 struct skb_shared_hwtstamps shhwtstamps;
5753 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
5754 ~0x7UL);
5755 diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
5756 index f73cd917c44f..3156de786d95 100644
5757 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
5758 +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
5759 @@ -791,7 +791,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
5760 struct i40e_ring *ring;
5761
5762 if (test_bit(__I40E_CONFIG_BUSY, pf->state))
5763 - return -ENETDOWN;
5764 + return -EAGAIN;
5765
5766 if (test_bit(__I40E_VSI_DOWN, vsi->state))
5767 return -ENETDOWN;
5768 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
5769 index 9f09253f9f46..a05158472ed1 100644
5770 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
5771 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
5772 @@ -297,6 +297,9 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
5773 s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
5774 #endif
5775 s->tx_cqes += sq_stats->cqes;
5776 +
5777 + /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
5778 + barrier();
5779 }
5780 }
5781 }
5782 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
5783 index 49933818c6f5..2dc0978428e6 100644
5784 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
5785 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
5786 @@ -215,7 +215,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
5787 start_again:
5788 err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
5789 if (err)
5790 - return err;
5791 + goto err_ctx_prepare;
5792 j = 0;
5793 for (; i < rif_count; i++) {
5794 struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
5795 @@ -247,6 +247,7 @@ start_again:
5796 return 0;
5797 err_entry_append:
5798 err_entry_get:
5799 +err_ctx_prepare:
5800 rtnl_unlock();
5801 devlink_dpipe_entry_clear(&entry);
5802 return err;
5803 diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
5804 index 9f8a1f69c0c4..23ebddfb9532 100644
5805 --- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
5806 +++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
5807 @@ -176,10 +176,8 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
5808 u8 mask, val;
5809 int err;
5810
5811 - if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
5812 - err = -EOPNOTSUPP;
5813 + if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
5814 goto err_delete;
5815 - }
5816
5817 tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
5818
5819 @@ -200,18 +198,14 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
5820 if ((iter->val & cmask) == (val & cmask) &&
5821 iter->band != knode->res->classid) {
5822 NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
5823 - err = -EOPNOTSUPP;
5824 goto err_delete;
5825 }
5826 }
5827
5828 if (!match) {
5829 match = kzalloc(sizeof(*match), GFP_KERNEL);
5830 - if (!match) {
5831 - err = -ENOMEM;
5832 - goto err_delete;
5833 - }
5834 -
5835 + if (!match)
5836 + return -ENOMEM;
5837 list_add(&match->list, &alink->dscp_map);
5838 }
5839 match->handle = knode->handle;
5840 @@ -227,7 +221,7 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
5841
5842 err_delete:
5843 nfp_abm_u32_knode_delete(alink, knode);
5844 - return err;
5845 + return -EOPNOTSUPP;
5846 }
5847
5848 static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
5849 diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
5850 index 5ae0b5663d54..a2cef6a004e7 100644
5851 --- a/drivers/net/ethernet/realtek/r8169_main.c
5852 +++ b/drivers/net/ethernet/realtek/r8169_main.c
5853 @@ -7064,6 +7064,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5854 int chipset, region;
5855 int jumbo_max, rc;
5856
5857 + /* Some tools for creating an initramfs don't consider softdeps, then
5858 + * r8169.ko may be in initramfs, but realtek.ko not. Then the generic
5859 + * PHY driver is used that doesn't work with most chip versions.
5860 + */
5861 + if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) {
5862 + dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
5863 + return -ENOENT;
5864 + }
5865 +
5866 dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
5867 if (!dev)
5868 return -ENOMEM;
5869 diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
5870 index 6fc04ffb22c2..d4e095d0e8f1 100644
5871 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
5872 +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
5873 @@ -517,25 +517,14 @@ static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
5874 return ret;
5875 }
5876
5877 -static int ixp4xx_mdio_register(void)
5878 +static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
5879 {
5880 int err;
5881
5882 if (!(mdio_bus = mdiobus_alloc()))
5883 return -ENOMEM;
5884
5885 - if (cpu_is_ixp43x()) {
5886 - /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
5887 - if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
5888 - return -ENODEV;
5889 - mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
5890 - } else {
5891 - /* All MII PHY accesses use NPE-B Ethernet registers */
5892 - if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
5893 - return -ENODEV;
5894 - mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
5895 - }
5896 -
5897 + mdio_regs = regs;
5898 __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
5899 spin_lock_init(&mdio_lock);
5900 mdio_bus->name = "IXP4xx MII Bus";
5901 @@ -1374,7 +1363,7 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
5902 .ndo_validate_addr = eth_validate_addr,
5903 };
5904
5905 -static int eth_init_one(struct platform_device *pdev)
5906 +static int ixp4xx_eth_probe(struct platform_device *pdev)
5907 {
5908 struct port *port;
5909 struct net_device *dev;
5910 @@ -1384,7 +1373,7 @@ static int eth_init_one(struct platform_device *pdev)
5911 char phy_id[MII_BUS_ID_SIZE + 3];
5912 int err;
5913
5914 - if (!(dev = alloc_etherdev(sizeof(struct port))))
5915 + if (!(dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct port))))
5916 return -ENOMEM;
5917
5918 SET_NETDEV_DEV(dev, &pdev->dev);
5919 @@ -1394,20 +1383,51 @@ static int eth_init_one(struct platform_device *pdev)
5920
5921 switch (port->id) {
5922 case IXP4XX_ETH_NPEA:
5923 + /* If the MDIO bus is not up yet, defer probe */
5924 + if (!mdio_bus)
5925 + return -EPROBE_DEFER;
5926 port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
5927 regs_phys = IXP4XX_EthA_BASE_PHYS;
5928 break;
5929 case IXP4XX_ETH_NPEB:
5930 + /*
5931 + * On all except IXP43x, NPE-B is used for the MDIO bus.
5932 + * If there is no NPE-B in the feature set, bail out, else
5933 + * register the MDIO bus.
5934 + */
5935 + if (!cpu_is_ixp43x()) {
5936 + if (!(ixp4xx_read_feature_bits() &
5937 + IXP4XX_FEATURE_NPEB_ETH0))
5938 + return -ENODEV;
5939 + /* Else register the MDIO bus on NPE-B */
5940 + if ((err = ixp4xx_mdio_register(IXP4XX_EthC_BASE_VIRT)))
5941 + return err;
5942 + }
5943 + if (!mdio_bus)
5944 + return -EPROBE_DEFER;
5945 port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
5946 regs_phys = IXP4XX_EthB_BASE_PHYS;
5947 break;
5948 case IXP4XX_ETH_NPEC:
5949 + /*
5950 + * IXP43x lacks NPE-B and uses NPE-C for the MDIO bus access,
5951 + * of there is no NPE-C, no bus, nothing works, so bail out.
5952 + */
5953 + if (cpu_is_ixp43x()) {
5954 + if (!(ixp4xx_read_feature_bits() &
5955 + IXP4XX_FEATURE_NPEC_ETH))
5956 + return -ENODEV;
5957 + /* Else register the MDIO bus on NPE-C */
5958 + if ((err = ixp4xx_mdio_register(IXP4XX_EthC_BASE_VIRT)))
5959 + return err;
5960 + }
5961 + if (!mdio_bus)
5962 + return -EPROBE_DEFER;
5963 port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
5964 regs_phys = IXP4XX_EthC_BASE_PHYS;
5965 break;
5966 default:
5967 - err = -ENODEV;
5968 - goto err_free;
5969 + return -ENODEV;
5970 }
5971
5972 dev->netdev_ops = &ixp4xx_netdev_ops;
5973 @@ -1416,10 +1436,8 @@ static int eth_init_one(struct platform_device *pdev)
5974
5975 netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
5976
5977 - if (!(port->npe = npe_request(NPE_ID(port->id)))) {
5978 - err = -EIO;
5979 - goto err_free;
5980 - }
5981 + if (!(port->npe = npe_request(NPE_ID(port->id))))
5982 + return -EIO;
5983
5984 port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
5985 if (!port->mem_res) {
5986 @@ -1465,12 +1483,10 @@ err_free_mem:
5987 release_resource(port->mem_res);
5988 err_npe_rel:
5989 npe_release(port->npe);
5990 -err_free:
5991 - free_netdev(dev);
5992 return err;
5993 }
5994
5995 -static int eth_remove_one(struct platform_device *pdev)
5996 +static int ixp4xx_eth_remove(struct platform_device *pdev)
5997 {
5998 struct net_device *dev = platform_get_drvdata(pdev);
5999 struct phy_device *phydev = dev->phydev;
6000 @@ -1478,45 +1494,21 @@ static int eth_remove_one(struct platform_device *pdev)
6001
6002 unregister_netdev(dev);
6003 phy_disconnect(phydev);
6004 + ixp4xx_mdio_remove();
6005 npe_port_tab[NPE_ID(port->id)] = NULL;
6006 npe_release(port->npe);
6007 release_resource(port->mem_res);
6008 - free_netdev(dev);
6009 return 0;
6010 }
6011
6012 static struct platform_driver ixp4xx_eth_driver = {
6013 .driver.name = DRV_NAME,
6014 - .probe = eth_init_one,
6015 - .remove = eth_remove_one,
6016 + .probe = ixp4xx_eth_probe,
6017 + .remove = ixp4xx_eth_remove,
6018 };
6019 -
6020 -static int __init eth_init_module(void)
6021 -{
6022 - int err;
6023 -
6024 - /*
6025 - * FIXME: we bail out on device tree boot but this really needs
6026 - * to be fixed in a nicer way: this registers the MDIO bus before
6027 - * even matching the driver infrastructure, we should only probe
6028 - * detected hardware.
6029 - */
6030 - if (of_have_populated_dt())
6031 - return -ENODEV;
6032 - if ((err = ixp4xx_mdio_register()))
6033 - return err;
6034 - return platform_driver_register(&ixp4xx_eth_driver);
6035 -}
6036 -
6037 -static void __exit eth_cleanup_module(void)
6038 -{
6039 - platform_driver_unregister(&ixp4xx_eth_driver);
6040 - ixp4xx_mdio_remove();
6041 -}
6042 +module_platform_driver(ixp4xx_eth_driver);
6043
6044 MODULE_AUTHOR("Krzysztof Halasa");
6045 MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
6046 MODULE_LICENSE("GPL v2");
6047 MODULE_ALIAS("platform:ixp4xx_eth");
6048 -module_init(eth_init_module);
6049 -module_exit(eth_cleanup_module);
6050 diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
6051 index 7c5265fd2b94..4190f9ed5313 100644
6052 --- a/drivers/net/phy/fixed_phy.c
6053 +++ b/drivers/net/phy/fixed_phy.c
6054 @@ -212,16 +212,13 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
6055 */
6056 gpiod = gpiod_get_from_of_node(fixed_link_node, "link-gpios", 0,
6057 GPIOD_IN, "mdio");
6058 - of_node_put(fixed_link_node);
6059 - if (IS_ERR(gpiod)) {
6060 - if (PTR_ERR(gpiod) == -EPROBE_DEFER)
6061 - return gpiod;
6062 -
6063 + if (IS_ERR(gpiod) && PTR_ERR(gpiod) != -EPROBE_DEFER) {
6064 if (PTR_ERR(gpiod) != -ENOENT)
6065 pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
6066 fixed_link_node);
6067 gpiod = NULL;
6068 }
6069 + of_node_put(fixed_link_node);
6070
6071 return gpiod;
6072 }
6073 diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
6074 index 677c45985338..c76df51dd3c5 100644
6075 --- a/drivers/net/phy/realtek.c
6076 +++ b/drivers/net/phy/realtek.c
6077 @@ -171,7 +171,9 @@ static int rtl8211c_config_init(struct phy_device *phydev)
6078
6079 static int rtl8211f_config_init(struct phy_device *phydev)
6080 {
6081 + struct device *dev = &phydev->mdio.dev;
6082 u16 val;
6083 + int ret;
6084
6085 /* enable TX-delay for rgmii-{id,txid}, and disable it for rgmii and
6086 * rgmii-rxid. The RX-delay can be enabled by the external RXDLY pin.
6087 @@ -189,7 +191,22 @@ static int rtl8211f_config_init(struct phy_device *phydev)
6088 return 0;
6089 }
6090
6091 - return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val);
6092 + ret = phy_modify_paged_changed(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY,
6093 + val);
6094 + if (ret < 0) {
6095 + dev_err(dev, "Failed to update the TX delay register\n");
6096 + return ret;
6097 + } else if (ret) {
6098 + dev_dbg(dev,
6099 + "%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n",
6100 + val ? "Enabling" : "Disabling");
6101 + } else {
6102 + dev_dbg(dev,
6103 + "2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n",
6104 + val ? "enabled" : "disabled");
6105 + }
6106 +
6107 + return 0;
6108 }
6109
6110 static int rtl8211e_config_init(struct phy_device *phydev)
6111 diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
6112 index aef7de225783..4ad0a0c33d85 100644
6113 --- a/drivers/net/wan/fsl_ucc_hdlc.c
6114 +++ b/drivers/net/wan/fsl_ucc_hdlc.c
6115 @@ -245,6 +245,11 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
6116 ret = -ENOMEM;
6117 goto free_riptr;
6118 }
6119 + if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
6120 + dev_err(priv->dev, "MURAM allocation out of addressable range\n");
6121 + ret = -ENOMEM;
6122 + goto free_tiptr;
6123 + }
6124
6125 /* Set RIPTR, TIPTR */
6126 iowrite16be(riptr, &priv->ucc_pram->riptr);
6127 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
6128 index 5643675ff724..bf78073ee7fd 100644
6129 --- a/drivers/net/wan/hdlc_x25.c
6130 +++ b/drivers/net/wan/hdlc_x25.c
6131 @@ -62,11 +62,12 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
6132 {
6133 unsigned char *ptr;
6134
6135 - skb_push(skb, 1);
6136 -
6137 if (skb_cow(skb, 1))
6138 return NET_RX_DROP;
6139
6140 + skb_push(skb, 1);
6141 + skb_reset_network_header(skb);
6142 +
6143 ptr = skb->data;
6144 *ptr = X25_IFACE_DATA;
6145
6146 @@ -79,6 +80,13 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
6147 static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
6148 {
6149 hdlc_device *hdlc = dev_to_hdlc(dev);
6150 +
6151 + skb_reset_network_header(skb);
6152 + skb->protocol = hdlc_type_trans(skb, dev);
6153 +
6154 + if (dev_nit_active(dev))
6155 + dev_queue_xmit_nit(skb, dev);
6156 +
6157 hdlc->xmit(skb, dev); /* Ignore return value :-( */
6158 }
6159
6160 @@ -93,6 +101,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
6161 switch (skb->data[0]) {
6162 case X25_IFACE_DATA: /* Data to be transmitted */
6163 skb_pull(skb, 1);
6164 + skb_reset_network_header(skb);
6165 if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
6166 dev_kfree_skb(skb);
6167 return NETDEV_TX_OK;
6168 diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
6169 index ea6ee6a608ce..e7619cec978a 100644
6170 --- a/drivers/net/wan/ixp4xx_hss.c
6171 +++ b/drivers/net/wan/ixp4xx_hss.c
6172 @@ -258,7 +258,7 @@ struct port {
6173 struct hss_plat_info *plat;
6174 buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
6175 struct desc *desc_tab; /* coherent */
6176 - u32 desc_tab_phys;
6177 + dma_addr_t desc_tab_phys;
6178 unsigned int id;
6179 unsigned int clock_type, clock_rate, loopback;
6180 unsigned int initialized, carrier;
6181 @@ -858,7 +858,7 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
6182 dev->stats.tx_dropped++;
6183 return NETDEV_TX_OK;
6184 }
6185 - memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
6186 + memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
6187 dev_kfree_skb(skb);
6188 #endif
6189
6190 diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
6191 index fc15a0037f0e..63607c3b8e81 100644
6192 --- a/drivers/net/wireless/ath/ath10k/snoc.c
6193 +++ b/drivers/net/wireless/ath/ath10k/snoc.c
6194 @@ -1729,13 +1729,16 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
6195 ret = ath10k_qmi_init(ar, msa_size);
6196 if (ret) {
6197 ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
6198 - goto err_core_destroy;
6199 + goto err_power_off;
6200 }
6201
6202 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
6203
6204 return 0;
6205
6206 +err_power_off:
6207 + ath10k_hw_power_off(ar);
6208 +
6209 err_free_irq:
6210 ath10k_snoc_free_irq(ar);
6211
6212 diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
6213 index 4d5d10c01064..eb0c963d9fd5 100644
6214 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
6215 +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
6216 @@ -3650,6 +3650,7 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
6217 struct wmi_tlv *tlv;
6218 struct sk_buff *skb;
6219 __le32 *channel_list;
6220 + u16 tlv_len;
6221 size_t len;
6222 void *ptr;
6223 u32 i;
6224 @@ -3707,10 +3708,12 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
6225 /* nlo_configured_parameters(nlo_list) */
6226 cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
6227 WMI_NLO_MAX_SSIDS));
6228 + tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
6229 + sizeof(struct nlo_configured_parameters);
6230
6231 tlv = ptr;
6232 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
6233 - tlv->len = __cpu_to_le16(len);
6234 + tlv->len = __cpu_to_le16(tlv_len);
6235
6236 ptr += sizeof(*tlv);
6237 nlo_list = ptr;
6238 diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
6239 index 4f707c6394bb..90f1197a6ad8 100644
6240 --- a/drivers/net/wireless/ath/ath10k/wmi.c
6241 +++ b/drivers/net/wireless/ath/ath10k/wmi.c
6242 @@ -9422,7 +9422,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
6243
6244 msdu = pkt_addr->vaddr;
6245 dma_unmap_single(ar->dev, pkt_addr->paddr,
6246 - msdu->len, DMA_FROM_DEVICE);
6247 + msdu->len, DMA_TO_DEVICE);
6248 ieee80211_free_txskb(ar->hw, msdu);
6249
6250 return 0;
6251 diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
6252 index 04d576deae72..6cb0d7bcfe76 100644
6253 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
6254 +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
6255 @@ -880,6 +880,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
6256 u8 data_offset;
6257 struct wil_rx_status_extended *s;
6258 u16 sring_idx = sring - wil->srings;
6259 + int invalid_buff_id_retry;
6260
6261 BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
6262
6263 @@ -893,9 +894,9 @@ again:
6264 /* Extract the buffer ID from the status message */
6265 buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
6266
6267 + invalid_buff_id_retry = 0;
6268 while (!buff_id) {
6269 struct wil_rx_status_extended *s;
6270 - int invalid_buff_id_retry = 0;
6271
6272 wil_dbg_txrx(wil,
6273 "buff_id is not updated yet by HW, (swhead 0x%x)\n",
6274 diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
6275 index 4325e91736eb..8b6b657c4b85 100644
6276 --- a/drivers/net/wireless/broadcom/b43legacy/main.c
6277 +++ b/drivers/net/wireless/broadcom/b43legacy/main.c
6278 @@ -1275,8 +1275,9 @@ static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
6279 }
6280
6281 /* Interrupt handler bottom-half */
6282 -static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
6283 +static void b43legacy_interrupt_tasklet(unsigned long data)
6284 {
6285 + struct b43legacy_wldev *dev = (struct b43legacy_wldev *)data;
6286 u32 reason;
6287 u32 dma_reason[ARRAY_SIZE(dev->dma_reason)];
6288 u32 merged_dma_reason = 0;
6289 @@ -3741,7 +3742,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
6290 b43legacy_set_status(wldev, B43legacy_STAT_UNINIT);
6291 wldev->bad_frames_preempt = modparam_bad_frames_preempt;
6292 tasklet_init(&wldev->isr_tasklet,
6293 - (void (*)(unsigned long))b43legacy_interrupt_tasklet,
6294 + b43legacy_interrupt_tasklet,
6295 (unsigned long)wldev);
6296 if (modparam_pio)
6297 wldev->__using_pio = true;
6298 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
6299 index 7ba9f6a68645..1f5deea5a288 100644
6300 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
6301 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
6302 @@ -2092,7 +2092,8 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
6303 /* firmware requires unique mac address for p2pdev interface */
6304 if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) {
6305 bphy_err(drvr, "discovery vif must be different from primary interface\n");
6306 - return ERR_PTR(-EINVAL);
6307 + err = -EINVAL;
6308 + goto fail;
6309 }
6310
6311 brcmf_p2p_generate_bss_mac(p2p, addr);
6312 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
6313 index 264ad63232f8..a935993a3c51 100644
6314 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
6315 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
6316 @@ -1935,6 +1935,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
6317 BRCMF_SDIO_FT_NORMAL)) {
6318 rd->len = 0;
6319 brcmu_pkt_buf_free_skb(pkt);
6320 + continue;
6321 }
6322 bus->sdcnt.rx_readahead_cnt++;
6323 if (rd->len != roundup(rd_new.len, 16)) {
6324 @@ -4225,6 +4226,12 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
6325 }
6326
6327 if (err == 0) {
6328 + /* Assign bus interface call back */
6329 + sdiod->bus_if->dev = sdiod->dev;
6330 + sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
6331 + sdiod->bus_if->chip = bus->ci->chip;
6332 + sdiod->bus_if->chiprev = bus->ci->chiprev;
6333 +
6334 /* Allow full data communication using DPC from now on. */
6335 brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
6336
6337 @@ -4241,12 +4248,6 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
6338
6339 sdio_release_host(sdiod->func1);
6340
6341 - /* Assign bus interface call back */
6342 - sdiod->bus_if->dev = sdiod->dev;
6343 - sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
6344 - sdiod->bus_if->chip = bus->ci->chip;
6345 - sdiod->bus_if->chiprev = bus->ci->chiprev;
6346 -
6347 err = brcmf_alloc(sdiod->dev, sdiod->settings);
6348 if (err) {
6349 brcmf_err("brcmf_alloc failed\n");
6350 diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
6351 index 8dfbaff2d1fe..a162146a43a7 100644
6352 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
6353 +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
6354 @@ -3206,8 +3206,9 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
6355 }
6356 }
6357
6358 -static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
6359 +static void ipw2100_irq_tasklet(unsigned long data)
6360 {
6361 + struct ipw2100_priv *priv = (struct ipw2100_priv *)data;
6362 struct net_device *dev = priv->net_dev;
6363 unsigned long flags;
6364 u32 inta, tmp;
6365 @@ -6007,7 +6008,7 @@ static void ipw2100_rf_kill(struct work_struct *work)
6366 spin_unlock_irqrestore(&priv->low_lock, flags);
6367 }
6368
6369 -static void ipw2100_irq_tasklet(struct ipw2100_priv *priv);
6370 +static void ipw2100_irq_tasklet(unsigned long data);
6371
6372 static const struct net_device_ops ipw2100_netdev_ops = {
6373 .ndo_open = ipw2100_open,
6374 @@ -6137,7 +6138,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6375 INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
6376 INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
6377
6378 - tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6379 + tasklet_init(&priv->irq_tasklet,
6380 ipw2100_irq_tasklet, (unsigned long)priv);
6381
6382 /* NOTE: We do not start the deferred work for status checks yet */
6383 diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
6384 index ed0f06532d5e..ac5f797fb1ad 100644
6385 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
6386 +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
6387 @@ -1945,8 +1945,9 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
6388 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
6389 }
6390
6391 -static void ipw_irq_tasklet(struct ipw_priv *priv)
6392 +static void ipw_irq_tasklet(unsigned long data)
6393 {
6394 + struct ipw_priv *priv = (struct ipw_priv *)data;
6395 u32 inta, inta_mask, handled = 0;
6396 unsigned long flags;
6397 int rc = 0;
6398 @@ -10680,7 +10681,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
6399 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
6400 #endif /* CONFIG_IPW2200_QOS */
6401
6402 - tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6403 + tasklet_init(&priv->irq_tasklet,
6404 ipw_irq_tasklet, (unsigned long)priv);
6405
6406 return ret;
6407 diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
6408 index 4fbcc7fba3cc..e2e9c3e8fff5 100644
6409 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
6410 +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
6411 @@ -1376,8 +1376,9 @@ il3945_dump_nic_error_log(struct il_priv *il)
6412 }
6413
6414 static void
6415 -il3945_irq_tasklet(struct il_priv *il)
6416 +il3945_irq_tasklet(unsigned long data)
6417 {
6418 + struct il_priv *il = (struct il_priv *)data;
6419 u32 inta, handled = 0;
6420 u32 inta_fh;
6421 unsigned long flags;
6422 @@ -3403,7 +3404,7 @@ il3945_setup_deferred_work(struct il_priv *il)
6423 timer_setup(&il->watchdog, il_bg_watchdog, 0);
6424
6425 tasklet_init(&il->irq_tasklet,
6426 - (void (*)(unsigned long))il3945_irq_tasklet,
6427 + il3945_irq_tasklet,
6428 (unsigned long)il);
6429 }
6430
6431 diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
6432 index ffb705b18fb1..5fe17039a337 100644
6433 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
6434 +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
6435 @@ -4344,8 +4344,9 @@ il4965_synchronize_irq(struct il_priv *il)
6436 }
6437
6438 static void
6439 -il4965_irq_tasklet(struct il_priv *il)
6440 +il4965_irq_tasklet(unsigned long data)
6441 {
6442 + struct il_priv *il = (struct il_priv *)data;
6443 u32 inta, handled = 0;
6444 u32 inta_fh;
6445 unsigned long flags;
6446 @@ -6238,7 +6239,7 @@ il4965_setup_deferred_work(struct il_priv *il)
6447 timer_setup(&il->watchdog, il_bg_watchdog, 0);
6448
6449 tasklet_init(&il->irq_tasklet,
6450 - (void (*)(unsigned long))il4965_irq_tasklet,
6451 + il4965_irq_tasklet,
6452 (unsigned long)il);
6453 }
6454
6455 diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
6456 index 73f7bbf742bc..746749f37996 100644
6457 --- a/drivers/net/wireless/intel/iwlegacy/common.c
6458 +++ b/drivers/net/wireless/intel/iwlegacy/common.c
6459 @@ -699,7 +699,7 @@ il_eeprom_init(struct il_priv *il)
6460 u32 gp = _il_rd(il, CSR_EEPROM_GP);
6461 int sz;
6462 int ret;
6463 - u16 addr;
6464 + int addr;
6465
6466 /* allocate eeprom */
6467 sz = il->cfg->eeprom_size;
6468 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
6469 index 18ccc2692437..6ca087ffd163 100644
6470 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
6471 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
6472 @@ -5,10 +5,9 @@
6473 *
6474 * GPL LICENSE SUMMARY
6475 *
6476 - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
6477 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6478 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
6479 - * Copyright(c) 2018 - 2019 Intel Corporation
6480 + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
6481 *
6482 * This program is free software; you can redistribute it and/or modify
6483 * it under the terms of version 2 of the GNU General Public License as
6484 @@ -28,10 +27,9 @@
6485 *
6486 * BSD LICENSE
6487 *
6488 - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
6489 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6490 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
6491 - * Copyright(c) 2018 - 2019 Intel Corporation
6492 + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
6493 * All rights reserved.
6494 *
6495 * Redistribution and use in source and binary forms, with or without
6496 @@ -2025,7 +2023,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
6497 rcu_read_lock();
6498
6499 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
6500 - if (IS_ERR(sta)) {
6501 + if (IS_ERR_OR_NULL(sta)) {
6502 rcu_read_unlock();
6503 WARN(1, "Can't find STA to configure HE\n");
6504 return;
6505 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
6506 index f0c539b37ea7..a630e4edd9b4 100644
6507 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
6508 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
6509 @@ -731,7 +731,8 @@ static struct thermal_zone_device_ops tzone_ops = {
6510 static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
6511 {
6512 int i;
6513 - char name[] = "iwlwifi";
6514 + char name[16];
6515 + static atomic_t counter = ATOMIC_INIT(0);
6516
6517 if (!iwl_mvm_is_tt_in_fw(mvm)) {
6518 mvm->tz_device.tzone = NULL;
6519 @@ -741,6 +742,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
6520
6521 BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
6522
6523 + sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
6524 mvm->tz_device.tzone = thermal_zone_device_register(name,
6525 IWL_MAX_DTS_TRIPS,
6526 IWL_WRITABLE_TRIPS_MSK,
6527 diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
6528 index 0094b1d2b577..3ec46f48cfde 100644
6529 --- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
6530 +++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
6531 @@ -2508,7 +2508,7 @@ static int prism2_hostapd_add_sta(struct ap_data *ap,
6532 sta->supported_rates[0] = 2;
6533 if (sta->tx_supp_rates & WLAN_RATE_2M)
6534 sta->supported_rates[1] = 4;
6535 - if (sta->tx_supp_rates & WLAN_RATE_5M5)
6536 + if (sta->tx_supp_rates & WLAN_RATE_5M5)
6537 sta->supported_rates[2] = 11;
6538 if (sta->tx_supp_rates & WLAN_RATE_11M)
6539 sta->supported_rates[3] = 22;
6540 diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
6541 index 8c79b963bcff..e753f43e0162 100644
6542 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
6543 +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
6544 @@ -1361,7 +1361,8 @@ static int ezusb_init(struct hermes *hw)
6545 int retval;
6546
6547 BUG_ON(in_interrupt());
6548 - BUG_ON(!upriv);
6549 + if (!upriv)
6550 + return -EINVAL;
6551
6552 upriv->reply_count = 0;
6553 /* Write the MAGIC number on the simulated registers to keep
6554 diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
6555 index f88d26535978..25335bd2873b 100644
6556 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c
6557 +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
6558 @@ -1061,13 +1061,15 @@ done:
6559 return ret;
6560 }
6561
6562 -static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
6563 +static void _rtl_pci_irq_tasklet(unsigned long data)
6564 {
6565 + struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
6566 _rtl_pci_tx_chk_waitq(hw);
6567 }
6568
6569 -static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
6570 +static void _rtl_pci_prepare_bcn_tasklet(unsigned long data)
6571 {
6572 + struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
6573 struct rtl_priv *rtlpriv = rtl_priv(hw);
6574 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
6575 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
6576 @@ -1193,10 +1195,10 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
6577
6578 /*task */
6579 tasklet_init(&rtlpriv->works.irq_tasklet,
6580 - (void (*)(unsigned long))_rtl_pci_irq_tasklet,
6581 + _rtl_pci_irq_tasklet,
6582 (unsigned long)hw);
6583 tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
6584 - (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
6585 + _rtl_pci_prepare_bcn_tasklet,
6586 (unsigned long)hw);
6587 INIT_WORK(&rtlpriv->works.lps_change_work,
6588 rtl_lps_change_work_callback);
6589 diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
6590 index 806af37192bc..88e2252bf8a2 100644
6591 --- a/drivers/net/wireless/realtek/rtw88/main.c
6592 +++ b/drivers/net/wireless/realtek/rtw88/main.c
6593 @@ -556,8 +556,8 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
6594 if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
6595 is_support_sgi = true;
6596 } else if (sta->ht_cap.ht_supported) {
6597 - ra_mask |= (sta->ht_cap.mcs.rx_mask[NL80211_BAND_5GHZ] << 20) |
6598 - (sta->ht_cap.mcs.rx_mask[NL80211_BAND_2GHZ] << 12);
6599 + ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) |
6600 + (sta->ht_cap.mcs.rx_mask[0] << 12);
6601 if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
6602 stbc_en = HT_STBC_EN;
6603 if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
6604 @@ -567,6 +567,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
6605 is_support_sgi = true;
6606 }
6607
6608 + if (efuse->hw_cap.nss == 1)
6609 + ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
6610 +
6611 if (hal->current_band_type == RTW_BAND_5G) {
6612 ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
6613 if (sta->vht_cap.vht_supported) {
6614 @@ -600,11 +603,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
6615 wireless_set = 0;
6616 }
6617
6618 - if (efuse->hw_cap.nss == 1) {
6619 - ra_mask &= RA_MASK_VHT_RATES_1SS;
6620 - ra_mask &= RA_MASK_HT_RATES_1SS;
6621 - }
6622 -
6623 switch (sta->bandwidth) {
6624 case IEEE80211_STA_RX_BW_80:
6625 bw_mode = RTW_CHANNEL_WIDTH_80;
6626 diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
6627 index d90928be663b..77a2bdee50fa 100644
6628 --- a/drivers/net/wireless/realtek/rtw88/pci.c
6629 +++ b/drivers/net/wireless/realtek/rtw88/pci.c
6630 @@ -762,6 +762,11 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
6631
6632 while (count--) {
6633 skb = skb_dequeue(&ring->queue);
6634 + if (!skb) {
6635 + rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
6636 + count, hw_queue, bd_idx, ring->r.rp, cur_rp);
6637 + break;
6638 + }
6639 tx_data = rtw_pci_get_tx_data(skb);
6640 pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
6641 PCI_DMA_TODEVICE);
6642 diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
6643 index 604dba4f18af..8e4d355dc3ae 100644
6644 --- a/drivers/nfc/port100.c
6645 +++ b/drivers/nfc/port100.c
6646 @@ -565,7 +565,7 @@ static void port100_tx_update_payload_len(void *_frame, int len)
6647 {
6648 struct port100_frame *frame = _frame;
6649
6650 - frame->datalen = cpu_to_le16(le16_to_cpu(frame->datalen) + len);
6651 + le16_add_cpu(&frame->datalen, len);
6652 }
6653
6654 static bool port100_rx_frame_is_valid(void *_frame)
6655 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
6656 index 14d513087a14..f34a56d588d3 100644
6657 --- a/drivers/nvme/host/pci.c
6658 +++ b/drivers/nvme/host/pci.c
6659 @@ -167,7 +167,6 @@ struct nvme_queue {
6660 /* only used for poll queues: */
6661 spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
6662 volatile struct nvme_completion *cqes;
6663 - struct blk_mq_tags **tags;
6664 dma_addr_t sq_dma_addr;
6665 dma_addr_t cq_dma_addr;
6666 u32 __iomem *q_db;
6667 @@ -377,29 +376,17 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
6668
6669 WARN_ON(hctx_idx != 0);
6670 WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
6671 - WARN_ON(nvmeq->tags);
6672
6673 hctx->driver_data = nvmeq;
6674 - nvmeq->tags = &dev->admin_tagset.tags[0];
6675 return 0;
6676 }
6677
6678 -static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
6679 -{
6680 - struct nvme_queue *nvmeq = hctx->driver_data;
6681 -
6682 - nvmeq->tags = NULL;
6683 -}
6684 -
6685 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
6686 unsigned int hctx_idx)
6687 {
6688 struct nvme_dev *dev = data;
6689 struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
6690
6691 - if (!nvmeq->tags)
6692 - nvmeq->tags = &dev->tagset.tags[hctx_idx];
6693 -
6694 WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
6695 hctx->driver_data = nvmeq;
6696 return 0;
6697 @@ -950,6 +937,13 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
6698 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
6699 }
6700
6701 +static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
6702 +{
6703 + if (!nvmeq->qid)
6704 + return nvmeq->dev->admin_tagset.tags[0];
6705 + return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
6706 +}
6707 +
6708 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
6709 {
6710 volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
6711 @@ -975,7 +969,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
6712 return;
6713 }
6714
6715 - req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
6716 + req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
6717 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
6718 nvme_end_request(req, cqe->status, cqe->result);
6719 }
6720 @@ -1578,7 +1572,6 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
6721 .queue_rq = nvme_queue_rq,
6722 .complete = nvme_pci_complete_rq,
6723 .init_hctx = nvme_admin_init_hctx,
6724 - .exit_hctx = nvme_admin_exit_hctx,
6725 .init_request = nvme_init_request,
6726 .timeout = nvme_timeout,
6727 };
6728 diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
6729 index 3a67e244e568..57a4062cbb59 100644
6730 --- a/drivers/nvme/target/core.c
6731 +++ b/drivers/nvme/target/core.c
6732 @@ -555,7 +555,8 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
6733 } else {
6734 struct nvmet_ns *old;
6735
6736 - list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
6737 + list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
6738 + lockdep_is_held(&subsys->lock)) {
6739 BUG_ON(ns->nsid == old->nsid);
6740 if (ns->nsid < old->nsid)
6741 break;
6742 @@ -1174,7 +1175,8 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
6743
6744 ctrl->p2p_client = get_device(req->p2p_client);
6745
6746 - list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
6747 + list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
6748 + lockdep_is_held(&ctrl->subsys->lock))
6749 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
6750 }
6751
6752 diff --git a/drivers/opp/of.c b/drivers/opp/of.c
6753 index 1cbb58240b80..1e5fcdee043c 100644
6754 --- a/drivers/opp/of.c
6755 +++ b/drivers/opp/of.c
6756 @@ -678,15 +678,17 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
6757 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
6758 ret);
6759 of_node_put(np);
6760 - return ret;
6761 + goto put_list_kref;
6762 } else if (opp) {
6763 count++;
6764 }
6765 }
6766
6767 /* There should be one of more OPP defined */
6768 - if (WARN_ON(!count))
6769 - return -ENOENT;
6770 + if (WARN_ON(!count)) {
6771 + ret = -ENOENT;
6772 + goto put_list_kref;
6773 + }
6774
6775 list_for_each_entry(opp, &opp_table->opp_list, node)
6776 pstate_count += !!opp->pstate;
6777 @@ -695,7 +697,8 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
6778 if (pstate_count && pstate_count != count) {
6779 dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
6780 count, pstate_count);
6781 - return -ENOENT;
6782 + ret = -ENOENT;
6783 + goto put_list_kref;
6784 }
6785
6786 if (pstate_count)
6787 @@ -704,6 +707,11 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
6788 opp_table->parsed_static_opps = true;
6789
6790 return 0;
6791 +
6792 +put_list_kref:
6793 + _put_opp_list_kref(opp_table);
6794 +
6795 + return ret;
6796 }
6797
6798 /* Initializes OPP tables based on old-deprecated bindings */
6799 @@ -738,6 +746,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
6800 if (ret) {
6801 dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
6802 __func__, freq, ret);
6803 + _put_opp_list_kref(opp_table);
6804 return ret;
6805 }
6806 nr -= 2;
6807 diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
6808 index 2d457bfdaf66..933a4346ae5d 100644
6809 --- a/drivers/pci/controller/pcie-iproc.c
6810 +++ b/drivers/pci/controller/pcie-iproc.c
6811 @@ -1608,6 +1608,30 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
6812 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
6813 quirk_paxc_disable_msi_parsing);
6814
6815 +static void quirk_paxc_bridge(struct pci_dev *pdev)
6816 +{
6817 + /*
6818 + * The PCI config space is shared with the PAXC root port and the first
6819 + * Ethernet device. So, we need to workaround this by telling the PCI
6820 + * code that the bridge is not an Ethernet device.
6821 + */
6822 + if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
6823 + pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
6824 +
6825 + /*
6826 + * MPSS is not being set properly (as it is currently 0). This is
6827 + * because that area of the PCI config space is hard coded to zero, and
6828 + * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS)
6829 + * so that the MPS can be set to the real max value.
6830 + */
6831 + pdev->pcie_mpss = 2;
6832 +}
6833 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
6834 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
6835 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
6836 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
6837 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
6838 +
6839 MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
6840 MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
6841 MODULE_LICENSE("GPL v2");
6842 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
6843 index fcfaadc774ee..981ae16f935b 100644
6844 --- a/drivers/pci/pci.c
6845 +++ b/drivers/pci/pci.c
6846 @@ -5875,7 +5875,8 @@ EXPORT_SYMBOL_GPL(pci_pr3_present);
6847 /**
6848 * pci_add_dma_alias - Add a DMA devfn alias for a device
6849 * @dev: the PCI device for which alias is added
6850 - * @devfn: alias slot and function
6851 + * @devfn_from: alias slot and function
6852 + * @nr_devfns: number of subsequent devfns to alias
6853 *
6854 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6855 * which is used to program permissible bus-devfn source addresses for DMA
6856 @@ -5891,18 +5892,29 @@ EXPORT_SYMBOL_GPL(pci_pr3_present);
6857 * cannot be left as a userspace activity). DMA aliases should therefore
6858 * be configured via quirks, such as the PCI fixup header quirk.
6859 */
6860 -void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
6861 +void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns)
6862 {
6863 + int devfn_to;
6864 +
6865 + nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from);
6866 + devfn_to = devfn_from + nr_devfns - 1;
6867 +
6868 if (!dev->dma_alias_mask)
6869 - dev->dma_alias_mask = bitmap_zalloc(U8_MAX, GFP_KERNEL);
6870 + dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6871 if (!dev->dma_alias_mask) {
6872 pci_warn(dev, "Unable to allocate DMA alias mask\n");
6873 return;
6874 }
6875
6876 - set_bit(devfn, dev->dma_alias_mask);
6877 - pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6878 - PCI_SLOT(devfn), PCI_FUNC(devfn));
6879 + bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6880 +
6881 + if (nr_devfns == 1)
6882 + pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6883 + PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6884 + else if (nr_devfns > 1)
6885 + pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6886 + PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6887 + PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6888 }
6889
6890 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6891 diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
6892 index 3f6947ee3324..273d60cb0762 100644
6893 --- a/drivers/pci/pci.h
6894 +++ b/drivers/pci/pci.h
6895 @@ -4,6 +4,9 @@
6896
6897 #include <linux/pci.h>
6898
6899 +/* Number of possible devfns: 0.0 to 1f.7 inclusive */
6900 +#define MAX_NR_DEVFNS 256
6901 +
6902 #define PCI_FIND_CAP_TTL 48
6903
6904 #define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
6905 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
6906 index 2f88b1ff7ada..2fdceaab7307 100644
6907 --- a/drivers/pci/quirks.c
6908 +++ b/drivers/pci/quirks.c
6909 @@ -1871,19 +1871,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
6910 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
6911 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
6912
6913 +static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
6914 +{
6915 + if (dev->d3_delay >= delay)
6916 + return;
6917 +
6918 + dev->d3_delay = delay;
6919 + pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
6920 + dev->d3_delay);
6921 +}
6922 +
6923 static void quirk_radeon_pm(struct pci_dev *dev)
6924 {
6925 if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
6926 - dev->subsystem_device == 0x00e2) {
6927 - if (dev->d3_delay < 20) {
6928 - dev->d3_delay = 20;
6929 - pci_info(dev, "extending delay after power-on from D3 to %d msec\n",
6930 - dev->d3_delay);
6931 - }
6932 - }
6933 + dev->subsystem_device == 0x00e2)
6934 + quirk_d3hot_delay(dev, 20);
6935 }
6936 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
6937
6938 +/*
6939 + * Ryzen5/7 XHCI controllers fail upon resume from runtime suspend or s2idle.
6940 + * https://bugzilla.kernel.org/show_bug.cgi?id=205587
6941 + *
6942 + * The kernel attempts to transition these devices to D3cold, but that seems
6943 + * to be ineffective on the platforms in question; the PCI device appears to
6944 + * remain on in D3hot state. The D3hot-to-D0 transition then requires an
6945 + * extended delay in order to succeed.
6946 + */
6947 +static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
6948 +{
6949 + quirk_d3hot_delay(dev, 20);
6950 +}
6951 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
6952 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
6953 +
6954 #ifdef CONFIG_X86_IO_APIC
6955 static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
6956 {
6957 @@ -2381,32 +2402,6 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
6958 PCI_DEVICE_ID_TIGON3_5719,
6959 quirk_brcm_5719_limit_mrrs);
6960
6961 -#ifdef CONFIG_PCIE_IPROC_PLATFORM
6962 -static void quirk_paxc_bridge(struct pci_dev *pdev)
6963 -{
6964 - /*
6965 - * The PCI config space is shared with the PAXC root port and the first
6966 - * Ethernet device. So, we need to workaround this by telling the PCI
6967 - * code that the bridge is not an Ethernet device.
6968 - */
6969 - if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
6970 - pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
6971 -
6972 - /*
6973 - * MPSS is not being set properly (as it is currently 0). This is
6974 - * because that area of the PCI config space is hard coded to zero, and
6975 - * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS)
6976 - * so that the MPS can be set to the real max value.
6977 - */
6978 - pdev->pcie_mpss = 2;
6979 -}
6980 -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
6981 -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
6982 -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
6983 -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
6984 -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
6985 -#endif
6986 -
6987 /*
6988 * Originally in EDAC sources for i82875P: Intel tells BIOS developers to
6989 * hide device 6 which configures the overflow device access containing the
6990 @@ -3932,7 +3927,7 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
6991 static void quirk_dma_func0_alias(struct pci_dev *dev)
6992 {
6993 if (PCI_FUNC(dev->devfn) != 0)
6994 - pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
6995 + pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
6996 }
6997
6998 /*
6999 @@ -3946,7 +3941,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
7000 static void quirk_dma_func1_alias(struct pci_dev *dev)
7001 {
7002 if (PCI_FUNC(dev->devfn) != 1)
7003 - pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
7004 + pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
7005 }
7006
7007 /*
7008 @@ -4031,7 +4026,7 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev)
7009
7010 id = pci_match_id(fixed_dma_alias_tbl, dev);
7011 if (id)
7012 - pci_add_dma_alias(dev, id->driver_data);
7013 + pci_add_dma_alias(dev, id->driver_data, 1);
7014 }
7015
7016 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
7017 @@ -4073,9 +4068,9 @@ DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
7018 */
7019 static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
7020 {
7021 - pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
7022 - pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
7023 - pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
7024 + pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1);
7025 + pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1);
7026 + pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1);
7027 }
7028 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
7029 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
7030 @@ -4099,13 +4094,8 @@ static void quirk_pex_vca_alias(struct pci_dev *pdev)
7031 const unsigned int num_pci_slots = 0x20;
7032 unsigned int slot;
7033
7034 - for (slot = 0; slot < num_pci_slots; slot++) {
7035 - pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0));
7036 - pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x1));
7037 - pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x2));
7038 - pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x3));
7039 - pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x4));
7040 - }
7041 + for (slot = 0; slot < num_pci_slots; slot++)
7042 + pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5);
7043 }
7044 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
7045 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
7046 @@ -5320,7 +5310,7 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
7047 pci_dbg(pdev,
7048 "Aliasing Partition %d Proxy ID %02x.%d\n",
7049 pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
7050 - pci_add_dma_alias(pdev, devfn);
7051 + pci_add_dma_alias(pdev, devfn, 1);
7052 }
7053 }
7054
7055 @@ -5362,6 +5352,21 @@ SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
7056 SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
7057 SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
7058
7059 +/*
7060 + * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
7061 + * These IDs are used to forward responses to the originator on the other
7062 + * side of the NTB. Alias all possible IDs to the NTB to permit access when
7063 + * the IOMMU is turned on.
7064 + */
7065 +static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev)
7066 +{
7067 + pci_info(pdev, "Setting PLX NTB proxy ID aliases\n");
7068 + /* PLX NTB may use all 256 devfns */
7069 + pci_add_dma_alias(pdev, 0, 256);
7070 +}
7071 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias);
7072 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias);
7073 +
7074 /*
7075 * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does
7076 * not always reset the secondary Nvidia GPU between reboots if the system
7077 diff --git a/drivers/pci/search.c b/drivers/pci/search.c
7078 index bade14002fd8..e4dbdef5aef0 100644
7079 --- a/drivers/pci/search.c
7080 +++ b/drivers/pci/search.c
7081 @@ -41,9 +41,9 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
7082 * DMA, iterate over that too.
7083 */
7084 if (unlikely(pdev->dma_alias_mask)) {
7085 - u8 devfn;
7086 + unsigned int devfn;
7087
7088 - for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX) {
7089 + for_each_set_bit(devfn, pdev->dma_alias_mask, MAX_NR_DEVFNS) {
7090 ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn),
7091 data);
7092 if (ret)
7093 diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
7094 index 2a3966d059e7..0e51baa48b14 100644
7095 --- a/drivers/perf/fsl_imx8_ddr_perf.c
7096 +++ b/drivers/perf/fsl_imx8_ddr_perf.c
7097 @@ -572,13 +572,17 @@ static int ddr_perf_probe(struct platform_device *pdev)
7098
7099 if (ret < 0) {
7100 dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
7101 - goto ddr_perf_err;
7102 + goto cpuhp_state_err;
7103 }
7104
7105 pmu->cpuhp_state = ret;
7106
7107 /* Register the pmu instance for cpu hotplug */
7108 - cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
7109 + ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
7110 + if (ret) {
7111 + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
7112 + goto cpuhp_instance_err;
7113 + }
7114
7115 /* Request irq */
7116 irq = of_irq_get(np, 0);
7117 @@ -612,9 +616,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
7118 return 0;
7119
7120 ddr_perf_err:
7121 - if (pmu->cpuhp_state)
7122 - cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
7123 -
7124 + cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
7125 +cpuhp_instance_err:
7126 + cpuhp_remove_multi_state(pmu->cpuhp_state);
7127 +cpuhp_state_err:
7128 ida_simple_remove(&ddr_ida, pmu->id);
7129 dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
7130 return ret;
7131 @@ -625,6 +630,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
7132 struct ddr_pmu *pmu = platform_get_drvdata(pdev);
7133
7134 cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
7135 + cpuhp_remove_multi_state(pmu->cpuhp_state);
7136 irq_set_affinity_hint(pmu->irq, NULL);
7137
7138 perf_pmu_unregister(&pmu->pmu);
7139 diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
7140 index 7d658e6627e7..606fe216f902 100644
7141 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c
7142 +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
7143 @@ -752,7 +752,13 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
7144
7145 raw_spin_lock_irqsave(&byt_lock, flags);
7146 value = readl(reg);
7147 - value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
7148 +
7149 + /* Do not clear direct-irq enabled IRQs (from gpio_disable_free) */
7150 + if (value & BYT_DIRECT_IRQ_EN)
7151 + /* nothing to do */ ;
7152 + else
7153 + value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
7154 +
7155 writel(value, reg);
7156 raw_spin_unlock_irqrestore(&byt_lock, flags);
7157 }
7158 diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
7159 index 4a95867deb8a..5a026601d4f9 100644
7160 --- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
7161 +++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
7162 @@ -497,17 +497,15 @@ enum {
7163 SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK,
7164 CRX0_MARK, CRX1_MARK,
7165 CTX0_MARK, CTX1_MARK,
7166 + CRX0_CRX1_MARK, CTX0_CTX1_MARK,
7167
7168 PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK,
7169 PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK,
7170 PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK,
7171 PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK,
7172 IERXD_MARK, IETXD_MARK,
7173 - CRX0_CRX1_MARK,
7174 WDTOVF_MARK,
7175
7176 - CRX0X1_MARK,
7177 -
7178 /* DMAC */
7179 TEND0_MARK, DACK0_MARK, DREQ0_MARK,
7180 TEND1_MARK, DACK1_MARK, DREQ1_MARK,
7181 @@ -995,12 +993,12 @@ static const u16 pinmux_data[] = {
7182
7183 PINMUX_DATA(PJ3_DATA, PJ3MD_00),
7184 PINMUX_DATA(CRX1_MARK, PJ3MD_01),
7185 - PINMUX_DATA(CRX0X1_MARK, PJ3MD_10),
7186 + PINMUX_DATA(CRX0_CRX1_MARK, PJ3MD_10),
7187 PINMUX_DATA(IRQ1_PJ_MARK, PJ3MD_11),
7188
7189 PINMUX_DATA(PJ2_DATA, PJ2MD_000),
7190 PINMUX_DATA(CTX1_MARK, PJ2MD_001),
7191 - PINMUX_DATA(CRX0_CRX1_MARK, PJ2MD_010),
7192 + PINMUX_DATA(CTX0_CTX1_MARK, PJ2MD_010),
7193 PINMUX_DATA(CS2_MARK, PJ2MD_011),
7194 PINMUX_DATA(SCK0_MARK, PJ2MD_100),
7195 PINMUX_DATA(LCD_M_DISP_MARK, PJ2MD_101),
7196 @@ -1245,6 +1243,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
7197 GPIO_FN(CTX1),
7198 GPIO_FN(CRX1),
7199 GPIO_FN(CTX0),
7200 + GPIO_FN(CTX0_CTX1),
7201 GPIO_FN(CRX0),
7202 GPIO_FN(CRX0_CRX1),
7203
7204 diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
7205 index 6cbb18ef77dc..d20974a55d93 100644
7206 --- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
7207 +++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
7208 @@ -737,13 +737,12 @@ enum {
7209 CRX0_MARK, CTX0_MARK,
7210 CRX1_MARK, CTX1_MARK,
7211 CRX2_MARK, CTX2_MARK,
7212 - CRX0_CRX1_MARK,
7213 - CRX0_CRX1_CRX2_MARK,
7214 - CTX0CTX1CTX2_MARK,
7215 + CRX0_CRX1_MARK, CTX0_CTX1_MARK,
7216 + CRX0_CRX1_CRX2_MARK, CTX0_CTX1_CTX2_MARK,
7217 CRX1_PJ22_MARK, CTX1_PJ23_MARK,
7218 CRX2_PJ20_MARK, CTX2_PJ21_MARK,
7219 - CRX0CRX1_PJ22_MARK,
7220 - CRX0CRX1CRX2_PJ20_MARK,
7221 + CRX0_CRX1_PJ22_MARK, CTX0_CTX1_PJ23_MARK,
7222 + CRX0_CRX1_CRX2_PJ20_MARK, CTX0_CTX1_CTX2_PJ21_MARK,
7223
7224 /* VDC */
7225 DV_CLK_MARK,
7226 @@ -821,6 +820,7 @@ static const u16 pinmux_data[] = {
7227 PINMUX_DATA(CS3_MARK, PC8MD_001),
7228 PINMUX_DATA(TXD7_MARK, PC8MD_010),
7229 PINMUX_DATA(CTX1_MARK, PC8MD_011),
7230 + PINMUX_DATA(CTX0_CTX1_MARK, PC8MD_100),
7231
7232 PINMUX_DATA(PC7_DATA, PC7MD_000),
7233 PINMUX_DATA(CKE_MARK, PC7MD_001),
7234 @@ -833,11 +833,12 @@ static const u16 pinmux_data[] = {
7235 PINMUX_DATA(CAS_MARK, PC6MD_001),
7236 PINMUX_DATA(SCK7_MARK, PC6MD_010),
7237 PINMUX_DATA(CTX0_MARK, PC6MD_011),
7238 + PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC6MD_100),
7239
7240 PINMUX_DATA(PC5_DATA, PC5MD_000),
7241 PINMUX_DATA(RAS_MARK, PC5MD_001),
7242 PINMUX_DATA(CRX0_MARK, PC5MD_011),
7243 - PINMUX_DATA(CTX0CTX1CTX2_MARK, PC5MD_100),
7244 + PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC5MD_100),
7245 PINMUX_DATA(IRQ0_PC_MARK, PC5MD_101),
7246
7247 PINMUX_DATA(PC4_DATA, PC4MD_00),
7248 @@ -1289,30 +1290,32 @@ static const u16 pinmux_data[] = {
7249 PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
7250 PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
7251 PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
7252 - PINMUX_DATA(CTX1_MARK, PJ23MD_101),
7253 + PINMUX_DATA(CTX1_PJ23_MARK, PJ23MD_101),
7254 + PINMUX_DATA(CTX0_CTX1_PJ23_MARK, PJ23MD_110),
7255
7256 PINMUX_DATA(PJ22_DATA, PJ22MD_000),
7257 PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
7258 PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
7259 PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
7260 PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
7261 - PINMUX_DATA(CRX1_MARK, PJ22MD_101),
7262 - PINMUX_DATA(CRX0_CRX1_MARK, PJ22MD_110),
7263 + PINMUX_DATA(CRX1_PJ22_MARK, PJ22MD_101),
7264 + PINMUX_DATA(CRX0_CRX1_PJ22_MARK, PJ22MD_110),
7265
7266 PINMUX_DATA(PJ21_DATA, PJ21MD_000),
7267 PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
7268 PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
7269 PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
7270 PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
7271 - PINMUX_DATA(CTX2_MARK, PJ21MD_101),
7272 + PINMUX_DATA(CTX2_PJ21_MARK, PJ21MD_101),
7273 + PINMUX_DATA(CTX0_CTX1_CTX2_PJ21_MARK, PJ21MD_110),
7274
7275 PINMUX_DATA(PJ20_DATA, PJ20MD_000),
7276 PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
7277 PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
7278 PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
7279 PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
7280 - PINMUX_DATA(CRX2_MARK, PJ20MD_101),
7281 - PINMUX_DATA(CRX0CRX1CRX2_PJ20_MARK, PJ20MD_110),
7282 + PINMUX_DATA(CRX2_PJ20_MARK, PJ20MD_101),
7283 + PINMUX_DATA(CRX0_CRX1_CRX2_PJ20_MARK, PJ20MD_110),
7284
7285 PINMUX_DATA(PJ19_DATA, PJ19MD_000),
7286 PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
7287 @@ -1663,12 +1666,24 @@ static const struct pinmux_func pinmux_func_gpios[] = {
7288 GPIO_FN(WDTOVF),
7289
7290 /* CAN */
7291 + GPIO_FN(CTX2),
7292 + GPIO_FN(CRX2),
7293 GPIO_FN(CTX1),
7294 GPIO_FN(CRX1),
7295 GPIO_FN(CTX0),
7296 GPIO_FN(CRX0),
7297 + GPIO_FN(CTX0_CTX1),
7298 GPIO_FN(CRX0_CRX1),
7299 + GPIO_FN(CTX0_CTX1_CTX2),
7300 GPIO_FN(CRX0_CRX1_CRX2),
7301 + GPIO_FN(CTX2_PJ21),
7302 + GPIO_FN(CRX2_PJ20),
7303 + GPIO_FN(CTX1_PJ23),
7304 + GPIO_FN(CRX1_PJ22),
7305 + GPIO_FN(CTX0_CTX1_PJ23),
7306 + GPIO_FN(CRX0_CRX1_PJ22),
7307 + GPIO_FN(CTX0_CTX1_CTX2_PJ21),
7308 + GPIO_FN(CRX0_CRX1_CRX2_PJ20),
7309
7310 /* DMAC */
7311 GPIO_FN(TEND0),
7312 diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
7313 index 00772fc53490..e36fcad668a6 100644
7314 --- a/drivers/pwm/pwm-omap-dmtimer.c
7315 +++ b/drivers/pwm/pwm-omap-dmtimer.c
7316 @@ -298,15 +298,10 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
7317 goto put;
7318 }
7319
7320 -put:
7321 - of_node_put(timer);
7322 - if (ret < 0)
7323 - return ret;
7324 -
7325 omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
7326 if (!omap) {
7327 - pdata->free(dm_timer);
7328 - return -ENOMEM;
7329 + ret = -ENOMEM;
7330 + goto err_alloc_omap;
7331 }
7332
7333 omap->pdata = pdata;
7334 @@ -339,18 +334,38 @@ put:
7335 ret = pwmchip_add(&omap->chip);
7336 if (ret < 0) {
7337 dev_err(&pdev->dev, "failed to register PWM\n");
7338 - omap->pdata->free(omap->dm_timer);
7339 - return ret;
7340 + goto err_pwmchip_add;
7341 }
7342
7343 + of_node_put(timer);
7344 +
7345 platform_set_drvdata(pdev, omap);
7346
7347 return 0;
7348 +
7349 +err_pwmchip_add:
7350 +
7351 + /*
7352 + * *omap is allocated using devm_kzalloc,
7353 + * so no free necessary here
7354 + */
7355 +err_alloc_omap:
7356 +
7357 + pdata->free(dm_timer);
7358 +put:
7359 + of_node_put(timer);
7360 +
7361 + return ret;
7362 }
7363
7364 static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
7365 {
7366 struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
7367 + int ret;
7368 +
7369 + ret = pwmchip_remove(&omap->chip);
7370 + if (ret)
7371 + return ret;
7372
7373 if (pm_runtime_active(&omap->dm_timer_pdev->dev))
7374 omap->pdata->stop(omap->dm_timer);
7375 @@ -359,7 +374,7 @@ static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
7376
7377 mutex_destroy(&omap->mutex);
7378
7379 - return pwmchip_remove(&omap->chip);
7380 + return 0;
7381 }
7382
7383 static const struct of_device_id pwm_omap_dmtimer_of_match[] = {
7384 diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
7385 index 168684b02ebc..b07bdca3d510 100644
7386 --- a/drivers/pwm/pwm-pca9685.c
7387 +++ b/drivers/pwm/pwm-pca9685.c
7388 @@ -159,13 +159,9 @@ static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
7389 static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
7390 {
7391 struct pca9685 *pca = gpiochip_get_data(gpio);
7392 - struct pwm_device *pwm;
7393
7394 pca9685_pwm_gpio_set(gpio, offset, 0);
7395 pm_runtime_put(pca->chip.dev);
7396 - mutex_lock(&pca->lock);
7397 - pwm = &pca->chip.pwms[offset];
7398 - mutex_unlock(&pca->lock);
7399 }
7400
7401 static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
7402 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
7403 index 1dba0bdf3762..0011bdc15afb 100644
7404 --- a/drivers/regulator/core.c
7405 +++ b/drivers/regulator/core.c
7406 @@ -3462,6 +3462,7 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
7407 out:
7408 return ret;
7409 }
7410 +EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev);
7411
7412 static int regulator_limit_voltage_step(struct regulator_dev *rdev,
7413 int *current_uV, int *min_uV)
7414 @@ -4026,6 +4027,7 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev)
7415 return ret;
7416 return ret - rdev->constraints->uV_offset;
7417 }
7418 +EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev);
7419
7420 /**
7421 * regulator_get_voltage - get regulator output voltage
7422 diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
7423 index 61bd5ef0806c..97c846c19c2f 100644
7424 --- a/drivers/regulator/rk808-regulator.c
7425 +++ b/drivers/regulator/rk808-regulator.c
7426 @@ -1297,7 +1297,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev,
7427 }
7428
7429 if (!pdata->dvs_gpio[i]) {
7430 - dev_warn(dev, "there is no dvs%d gpio\n", i);
7431 + dev_info(dev, "there is no dvs%d gpio\n", i);
7432 continue;
7433 }
7434
7435 diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
7436 index 9a9ee8188109..cbadb1c99679 100644
7437 --- a/drivers/regulator/vctrl-regulator.c
7438 +++ b/drivers/regulator/vctrl-regulator.c
7439 @@ -11,10 +11,13 @@
7440 #include <linux/module.h>
7441 #include <linux/of.h>
7442 #include <linux/of_device.h>
7443 +#include <linux/regulator/coupler.h>
7444 #include <linux/regulator/driver.h>
7445 #include <linux/regulator/of_regulator.h>
7446 #include <linux/sort.h>
7447
7448 +#include "internal.h"
7449 +
7450 struct vctrl_voltage_range {
7451 int min_uV;
7452 int max_uV;
7453 @@ -79,7 +82,7 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
7454 static int vctrl_get_voltage(struct regulator_dev *rdev)
7455 {
7456 struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
7457 - int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
7458 + int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
7459
7460 return vctrl_calc_output_voltage(vctrl, ctrl_uV);
7461 }
7462 @@ -90,16 +93,16 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
7463 {
7464 struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
7465 struct regulator *ctrl_reg = vctrl->ctrl_reg;
7466 - int orig_ctrl_uV = regulator_get_voltage(ctrl_reg);
7467 + int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
7468 int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
7469 int ret;
7470
7471 if (req_min_uV >= uV || !vctrl->ovp_threshold)
7472 /* voltage rising or no OVP */
7473 - return regulator_set_voltage(
7474 - ctrl_reg,
7475 + return regulator_set_voltage_rdev(ctrl_reg->rdev,
7476 vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
7477 - vctrl_calc_ctrl_voltage(vctrl, req_max_uV));
7478 + vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
7479 + PM_SUSPEND_ON);
7480
7481 while (uV > req_min_uV) {
7482 int max_drop_uV = (uV * vctrl->ovp_threshold) / 100;
7483 @@ -114,9 +117,10 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
7484 next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
7485 next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
7486
7487 - ret = regulator_set_voltage(ctrl_reg,
7488 + ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
7489 + next_ctrl_uV,
7490 next_ctrl_uV,
7491 - next_ctrl_uV);
7492 + PM_SUSPEND_ON);
7493 if (ret)
7494 goto err;
7495
7496 @@ -130,7 +134,8 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
7497
7498 err:
7499 /* Try to go back to original voltage */
7500 - regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV);
7501 + regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
7502 + PM_SUSPEND_ON);
7503
7504 return ret;
7505 }
7506 @@ -155,9 +160,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
7507
7508 if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
7509 /* voltage rising or no OVP */
7510 - ret = regulator_set_voltage(ctrl_reg,
7511 + ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
7512 + vctrl->vtable[selector].ctrl,
7513 vctrl->vtable[selector].ctrl,
7514 - vctrl->vtable[selector].ctrl);
7515 + PM_SUSPEND_ON);
7516 if (!ret)
7517 vctrl->sel = selector;
7518
7519 @@ -173,9 +179,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
7520 else
7521 next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
7522
7523 - ret = regulator_set_voltage(ctrl_reg,
7524 + ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
7525 vctrl->vtable[next_sel].ctrl,
7526 - vctrl->vtable[next_sel].ctrl);
7527 + vctrl->vtable[next_sel].ctrl,
7528 + PM_SUSPEND_ON);
7529 if (ret) {
7530 dev_err(&rdev->dev,
7531 "failed to set control voltage to %duV\n",
7532 @@ -195,9 +202,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
7533 err:
7534 if (vctrl->sel != orig_sel) {
7535 /* Try to go back to original voltage */
7536 - if (!regulator_set_voltage(ctrl_reg,
7537 + if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
7538 + vctrl->vtable[orig_sel].ctrl,
7539 vctrl->vtable[orig_sel].ctrl,
7540 - vctrl->vtable[orig_sel].ctrl))
7541 + PM_SUSPEND_ON))
7542 vctrl->sel = orig_sel;
7543 else
7544 dev_warn(&rdev->dev,
7545 @@ -482,7 +490,7 @@ static int vctrl_probe(struct platform_device *pdev)
7546 if (ret)
7547 return ret;
7548
7549 - ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
7550 + ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
7551 if (ctrl_uV < 0) {
7552 dev_err(&pdev->dev, "failed to get control voltage\n");
7553 return ctrl_uV;
7554 diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
7555 index 3c5fbbbfb0f1..b542debbc6f0 100644
7556 --- a/drivers/remoteproc/remoteproc_core.c
7557 +++ b/drivers/remoteproc/remoteproc_core.c
7558 @@ -2224,7 +2224,7 @@ static int __init remoteproc_init(void)
7559
7560 return 0;
7561 }
7562 -module_init(remoteproc_init);
7563 +subsys_initcall(remoteproc_init);
7564
7565 static void __exit remoteproc_exit(void)
7566 {
7567 diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
7568 index 74e589f5dd6a..279e535bf5d8 100644
7569 --- a/drivers/reset/reset-uniphier.c
7570 +++ b/drivers/reset/reset-uniphier.c
7571 @@ -193,8 +193,8 @@ static const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
7572 #define UNIPHIER_PERI_RESET_FI2C(id, ch) \
7573 UNIPHIER_RESETX((id), 0x114, 24 + (ch))
7574
7575 -#define UNIPHIER_PERI_RESET_SCSSI(id) \
7576 - UNIPHIER_RESETX((id), 0x110, 17)
7577 +#define UNIPHIER_PERI_RESET_SCSSI(id, ch) \
7578 + UNIPHIER_RESETX((id), 0x110, 17 + (ch))
7579
7580 #define UNIPHIER_PERI_RESET_MCSSI(id) \
7581 UNIPHIER_RESETX((id), 0x114, 14)
7582 @@ -209,7 +209,7 @@ static const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
7583 UNIPHIER_PERI_RESET_I2C(6, 2),
7584 UNIPHIER_PERI_RESET_I2C(7, 3),
7585 UNIPHIER_PERI_RESET_I2C(8, 4),
7586 - UNIPHIER_PERI_RESET_SCSSI(11),
7587 + UNIPHIER_PERI_RESET_SCSSI(11, 0),
7588 UNIPHIER_RESET_END,
7589 };
7590
7591 @@ -225,8 +225,11 @@ static const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = {
7592 UNIPHIER_PERI_RESET_FI2C(8, 4),
7593 UNIPHIER_PERI_RESET_FI2C(9, 5),
7594 UNIPHIER_PERI_RESET_FI2C(10, 6),
7595 - UNIPHIER_PERI_RESET_SCSSI(11),
7596 - UNIPHIER_PERI_RESET_MCSSI(12),
7597 + UNIPHIER_PERI_RESET_SCSSI(11, 0),
7598 + UNIPHIER_PERI_RESET_SCSSI(12, 1),
7599 + UNIPHIER_PERI_RESET_SCSSI(13, 2),
7600 + UNIPHIER_PERI_RESET_SCSSI(14, 3),
7601 + UNIPHIER_PERI_RESET_MCSSI(15),
7602 UNIPHIER_RESET_END,
7603 };
7604
7605 diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
7606 index 1adf9f815652..5d502fbd5803 100644
7607 --- a/drivers/rtc/Kconfig
7608 +++ b/drivers/rtc/Kconfig
7609 @@ -240,6 +240,7 @@ config RTC_DRV_AS3722
7610
7611 config RTC_DRV_DS1307
7612 tristate "Dallas/Maxim DS1307/37/38/39/40/41, ST M41T00, EPSON RX-8025, ISL12057"
7613 + select REGMAP_I2C
7614 help
7615 If you say yes here you get support for various compatible RTC
7616 chips (often with battery backup) connected with I2C. This driver
7617 @@ -632,6 +633,7 @@ config RTC_DRV_RX8010
7618
7619 config RTC_DRV_RX8581
7620 tristate "Epson RX-8571/RX-8581"
7621 + select REGMAP_I2C
7622 help
7623 If you say yes here you will get support for the Epson RX-8571/
7624 RX-8581.
7625 @@ -659,6 +661,7 @@ config RTC_DRV_EM3027
7626
7627 config RTC_DRV_RV3028
7628 tristate "Micro Crystal RV3028"
7629 + select REGMAP_I2C
7630 help
7631 If you say yes here you get support for the Micro Crystal
7632 RV3028.
7633 @@ -688,6 +691,7 @@ config RTC_DRV_S5M
7634
7635 config RTC_DRV_SD3078
7636 tristate "ZXW Shenzhen whwave SD3078"
7637 + select REGMAP_I2C
7638 help
7639 If you say yes here you get support for the ZXW Shenzhen whwave
7640 SD3078 RTC chips.
7641 @@ -859,14 +863,14 @@ config RTC_I2C_AND_SPI
7642 default m if I2C=m
7643 default y if I2C=y
7644 default y if SPI_MASTER=y
7645 - select REGMAP_I2C if I2C
7646 - select REGMAP_SPI if SPI_MASTER
7647
7648 comment "SPI and I2C RTC drivers"
7649
7650 config RTC_DRV_DS3232
7651 tristate "Dallas/Maxim DS3232/DS3234"
7652 depends on RTC_I2C_AND_SPI
7653 + select REGMAP_I2C if I2C
7654 + select REGMAP_SPI if SPI_MASTER
7655 help
7656 If you say yes here you get support for Dallas Semiconductor
7657 DS3232 and DS3234 real-time clock chips. If an interrupt is associated
7658 @@ -886,6 +890,8 @@ config RTC_DRV_DS3232_HWMON
7659 config RTC_DRV_PCF2127
7660 tristate "NXP PCF2127"
7661 depends on RTC_I2C_AND_SPI
7662 + select REGMAP_I2C if I2C
7663 + select REGMAP_SPI if SPI_MASTER
7664 select WATCHDOG_CORE if WATCHDOG
7665 help
7666 If you say yes here you get support for the NXP PCF2127/29 RTC
7667 @@ -902,6 +908,8 @@ config RTC_DRV_PCF2127
7668 config RTC_DRV_RV3029C2
7669 tristate "Micro Crystal RV3029/3049"
7670 depends on RTC_I2C_AND_SPI
7671 + select REGMAP_I2C if I2C
7672 + select REGMAP_SPI if SPI_MASTER
7673 help
7674 If you say yes here you get support for the Micro Crystal
7675 RV3029 and RV3049 RTC chips.
7676 diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
7677 index a9d40d3b90ef..4190a025381a 100644
7678 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c
7679 +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
7680 @@ -2314,7 +2314,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
7681 * At some speeds, we only support
7682 * ST transfers.
7683 */
7684 - if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
7685 + if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
7686 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
7687 break;
7688 }
7689 diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
7690 index 0bc63a7ab41c..b5dd1caae5e9 100644
7691 --- a/drivers/scsi/iscsi_tcp.c
7692 +++ b/drivers/scsi/iscsi_tcp.c
7693 @@ -887,6 +887,10 @@ free_host:
7694 static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
7695 {
7696 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
7697 + struct iscsi_session *session = cls_session->dd_data;
7698 +
7699 + if (WARN_ON_ONCE(session->leadconn))
7700 + return;
7701
7702 iscsi_tcp_r2tpool_free(cls_session->dd_data);
7703 iscsi_session_teardown(cls_session);
7704 diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
7705 index f883fac2d2b1..f81d1453eefb 100644
7706 --- a/drivers/scsi/lpfc/lpfc_ct.c
7707 +++ b/drivers/scsi/lpfc/lpfc_ct.c
7708 @@ -1477,33 +1477,35 @@ int
7709 lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
7710 size_t size)
7711 {
7712 - char fwrev[FW_REV_STR_SIZE];
7713 - int n;
7714 + char fwrev[FW_REV_STR_SIZE] = {0};
7715 + char tmp[MAXHOSTNAMELEN] = {0};
7716
7717 - lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
7718 + memset(symbol, 0, size);
7719
7720 - n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
7721 - if (size < n)
7722 - return n;
7723 + scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
7724 + if (strlcat(symbol, tmp, size) >= size)
7725 + goto buffer_done;
7726
7727 - n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
7728 - if (size < n)
7729 - return n;
7730 + lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
7731 + scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
7732 + if (strlcat(symbol, tmp, size) >= size)
7733 + goto buffer_done;
7734
7735 - n += scnprintf(symbol + n, size - n, " DV%s.",
7736 - lpfc_release_version);
7737 - if (size < n)
7738 - return n;
7739 + scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
7740 + if (strlcat(symbol, tmp, size) >= size)
7741 + goto buffer_done;
7742
7743 - n += scnprintf(symbol + n, size - n, " HN:%s.",
7744 - init_utsname()->nodename);
7745 - if (size < n)
7746 - return n;
7747 + scnprintf(tmp, sizeof(tmp), " HN:%s", init_utsname()->nodename);
7748 + if (strlcat(symbol, tmp, size) >= size)
7749 + goto buffer_done;
7750
7751 /* Note :- OS name is "Linux" */
7752 - n += scnprintf(symbol + n, size - n, " OS:%s",
7753 - init_utsname()->sysname);
7754 - return n;
7755 + scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
7756 + strlcat(symbol, tmp, size);
7757 +
7758 +buffer_done:
7759 + return strnlen(symbol, size);
7760 +
7761 }
7762
7763 static uint32_t
7764 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
7765 index ed8d9709b9b9..271afea654e2 100644
7766 --- a/drivers/scsi/scsi_transport_iscsi.c
7767 +++ b/drivers/scsi/scsi_transport_iscsi.c
7768 @@ -2947,6 +2947,24 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
7769 return err;
7770 }
7771
7772 +static int iscsi_session_has_conns(int sid)
7773 +{
7774 + struct iscsi_cls_conn *conn;
7775 + unsigned long flags;
7776 + int found = 0;
7777 +
7778 + spin_lock_irqsave(&connlock, flags);
7779 + list_for_each_entry(conn, &connlist, conn_list) {
7780 + if (iscsi_conn_get_sid(conn) == sid) {
7781 + found = 1;
7782 + break;
7783 + }
7784 + }
7785 + spin_unlock_irqrestore(&connlock, flags);
7786 +
7787 + return found;
7788 +}
7789 +
7790 static int
7791 iscsi_set_iface_params(struct iscsi_transport *transport,
7792 struct iscsi_uevent *ev, uint32_t len)
7793 @@ -3524,10 +3542,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
7794 break;
7795 case ISCSI_UEVENT_DESTROY_SESSION:
7796 session = iscsi_session_lookup(ev->u.d_session.sid);
7797 - if (session)
7798 - transport->destroy_session(session);
7799 - else
7800 + if (!session)
7801 err = -EINVAL;
7802 + else if (iscsi_session_has_conns(ev->u.d_session.sid))
7803 + err = -EBUSY;
7804 + else
7805 + transport->destroy_session(session);
7806 break;
7807 case ISCSI_UEVENT_UNBIND_SESSION:
7808 session = iscsi_session_lookup(ev->u.d_session.sid);
7809 diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
7810 index 0f6ff33ce52e..d4a8be5ffd52 100644
7811 --- a/drivers/scsi/ufs/ufs-mediatek.c
7812 +++ b/drivers/scsi/ufs/ufs-mediatek.c
7813 @@ -13,6 +13,7 @@
7814
7815 #include "ufshcd.h"
7816 #include "ufshcd-pltfrm.h"
7817 +#include "ufs_quirks.h"
7818 #include "unipro.h"
7819 #include "ufs-mediatek.h"
7820
7821 @@ -286,6 +287,15 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7822 return 0;
7823 }
7824
7825 +static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba,
7826 + struct ufs_dev_desc *card)
7827 +{
7828 + if (card->wmanufacturerid == UFS_VENDOR_SAMSUNG)
7829 + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
7830 +
7831 + return 0;
7832 +}
7833 +
7834 /**
7835 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
7836 *
7837 @@ -298,6 +308,7 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
7838 .setup_clocks = ufs_mtk_setup_clocks,
7839 .link_startup_notify = ufs_mtk_link_startup_notify,
7840 .pwr_change_notify = ufs_mtk_pwr_change_notify,
7841 + .apply_dev_quirks = ufs_mtk_apply_dev_quirks,
7842 .suspend = ufs_mtk_suspend,
7843 .resume = ufs_mtk_resume,
7844 };
7845 diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
7846 index a5b71487a206..411ef60b2c14 100644
7847 --- a/drivers/scsi/ufs/ufs-qcom.c
7848 +++ b/drivers/scsi/ufs/ufs-qcom.c
7849 @@ -905,7 +905,8 @@ out:
7850 return err;
7851 }
7852
7853 -static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
7854 +static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba,
7855 + struct ufs_dev_desc *card)
7856 {
7857 int err = 0;
7858
7859 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
7860 index b0d6978d78bf..d9ea0ae4f374 100644
7861 --- a/drivers/scsi/ufs/ufshcd.c
7862 +++ b/drivers/scsi/ufs/ufshcd.c
7863 @@ -4788,7 +4788,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
7864 break;
7865 } /* end of switch */
7866
7867 - if (host_byte(result) != DID_OK)
7868 + if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
7869 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
7870 return result;
7871 }
7872 @@ -5321,8 +5321,8 @@ static void ufshcd_err_handler(struct work_struct *work)
7873
7874 /*
7875 * if host reset is required then skip clearing the pending
7876 - * transfers forcefully because they will automatically get
7877 - * cleared after link startup.
7878 + * transfers forcefully because they will get cleared during
7879 + * host reset and restore
7880 */
7881 if (needs_reset)
7882 goto skip_pending_xfer_clear;
7883 @@ -6205,9 +6205,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7884 int err;
7885 unsigned long flags;
7886
7887 - /* Reset the host controller */
7888 + /*
7889 + * Stop the host controller and complete the requests
7890 + * cleared by h/w
7891 + */
7892 spin_lock_irqsave(hba->host->host_lock, flags);
7893 ufshcd_hba_stop(hba, false);
7894 + hba->silence_err_logs = true;
7895 + ufshcd_complete_requests(hba);
7896 + hba->silence_err_logs = false;
7897 spin_unlock_irqrestore(hba->host->host_lock, flags);
7898
7899 /* scale up clocks to max frequency before full reinitialization */
7900 @@ -6241,7 +6247,6 @@ out:
7901 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7902 {
7903 int err = 0;
7904 - unsigned long flags;
7905 int retries = MAX_HOST_RESET_RETRIES;
7906
7907 do {
7908 @@ -6251,15 +6256,6 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7909 err = ufshcd_host_reset_and_restore(hba);
7910 } while (err && --retries);
7911
7912 - /*
7913 - * After reset the door-bell might be cleared, complete
7914 - * outstanding requests in s/w here.
7915 - */
7916 - spin_lock_irqsave(hba->host->host_lock, flags);
7917 - ufshcd_transfer_req_compl(hba);
7918 - ufshcd_tmc_handler(hba);
7919 - spin_unlock_irqrestore(hba->host->host_lock, flags);
7920 -
7921 return err;
7922 }
7923
7924 @@ -6725,7 +6721,8 @@ out:
7925 return ret;
7926 }
7927
7928 -static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7929 +static void ufshcd_tune_unipro_params(struct ufs_hba *hba,
7930 + struct ufs_dev_desc *card)
7931 {
7932 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7933 ufshcd_tune_pa_tactivate(hba);
7934 @@ -6739,7 +6736,7 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7935 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7936 ufshcd_quirk_tune_host_pa_tactivate(hba);
7937
7938 - ufshcd_vops_apply_dev_quirks(hba);
7939 + ufshcd_vops_apply_dev_quirks(hba, card);
7940 }
7941
7942 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7943 @@ -6902,10 +6899,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
7944 }
7945
7946 ufs_fixup_device_setup(hba, &card);
7947 + ufshcd_tune_unipro_params(hba, &card);
7948 ufs_put_device_desc(&card);
7949
7950 - ufshcd_tune_unipro_params(hba);
7951 -
7952 /* UFS device is also active now */
7953 ufshcd_set_ufs_dev_active(hba);
7954 ufshcd_force_reset_auto_bkops(hba);
7955 diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
7956 index 52c9676a1242..5260e594e0b9 100644
7957 --- a/drivers/scsi/ufs/ufshcd.h
7958 +++ b/drivers/scsi/ufs/ufshcd.h
7959 @@ -322,7 +322,7 @@ struct ufs_hba_variant_ops {
7960 void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
7961 void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
7962 enum ufs_notify_change_status);
7963 - int (*apply_dev_quirks)(struct ufs_hba *);
7964 + int (*apply_dev_quirks)(struct ufs_hba *, struct ufs_dev_desc *);
7965 int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
7966 int (*resume)(struct ufs_hba *, enum ufs_pm_op);
7967 void (*dbg_register_dump)(struct ufs_hba *hba);
7968 @@ -513,6 +513,7 @@ struct ufs_stats {
7969 * @uic_error: UFS interconnect layer error status
7970 * @saved_err: sticky error mask
7971 * @saved_uic_err: sticky UIC error mask
7972 + * @silence_err_logs: flag to silence error logs
7973 * @dev_cmd: ufs device management command information
7974 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
7975 * @auto_bkops_enabled: to track whether bkops is enabled in device
7976 @@ -670,6 +671,7 @@ struct ufs_hba {
7977 u32 saved_err;
7978 u32 saved_uic_err;
7979 struct ufs_stats ufs_stats;
7980 + bool silence_err_logs;
7981
7982 /* Device management request data */
7983 struct ufs_dev_cmd dev_cmd;
7984 @@ -1045,10 +1047,11 @@ static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
7985 return hba->vops->hibern8_notify(hba, cmd, status);
7986 }
7987
7988 -static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
7989 +static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba,
7990 + struct ufs_dev_desc *card)
7991 {
7992 if (hba->vops && hba->vops->apply_dev_quirks)
7993 - return hba->vops->apply_dev_quirks(hba);
7994 + return hba->vops->apply_dev_quirks(hba, card);
7995 return 0;
7996 }
7997
7998 diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
7999 index df76778af601..f8b9c4058926 100644
8000 --- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
8001 +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
8002 @@ -123,7 +123,7 @@ void __init tegra_init_apbmisc(void)
8003 apbmisc.flags = IORESOURCE_MEM;
8004
8005 /* strapping options */
8006 - if (tegra_get_chip_id() == TEGRA124) {
8007 + if (of_machine_is_compatible("nvidia,tegra124")) {
8008 straps.start = 0x7000e864;
8009 straps.end = 0x7000e867;
8010 } else {
8011 diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
8012 index 3528ed5eea9b..92e460d4f3d1 100644
8013 --- a/drivers/spi/spi-fsl-lpspi.c
8014 +++ b/drivers/spi/spi-fsl-lpspi.c
8015 @@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
8016 fsl_lpspi->dev = &pdev->dev;
8017 fsl_lpspi->is_slave = is_slave;
8018
8019 + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
8020 + controller->transfer_one = fsl_lpspi_transfer_one;
8021 + controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
8022 + controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
8023 + controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
8024 + controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
8025 + controller->dev.of_node = pdev->dev.of_node;
8026 + controller->bus_num = pdev->id;
8027 + controller->slave_abort = fsl_lpspi_slave_abort;
8028 +
8029 + ret = devm_spi_register_controller(&pdev->dev, controller);
8030 + if (ret < 0) {
8031 + dev_err(&pdev->dev, "spi_register_controller error.\n");
8032 + goto out_controller_put;
8033 + }
8034 +
8035 if (!fsl_lpspi->is_slave) {
8036 for (i = 0; i < controller->num_chipselect; i++) {
8037 int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
8038 @@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
8039 controller->prepare_message = fsl_lpspi_prepare_message;
8040 }
8041
8042 - controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
8043 - controller->transfer_one = fsl_lpspi_transfer_one;
8044 - controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
8045 - controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
8046 - controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
8047 - controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
8048 - controller->dev.of_node = pdev->dev.of_node;
8049 - controller->bus_num = pdev->id;
8050 - controller->slave_abort = fsl_lpspi_slave_abort;
8051 -
8052 init_completion(&fsl_lpspi->xfer_done);
8053
8054 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
8055 @@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
8056 if (ret < 0)
8057 dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
8058
8059 - ret = devm_spi_register_controller(&pdev->dev, controller);
8060 - if (ret < 0) {
8061 - dev_err(&pdev->dev, "spi_register_controller error.\n");
8062 - goto out_controller_put;
8063 - }
8064 -
8065 return 0;
8066
8067 out_controller_put:
8068 diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
8069 index 63c9f7edaf6c..43078ba3def5 100644
8070 --- a/drivers/spi/spi-fsl-qspi.c
8071 +++ b/drivers/spi/spi-fsl-qspi.c
8072 @@ -398,7 +398,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem,
8073 op->data.nbytes > q->devtype_data->txfifo)
8074 return false;
8075
8076 - return true;
8077 + return spi_mem_default_supports_op(mem, op);
8078 }
8079
8080 static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
8081 diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
8082 index 0a1a04fd5d13..8dd1396909d7 100644
8083 --- a/drivers/staging/media/meson/vdec/vdec.c
8084 +++ b/drivers/staging/media/meson/vdec/vdec.c
8085 @@ -133,6 +133,8 @@ vdec_queue_recycle(struct amvdec_session *sess, struct vb2_buffer *vb)
8086 struct amvdec_buffer *new_buf;
8087
8088 new_buf = kmalloc(sizeof(*new_buf), GFP_KERNEL);
8089 + if (!new_buf)
8090 + return;
8091 new_buf->vb = vb;
8092
8093 mutex_lock(&sess->bufs_recycle_lock);
8094 diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
8095 index ec5835d1aa8c..9f0418ee7528 100644
8096 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
8097 +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
8098 @@ -229,18 +229,21 @@ static char *translate_scan(struct adapter *padapter,
8099
8100 /* parsing WPA/WPA2 IE */
8101 {
8102 - u8 buf[MAX_WPA_IE_LEN];
8103 + u8 *buf;
8104 u8 wpa_ie[255], rsn_ie[255];
8105 u16 wpa_len = 0, rsn_len = 0;
8106 u8 *p;
8107
8108 + buf = kzalloc(MAX_WPA_IE_LEN, GFP_ATOMIC);
8109 + if (!buf)
8110 + return start;
8111 +
8112 rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
8113 RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.ssid.ssid));
8114 RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
8115
8116 if (wpa_len > 0) {
8117 p = buf;
8118 - memset(buf, 0, MAX_WPA_IE_LEN);
8119 p += sprintf(p, "wpa_ie=");
8120 for (i = 0; i < wpa_len; i++)
8121 p += sprintf(p, "%02x", wpa_ie[i]);
8122 @@ -257,7 +260,6 @@ static char *translate_scan(struct adapter *padapter,
8123 }
8124 if (rsn_len > 0) {
8125 p = buf;
8126 - memset(buf, 0, MAX_WPA_IE_LEN);
8127 p += sprintf(p, "rsn_ie=");
8128 for (i = 0; i < rsn_len; i++)
8129 p += sprintf(p, "%02x", rsn_ie[i]);
8130 @@ -271,6 +273,7 @@ static char *translate_scan(struct adapter *padapter,
8131 iwe.u.data.length = rsn_len;
8132 start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie);
8133 }
8134 + kfree(buf);
8135 }
8136
8137 {/* parsing WPS IE */
8138 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
8139 index e8a9047de451..36f1a4d870eb 100644
8140 --- a/drivers/tty/synclink_gt.c
8141 +++ b/drivers/tty/synclink_gt.c
8142 @@ -1334,10 +1334,10 @@ static void throttle(struct tty_struct * tty)
8143 DBGINFO(("%s throttle\n", info->device_name));
8144 if (I_IXOFF(tty))
8145 send_xchar(tty, STOP_CHAR(tty));
8146 - if (C_CRTSCTS(tty)) {
8147 + if (C_CRTSCTS(tty)) {
8148 spin_lock_irqsave(&info->lock,flags);
8149 info->signals &= ~SerialSignal_RTS;
8150 - set_signals(info);
8151 + set_signals(info);
8152 spin_unlock_irqrestore(&info->lock,flags);
8153 }
8154 }
8155 @@ -1359,10 +1359,10 @@ static void unthrottle(struct tty_struct * tty)
8156 else
8157 send_xchar(tty, START_CHAR(tty));
8158 }
8159 - if (C_CRTSCTS(tty)) {
8160 + if (C_CRTSCTS(tty)) {
8161 spin_lock_irqsave(&info->lock,flags);
8162 info->signals |= SerialSignal_RTS;
8163 - set_signals(info);
8164 + set_signals(info);
8165 spin_unlock_irqrestore(&info->lock,flags);
8166 }
8167 }
8168 @@ -2560,8 +2560,8 @@ static void change_params(struct slgt_info *info)
8169 info->read_status_mask = IRQ_RXOVER;
8170 if (I_INPCK(info->port.tty))
8171 info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
8172 - if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
8173 - info->read_status_mask |= MASK_BREAK;
8174 + if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
8175 + info->read_status_mask |= MASK_BREAK;
8176 if (I_IGNPAR(info->port.tty))
8177 info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
8178 if (I_IGNBRK(info->port.tty)) {
8179 @@ -3192,7 +3192,7 @@ static int tiocmset(struct tty_struct *tty,
8180 info->signals &= ~SerialSignal_DTR;
8181
8182 spin_lock_irqsave(&info->lock,flags);
8183 - set_signals(info);
8184 + set_signals(info);
8185 spin_unlock_irqrestore(&info->lock,flags);
8186 return 0;
8187 }
8188 @@ -3203,7 +3203,7 @@ static int carrier_raised(struct tty_port *port)
8189 struct slgt_info *info = container_of(port, struct slgt_info, port);
8190
8191 spin_lock_irqsave(&info->lock,flags);
8192 - get_signals(info);
8193 + get_signals(info);
8194 spin_unlock_irqrestore(&info->lock,flags);
8195 return (info->signals & SerialSignal_DCD) ? 1 : 0;
8196 }
8197 @@ -3218,7 +3218,7 @@ static void dtr_rts(struct tty_port *port, int on)
8198 info->signals |= SerialSignal_RTS | SerialSignal_DTR;
8199 else
8200 info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
8201 - set_signals(info);
8202 + set_signals(info);
8203 spin_unlock_irqrestore(&info->lock,flags);
8204 }
8205
8206 diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
8207 index fcb91bf7a15b..54b897a646d0 100644
8208 --- a/drivers/tty/synclinkmp.c
8209 +++ b/drivers/tty/synclinkmp.c
8210 @@ -1453,10 +1453,10 @@ static void throttle(struct tty_struct * tty)
8211 if (I_IXOFF(tty))
8212 send_xchar(tty, STOP_CHAR(tty));
8213
8214 - if (C_CRTSCTS(tty)) {
8215 + if (C_CRTSCTS(tty)) {
8216 spin_lock_irqsave(&info->lock,flags);
8217 info->serial_signals &= ~SerialSignal_RTS;
8218 - set_signals(info);
8219 + set_signals(info);
8220 spin_unlock_irqrestore(&info->lock,flags);
8221 }
8222 }
8223 @@ -1482,10 +1482,10 @@ static void unthrottle(struct tty_struct * tty)
8224 send_xchar(tty, START_CHAR(tty));
8225 }
8226
8227 - if (C_CRTSCTS(tty)) {
8228 + if (C_CRTSCTS(tty)) {
8229 spin_lock_irqsave(&info->lock,flags);
8230 info->serial_signals |= SerialSignal_RTS;
8231 - set_signals(info);
8232 + set_signals(info);
8233 spin_unlock_irqrestore(&info->lock,flags);
8234 }
8235 }
8236 @@ -2470,7 +2470,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
8237 if (status & SerialSignal_CTS) {
8238 if ( debug_level >= DEBUG_LEVEL_ISR )
8239 printk("CTS tx start...");
8240 - info->port.tty->hw_stopped = 0;
8241 + info->port.tty->hw_stopped = 0;
8242 tx_start(info);
8243 info->pending_bh |= BH_TRANSMIT;
8244 return;
8245 @@ -2479,7 +2479,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
8246 if (!(status & SerialSignal_CTS)) {
8247 if ( debug_level >= DEBUG_LEVEL_ISR )
8248 printk("CTS tx stop...");
8249 - info->port.tty->hw_stopped = 1;
8250 + info->port.tty->hw_stopped = 1;
8251 tx_stop(info);
8252 }
8253 }
8254 @@ -2806,8 +2806,8 @@ static void change_params(SLMP_INFO *info)
8255 info->read_status_mask2 = OVRN;
8256 if (I_INPCK(info->port.tty))
8257 info->read_status_mask2 |= PE | FRME;
8258 - if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
8259 - info->read_status_mask1 |= BRKD;
8260 + if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
8261 + info->read_status_mask1 |= BRKD;
8262 if (I_IGNPAR(info->port.tty))
8263 info->ignore_status_mask2 |= PE | FRME;
8264 if (I_IGNBRK(info->port.tty)) {
8265 @@ -3177,7 +3177,7 @@ static int tiocmget(struct tty_struct *tty)
8266 unsigned long flags;
8267
8268 spin_lock_irqsave(&info->lock,flags);
8269 - get_signals(info);
8270 + get_signals(info);
8271 spin_unlock_irqrestore(&info->lock,flags);
8272
8273 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS : 0) |
8274 @@ -3215,7 +3215,7 @@ static int tiocmset(struct tty_struct *tty,
8275 info->serial_signals &= ~SerialSignal_DTR;
8276
8277 spin_lock_irqsave(&info->lock,flags);
8278 - set_signals(info);
8279 + set_signals(info);
8280 spin_unlock_irqrestore(&info->lock,flags);
8281
8282 return 0;
8283 @@ -3227,7 +3227,7 @@ static int carrier_raised(struct tty_port *port)
8284 unsigned long flags;
8285
8286 spin_lock_irqsave(&info->lock,flags);
8287 - get_signals(info);
8288 + get_signals(info);
8289 spin_unlock_irqrestore(&info->lock,flags);
8290
8291 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
8292 @@ -3243,7 +3243,7 @@ static void dtr_rts(struct tty_port *port, int on)
8293 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
8294 else
8295 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
8296 - set_signals(info);
8297 + set_signals(info);
8298 spin_unlock_irqrestore(&info->lock,flags);
8299 }
8300
8301 diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
8302 index ebcf1434e296..44858f70f5f5 100644
8303 --- a/drivers/uio/uio_dmem_genirq.c
8304 +++ b/drivers/uio/uio_dmem_genirq.c
8305 @@ -132,11 +132,13 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
8306 if (irq_on) {
8307 if (test_and_clear_bit(0, &priv->flags))
8308 enable_irq(dev_info->irq);
8309 + spin_unlock_irqrestore(&priv->lock, flags);
8310 } else {
8311 - if (!test_and_set_bit(0, &priv->flags))
8312 + if (!test_and_set_bit(0, &priv->flags)) {
8313 + spin_unlock_irqrestore(&priv->lock, flags);
8314 disable_irq(dev_info->irq);
8315 + }
8316 }
8317 - spin_unlock_irqrestore(&priv->lock, flags);
8318
8319 return 0;
8320 }
8321 diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
8322 index 6be10e496e10..a9133773b89e 100644
8323 --- a/drivers/usb/dwc2/gadget.c
8324 +++ b/drivers/usb/dwc2/gadget.c
8325 @@ -4056,11 +4056,12 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
8326 * a unique tx-fifo even if it is non-periodic.
8327 */
8328 if (dir_in && hsotg->dedicated_fifos) {
8329 + unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
8330 u32 fifo_index = 0;
8331 u32 fifo_size = UINT_MAX;
8332
8333 size = hs_ep->ep.maxpacket * hs_ep->mc;
8334 - for (i = 1; i < hsotg->num_of_eps; ++i) {
8335 + for (i = 1; i <= fifo_count; ++i) {
8336 if (hsotg->fifo_map & (1 << i))
8337 continue;
8338 val = dwc2_readl(hsotg, DPTXFSIZN(i));
8339 diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
8340 index 5567ed2cddbe..fa252870c926 100644
8341 --- a/drivers/usb/dwc3/host.c
8342 +++ b/drivers/usb/dwc3/host.c
8343 @@ -88,10 +88,10 @@ int dwc3_host_init(struct dwc3 *dwc)
8344 memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
8345
8346 if (dwc->usb3_lpm_capable)
8347 - props[prop_idx++].name = "usb3-lpm-capable";
8348 + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
8349
8350 if (dwc->usb2_lpm_disable)
8351 - props[prop_idx++].name = "usb2-lpm-disable";
8352 + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
8353
8354 /**
8355 * WORKAROUND: dwc3 revisions <=3.00a have a limitation
8356 @@ -103,7 +103,7 @@ int dwc3_host_init(struct dwc3 *dwc)
8357 * This following flag tells XHCI to do just that.
8358 */
8359 if (dwc->revision <= DWC3_REVISION_300A)
8360 - props[prop_idx++].name = "quirk-broken-port-ped";
8361 + props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
8362
8363 if (prop_idx) {
8364 ret = platform_device_add_properties(xhci, props);
8365 diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
8366 index 7a0e9a58c2d8..116d386472ef 100644
8367 --- a/drivers/usb/gadget/udc/gr_udc.c
8368 +++ b/drivers/usb/gadget/udc/gr_udc.c
8369 @@ -2176,8 +2176,6 @@ static int gr_probe(struct platform_device *pdev)
8370 return -ENOMEM;
8371 }
8372
8373 - spin_lock(&dev->lock);
8374 -
8375 /* Inside lock so that no gadget can use this udc until probe is done */
8376 retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
8377 if (retval) {
8378 @@ -2186,15 +2184,21 @@ static int gr_probe(struct platform_device *pdev)
8379 }
8380 dev->added = 1;
8381
8382 + spin_lock(&dev->lock);
8383 +
8384 retval = gr_udc_init(dev);
8385 - if (retval)
8386 + if (retval) {
8387 + spin_unlock(&dev->lock);
8388 goto out;
8389 -
8390 - gr_dfs_create(dev);
8391 + }
8392
8393 /* Clear all interrupt enables that might be left on since last boot */
8394 gr_disable_interrupts_and_pullup(dev);
8395
8396 + spin_unlock(&dev->lock);
8397 +
8398 + gr_dfs_create(dev);
8399 +
8400 retval = gr_request_irq(dev, dev->irq);
8401 if (retval) {
8402 dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
8403 @@ -2223,8 +2227,6 @@ static int gr_probe(struct platform_device *pdev)
8404 dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
8405
8406 out:
8407 - spin_unlock(&dev->lock);
8408 -
8409 if (retval)
8410 gr_remove(pdev);
8411
8412 diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
8413 index a3d2fef67746..5c93226e0e20 100644
8414 --- a/drivers/usb/musb/omap2430.c
8415 +++ b/drivers/usb/musb/omap2430.c
8416 @@ -361,8 +361,6 @@ static const struct musb_platform_ops omap2430_ops = {
8417 .init = omap2430_musb_init,
8418 .exit = omap2430_musb_exit,
8419
8420 - .set_vbus = omap2430_musb_set_vbus,
8421 -
8422 .enable = omap2430_musb_enable,
8423 .disable = omap2430_musb_disable,
8424
8425 diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
8426 index f2983f0f84be..3f5f8198a6bb 100644
8427 --- a/drivers/vfio/pci/vfio_pci_nvlink2.c
8428 +++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
8429 @@ -97,8 +97,10 @@ static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
8430
8431 /* If there were any mappings at all... */
8432 if (data->mm) {
8433 - ret = mm_iommu_put(data->mm, data->mem);
8434 - WARN_ON(ret);
8435 + if (data->mem) {
8436 + ret = mm_iommu_put(data->mm, data->mem);
8437 + WARN_ON(ret);
8438 + }
8439
8440 mmdrop(data->mm);
8441 }
8442 diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
8443 index 1410f476e135..1fc50fc0694b 100644
8444 --- a/drivers/video/fbdev/pxa168fb.c
8445 +++ b/drivers/video/fbdev/pxa168fb.c
8446 @@ -766,8 +766,8 @@ failed_free_cmap:
8447 failed_free_clk:
8448 clk_disable_unprepare(fbi->clk);
8449 failed_free_fbmem:
8450 - dma_free_coherent(fbi->dev, info->fix.smem_len,
8451 - info->screen_base, fbi->fb_start_dma);
8452 + dma_free_wc(fbi->dev, info->fix.smem_len,
8453 + info->screen_base, fbi->fb_start_dma);
8454 failed_free_info:
8455 kfree(info);
8456
8457 @@ -801,7 +801,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
8458
8459 irq = platform_get_irq(pdev, 0);
8460
8461 - dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
8462 + dma_free_wc(fbi->dev, info->fix.smem_len,
8463 info->screen_base, info->fix.smem_start);
8464
8465 clk_disable_unprepare(fbi->clk);
8466 diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
8467 index c962d9b370c6..d2c4eb9efd70 100644
8468 --- a/drivers/virtio/virtio_balloon.c
8469 +++ b/drivers/virtio/virtio_balloon.c
8470 @@ -157,6 +157,8 @@ static void set_page_pfns(struct virtio_balloon *vb,
8471 {
8472 unsigned int i;
8473
8474 + BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX);
8475 +
8476 /*
8477 * Set balloon pfns pointing at this page.
8478 * Note that the first pfn points at start of the page.
8479 diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
8480 index ca752b8f495f..cb1eb7e05f87 100644
8481 --- a/drivers/visorbus/visorchipset.c
8482 +++ b/drivers/visorbus/visorchipset.c
8483 @@ -1210,14 +1210,17 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
8484 {
8485 struct controlvm_message local_crash_bus_msg;
8486 struct controlvm_message local_crash_dev_msg;
8487 - struct controlvm_message msg;
8488 + struct controlvm_message msg = {
8489 + .hdr.id = CONTROLVM_CHIPSET_INIT,
8490 + .cmd.init_chipset = {
8491 + .bus_count = 23,
8492 + .switch_count = 0,
8493 + },
8494 + };
8495 u32 local_crash_msg_offset;
8496 u16 local_crash_msg_count;
8497
8498 /* send init chipset msg */
8499 - msg.hdr.id = CONTROLVM_CHIPSET_INIT;
8500 - msg.cmd.init_chipset.bus_count = 23;
8501 - msg.cmd.init_chipset.switch_count = 0;
8502 chipset_init(&msg);
8503 /* get saved message count */
8504 if (visorchannel_read(chipset_dev->controlvm_channel,
8505 diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
8506 index 3208a4409e44..6a1bc284f297 100644
8507 --- a/drivers/vme/bridges/vme_fake.c
8508 +++ b/drivers/vme/bridges/vme_fake.c
8509 @@ -414,8 +414,9 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
8510 }
8511 }
8512
8513 -static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
8514 - u32 aspace, u32 cycle)
8515 +static noinline_for_stack u8 fake_vmeread8(struct fake_driver *bridge,
8516 + unsigned long long addr,
8517 + u32 aspace, u32 cycle)
8518 {
8519 u8 retval = 0xff;
8520 int i;
8521 @@ -446,8 +447,9 @@ static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
8522 return retval;
8523 }
8524
8525 -static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
8526 - u32 aspace, u32 cycle)
8527 +static noinline_for_stack u16 fake_vmeread16(struct fake_driver *bridge,
8528 + unsigned long long addr,
8529 + u32 aspace, u32 cycle)
8530 {
8531 u16 retval = 0xffff;
8532 int i;
8533 @@ -478,8 +480,9 @@ static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
8534 return retval;
8535 }
8536
8537 -static u32 fake_vmeread32(struct fake_driver *bridge, unsigned long long addr,
8538 - u32 aspace, u32 cycle)
8539 +static noinline_for_stack u32 fake_vmeread32(struct fake_driver *bridge,
8540 + unsigned long long addr,
8541 + u32 aspace, u32 cycle)
8542 {
8543 u32 retval = 0xffffffff;
8544 int i;
8545 @@ -609,8 +612,9 @@ out:
8546 return retval;
8547 }
8548
8549 -static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
8550 - unsigned long long addr, u32 aspace, u32 cycle)
8551 +static noinline_for_stack void fake_vmewrite8(struct fake_driver *bridge,
8552 + u8 *buf, unsigned long long addr,
8553 + u32 aspace, u32 cycle)
8554 {
8555 int i;
8556 unsigned long long start, end, offset;
8557 @@ -639,8 +643,9 @@ static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
8558
8559 }
8560
8561 -static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
8562 - unsigned long long addr, u32 aspace, u32 cycle)
8563 +static noinline_for_stack void fake_vmewrite16(struct fake_driver *bridge,
8564 + u16 *buf, unsigned long long addr,
8565 + u32 aspace, u32 cycle)
8566 {
8567 int i;
8568 unsigned long long start, end, offset;
8569 @@ -669,8 +674,9 @@ static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
8570
8571 }
8572
8573 -static void fake_vmewrite32(struct fake_driver *bridge, u32 *buf,
8574 - unsigned long long addr, u32 aspace, u32 cycle)
8575 +static noinline_for_stack void fake_vmewrite32(struct fake_driver *bridge,
8576 + u32 *buf, unsigned long long addr,
8577 + u32 aspace, u32 cycle)
8578 {
8579 int i;
8580 unsigned long long start, end, offset;
8581 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
8582 index 0b52ab4cb964..72c70f59fc60 100644
8583 --- a/fs/btrfs/check-integrity.c
8584 +++ b/fs/btrfs/check-integrity.c
8585 @@ -629,7 +629,6 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev,
8586 static int btrfsic_process_superblock(struct btrfsic_state *state,
8587 struct btrfs_fs_devices *fs_devices)
8588 {
8589 - struct btrfs_fs_info *fs_info = state->fs_info;
8590 struct btrfs_super_block *selected_super;
8591 struct list_head *dev_head = &fs_devices->devices;
8592 struct btrfs_device *device;
8593 @@ -700,7 +699,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
8594 break;
8595 }
8596
8597 - num_copies = btrfs_num_copies(fs_info, next_bytenr,
8598 + num_copies = btrfs_num_copies(state->fs_info, next_bytenr,
8599 state->metablock_size);
8600 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
8601 pr_info("num_copies(log_bytenr=%llu) = %d\n",
8602 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
8603 index 290ca193c6c0..169075550a5a 100644
8604 --- a/fs/btrfs/ctree.h
8605 +++ b/fs/btrfs/ctree.h
8606 @@ -3107,17 +3107,21 @@ do { \
8607 rcu_read_unlock(); \
8608 } while (0)
8609
8610 -__cold
8611 -static inline void assfail(const char *expr, const char *file, int line)
8612 +#ifdef CONFIG_BTRFS_ASSERT
8613 +__cold __noreturn
8614 +static inline void assertfail(const char *expr, const char *file, int line)
8615 {
8616 - if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
8617 - pr_err("assertion failed: %s, in %s:%d\n", expr, file, line);
8618 - BUG();
8619 - }
8620 + pr_err("assertion failed: %s, in %s:%d\n", expr, file, line);
8621 + BUG();
8622 }
8623
8624 -#define ASSERT(expr) \
8625 - (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
8626 +#define ASSERT(expr) \
8627 + (likely(expr) ? (void)0 : assertfail(#expr, __FILE__, __LINE__))
8628 +
8629 +#else
8630 +static inline void assertfail(const char *expr, const char* file, int line) { }
8631 +#define ASSERT(expr) (void)(expr)
8632 +#endif
8633
8634 /*
8635 * Use that for functions that are conditionally exported for sanity tests but
8636 diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
8637 index c878bc25d046..f62a179f85bb 100644
8638 --- a/fs/btrfs/file-item.c
8639 +++ b/fs/btrfs/file-item.c
8640 @@ -274,7 +274,8 @@ found:
8641 csum += count * csum_size;
8642 nblocks -= count;
8643 next:
8644 - while (count--) {
8645 + while (count > 0) {
8646 + count--;
8647 disk_bytenr += fs_info->sectorsize;
8648 offset += fs_info->sectorsize;
8649 page_bytes_left -= fs_info->sectorsize;
8650 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
8651 index 6f0568fb5899..b83eef445db3 100644
8652 --- a/fs/btrfs/inode.c
8653 +++ b/fs/btrfs/inode.c
8654 @@ -2168,6 +2168,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
8655 /* see btrfs_writepage_start_hook for details on why this is required */
8656 struct btrfs_writepage_fixup {
8657 struct page *page;
8658 + struct inode *inode;
8659 struct btrfs_work work;
8660 };
8661
8662 @@ -2181,27 +2182,71 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
8663 struct inode *inode;
8664 u64 page_start;
8665 u64 page_end;
8666 - int ret;
8667 + int ret = 0;
8668 + bool free_delalloc_space = true;
8669
8670 fixup = container_of(work, struct btrfs_writepage_fixup, work);
8671 page = fixup->page;
8672 + inode = fixup->inode;
8673 + page_start = page_offset(page);
8674 + page_end = page_offset(page) + PAGE_SIZE - 1;
8675 +
8676 + /*
8677 + * This is similar to page_mkwrite, we need to reserve the space before
8678 + * we take the page lock.
8679 + */
8680 + ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
8681 + PAGE_SIZE);
8682 again:
8683 lock_page(page);
8684 +
8685 + /*
8686 + * Before we queued this fixup, we took a reference on the page.
8687 + * page->mapping may go NULL, but it shouldn't be moved to a different
8688 + * address space.
8689 + */
8690 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
8691 - ClearPageChecked(page);
8692 + /*
8693 + * Unfortunately this is a little tricky, either
8694 + *
8695 + * 1) We got here and our page had already been dealt with and
8696 + * we reserved our space, thus ret == 0, so we need to just
8697 + * drop our space reservation and bail. This can happen the
8698 + * first time we come into the fixup worker, or could happen
8699 + * while waiting for the ordered extent.
8700 + * 2) Our page was already dealt with, but we happened to get an
8701 + * ENOSPC above from the btrfs_delalloc_reserve_space. In
8702 + * this case we obviously don't have anything to release, but
8703 + * because the page was already dealt with we don't want to
8704 + * mark the page with an error, so make sure we're resetting
8705 + * ret to 0. This is why we have this check _before_ the ret
8706 + * check, because we do not want to have a surprise ENOSPC
8707 + * when the page was already properly dealt with.
8708 + */
8709 + if (!ret) {
8710 + btrfs_delalloc_release_extents(BTRFS_I(inode),
8711 + PAGE_SIZE);
8712 + btrfs_delalloc_release_space(inode, data_reserved,
8713 + page_start, PAGE_SIZE,
8714 + true);
8715 + }
8716 + ret = 0;
8717 goto out_page;
8718 }
8719
8720 - inode = page->mapping->host;
8721 - page_start = page_offset(page);
8722 - page_end = page_offset(page) + PAGE_SIZE - 1;
8723 + /*
8724 + * We can't mess with the page state unless it is locked, so now that
8725 + * it is locked bail if we failed to make our space reservation.
8726 + */
8727 + if (ret)
8728 + goto out_page;
8729
8730 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
8731 &cached_state);
8732
8733 /* already ordered? We're done */
8734 if (PagePrivate2(page))
8735 - goto out;
8736 + goto out_reserved;
8737
8738 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8739 PAGE_SIZE);
8740 @@ -2214,39 +2259,49 @@ again:
8741 goto again;
8742 }
8743
8744 - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
8745 - PAGE_SIZE);
8746 - if (ret) {
8747 - mapping_set_error(page->mapping, ret);
8748 - end_extent_writepage(page, ret, page_start, page_end);
8749 - ClearPageChecked(page);
8750 - goto out;
8751 - }
8752 -
8753 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
8754 &cached_state);
8755 - if (ret) {
8756 - mapping_set_error(page->mapping, ret);
8757 - end_extent_writepage(page, ret, page_start, page_end);
8758 - ClearPageChecked(page);
8759 + if (ret)
8760 goto out_reserved;
8761 - }
8762
8763 - ClearPageChecked(page);
8764 - set_page_dirty(page);
8765 + /*
8766 + * Everything went as planned, we're now the owner of a dirty page with
8767 + * delayed allocation bits set and space reserved for our COW
8768 + * destination.
8769 + *
8770 + * The page was dirty when we started, nothing should have cleaned it.
8771 + */
8772 + BUG_ON(!PageDirty(page));
8773 + free_delalloc_space = false;
8774 out_reserved:
8775 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8776 - if (ret)
8777 + if (free_delalloc_space)
8778 btrfs_delalloc_release_space(inode, data_reserved, page_start,
8779 PAGE_SIZE, true);
8780 -out:
8781 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
8782 &cached_state);
8783 out_page:
8784 + if (ret) {
8785 + /*
8786 + * We hit ENOSPC or other errors. Update the mapping and page
8787 + * to reflect the errors and clean the page.
8788 + */
8789 + mapping_set_error(page->mapping, ret);
8790 + end_extent_writepage(page, ret, page_start, page_end);
8791 + clear_page_dirty_for_io(page);
8792 + SetPageError(page);
8793 + }
8794 + ClearPageChecked(page);
8795 unlock_page(page);
8796 put_page(page);
8797 kfree(fixup);
8798 extent_changeset_free(data_reserved);
8799 + /*
8800 + * As a precaution, do a delayed iput in case it would be the last iput
8801 + * that could need flushing space. Recursing back to fixup worker would
8802 + * deadlock.
8803 + */
8804 + btrfs_add_delayed_iput(inode);
8805 }
8806
8807 /*
8808 @@ -2270,6 +2325,13 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
8809 if (TestClearPagePrivate2(page))
8810 return 0;
8811
8812 + /*
8813 + * PageChecked is set below when we create a fixup worker for this page,
8814 + * don't try to create another one if we're already PageChecked()
8815 + *
8816 + * The extent_io writepage code will redirty the page if we send back
8817 + * EAGAIN.
8818 + */
8819 if (PageChecked(page))
8820 return -EAGAIN;
8821
8822 @@ -2277,12 +2339,21 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
8823 if (!fixup)
8824 return -EAGAIN;
8825
8826 + /*
8827 + * We are already holding a reference to this inode from
8828 + * write_cache_pages. We need to hold it because the space reservation
8829 + * takes place outside of the page lock, and we can't trust
8830 + * page->mapping outside of the page lock.
8831 + */
8832 + ihold(inode);
8833 SetPageChecked(page);
8834 get_page(page);
8835 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
8836 fixup->page = page;
8837 + fixup->inode = inode;
8838 btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
8839 - return -EBUSY;
8840 +
8841 + return -EAGAIN;
8842 }
8843
8844 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
8845 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
8846 index f7d9fc1a6fc2..3e64f49c394b 100644
8847 --- a/fs/btrfs/volumes.c
8848 +++ b/fs/btrfs/volumes.c
8849 @@ -907,6 +907,32 @@ static struct btrfs_fs_devices *find_fsid_changed(
8850
8851 return NULL;
8852 }
8853 +
8854 +static struct btrfs_fs_devices *find_fsid_reverted_metadata(
8855 + struct btrfs_super_block *disk_super)
8856 +{
8857 + struct btrfs_fs_devices *fs_devices;
8858 +
8859 + /*
8860 + * Handle the case where the scanned device is part of an fs whose last
8861 + * metadata UUID change reverted it to the original FSID. At the same
8862 + * time * fs_devices was first created by another constitutent device
8863 + * which didn't fully observe the operation. This results in an
8864 + * btrfs_fs_devices created with metadata/fsid different AND
8865 + * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
8866 + * fs_devices equal to the FSID of the disk.
8867 + */
8868 + list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
8869 + if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
8870 + BTRFS_FSID_SIZE) != 0 &&
8871 + memcmp(fs_devices->metadata_uuid, disk_super->fsid,
8872 + BTRFS_FSID_SIZE) == 0 &&
8873 + fs_devices->fsid_change)
8874 + return fs_devices;
8875 + }
8876 +
8877 + return NULL;
8878 +}
8879 /*
8880 * Add new device to list of registered devices
8881 *
8882 @@ -946,7 +972,9 @@ static noinline struct btrfs_device *device_list_add(const char *path,
8883 fs_devices = find_fsid(disk_super->fsid,
8884 disk_super->metadata_uuid);
8885 } else {
8886 - fs_devices = find_fsid(disk_super->fsid, NULL);
8887 + fs_devices = find_fsid_reverted_metadata(disk_super);
8888 + if (!fs_devices)
8889 + fs_devices = find_fsid(disk_super->fsid, NULL);
8890 }
8891
8892
8893 @@ -976,12 +1004,18 @@ static noinline struct btrfs_device *device_list_add(const char *path,
8894 * a device which had the CHANGING_FSID_V2 flag then replace the
8895 * metadata_uuid/fsid values of the fs_devices.
8896 */
8897 - if (has_metadata_uuid && fs_devices->fsid_change &&
8898 + if (fs_devices->fsid_change &&
8899 found_transid > fs_devices->latest_generation) {
8900 memcpy(fs_devices->fsid, disk_super->fsid,
8901 BTRFS_FSID_SIZE);
8902 - memcpy(fs_devices->metadata_uuid,
8903 - disk_super->metadata_uuid, BTRFS_FSID_SIZE);
8904 +
8905 + if (has_metadata_uuid)
8906 + memcpy(fs_devices->metadata_uuid,
8907 + disk_super->metadata_uuid,
8908 + BTRFS_FSID_SIZE);
8909 + else
8910 + memcpy(fs_devices->metadata_uuid,
8911 + disk_super->fsid, BTRFS_FSID_SIZE);
8912
8913 fs_devices->fsid_change = false;
8914 }
8915 @@ -7561,6 +7595,8 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
8916 else
8917 btrfs_dev_stat_set(dev, i, 0);
8918 }
8919 + btrfs_info(fs_info, "device stats zeroed by %s (%d)",
8920 + current->comm, task_pid_nr(current));
8921 } else {
8922 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
8923 if (stats->nr_items > i)
8924 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
8925 index ee02a742fff5..8c1f04c3a684 100644
8926 --- a/fs/ceph/mds_client.c
8927 +++ b/fs/ceph/mds_client.c
8928 @@ -2552,8 +2552,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
8929 if (!(mdsc->fsc->mount_options->flags &
8930 CEPH_MOUNT_OPT_MOUNTWAIT) &&
8931 !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
8932 - err = -ENOENT;
8933 - pr_info("probably no mds server is up\n");
8934 + err = -EHOSTUNREACH;
8935 goto finish;
8936 }
8937 }
8938 diff --git a/fs/ceph/super.c b/fs/ceph/super.c
8939 index b47f43fc2d68..62fc7d46032e 100644
8940 --- a/fs/ceph/super.c
8941 +++ b/fs/ceph/super.c
8942 @@ -1137,6 +1137,11 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
8943 return res;
8944
8945 out_splat:
8946 + if (!ceph_mdsmap_is_cluster_available(fsc->mdsc->mdsmap)) {
8947 + pr_info("No mds server is up or the cluster is laggy\n");
8948 + err = -EHOSTUNREACH;
8949 + }
8950 +
8951 ceph_mdsc_close_sessions(fsc->mdsc);
8952 deactivate_locked_super(sb);
8953 goto out_final;
8954 diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
8955 index 41957b82d796..606f26d862dc 100644
8956 --- a/fs/cifs/cifs_dfs_ref.c
8957 +++ b/fs/cifs/cifs_dfs_ref.c
8958 @@ -120,17 +120,17 @@ cifs_build_devname(char *nodename, const char *prepath)
8959
8960
8961 /**
8962 - * cifs_compose_mount_options - creates mount options for refferral
8963 + * cifs_compose_mount_options - creates mount options for referral
8964 * @sb_mountdata: parent/root DFS mount options (template)
8965 * @fullpath: full path in UNC format
8966 - * @ref: server's referral
8967 + * @ref: optional server's referral
8968 * @devname: optional pointer for saving device name
8969 *
8970 * creates mount options for submount based on template options sb_mountdata
8971 * and replacing unc,ip,prefixpath options with ones we've got form ref_unc.
8972 *
8973 * Returns: pointer to new mount options or ERR_PTR.
8974 - * Caller is responcible for freeing retunrned value if it is not error.
8975 + * Caller is responsible for freeing returned value if it is not error.
8976 */
8977 char *cifs_compose_mount_options(const char *sb_mountdata,
8978 const char *fullpath,
8979 @@ -150,18 +150,27 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
8980 if (sb_mountdata == NULL)
8981 return ERR_PTR(-EINVAL);
8982
8983 - if (strlen(fullpath) - ref->path_consumed) {
8984 - prepath = fullpath + ref->path_consumed;
8985 - /* skip initial delimiter */
8986 - if (*prepath == '/' || *prepath == '\\')
8987 - prepath++;
8988 - }
8989 + if (ref) {
8990 + if (strlen(fullpath) - ref->path_consumed) {
8991 + prepath = fullpath + ref->path_consumed;
8992 + /* skip initial delimiter */
8993 + if (*prepath == '/' || *prepath == '\\')
8994 + prepath++;
8995 + }
8996
8997 - name = cifs_build_devname(ref->node_name, prepath);
8998 - if (IS_ERR(name)) {
8999 - rc = PTR_ERR(name);
9000 - name = NULL;
9001 - goto compose_mount_options_err;
9002 + name = cifs_build_devname(ref->node_name, prepath);
9003 + if (IS_ERR(name)) {
9004 + rc = PTR_ERR(name);
9005 + name = NULL;
9006 + goto compose_mount_options_err;
9007 + }
9008 + } else {
9009 + name = cifs_build_devname((char *)fullpath, NULL);
9010 + if (IS_ERR(name)) {
9011 + rc = PTR_ERR(name);
9012 + name = NULL;
9013 + goto compose_mount_options_err;
9014 + }
9015 }
9016
9017 rc = dns_resolve_server_name_to_ip(name, &srvIP);
9018 @@ -225,6 +234,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
9019
9020 if (devname)
9021 *devname = name;
9022 + else
9023 + kfree(name);
9024
9025 /*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
9026 /*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
9027 @@ -241,23 +252,23 @@ compose_mount_options_err:
9028 }
9029
9030 /**
9031 - * cifs_dfs_do_refmount - mounts specified path using provided refferal
9032 + * cifs_dfs_do_mount - mounts specified path using DFS full path
9033 + *
9034 + * Always pass down @fullpath to smb3_do_mount() so we can use the root server
9035 + * to perform failover in case we failed to connect to the first target in the
9036 + * referral.
9037 + *
9038 * @cifs_sb: parent/root superblock
9039 * @fullpath: full path in UNC format
9040 - * @ref: server's referral
9041 */
9042 -static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
9043 - struct cifs_sb_info *cifs_sb,
9044 - const char *fullpath, const struct dfs_info3_param *ref)
9045 +static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
9046 + struct cifs_sb_info *cifs_sb,
9047 + const char *fullpath)
9048 {
9049 struct vfsmount *mnt;
9050 char *mountdata;
9051 char *devname;
9052
9053 - /*
9054 - * Always pass down the DFS full path to smb3_do_mount() so we
9055 - * can use it later for failover.
9056 - */
9057 devname = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
9058 if (!devname)
9059 return ERR_PTR(-ENOMEM);
9060 @@ -266,7 +277,7 @@ static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
9061
9062 /* strip first '\' from fullpath */
9063 mountdata = cifs_compose_mount_options(cifs_sb->mountdata,
9064 - fullpath + 1, ref, NULL);
9065 + fullpath + 1, NULL, NULL);
9066 if (IS_ERR(mountdata)) {
9067 kfree(devname);
9068 return (struct vfsmount *)mountdata;
9069 @@ -278,28 +289,16 @@ static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
9070 return mnt;
9071 }
9072
9073 -static void dump_referral(const struct dfs_info3_param *ref)
9074 -{
9075 - cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name);
9076 - cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name);
9077 - cifs_dbg(FYI, "DFS: fl: %d, srv_type: %d\n",
9078 - ref->flags, ref->server_type);
9079 - cifs_dbg(FYI, "DFS: ref_flags: %d, path_consumed: %d\n",
9080 - ref->ref_flag, ref->path_consumed);
9081 -}
9082 -
9083 /*
9084 * Create a vfsmount that we can automount
9085 */
9086 static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
9087 {
9088 - struct dfs_info3_param referral = {0};
9089 struct cifs_sb_info *cifs_sb;
9090 struct cifs_ses *ses;
9091 struct cifs_tcon *tcon;
9092 char *full_path, *root_path;
9093 unsigned int xid;
9094 - int len;
9095 int rc;
9096 struct vfsmount *mnt;
9097
9098 @@ -357,7 +356,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
9099 if (!rc) {
9100 rc = dfs_cache_find(xid, ses, cifs_sb->local_nls,
9101 cifs_remap(cifs_sb), full_path + 1,
9102 - &referral, NULL);
9103 + NULL, NULL);
9104 }
9105
9106 free_xid(xid);
9107 @@ -366,26 +365,16 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
9108 mnt = ERR_PTR(rc);
9109 goto free_root_path;
9110 }
9111 -
9112 - dump_referral(&referral);
9113 -
9114 - len = strlen(referral.node_name);
9115 - if (len < 2) {
9116 - cifs_dbg(VFS, "%s: Net Address path too short: %s\n",
9117 - __func__, referral.node_name);
9118 - mnt = ERR_PTR(-EINVAL);
9119 - goto free_dfs_ref;
9120 - }
9121 /*
9122 - * cifs_mount() will retry every available node server in case
9123 - * of failures.
9124 + * OK - we were able to get and cache a referral for @full_path.
9125 + *
9126 + * Now, pass it down to cifs_mount() and it will retry every available
9127 + * node server in case of failures - no need to do it here.
9128 */
9129 - mnt = cifs_dfs_do_refmount(mntpt, cifs_sb, full_path, &referral);
9130 - cifs_dbg(FYI, "%s: cifs_dfs_do_refmount:%s , mnt:%p\n", __func__,
9131 - referral.node_name, mnt);
9132 + mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);
9133 + cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__,
9134 + full_path + 1, mnt);
9135
9136 -free_dfs_ref:
9137 - free_dfs_info_param(&referral);
9138 free_root_path:
9139 kfree(root_path);
9140 free_full_path:
9141 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
9142 index 02451d085ddd..5d3c867bdc80 100644
9143 --- a/fs/cifs/connect.c
9144 +++ b/fs/cifs/connect.c
9145 @@ -3652,8 +3652,10 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
9146 {
9147 struct cifs_sb_info *old = CIFS_SB(sb);
9148 struct cifs_sb_info *new = mnt_data->cifs_sb;
9149 - bool old_set = old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
9150 - bool new_set = new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
9151 + bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
9152 + old->prepath;
9153 + bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
9154 + new->prepath;
9155
9156 if (old_set && new_set && !strcmp(new->prepath, old->prepath))
9157 return 1;
9158 diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
9159 index 2faa05860a48..cf6cec59696c 100644
9160 --- a/fs/cifs/dfs_cache.c
9161 +++ b/fs/cifs/dfs_cache.c
9162 @@ -1319,7 +1319,7 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
9163 char *mdata = NULL, *devname = NULL;
9164 struct TCP_Server_Info *server;
9165 struct cifs_ses *ses;
9166 - struct smb_vol vol;
9167 + struct smb_vol vol = {NULL};
9168
9169 rpath = get_dfs_root(path);
9170 if (IS_ERR(rpath))
9171 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
9172 index 06d932ed097e..c6fc6582ee7b 100644
9173 --- a/fs/cifs/smb2pdu.c
9174 +++ b/fs/cifs/smb2pdu.c
9175 @@ -3917,6 +3917,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
9176 wdata->cfile->fid.persistent_fid,
9177 tcon->tid, tcon->ses->Suid, wdata->offset,
9178 wdata->bytes, wdata->result);
9179 + if (wdata->result == -ENOSPC)
9180 + printk_once(KERN_WARNING "Out of space writing to %s\n",
9181 + tcon->treeName);
9182 } else
9183 trace_smb3_write_done(0 /* no xid */,
9184 wdata->cfile->fid.persistent_fid,
9185 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
9186 index 8d2bbcc2d813..fd7ce3573a00 100644
9187 --- a/fs/ext4/file.c
9188 +++ b/fs/ext4/file.c
9189 @@ -40,9 +40,10 @@ static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
9190 struct inode *inode = file_inode(iocb->ki_filp);
9191 ssize_t ret;
9192
9193 - if (!inode_trylock_shared(inode)) {
9194 - if (iocb->ki_flags & IOCB_NOWAIT)
9195 + if (iocb->ki_flags & IOCB_NOWAIT) {
9196 + if (!inode_trylock_shared(inode))
9197 return -EAGAIN;
9198 + } else {
9199 inode_lock_shared(inode);
9200 }
9201 /*
9202 @@ -190,9 +191,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
9203 struct inode *inode = file_inode(iocb->ki_filp);
9204 ssize_t ret;
9205
9206 - if (!inode_trylock(inode)) {
9207 - if (iocb->ki_flags & IOCB_NOWAIT)
9208 + if (iocb->ki_flags & IOCB_NOWAIT) {
9209 + if (!inode_trylock(inode))
9210 return -EAGAIN;
9211 + } else {
9212 inode_lock(inode);
9213 }
9214 ret = ext4_write_checks(iocb, from);
9215 diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
9216 index a30b203fa461..a5f55fece9b0 100644
9217 --- a/fs/ext4/readpage.c
9218 +++ b/fs/ext4/readpage.c
9219 @@ -57,6 +57,7 @@ enum bio_post_read_step {
9220 STEP_INITIAL = 0,
9221 STEP_DECRYPT,
9222 STEP_VERITY,
9223 + STEP_MAX,
9224 };
9225
9226 struct bio_post_read_ctx {
9227 @@ -106,10 +107,22 @@ static void verity_work(struct work_struct *work)
9228 {
9229 struct bio_post_read_ctx *ctx =
9230 container_of(work, struct bio_post_read_ctx, work);
9231 + struct bio *bio = ctx->bio;
9232
9233 - fsverity_verify_bio(ctx->bio);
9234 + /*
9235 + * fsverity_verify_bio() may call readpages() again, and although verity
9236 + * will be disabled for that, decryption may still be needed, causing
9237 + * another bio_post_read_ctx to be allocated. So to guarantee that
9238 + * mempool_alloc() never deadlocks we must free the current ctx first.
9239 + * This is safe because verity is the last post-read step.
9240 + */
9241 + BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
9242 + mempool_free(ctx, bio_post_read_ctx_pool);
9243 + bio->bi_private = NULL;
9244
9245 - bio_post_read_processing(ctx);
9246 + fsverity_verify_bio(bio);
9247 +
9248 + __read_end_io(bio);
9249 }
9250
9251 static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
9252 diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
9253 index 2e9c73165800..5d6fd940aab2 100644
9254 --- a/fs/f2fs/data.c
9255 +++ b/fs/f2fs/data.c
9256 @@ -1074,19 +1074,6 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
9257 int err = 0;
9258 bool direct_io = iocb->ki_flags & IOCB_DIRECT;
9259
9260 - /* convert inline data for Direct I/O*/
9261 - if (direct_io) {
9262 - err = f2fs_convert_inline_inode(inode);
9263 - if (err)
9264 - return err;
9265 - }
9266 -
9267 - if (direct_io && allow_outplace_dio(inode, iocb, from))
9268 - return 0;
9269 -
9270 - if (is_inode_flag_set(inode, FI_NO_PREALLOC))
9271 - return 0;
9272 -
9273 map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
9274 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
9275 if (map.m_len > map.m_lblk)
9276 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
9277 index 72f308790a8e..c3a9da79ac99 100644
9278 --- a/fs/f2fs/file.c
9279 +++ b/fs/f2fs/file.c
9280 @@ -50,7 +50,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
9281 struct page *page = vmf->page;
9282 struct inode *inode = file_inode(vmf->vma->vm_file);
9283 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
9284 - struct dnode_of_data dn = { .node_changed = false };
9285 + struct dnode_of_data dn;
9286 int err;
9287
9288 if (unlikely(f2fs_cp_error(sbi))) {
9289 @@ -63,6 +63,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
9290 goto err;
9291 }
9292
9293 + /* should do out of any locked page */
9294 + f2fs_balance_fs(sbi, true);
9295 +
9296 sb_start_pagefault(inode->i_sb);
9297
9298 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
9299 @@ -120,8 +123,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
9300 out_sem:
9301 up_read(&F2FS_I(inode)->i_mmap_sem);
9302
9303 - f2fs_balance_fs(sbi, dn.node_changed);
9304 -
9305 sb_end_pagefault(inode->i_sb);
9306 err:
9307 return block_page_mkwrite_return(err);
9308 @@ -3348,18 +3349,41 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
9309 ret = -EAGAIN;
9310 goto out;
9311 }
9312 - } else {
9313 - preallocated = true;
9314 - target_size = iocb->ki_pos + iov_iter_count(from);
9315 + goto write;
9316 + }
9317
9318 - err = f2fs_preallocate_blocks(iocb, from);
9319 - if (err) {
9320 - clear_inode_flag(inode, FI_NO_PREALLOC);
9321 - inode_unlock(inode);
9322 - ret = err;
9323 - goto out;
9324 - }
9325 + if (is_inode_flag_set(inode, FI_NO_PREALLOC))
9326 + goto write;
9327 +
9328 + if (iocb->ki_flags & IOCB_DIRECT) {
9329 + /*
9330 + * Convert inline data for Direct I/O before entering
9331 + * f2fs_direct_IO().
9332 + */
9333 + err = f2fs_convert_inline_inode(inode);
9334 + if (err)
9335 + goto out_err;
9336 + /*
9337 + * If force_buffere_io() is true, we have to allocate
9338 + * blocks all the time, since f2fs_direct_IO will fall
9339 + * back to buffered IO.
9340 + */
9341 + if (!f2fs_force_buffered_io(inode, iocb, from) &&
9342 + allow_outplace_dio(inode, iocb, from))
9343 + goto write;
9344 + }
9345 + preallocated = true;
9346 + target_size = iocb->ki_pos + iov_iter_count(from);
9347 +
9348 + err = f2fs_preallocate_blocks(iocb, from);
9349 + if (err) {
9350 +out_err:
9351 + clear_inode_flag(inode, FI_NO_PREALLOC);
9352 + inode_unlock(inode);
9353 + ret = err;
9354 + goto out;
9355 }
9356 +write:
9357 ret = __generic_file_write_iter(iocb, from);
9358 clear_inode_flag(inode, FI_NO_PREALLOC);
9359
9360 diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
9361 index a1c507b0b4ac..5d9584281935 100644
9362 --- a/fs/f2fs/namei.c
9363 +++ b/fs/f2fs/namei.c
9364 @@ -797,6 +797,7 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
9365
9366 if (whiteout) {
9367 f2fs_i_links_write(inode, false);
9368 + inode->i_state |= I_LINKABLE;
9369 *whiteout = inode;
9370 } else {
9371 d_tmpfile(dentry, inode);
9372 @@ -867,6 +868,12 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
9373 F2FS_I(old_dentry->d_inode)->i_projid)))
9374 return -EXDEV;
9375
9376 + if (flags & RENAME_WHITEOUT) {
9377 + err = f2fs_create_whiteout(old_dir, &whiteout);
9378 + if (err)
9379 + return err;
9380 + }
9381 +
9382 err = dquot_initialize(old_dir);
9383 if (err)
9384 goto out;
9385 @@ -898,17 +905,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
9386 }
9387 }
9388
9389 - if (flags & RENAME_WHITEOUT) {
9390 - err = f2fs_create_whiteout(old_dir, &whiteout);
9391 - if (err)
9392 - goto out_dir;
9393 - }
9394 -
9395 if (new_inode) {
9396
9397 err = -ENOTEMPTY;
9398 if (old_dir_entry && !f2fs_empty_dir(new_inode))
9399 - goto out_whiteout;
9400 + goto out_dir;
9401
9402 err = -ENOENT;
9403 new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
9404 @@ -916,7 +917,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
9405 if (!new_entry) {
9406 if (IS_ERR(new_page))
9407 err = PTR_ERR(new_page);
9408 - goto out_whiteout;
9409 + goto out_dir;
9410 }
9411
9412 f2fs_balance_fs(sbi, true);
9413 @@ -948,7 +949,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
9414 err = f2fs_add_link(new_dentry, old_inode);
9415 if (err) {
9416 f2fs_unlock_op(sbi);
9417 - goto out_whiteout;
9418 + goto out_dir;
9419 }
9420
9421 if (old_dir_entry)
9422 @@ -972,7 +973,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
9423 if (IS_ERR(old_page))
9424 err = PTR_ERR(old_page);
9425 f2fs_unlock_op(sbi);
9426 - goto out_whiteout;
9427 + goto out_dir;
9428 }
9429 }
9430 }
9431 @@ -991,7 +992,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
9432 f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
9433
9434 if (whiteout) {
9435 - whiteout->i_state |= I_LINKABLE;
9436 set_inode_flag(whiteout, FI_INC_LINK);
9437 err = f2fs_add_link(old_dentry, whiteout);
9438 if (err)
9439 @@ -1027,15 +1027,14 @@ put_out_dir:
9440 f2fs_unlock_op(sbi);
9441 if (new_page)
9442 f2fs_put_page(new_page, 0);
9443 -out_whiteout:
9444 - if (whiteout)
9445 - iput(whiteout);
9446 out_dir:
9447 if (old_dir_entry)
9448 f2fs_put_page(old_dir_page, 0);
9449 out_old:
9450 f2fs_put_page(old_page, 0);
9451 out:
9452 + if (whiteout)
9453 + iput(whiteout);
9454 return err;
9455 }
9456
9457 diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
9458 index b558b64a4c9c..170934430d7d 100644
9459 --- a/fs/f2fs/sysfs.c
9460 +++ b/fs/f2fs/sysfs.c
9461 @@ -729,10 +729,12 @@ int __init f2fs_init_sysfs(void)
9462
9463 ret = kobject_init_and_add(&f2fs_feat, &f2fs_feat_ktype,
9464 NULL, "features");
9465 - if (ret)
9466 + if (ret) {
9467 + kobject_put(&f2fs_feat);
9468 kset_unregister(&f2fs_kset);
9469 - else
9470 + } else {
9471 f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
9472 + }
9473 return ret;
9474 }
9475
9476 @@ -753,8 +755,11 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
9477 init_completion(&sbi->s_kobj_unregister);
9478 err = kobject_init_and_add(&sbi->s_kobj, &f2fs_sb_ktype, NULL,
9479 "%s", sb->s_id);
9480 - if (err)
9481 + if (err) {
9482 + kobject_put(&sbi->s_kobj);
9483 + wait_for_completion(&sbi->s_kobj_unregister);
9484 return err;
9485 + }
9486
9487 if (f2fs_proc_root)
9488 sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
9489 @@ -782,4 +787,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
9490 remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
9491 }
9492 kobject_del(&sbi->s_kobj);
9493 + kobject_put(&sbi->s_kobj);
9494 }
9495 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
9496 index 695369f46f92..3dd37a998ea9 100644
9497 --- a/fs/fuse/file.c
9498 +++ b/fs/fuse/file.c
9499 @@ -803,6 +803,10 @@ static int fuse_do_readpage(struct file *file, struct page *page)
9500
9501 attr_ver = fuse_get_attr_version(fc);
9502
9503 + /* Don't overflow end offset */
9504 + if (pos + (desc.length - 1) == LLONG_MAX)
9505 + desc.length--;
9506 +
9507 fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
9508 res = fuse_simple_request(fc, &ia.ap.args);
9509 if (res < 0)
9510 @@ -888,6 +892,14 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
9511 ap->args.out_pages = true;
9512 ap->args.page_zeroing = true;
9513 ap->args.page_replace = true;
9514 +
9515 + /* Don't overflow end offset */
9516 + if (pos + (count - 1) == LLONG_MAX) {
9517 + count--;
9518 + ap->descs[ap->num_pages - 1].length--;
9519 + }
9520 + WARN_ON((loff_t) (pos + count) < 0);
9521 +
9522 fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
9523 ia->read.attr_ver = fuse_get_attr_version(fc);
9524 if (fc->async_read) {
9525 diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
9526 index a1909066bde6..62cf497f18eb 100644
9527 --- a/fs/jbd2/checkpoint.c
9528 +++ b/fs/jbd2/checkpoint.c
9529 @@ -164,7 +164,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
9530 "journal space in %s\n", __func__,
9531 journal->j_devname);
9532 WARN_ON(1);
9533 - jbd2_journal_abort(journal, 0);
9534 + jbd2_journal_abort(journal, -EIO);
9535 }
9536 write_lock(&journal->j_state_lock);
9537 } else {
9538 diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
9539 index 2a42904bcd62..754ec3c47d6f 100644
9540 --- a/fs/jbd2/commit.c
9541 +++ b/fs/jbd2/commit.c
9542 @@ -784,7 +784,7 @@ start_journal_io:
9543 err = journal_submit_commit_record(journal, commit_transaction,
9544 &cbh, crc32_sum);
9545 if (err)
9546 - __jbd2_journal_abort_hard(journal);
9547 + jbd2_journal_abort(journal, err);
9548 }
9549
9550 blk_finish_plug(&plug);
9551 @@ -877,7 +877,7 @@ start_journal_io:
9552 err = journal_submit_commit_record(journal, commit_transaction,
9553 &cbh, crc32_sum);
9554 if (err)
9555 - __jbd2_journal_abort_hard(journal);
9556 + jbd2_journal_abort(journal, err);
9557 }
9558 if (cbh)
9559 err = journal_wait_on_commit_record(journal, cbh);
9560 diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
9561 index ef485f892d1b..c1ce2805c563 100644
9562 --- a/fs/jbd2/journal.c
9563 +++ b/fs/jbd2/journal.c
9564 @@ -1682,6 +1682,11 @@ int jbd2_journal_load(journal_t *journal)
9565 journal->j_devname);
9566 return -EFSCORRUPTED;
9567 }
9568 + /*
9569 + * clear JBD2_ABORT flag initialized in journal_init_common
9570 + * here to update log tail information with the newest seq.
9571 + */
9572 + journal->j_flags &= ~JBD2_ABORT;
9573
9574 /* OK, we've finished with the dynamic journal bits:
9575 * reinitialise the dynamic contents of the superblock in memory
9576 @@ -1689,7 +1694,6 @@ int jbd2_journal_load(journal_t *journal)
9577 if (journal_reset(journal))
9578 goto recovery_error;
9579
9580 - journal->j_flags &= ~JBD2_ABORT;
9581 journal->j_flags |= JBD2_LOADED;
9582 return 0;
9583
9584 @@ -2110,8 +2114,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
9585
9586 if (journal->j_flags & JBD2_ABORT) {
9587 write_unlock(&journal->j_state_lock);
9588 - if (!old_errno && old_errno != -ESHUTDOWN &&
9589 - errno == -ESHUTDOWN)
9590 + if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN)
9591 jbd2_journal_update_sb_errno(journal);
9592 return;
9593 }
9594 @@ -2119,12 +2122,10 @@ static void __journal_abort_soft (journal_t *journal, int errno)
9595
9596 __jbd2_journal_abort_hard(journal);
9597
9598 - if (errno) {
9599 - jbd2_journal_update_sb_errno(journal);
9600 - write_lock(&journal->j_state_lock);
9601 - journal->j_flags |= JBD2_REC_ERR;
9602 - write_unlock(&journal->j_state_lock);
9603 - }
9604 + jbd2_journal_update_sb_errno(journal);
9605 + write_lock(&journal->j_state_lock);
9606 + journal->j_flags |= JBD2_REC_ERR;
9607 + write_unlock(&journal->j_state_lock);
9608 }
9609
9610 /**
9611 @@ -2166,11 +2167,6 @@ static void __journal_abort_soft (journal_t *journal, int errno)
9612 * failure to disk. ext3_error, for example, now uses this
9613 * functionality.
9614 *
9615 - * Errors which originate from within the journaling layer will NOT
9616 - * supply an errno; a null errno implies that absolutely no further
9617 - * writes are done to the journal (unless there are any already in
9618 - * progress).
9619 - *
9620 */
9621
9622 void jbd2_journal_abort(journal_t *journal, int errno)
9623 diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
9624 index 5196bfa7894d..9b61c80a93e9 100644
9625 --- a/fs/nfs/nfs42proc.c
9626 +++ b/fs/nfs/nfs42proc.c
9627 @@ -283,14 +283,14 @@ static ssize_t _nfs42_proc_copy(struct file *src,
9628 status = handle_async_copy(res, server, src, dst,
9629 &args->src_stateid);
9630 if (status)
9631 - return status;
9632 + goto out;
9633 }
9634
9635 if ((!res->synchronous || !args->sync) &&
9636 res->write_res.verifier.committed != NFS_FILE_SYNC) {
9637 status = process_copy_commit(dst, pos_dst, res);
9638 if (status)
9639 - return status;
9640 + goto out;
9641 }
9642
9643 truncate_pagecache_range(dst_inode, pos_dst,
9644 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
9645 index fc38b9fe4549..005d1802ab40 100644
9646 --- a/fs/nfsd/vfs.c
9647 +++ b/fs/nfsd/vfs.c
9648 @@ -280,19 +280,25 @@ out:
9649 * Commit metadata changes to stable storage.
9650 */
9651 static int
9652 -commit_metadata(struct svc_fh *fhp)
9653 +commit_inode_metadata(struct inode *inode)
9654 {
9655 - struct inode *inode = d_inode(fhp->fh_dentry);
9656 const struct export_operations *export_ops = inode->i_sb->s_export_op;
9657
9658 - if (!EX_ISSYNC(fhp->fh_export))
9659 - return 0;
9660 -
9661 if (export_ops->commit_metadata)
9662 return export_ops->commit_metadata(inode);
9663 return sync_inode_metadata(inode, 1);
9664 }
9665
9666 +static int
9667 +commit_metadata(struct svc_fh *fhp)
9668 +{
9669 + struct inode *inode = d_inode(fhp->fh_dentry);
9670 +
9671 + if (!EX_ISSYNC(fhp->fh_export))
9672 + return 0;
9673 + return commit_inode_metadata(inode);
9674 +}
9675 +
9676 /*
9677 * Go over the attributes and take care of the small differences between
9678 * NFS semantics and what Linux expects.
9679 @@ -537,6 +543,9 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
9680 if (sync) {
9681 loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
9682 int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
9683 +
9684 + if (!status)
9685 + status = commit_inode_metadata(file_inode(src));
9686 if (status < 0)
9687 return nfserrno(status);
9688 }
9689 diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
9690 index 38b224372776..5e700b45d32d 100644
9691 --- a/fs/ocfs2/dlm/Makefile
9692 +++ b/fs/ocfs2/dlm/Makefile
9693 @@ -1,6 +1,4 @@
9694 # SPDX-License-Identifier: GPL-2.0-only
9695 -ccflags-y := -I $(srctree)/$(src)/..
9696 -
9697 obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
9698
9699 ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
9700 diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
9701 index 4de89af96abf..6abaded3ff6b 100644
9702 --- a/fs/ocfs2/dlm/dlmast.c
9703 +++ b/fs/ocfs2/dlm/dlmast.c
9704 @@ -23,15 +23,15 @@
9705 #include <linux/spinlock.h>
9706
9707
9708 -#include "cluster/heartbeat.h"
9709 -#include "cluster/nodemanager.h"
9710 -#include "cluster/tcp.h"
9711 +#include "../cluster/heartbeat.h"
9712 +#include "../cluster/nodemanager.h"
9713 +#include "../cluster/tcp.h"
9714
9715 #include "dlmapi.h"
9716 #include "dlmcommon.h"
9717
9718 #define MLOG_MASK_PREFIX ML_DLM
9719 -#include "cluster/masklog.h"
9720 +#include "../cluster/masklog.h"
9721
9722 static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
9723 struct dlm_lock *lock);
9724 diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
9725 index 965f45dbe17b..6051edc33aef 100644
9726 --- a/fs/ocfs2/dlm/dlmconvert.c
9727 +++ b/fs/ocfs2/dlm/dlmconvert.c
9728 @@ -23,9 +23,9 @@
9729 #include <linux/spinlock.h>
9730
9731
9732 -#include "cluster/heartbeat.h"
9733 -#include "cluster/nodemanager.h"
9734 -#include "cluster/tcp.h"
9735 +#include "../cluster/heartbeat.h"
9736 +#include "../cluster/nodemanager.h"
9737 +#include "../cluster/tcp.h"
9738
9739 #include "dlmapi.h"
9740 #include "dlmcommon.h"
9741 @@ -33,7 +33,7 @@
9742 #include "dlmconvert.h"
9743
9744 #define MLOG_MASK_PREFIX ML_DLM
9745 -#include "cluster/masklog.h"
9746 +#include "../cluster/masklog.h"
9747
9748 /* NOTE: __dlmconvert_master is the only function in here that
9749 * needs a spinlock held on entry (res->spinlock) and it is the
9750 diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
9751 index 4d0b452012b2..c5c6efba7b5e 100644
9752 --- a/fs/ocfs2/dlm/dlmdebug.c
9753 +++ b/fs/ocfs2/dlm/dlmdebug.c
9754 @@ -17,9 +17,9 @@
9755 #include <linux/debugfs.h>
9756 #include <linux/export.h>
9757
9758 -#include "cluster/heartbeat.h"
9759 -#include "cluster/nodemanager.h"
9760 -#include "cluster/tcp.h"
9761 +#include "../cluster/heartbeat.h"
9762 +#include "../cluster/nodemanager.h"
9763 +#include "../cluster/tcp.h"
9764
9765 #include "dlmapi.h"
9766 #include "dlmcommon.h"
9767 @@ -27,7 +27,7 @@
9768 #include "dlmdebug.h"
9769
9770 #define MLOG_MASK_PREFIX ML_DLM
9771 -#include "cluster/masklog.h"
9772 +#include "../cluster/masklog.h"
9773
9774 static int stringify_lockname(const char *lockname, int locklen, char *buf,
9775 int len);
9776 diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
9777 index ee6f459f9770..357cfc702ce3 100644
9778 --- a/fs/ocfs2/dlm/dlmdomain.c
9779 +++ b/fs/ocfs2/dlm/dlmdomain.c
9780 @@ -20,9 +20,9 @@
9781 #include <linux/debugfs.h>
9782 #include <linux/sched/signal.h>
9783
9784 -#include "cluster/heartbeat.h"
9785 -#include "cluster/nodemanager.h"
9786 -#include "cluster/tcp.h"
9787 +#include "../cluster/heartbeat.h"
9788 +#include "../cluster/nodemanager.h"
9789 +#include "../cluster/tcp.h"
9790
9791 #include "dlmapi.h"
9792 #include "dlmcommon.h"
9793 @@ -30,7 +30,7 @@
9794 #include "dlmdebug.h"
9795
9796 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
9797 -#include "cluster/masklog.h"
9798 +#include "../cluster/masklog.h"
9799
9800 /*
9801 * ocfs2 node maps are array of long int, which limits to send them freely
9802 diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
9803 index baff087f3863..83f0760e4fba 100644
9804 --- a/fs/ocfs2/dlm/dlmlock.c
9805 +++ b/fs/ocfs2/dlm/dlmlock.c
9806 @@ -25,9 +25,9 @@
9807 #include <linux/delay.h>
9808
9809
9810 -#include "cluster/heartbeat.h"
9811 -#include "cluster/nodemanager.h"
9812 -#include "cluster/tcp.h"
9813 +#include "../cluster/heartbeat.h"
9814 +#include "../cluster/nodemanager.h"
9815 +#include "../cluster/tcp.h"
9816
9817 #include "dlmapi.h"
9818 #include "dlmcommon.h"
9819 @@ -35,7 +35,7 @@
9820 #include "dlmconvert.h"
9821
9822 #define MLOG_MASK_PREFIX ML_DLM
9823 -#include "cluster/masklog.h"
9824 +#include "../cluster/masklog.h"
9825
9826 static struct kmem_cache *dlm_lock_cache;
9827
9828 diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
9829 index 74b768ca1cd8..c9d7037b6793 100644
9830 --- a/fs/ocfs2/dlm/dlmmaster.c
9831 +++ b/fs/ocfs2/dlm/dlmmaster.c
9832 @@ -25,9 +25,9 @@
9833 #include <linux/delay.h>
9834
9835
9836 -#include "cluster/heartbeat.h"
9837 -#include "cluster/nodemanager.h"
9838 -#include "cluster/tcp.h"
9839 +#include "../cluster/heartbeat.h"
9840 +#include "../cluster/nodemanager.h"
9841 +#include "../cluster/tcp.h"
9842
9843 #include "dlmapi.h"
9844 #include "dlmcommon.h"
9845 @@ -35,7 +35,7 @@
9846 #include "dlmdebug.h"
9847
9848 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
9849 -#include "cluster/masklog.h"
9850 +#include "../cluster/masklog.h"
9851
9852 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
9853 struct dlm_master_list_entry *mle,
9854 diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
9855 index 064ce5bbc3f6..bcaaca5112d6 100644
9856 --- a/fs/ocfs2/dlm/dlmrecovery.c
9857 +++ b/fs/ocfs2/dlm/dlmrecovery.c
9858 @@ -26,16 +26,16 @@
9859 #include <linux/delay.h>
9860
9861
9862 -#include "cluster/heartbeat.h"
9863 -#include "cluster/nodemanager.h"
9864 -#include "cluster/tcp.h"
9865 +#include "../cluster/heartbeat.h"
9866 +#include "../cluster/nodemanager.h"
9867 +#include "../cluster/tcp.h"
9868
9869 #include "dlmapi.h"
9870 #include "dlmcommon.h"
9871 #include "dlmdomain.h"
9872
9873 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
9874 -#include "cluster/masklog.h"
9875 +#include "../cluster/masklog.h"
9876
9877 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
9878
9879 diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
9880 index 61c51c268460..fd40c17cd022 100644
9881 --- a/fs/ocfs2/dlm/dlmthread.c
9882 +++ b/fs/ocfs2/dlm/dlmthread.c
9883 @@ -25,16 +25,16 @@
9884 #include <linux/delay.h>
9885
9886
9887 -#include "cluster/heartbeat.h"
9888 -#include "cluster/nodemanager.h"
9889 -#include "cluster/tcp.h"
9890 +#include "../cluster/heartbeat.h"
9891 +#include "../cluster/nodemanager.h"
9892 +#include "../cluster/tcp.h"
9893
9894 #include "dlmapi.h"
9895 #include "dlmcommon.h"
9896 #include "dlmdomain.h"
9897
9898 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
9899 -#include "cluster/masklog.h"
9900 +#include "../cluster/masklog.h"
9901
9902 static int dlm_thread(void *data);
9903 static void dlm_flush_asts(struct dlm_ctxt *dlm);
9904 diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
9905 index 3883633e82eb..dcb17ca8ae74 100644
9906 --- a/fs/ocfs2/dlm/dlmunlock.c
9907 +++ b/fs/ocfs2/dlm/dlmunlock.c
9908 @@ -23,15 +23,15 @@
9909 #include <linux/spinlock.h>
9910 #include <linux/delay.h>
9911
9912 -#include "cluster/heartbeat.h"
9913 -#include "cluster/nodemanager.h"
9914 -#include "cluster/tcp.h"
9915 +#include "../cluster/heartbeat.h"
9916 +#include "../cluster/nodemanager.h"
9917 +#include "../cluster/tcp.h"
9918
9919 #include "dlmapi.h"
9920 #include "dlmcommon.h"
9921
9922 #define MLOG_MASK_PREFIX ML_DLM
9923 -#include "cluster/masklog.h"
9924 +#include "../cluster/masklog.h"
9925
9926 #define DLM_UNLOCK_FREE_LOCK 0x00000001
9927 #define DLM_UNLOCK_CALL_AST 0x00000002
9928 diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
9929 index a9874e441bd4..c7895f65be0e 100644
9930 --- a/fs/ocfs2/dlmfs/Makefile
9931 +++ b/fs/ocfs2/dlmfs/Makefile
9932 @@ -1,6 +1,4 @@
9933 # SPDX-License-Identifier: GPL-2.0-only
9934 -ccflags-y := -I $(srctree)/$(src)/..
9935 -
9936 obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
9937
9938 ocfs2_dlmfs-objs := userdlm.o dlmfs.o
9939 diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
9940 index 4f1668c81e1f..8e4f1ace467c 100644
9941 --- a/fs/ocfs2/dlmfs/dlmfs.c
9942 +++ b/fs/ocfs2/dlmfs/dlmfs.c
9943 @@ -33,11 +33,11 @@
9944
9945 #include <linux/uaccess.h>
9946
9947 -#include "stackglue.h"
9948 +#include "../stackglue.h"
9949 #include "userdlm.h"
9950
9951 #define MLOG_MASK_PREFIX ML_DLMFS
9952 -#include "cluster/masklog.h"
9953 +#include "../cluster/masklog.h"
9954
9955
9956 static const struct super_operations dlmfs_ops;
9957 diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
9958 index 525b14ddfba5..3df5be25bfb1 100644
9959 --- a/fs/ocfs2/dlmfs/userdlm.c
9960 +++ b/fs/ocfs2/dlmfs/userdlm.c
9961 @@ -21,12 +21,12 @@
9962 #include <linux/types.h>
9963 #include <linux/crc32.h>
9964
9965 -#include "ocfs2_lockingver.h"
9966 -#include "stackglue.h"
9967 +#include "../ocfs2_lockingver.h"
9968 +#include "../stackglue.h"
9969 #include "userdlm.h"
9970
9971 #define MLOG_MASK_PREFIX ML_DLMFS
9972 -#include "cluster/masklog.h"
9973 +#include "../cluster/masklog.h"
9974
9975
9976 static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
9977 diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
9978 index 3103ba7f97a2..bfe611ed1b1d 100644
9979 --- a/fs/ocfs2/journal.h
9980 +++ b/fs/ocfs2/journal.h
9981 @@ -597,9 +597,11 @@ static inline void ocfs2_update_inode_fsync_trans(handle_t *handle,
9982 {
9983 struct ocfs2_inode_info *oi = OCFS2_I(inode);
9984
9985 - oi->i_sync_tid = handle->h_transaction->t_tid;
9986 - if (datasync)
9987 - oi->i_datasync_tid = handle->h_transaction->t_tid;
9988 + if (!is_handle_aborted(handle)) {
9989 + oi->i_sync_tid = handle->h_transaction->t_tid;
9990 + if (datasync)
9991 + oi->i_datasync_tid = handle->h_transaction->t_tid;
9992 + }
9993 }
9994
9995 #endif /* OCFS2_JOURNAL_H */
9996 diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
9997 index 25543a966c48..29eaa4544372 100644
9998 --- a/fs/orangefs/orangefs-debugfs.c
9999 +++ b/fs/orangefs/orangefs-debugfs.c
10000 @@ -273,6 +273,7 @@ static void *help_start(struct seq_file *m, loff_t *pos)
10001
10002 static void *help_next(struct seq_file *m, void *v, loff_t *pos)
10003 {
10004 + (*pos)++;
10005 gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_next: start\n");
10006
10007 return NULL;
10008 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
10009 index da9ebe33882b..bb4973aefbb1 100644
10010 --- a/fs/reiserfs/stree.c
10011 +++ b/fs/reiserfs/stree.c
10012 @@ -2246,7 +2246,8 @@ error_out:
10013 /* also releases the path */
10014 unfix_nodes(&s_ins_balance);
10015 #ifdef REISERQUOTA_DEBUG
10016 - reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
10017 + if (inode)
10018 + reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
10019 "reiserquota insert_item(): freeing %u id=%u type=%c",
10020 quota_bytes, inode->i_uid, head2type(ih));
10021 #endif
10022 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
10023 index d127af64283e..a6bce5b1fb1d 100644
10024 --- a/fs/reiserfs/super.c
10025 +++ b/fs/reiserfs/super.c
10026 @@ -1948,7 +1948,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
10027 if (!sbi->s_jdev) {
10028 SWARN(silent, s, "", "Cannot allocate memory for "
10029 "journal device name");
10030 - goto error;
10031 + goto error_unlocked;
10032 }
10033 }
10034 #ifdef CONFIG_QUOTA
10035 diff --git a/fs/udf/super.c b/fs/udf/super.c
10036 index 8c28e93e9b73..4baa1ca91e9b 100644
10037 --- a/fs/udf/super.c
10038 +++ b/fs/udf/super.c
10039 @@ -1035,7 +1035,6 @@ static int check_partition_desc(struct super_block *sb,
10040 switch (le32_to_cpu(p->accessType)) {
10041 case PD_ACCESS_TYPE_READ_ONLY:
10042 case PD_ACCESS_TYPE_WRITE_ONCE:
10043 - case PD_ACCESS_TYPE_REWRITABLE:
10044 case PD_ACCESS_TYPE_NONE:
10045 goto force_ro;
10046 }
10047 @@ -2492,17 +2491,29 @@ static unsigned int udf_count_free_table(struct super_block *sb,
10048 static unsigned int udf_count_free(struct super_block *sb)
10049 {
10050 unsigned int accum = 0;
10051 - struct udf_sb_info *sbi;
10052 + struct udf_sb_info *sbi = UDF_SB(sb);
10053 struct udf_part_map *map;
10054 + unsigned int part = sbi->s_partition;
10055 + int ptype = sbi->s_partmaps[part].s_partition_type;
10056 +
10057 + if (ptype == UDF_METADATA_MAP25) {
10058 + part = sbi->s_partmaps[part].s_type_specific.s_metadata.
10059 + s_phys_partition_ref;
10060 + } else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
10061 + /*
10062 + * Filesystems with VAT are append-only and we cannot write to
10063 + * them. Let's just report 0 here.
10064 + */
10065 + return 0;
10066 + }
10067
10068 - sbi = UDF_SB(sb);
10069 if (sbi->s_lvid_bh) {
10070 struct logicalVolIntegrityDesc *lvid =
10071 (struct logicalVolIntegrityDesc *)
10072 sbi->s_lvid_bh->b_data;
10073 - if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
10074 + if (le32_to_cpu(lvid->numOfPartitions) > part) {
10075 accum = le32_to_cpu(
10076 - lvid->freeSpaceTable[sbi->s_partition]);
10077 + lvid->freeSpaceTable[part]);
10078 if (accum == 0xFFFFFFFF)
10079 accum = 0;
10080 }
10081 @@ -2511,7 +2522,7 @@ static unsigned int udf_count_free(struct super_block *sb)
10082 if (accum)
10083 return accum;
10084
10085 - map = &sbi->s_partmaps[sbi->s_partition];
10086 + map = &sbi->s_partmaps[part];
10087 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
10088 accum += udf_count_free_bitmap(sb,
10089 map->s_uspace.s_bitmap);
10090 diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
10091 index 068793a619ca..2d55cee638fc 100644
10092 --- a/include/linux/cpuhotplug.h
10093 +++ b/include/linux/cpuhotplug.h
10094 @@ -59,6 +59,7 @@ enum cpuhp_state {
10095 CPUHP_IOMMU_INTEL_DEAD,
10096 CPUHP_LUSTRE_CFS_DEAD,
10097 CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
10098 + CPUHP_PADATA_DEAD,
10099 CPUHP_WORKQUEUE_PREP,
10100 CPUHP_POWER_NUMA_PREPARE,
10101 CPUHP_HRTIMERS_PREPARE,
10102 diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
10103 index dad4a68fa009..8013562751a5 100644
10104 --- a/include/linux/dmaengine.h
10105 +++ b/include/linux/dmaengine.h
10106 @@ -674,6 +674,7 @@ struct dma_filter {
10107 * @fill_align: alignment shift for memset operations
10108 * @dev_id: unique device ID
10109 * @dev: struct device reference for dma mapping api
10110 + * @owner: owner module (automatically set based on the provided dev)
10111 * @src_addr_widths: bit mask of src addr widths the device supports
10112 * Width is specified in bytes, e.g. for a device supporting
10113 * a width of 4 the mask should have BIT(4) set.
10114 @@ -737,6 +738,7 @@ struct dma_device {
10115
10116 int dev_id;
10117 struct device *dev;
10118 + struct module *owner;
10119
10120 u32 src_addr_widths;
10121 u32 dst_addr_widths;
10122 diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
10123 index 3ef96743db8d..1ecd35664e0d 100644
10124 --- a/include/linux/list_nulls.h
10125 +++ b/include/linux/list_nulls.h
10126 @@ -72,10 +72,10 @@ static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
10127 struct hlist_nulls_node *first = h->first;
10128
10129 n->next = first;
10130 - n->pprev = &h->first;
10131 + WRITE_ONCE(n->pprev, &h->first);
10132 h->first = n;
10133 if (!is_a_nulls(first))
10134 - first->pprev = &n->next;
10135 + WRITE_ONCE(first->pprev, &n->next);
10136 }
10137
10138 static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
10139 @@ -85,13 +85,13 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
10140
10141 WRITE_ONCE(*pprev, next);
10142 if (!is_a_nulls(next))
10143 - next->pprev = pprev;
10144 + WRITE_ONCE(next->pprev, pprev);
10145 }
10146
10147 static inline void hlist_nulls_del(struct hlist_nulls_node *n)
10148 {
10149 __hlist_nulls_del(n);
10150 - n->pprev = LIST_POISON2;
10151 + WRITE_ONCE(n->pprev, LIST_POISON2);
10152 }
10153
10154 /**
10155 diff --git a/include/linux/pci.h b/include/linux/pci.h
10156 index be529d311122..f39f22f9ee47 100644
10157 --- a/include/linux/pci.h
10158 +++ b/include/linux/pci.h
10159 @@ -2324,7 +2324,7 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
10160 }
10161 #endif
10162
10163 -void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
10164 +void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
10165 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
10166 int pci_for_each_dma_alias(struct pci_dev *pdev,
10167 int (*fn)(struct pci_dev *pdev,
10168 diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
10169 index 8cfe570fdece..2cbde6542849 100644
10170 --- a/include/linux/platform_data/ti-sysc.h
10171 +++ b/include/linux/platform_data/ti-sysc.h
10172 @@ -49,6 +49,7 @@ struct sysc_regbits {
10173 s8 emufree_shift;
10174 };
10175
10176 +#define SYSC_QUIRK_CLKDM_NOAUTO BIT(21)
10177 #define SYSC_QUIRK_FORCE_MSTANDBY BIT(20)
10178 #define SYSC_MODULE_QUIRK_AESS BIT(19)
10179 #define SYSC_MODULE_QUIRK_SGX BIT(18)
10180 diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
10181 index 0832c9b66852..e0ddb47f4402 100644
10182 --- a/include/linux/raid/pq.h
10183 +++ b/include/linux/raid/pq.h
10184 @@ -27,7 +27,6 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
10185
10186 #include <errno.h>
10187 #include <inttypes.h>
10188 -#include <limits.h>
10189 #include <stddef.h>
10190 #include <sys/mman.h>
10191 #include <sys/time.h>
10192 @@ -59,7 +58,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
10193 #define enable_kernel_altivec()
10194 #define disable_kernel_altivec()
10195
10196 +#undef EXPORT_SYMBOL
10197 #define EXPORT_SYMBOL(sym)
10198 +#undef EXPORT_SYMBOL_GPL
10199 #define EXPORT_SYMBOL_GPL(sym)
10200 #define MODULE_LICENSE(licence)
10201 #define MODULE_DESCRIPTION(desc)
10202 diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
10203 index 61974c4c566b..90f2e2232c6d 100644
10204 --- a/include/linux/rculist_nulls.h
10205 +++ b/include/linux/rculist_nulls.h
10206 @@ -34,7 +34,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
10207 {
10208 if (!hlist_nulls_unhashed(n)) {
10209 __hlist_nulls_del(n);
10210 - n->pprev = NULL;
10211 + WRITE_ONCE(n->pprev, NULL);
10212 }
10213 }
10214
10215 @@ -66,7 +66,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
10216 static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
10217 {
10218 __hlist_nulls_del(n);
10219 - n->pprev = LIST_POISON2;
10220 + WRITE_ONCE(n->pprev, LIST_POISON2);
10221 }
10222
10223 /**
10224 @@ -94,10 +94,10 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
10225 struct hlist_nulls_node *first = h->first;
10226
10227 n->next = first;
10228 - n->pprev = &h->first;
10229 + WRITE_ONCE(n->pprev, &h->first);
10230 rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
10231 if (!is_a_nulls(first))
10232 - first->pprev = &n->next;
10233 + WRITE_ONCE(first->pprev, &n->next);
10234 }
10235
10236 /**
10237 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
10238 index e0b8f2602670..a0e93f0ef62a 100644
10239 --- a/include/media/v4l2-device.h
10240 +++ b/include/media/v4l2-device.h
10241 @@ -371,7 +371,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
10242 struct v4l2_subdev *__sd; \
10243 \
10244 __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \
10245 - !(grpid) || __sd->grp_id == (grpid), o, f , \
10246 + (grpid) == 0 || __sd->grp_id == (grpid), o, f , \
10247 ##args); \
10248 } while (0)
10249
10250 @@ -403,7 +403,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
10251 ({ \
10252 struct v4l2_subdev *__sd; \
10253 __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
10254 - !(grpid) || __sd->grp_id == (grpid), o, f , \
10255 + (grpid) == 0 || __sd->grp_id == (grpid), o, f , \
10256 ##args); \
10257 })
10258
10259 @@ -431,8 +431,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
10260 struct v4l2_subdev *__sd; \
10261 \
10262 __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \
10263 - !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
10264 - ##args); \
10265 + (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \
10266 + f , ##args); \
10267 } while (0)
10268
10269 /**
10270 @@ -462,8 +462,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
10271 ({ \
10272 struct v4l2_subdev *__sd; \
10273 __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
10274 - !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
10275 - ##args); \
10276 + (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \
10277 + f , ##args); \
10278 })
10279
10280
10281 diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
10282 index 75c7b5ed53c5..30d50528d710 100644
10283 --- a/include/rdma/ib_verbs.h
10284 +++ b/include/rdma/ib_verbs.h
10285 @@ -2146,7 +2146,6 @@ struct ib_port_cache {
10286
10287 struct ib_cache {
10288 rwlock_t lock;
10289 - struct ib_event_handler event_handler;
10290 };
10291
10292 struct ib_port_immutable {
10293 @@ -2590,7 +2589,11 @@ struct ib_device {
10294 struct rcu_head rcu_head;
10295
10296 struct list_head event_handler_list;
10297 - spinlock_t event_handler_lock;
10298 + /* Protects event_handler_list */
10299 + struct rw_semaphore event_handler_rwsem;
10300 +
10301 + /* Protects QP's event_handler calls and open_qp list */
10302 + spinlock_t event_handler_lock;
10303
10304 struct rw_semaphore client_data_rwsem;
10305 struct xarray client_data;
10306 @@ -2897,7 +2900,7 @@ bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
10307
10308 void ib_register_event_handler(struct ib_event_handler *event_handler);
10309 void ib_unregister_event_handler(struct ib_event_handler *event_handler);
10310 -void ib_dispatch_event(struct ib_event *event);
10311 +void ib_dispatch_event(const struct ib_event *event);
10312
10313 int ib_query_port(struct ib_device *device,
10314 u8 port_num, struct ib_port_attr *port_attr);
10315 diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
10316 index 694bd040cf51..fdd31c5fd126 100644
10317 --- a/include/trace/events/rcu.h
10318 +++ b/include/trace/events/rcu.h
10319 @@ -442,7 +442,7 @@ TRACE_EVENT_RCU(rcu_fqs,
10320 */
10321 TRACE_EVENT_RCU(rcu_dyntick,
10322
10323 - TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
10324 + TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
10325
10326 TP_ARGS(polarity, oldnesting, newnesting, dynticks),
10327
10328 @@ -457,7 +457,7 @@ TRACE_EVENT_RCU(rcu_dyntick,
10329 __entry->polarity = polarity;
10330 __entry->oldnesting = oldnesting;
10331 __entry->newnesting = newnesting;
10332 - __entry->dynticks = atomic_read(&dynticks);
10333 + __entry->dynticks = dynticks;
10334 ),
10335
10336 TP_printk("%s %lx %lx %#3x", __entry->polarity,
10337 diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
10338 index a70f7209cda3..218c09ff6a27 100644
10339 --- a/kernel/bpf/inode.c
10340 +++ b/kernel/bpf/inode.c
10341 @@ -196,6 +196,7 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
10342 void *key = map_iter(m)->key;
10343 void *prev_key;
10344
10345 + (*pos)++;
10346 if (map_iter(m)->done)
10347 return NULL;
10348
10349 @@ -208,8 +209,6 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
10350 map_iter(m)->done = true;
10351 return NULL;
10352 }
10353 -
10354 - ++(*pos);
10355 return key;
10356 }
10357
10358 diff --git a/kernel/cpu.c b/kernel/cpu.c
10359 index 116825437cd6..406828fb3038 100644
10360 --- a/kernel/cpu.c
10361 +++ b/kernel/cpu.c
10362 @@ -525,8 +525,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
10363 if (WARN_ON_ONCE((!cpu_online(cpu))))
10364 return -ECANCELED;
10365
10366 - /* Unpark the stopper thread and the hotplug thread of the target cpu */
10367 - stop_machine_unpark(cpu);
10368 + /* Unpark the hotplug thread of the target cpu */
10369 kthread_unpark(st->thread);
10370
10371 /*
10372 @@ -1089,8 +1088,8 @@ void notify_cpu_starting(unsigned int cpu)
10373
10374 /*
10375 * Called from the idle task. Wake up the controlling task which brings the
10376 - * stopper and the hotplug thread of the upcoming CPU up and then delegates
10377 - * the rest of the online bringup to the hotplug thread.
10378 + * hotplug thread of the upcoming CPU up and then delegates the rest of the
10379 + * online bringup to the hotplug thread.
10380 */
10381 void cpuhp_online_idle(enum cpuhp_state state)
10382 {
10383 @@ -1100,6 +1099,12 @@ void cpuhp_online_idle(enum cpuhp_state state)
10384 if (state != CPUHP_AP_ONLINE_IDLE)
10385 return;
10386
10387 + /*
10388 + * Unpark the stopper thread before we start the idle loop (and start
10389 + * scheduling); this ensures the stopper task is always available.
10390 + */
10391 + stop_machine_unpark(smp_processor_id());
10392 +
10393 st->state = CPUHP_AP_ONLINE_IDLE;
10394 complete_ap_thread(st, true);
10395 }
10396 diff --git a/kernel/module.c b/kernel/module.c
10397 index cb09a5f37a5f..a2a47f4a33a7 100644
10398 --- a/kernel/module.c
10399 +++ b/kernel/module.c
10400 @@ -214,7 +214,8 @@ static struct module *mod_find(unsigned long addr)
10401 {
10402 struct module *mod;
10403
10404 - list_for_each_entry_rcu(mod, &modules, list) {
10405 + list_for_each_entry_rcu(mod, &modules, list,
10406 + lockdep_is_held(&module_mutex)) {
10407 if (within_module(addr, mod))
10408 return mod;
10409 }
10410 @@ -448,7 +449,8 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
10411 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
10412 return true;
10413
10414 - list_for_each_entry_rcu(mod, &modules, list) {
10415 + list_for_each_entry_rcu(mod, &modules, list,
10416 + lockdep_is_held(&module_mutex)) {
10417 struct symsearch arr[] = {
10418 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
10419 NOT_GPL_ONLY, false },
10420 @@ -616,7 +618,8 @@ static struct module *find_module_all(const char *name, size_t len,
10421
10422 module_assert_mutex_or_preempt();
10423
10424 - list_for_each_entry_rcu(mod, &modules, list) {
10425 + list_for_each_entry_rcu(mod, &modules, list,
10426 + lockdep_is_held(&module_mutex)) {
10427 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
10428 continue;
10429 if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
10430 @@ -1781,6 +1784,8 @@ static int module_add_modinfo_attrs(struct module *mod)
10431 error_out:
10432 if (i > 0)
10433 module_remove_modinfo_attrs(mod, --i);
10434 + else
10435 + kfree(mod->modinfo_attrs);
10436 return error;
10437 }
10438
10439 @@ -3054,9 +3059,7 @@ static int setup_load_info(struct load_info *info, int flags)
10440
10441 /* Try to find a name early so we can log errors with a module name */
10442 info->index.info = find_sec(info, ".modinfo");
10443 - if (!info->index.info)
10444 - info->name = "(missing .modinfo section)";
10445 - else
10446 + if (info->index.info)
10447 info->name = get_modinfo(info, "name");
10448
10449 /* Find internal symbols and strings. */
10450 @@ -3071,14 +3074,15 @@ static int setup_load_info(struct load_info *info, int flags)
10451 }
10452
10453 if (info->index.sym == 0) {
10454 - pr_warn("%s: module has no symbols (stripped?)\n", info->name);
10455 + pr_warn("%s: module has no symbols (stripped?)\n",
10456 + info->name ?: "(missing .modinfo section or name field)");
10457 return -ENOEXEC;
10458 }
10459
10460 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
10461 if (!info->index.mod) {
10462 pr_warn("%s: No module found in object\n",
10463 - info->name ?: "(missing .modinfo name field)");
10464 + info->name ?: "(missing .modinfo section or name field)");
10465 return -ENOEXEC;
10466 }
10467 /* This is temporary: point mod into copy of data. */
10468 diff --git a/kernel/padata.c b/kernel/padata.c
10469 index 9c82ee4a9732..fda7a7039422 100644
10470 --- a/kernel/padata.c
10471 +++ b/kernel/padata.c
10472 @@ -512,7 +512,7 @@ static int padata_replace_one(struct padata_shell *ps)
10473 return 0;
10474 }
10475
10476 -static int padata_replace(struct padata_instance *pinst, int cpu)
10477 +static int padata_replace(struct padata_instance *pinst)
10478 {
10479 int notification_mask = 0;
10480 struct padata_shell *ps;
10481 @@ -523,16 +523,12 @@ static int padata_replace(struct padata_instance *pinst, int cpu)
10482 cpumask_copy(pinst->omask, pinst->rcpumask.pcpu);
10483 cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
10484 cpu_online_mask);
10485 - if (cpu >= 0)
10486 - cpumask_clear_cpu(cpu, pinst->rcpumask.pcpu);
10487 if (!cpumask_equal(pinst->omask, pinst->rcpumask.pcpu))
10488 notification_mask |= PADATA_CPU_PARALLEL;
10489
10490 cpumask_copy(pinst->omask, pinst->rcpumask.cbcpu);
10491 cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
10492 cpu_online_mask);
10493 - if (cpu >= 0)
10494 - cpumask_clear_cpu(cpu, pinst->rcpumask.cbcpu);
10495 if (!cpumask_equal(pinst->omask, pinst->rcpumask.cbcpu))
10496 notification_mask |= PADATA_CPU_SERIAL;
10497
10498 @@ -624,7 +620,7 @@ out_replace:
10499 cpumask_copy(pinst->cpumask.pcpu, pcpumask);
10500 cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
10501
10502 - err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst, -1);
10503 + err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);
10504
10505 if (valid)
10506 __padata_start(pinst);
10507 @@ -715,7 +711,7 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
10508 int err = 0;
10509
10510 if (cpumask_test_cpu(cpu, cpu_online_mask)) {
10511 - err = padata_replace(pinst, -1);
10512 + err = padata_replace(pinst);
10513
10514 if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
10515 padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
10516 @@ -729,12 +725,12 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
10517 {
10518 int err = 0;
10519
10520 - if (cpumask_test_cpu(cpu, cpu_online_mask)) {
10521 + if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
10522 if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
10523 !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
10524 __padata_stop(pinst);
10525
10526 - err = padata_replace(pinst, cpu);
10527 + err = padata_replace(pinst);
10528 }
10529
10530 return err;
10531 @@ -796,7 +792,7 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
10532 return ret;
10533 }
10534
10535 -static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
10536 +static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
10537 {
10538 struct padata_instance *pinst;
10539 int ret;
10540 @@ -817,6 +813,7 @@ static enum cpuhp_state hp_online;
10541 static void __padata_free(struct padata_instance *pinst)
10542 {
10543 #ifdef CONFIG_HOTPLUG_CPU
10544 + cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node);
10545 cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
10546 #endif
10547
10548 @@ -1024,6 +1021,8 @@ static struct padata_instance *padata_alloc(const char *name,
10549
10550 #ifdef CONFIG_HOTPLUG_CPU
10551 cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
10552 + cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
10553 + &pinst->node);
10554 #endif
10555
10556 put_online_cpus();
10557 @@ -1136,17 +1135,24 @@ static __init int padata_driver_init(void)
10558 int ret;
10559
10560 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
10561 - padata_cpu_online,
10562 - padata_cpu_prep_down);
10563 + padata_cpu_online, NULL);
10564 if (ret < 0)
10565 return ret;
10566 hp_online = ret;
10567 +
10568 + ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
10569 + NULL, padata_cpu_dead);
10570 + if (ret < 0) {
10571 + cpuhp_remove_multi_state(hp_online);
10572 + return ret;
10573 + }
10574 return 0;
10575 }
10576 module_init(padata_driver_init);
10577
10578 static __exit void padata_driver_exit(void)
10579 {
10580 + cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
10581 cpuhp_remove_multi_state(hp_online);
10582 }
10583 module_exit(padata_driver_exit);
10584 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
10585 index ca65327a6de8..c0a5b56aea4e 100644
10586 --- a/kernel/printk/printk.c
10587 +++ b/kernel/printk/printk.c
10588 @@ -2770,8 +2770,6 @@ void register_console(struct console *newcon)
10589 * for us.
10590 */
10591 logbuf_lock_irqsave(flags);
10592 - console_seq = syslog_seq;
10593 - console_idx = syslog_idx;
10594 /*
10595 * We're about to replay the log buffer. Only do this to the
10596 * just-registered console to avoid excessive message spam to
10597 @@ -2783,6 +2781,8 @@ void register_console(struct console *newcon)
10598 */
10599 exclusive_console = newcon;
10600 exclusive_console_stop_seq = console_seq;
10601 + console_seq = syslog_seq;
10602 + console_idx = syslog_idx;
10603 logbuf_unlock_irqrestore(flags);
10604 }
10605 console_unlock();
10606 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
10607 index 81105141b6a8..62e59596a30a 100644
10608 --- a/kernel/rcu/tree.c
10609 +++ b/kernel/rcu/tree.c
10610 @@ -576,7 +576,7 @@ static void rcu_eqs_enter(bool user)
10611 }
10612
10613 lockdep_assert_irqs_disabled();
10614 - trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks);
10615 + trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
10616 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
10617 rdp = this_cpu_ptr(&rcu_data);
10618 do_nocb_deferred_wakeup(rdp);
10619 @@ -649,14 +649,15 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
10620 * leave it in non-RCU-idle state.
10621 */
10622 if (rdp->dynticks_nmi_nesting != 1) {
10623 - trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks);
10624 + trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
10625 + atomic_read(&rdp->dynticks));
10626 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
10627 rdp->dynticks_nmi_nesting - 2);
10628 return;
10629 }
10630
10631 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
10632 - trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdp->dynticks);
10633 + trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
10634 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
10635
10636 if (irq)
10637 @@ -743,7 +744,7 @@ static void rcu_eqs_exit(bool user)
10638 rcu_dynticks_task_exit();
10639 rcu_dynticks_eqs_exit();
10640 rcu_cleanup_after_idle();
10641 - trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks);
10642 + trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
10643 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
10644 WRITE_ONCE(rdp->dynticks_nesting, 1);
10645 WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
10646 @@ -827,7 +828,7 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
10647 }
10648 trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
10649 rdp->dynticks_nmi_nesting,
10650 - rdp->dynticks_nmi_nesting + incby, rdp->dynticks);
10651 + rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
10652 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
10653 rdp->dynticks_nmi_nesting + incby);
10654 barrier();
10655 diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
10656 index 69c5aa64fcfd..f504ac831779 100644
10657 --- a/kernel/rcu/tree_exp.h
10658 +++ b/kernel/rcu/tree_exp.h
10659 @@ -558,7 +558,7 @@ static void rcu_exp_wait_wake(unsigned long s)
10660 spin_unlock(&rnp->exp_lock);
10661 }
10662 smp_mb(); /* All above changes before wakeup. */
10663 - wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
10664 + wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
10665 }
10666 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
10667 mutex_unlock(&rcu_state.exp_wake_mutex);
10668 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
10669 index f849e7429816..f7118842a2b8 100644
10670 --- a/kernel/rcu/tree_plugin.h
10671 +++ b/kernel/rcu/tree_plugin.h
10672 @@ -2322,6 +2322,8 @@ static void __init rcu_organize_nocb_kthreads(void)
10673 {
10674 int cpu;
10675 bool firsttime = true;
10676 + bool gotnocbs = false;
10677 + bool gotnocbscbs = true;
10678 int ls = rcu_nocb_gp_stride;
10679 int nl = 0; /* Next GP kthread. */
10680 struct rcu_data *rdp;
10681 @@ -2344,21 +2346,31 @@ static void __init rcu_organize_nocb_kthreads(void)
10682 rdp = per_cpu_ptr(&rcu_data, cpu);
10683 if (rdp->cpu >= nl) {
10684 /* New GP kthread, set up for CBs & next GP. */
10685 + gotnocbs = true;
10686 nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
10687 rdp->nocb_gp_rdp = rdp;
10688 rdp_gp = rdp;
10689 - if (!firsttime && dump_tree)
10690 - pr_cont("\n");
10691 - firsttime = false;
10692 - pr_alert("%s: No-CB GP kthread CPU %d:", __func__, cpu);
10693 + if (dump_tree) {
10694 + if (!firsttime)
10695 + pr_cont("%s\n", gotnocbscbs
10696 + ? "" : " (self only)");
10697 + gotnocbscbs = false;
10698 + firsttime = false;
10699 + pr_alert("%s: No-CB GP kthread CPU %d:",
10700 + __func__, cpu);
10701 + }
10702 } else {
10703 /* Another CB kthread, link to previous GP kthread. */
10704 + gotnocbscbs = true;
10705 rdp->nocb_gp_rdp = rdp_gp;
10706 rdp_prev->nocb_next_cb_rdp = rdp;
10707 - pr_alert(" %d", cpu);
10708 + if (dump_tree)
10709 + pr_cont(" %d", cpu);
10710 }
10711 rdp_prev = rdp;
10712 }
10713 + if (gotnocbs && dump_tree)
10714 + pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
10715 }
10716
10717 /*
10718 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
10719 index dfaefb175ba0..e6c65725b7ce 100644
10720 --- a/kernel/sched/core.c
10721 +++ b/kernel/sched/core.c
10722 @@ -1252,7 +1252,8 @@ static void __init init_uclamp(void)
10723 mutex_init(&uclamp_mutex);
10724
10725 for_each_possible_cpu(cpu) {
10726 - memset(&cpu_rq(cpu)->uclamp, 0, sizeof(struct uclamp_rq));
10727 + memset(&cpu_rq(cpu)->uclamp, 0,
10728 + sizeof(struct uclamp_rq)*UCLAMP_CNT);
10729 cpu_rq(cpu)->uclamp_flags = 0;
10730 }
10731
10732 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
10733 index 49b835f1305f..1fa1e13a5944 100644
10734 --- a/kernel/sched/topology.c
10735 +++ b/kernel/sched/topology.c
10736 @@ -1882,6 +1882,42 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
10737 return sd;
10738 }
10739
10740 +/*
10741 + * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
10742 + * any two given CPUs at this (non-NUMA) topology level.
10743 + */
10744 +static bool topology_span_sane(struct sched_domain_topology_level *tl,
10745 + const struct cpumask *cpu_map, int cpu)
10746 +{
10747 + int i;
10748 +
10749 + /* NUMA levels are allowed to overlap */
10750 + if (tl->flags & SDTL_OVERLAP)
10751 + return true;
10752 +
10753 + /*
10754 + * Non-NUMA levels cannot partially overlap - they must be either
10755 + * completely equal or completely disjoint. Otherwise we can end up
10756 + * breaking the sched_group lists - i.e. a later get_group() pass
10757 + * breaks the linking done for an earlier span.
10758 + */
10759 + for_each_cpu(i, cpu_map) {
10760 + if (i == cpu)
10761 + continue;
10762 + /*
10763 + * We should 'and' all those masks with 'cpu_map' to exactly
10764 + * match the topology we're about to build, but that can only
10765 + * remove CPUs, which only lessens our ability to detect
10766 + * overlaps
10767 + */
10768 + if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
10769 + cpumask_intersects(tl->mask(cpu), tl->mask(i)))
10770 + return false;
10771 + }
10772 +
10773 + return true;
10774 +}
10775 +
10776 /*
10777 * Find the sched_domain_topology_level where all CPU capacities are visible
10778 * for all CPUs.
10779 @@ -1978,6 +2014,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
10780 has_asym = true;
10781 }
10782
10783 + if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
10784 + goto error;
10785 +
10786 sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);
10787
10788 if (tl == sched_domain_topology)
10789 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
10790 index 4b11f0309eee..b97401f6bc23 100644
10791 --- a/kernel/time/alarmtimer.c
10792 +++ b/kernel/time/alarmtimer.c
10793 @@ -88,6 +88,7 @@ static int alarmtimer_rtc_add_device(struct device *dev,
10794 unsigned long flags;
10795 struct rtc_device *rtc = to_rtc_device(dev);
10796 struct wakeup_source *__ws;
10797 + struct platform_device *pdev;
10798 int ret = 0;
10799
10800 if (rtcdev)
10801 @@ -99,9 +100,11 @@ static int alarmtimer_rtc_add_device(struct device *dev,
10802 return -1;
10803
10804 __ws = wakeup_source_register(dev, "alarmtimer");
10805 + pdev = platform_device_register_data(dev, "alarmtimer",
10806 + PLATFORM_DEVID_AUTO, NULL, 0);
10807
10808 spin_lock_irqsave(&rtcdev_lock, flags);
10809 - if (!rtcdev) {
10810 + if (__ws && !IS_ERR(pdev) && !rtcdev) {
10811 if (!try_module_get(rtc->owner)) {
10812 ret = -1;
10813 goto unlock;
10814 @@ -112,10 +115,14 @@ static int alarmtimer_rtc_add_device(struct device *dev,
10815 get_device(dev);
10816 ws = __ws;
10817 __ws = NULL;
10818 + pdev = NULL;
10819 + } else {
10820 + ret = -1;
10821 }
10822 unlock:
10823 spin_unlock_irqrestore(&rtcdev_lock, flags);
10824
10825 + platform_device_unregister(pdev);
10826 wakeup_source_unregister(__ws);
10827
10828 return ret;
10829 @@ -876,8 +883,7 @@ static struct platform_driver alarmtimer_driver = {
10830 */
10831 static int __init alarmtimer_init(void)
10832 {
10833 - struct platform_device *pdev;
10834 - int error = 0;
10835 + int error;
10836 int i;
10837
10838 alarmtimer_rtc_timer_init();
10839 @@ -900,15 +906,7 @@ static int __init alarmtimer_init(void)
10840 if (error)
10841 goto out_if;
10842
10843 - pdev = platform_device_register_simple("alarmtimer", -1, NULL, 0);
10844 - if (IS_ERR(pdev)) {
10845 - error = PTR_ERR(pdev);
10846 - goto out_drv;
10847 - }
10848 return 0;
10849 -
10850 -out_drv:
10851 - platform_driver_unregister(&alarmtimer_driver);
10852 out_if:
10853 alarmtimer_rtc_interface_remove();
10854 return error;
10855 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
10856 index 407d8bf4ed93..15160d707da4 100644
10857 --- a/kernel/trace/ftrace.c
10858 +++ b/kernel/trace/ftrace.c
10859 @@ -6537,9 +6537,10 @@ static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
10860 struct trace_array *tr = m->private;
10861 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
10862
10863 - if (v == FTRACE_NO_PIDS)
10864 + if (v == FTRACE_NO_PIDS) {
10865 + (*pos)++;
10866 return NULL;
10867 -
10868 + }
10869 return trace_pid_next(pid_list, v, pos);
10870 }
10871
10872 diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
10873 index 4be7fc84d6b6..a31be3fce3e8 100644
10874 --- a/kernel/trace/trace_events_hist.c
10875 +++ b/kernel/trace/trace_events_hist.c
10876 @@ -2037,12 +2037,6 @@ static int parse_map_size(char *str)
10877 unsigned long size, map_bits;
10878 int ret;
10879
10880 - strsep(&str, "=");
10881 - if (!str) {
10882 - ret = -EINVAL;
10883 - goto out;
10884 - }
10885 -
10886 ret = kstrtoul(str, 0, &size);
10887 if (ret)
10888 goto out;
10889 @@ -2102,25 +2096,25 @@ static int parse_action(char *str, struct hist_trigger_attrs *attrs)
10890 static int parse_assignment(struct trace_array *tr,
10891 char *str, struct hist_trigger_attrs *attrs)
10892 {
10893 - int ret = 0;
10894 + int len, ret = 0;
10895
10896 - if ((str_has_prefix(str, "key=")) ||
10897 - (str_has_prefix(str, "keys="))) {
10898 - attrs->keys_str = kstrdup(str, GFP_KERNEL);
10899 + if ((len = str_has_prefix(str, "key=")) ||
10900 + (len = str_has_prefix(str, "keys="))) {
10901 + attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
10902 if (!attrs->keys_str) {
10903 ret = -ENOMEM;
10904 goto out;
10905 }
10906 - } else if ((str_has_prefix(str, "val=")) ||
10907 - (str_has_prefix(str, "vals=")) ||
10908 - (str_has_prefix(str, "values="))) {
10909 - attrs->vals_str = kstrdup(str, GFP_KERNEL);
10910 + } else if ((len = str_has_prefix(str, "val=")) ||
10911 + (len = str_has_prefix(str, "vals=")) ||
10912 + (len = str_has_prefix(str, "values="))) {
10913 + attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
10914 if (!attrs->vals_str) {
10915 ret = -ENOMEM;
10916 goto out;
10917 }
10918 - } else if (str_has_prefix(str, "sort=")) {
10919 - attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
10920 + } else if ((len = str_has_prefix(str, "sort="))) {
10921 + attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
10922 if (!attrs->sort_key_str) {
10923 ret = -ENOMEM;
10924 goto out;
10925 @@ -2131,12 +2125,8 @@ static int parse_assignment(struct trace_array *tr,
10926 ret = -ENOMEM;
10927 goto out;
10928 }
10929 - } else if (str_has_prefix(str, "clock=")) {
10930 - strsep(&str, "=");
10931 - if (!str) {
10932 - ret = -EINVAL;
10933 - goto out;
10934 - }
10935 + } else if ((len = str_has_prefix(str, "clock="))) {
10936 + str += len;
10937
10938 str = strstrip(str);
10939 attrs->clock = kstrdup(str, GFP_KERNEL);
10940 @@ -2144,8 +2134,8 @@ static int parse_assignment(struct trace_array *tr,
10941 ret = -ENOMEM;
10942 goto out;
10943 }
10944 - } else if (str_has_prefix(str, "size=")) {
10945 - int map_bits = parse_map_size(str);
10946 + } else if ((len = str_has_prefix(str, "size="))) {
10947 + int map_bits = parse_map_size(str + len);
10948
10949 if (map_bits < 0) {
10950 ret = map_bits;
10951 @@ -2185,8 +2175,14 @@ parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
10952
10953 while (trigger_str) {
10954 char *str = strsep(&trigger_str, ":");
10955 + char *rhs;
10956
10957 - if (strchr(str, '=')) {
10958 + rhs = strchr(str, '=');
10959 + if (rhs) {
10960 + if (!strlen(++rhs)) {
10961 + ret = -EINVAL;
10962 + goto free;
10963 + }
10964 ret = parse_assignment(tr, str, attrs);
10965 if (ret)
10966 goto free;
10967 @@ -4559,10 +4555,6 @@ static int create_val_fields(struct hist_trigger_data *hist_data,
10968 if (!fields_str)
10969 goto out;
10970
10971 - strsep(&fields_str, "=");
10972 - if (!fields_str)
10973 - goto out;
10974 -
10975 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
10976 j < TRACING_MAP_VALS_MAX; i++) {
10977 field_str = strsep(&fields_str, ",");
10978 @@ -4657,10 +4649,6 @@ static int create_key_fields(struct hist_trigger_data *hist_data,
10979 if (!fields_str)
10980 goto out;
10981
10982 - strsep(&fields_str, "=");
10983 - if (!fields_str)
10984 - goto out;
10985 -
10986 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
10987 field_str = strsep(&fields_str, ",");
10988 if (!field_str)
10989 @@ -4818,12 +4806,6 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
10990 if (!fields_str)
10991 goto out;
10992
10993 - strsep(&fields_str, "=");
10994 - if (!fields_str) {
10995 - ret = -EINVAL;
10996 - goto out;
10997 - }
10998 -
10999 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
11000 struct hist_field *hist_field;
11001 char *field_str, *field_name;
11002 @@ -4832,9 +4814,11 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
11003 sort_key = &hist_data->sort_keys[i];
11004
11005 field_str = strsep(&fields_str, ",");
11006 - if (!field_str) {
11007 - if (i == 0)
11008 - ret = -EINVAL;
11009 + if (!field_str)
11010 + break;
11011 +
11012 + if (!*field_str) {
11013 + ret = -EINVAL;
11014 break;
11015 }
11016
11017 @@ -4844,7 +4828,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
11018 }
11019
11020 field_name = strsep(&field_str, ".");
11021 - if (!field_name) {
11022 + if (!field_name || !*field_name) {
11023 ret = -EINVAL;
11024 break;
11025 }
11026 diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
11027 index 40106fff06a4..287d77eae59b 100644
11028 --- a/kernel/trace/trace_events_trigger.c
11029 +++ b/kernel/trace/trace_events_trigger.c
11030 @@ -116,9 +116,10 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
11031 {
11032 struct trace_event_file *event_file = event_file_data(m->private);
11033
11034 - if (t == SHOW_AVAILABLE_TRIGGERS)
11035 + if (t == SHOW_AVAILABLE_TRIGGERS) {
11036 + (*pos)++;
11037 return NULL;
11038 -
11039 + }
11040 return seq_list_next(t, &event_file->triggers, pos);
11041 }
11042
11043 diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
11044 index 9ab0a1a7ad5e..3c9c17feea33 100644
11045 --- a/kernel/trace/trace_stat.c
11046 +++ b/kernel/trace/trace_stat.c
11047 @@ -282,18 +282,22 @@ static int tracing_stat_init(void)
11048
11049 d_tracing = tracing_init_dentry();
11050 if (IS_ERR(d_tracing))
11051 - return 0;
11052 + return -ENODEV;
11053
11054 stat_dir = tracefs_create_dir("trace_stat", d_tracing);
11055 - if (!stat_dir)
11056 + if (!stat_dir) {
11057 pr_warn("Could not create tracefs 'trace_stat' entry\n");
11058 + return -ENOMEM;
11059 + }
11060 return 0;
11061 }
11062
11063 static int init_stat_file(struct stat_session *session)
11064 {
11065 - if (!stat_dir && tracing_stat_init())
11066 - return -ENODEV;
11067 + int ret;
11068 +
11069 + if (!stat_dir && (ret = tracing_stat_init()))
11070 + return ret;
11071
11072 session->file = tracefs_create_file(session->ts->name, 0644,
11073 stat_dir,
11074 @@ -306,7 +310,7 @@ static int init_stat_file(struct stat_session *session)
11075 int register_stat_tracer(struct tracer_stat *trace)
11076 {
11077 struct stat_session *session, *node;
11078 - int ret;
11079 + int ret = -EINVAL;
11080
11081 if (!trace)
11082 return -EINVAL;
11083 @@ -317,17 +321,15 @@ int register_stat_tracer(struct tracer_stat *trace)
11084 /* Already registered? */
11085 mutex_lock(&all_stat_sessions_mutex);
11086 list_for_each_entry(node, &all_stat_sessions, session_list) {
11087 - if (node->ts == trace) {
11088 - mutex_unlock(&all_stat_sessions_mutex);
11089 - return -EINVAL;
11090 - }
11091 + if (node->ts == trace)
11092 + goto out;
11093 }
11094 - mutex_unlock(&all_stat_sessions_mutex);
11095
11096 + ret = -ENOMEM;
11097 /* Init the session */
11098 session = kzalloc(sizeof(*session), GFP_KERNEL);
11099 if (!session)
11100 - return -ENOMEM;
11101 + goto out;
11102
11103 session->ts = trace;
11104 INIT_LIST_HEAD(&session->session_list);
11105 @@ -336,15 +338,16 @@ int register_stat_tracer(struct tracer_stat *trace)
11106 ret = init_stat_file(session);
11107 if (ret) {
11108 destroy_session(session);
11109 - return ret;
11110 + goto out;
11111 }
11112
11113 + ret = 0;
11114 /* Register */
11115 - mutex_lock(&all_stat_sessions_mutex);
11116 list_add_tail(&session->session_list, &all_stat_sessions);
11117 + out:
11118 mutex_unlock(&all_stat_sessions_mutex);
11119
11120 - return 0;
11121 + return ret;
11122 }
11123
11124 void unregister_stat_tracer(struct tracer_stat *trace)
11125 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
11126 index f41334ef0971..cbd3cf503c90 100644
11127 --- a/kernel/watchdog.c
11128 +++ b/kernel/watchdog.c
11129 @@ -161,6 +161,8 @@ static void lockup_detector_update_enable(void)
11130
11131 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
11132
11133 +#define SOFTLOCKUP_RESET ULONG_MAX
11134 +
11135 /* Global variables, exported for sysctl */
11136 unsigned int __read_mostly softlockup_panic =
11137 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
11138 @@ -274,7 +276,7 @@ notrace void touch_softlockup_watchdog_sched(void)
11139 * Preemption can be enabled. It doesn't matter which CPU's timestamp
11140 * gets zeroed here, so use the raw_ operation.
11141 */
11142 - raw_cpu_write(watchdog_touch_ts, 0);
11143 + raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
11144 }
11145
11146 notrace void touch_softlockup_watchdog(void)
11147 @@ -298,14 +300,14 @@ void touch_all_softlockup_watchdogs(void)
11148 * the softlockup check.
11149 */
11150 for_each_cpu(cpu, &watchdog_allowed_mask)
11151 - per_cpu(watchdog_touch_ts, cpu) = 0;
11152 + per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
11153 wq_watchdog_touch(-1);
11154 }
11155
11156 void touch_softlockup_watchdog_sync(void)
11157 {
11158 __this_cpu_write(softlockup_touch_sync, true);
11159 - __this_cpu_write(watchdog_touch_ts, 0);
11160 + __this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
11161 }
11162
11163 static int is_softlockup(unsigned long touch_ts)
11164 @@ -383,7 +385,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
11165 /* .. and repeat */
11166 hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
11167
11168 - if (touch_ts == 0) {
11169 + if (touch_ts == SOFTLOCKUP_RESET) {
11170 if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
11171 /*
11172 * If the time stamp was touched atomically
11173 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
11174 index 61261195f5b6..48054dbf1b51 100644
11175 --- a/lib/debugobjects.c
11176 +++ b/lib/debugobjects.c
11177 @@ -132,14 +132,18 @@ static void fill_pool(void)
11178 struct debug_obj *obj;
11179 unsigned long flags;
11180
11181 - if (likely(obj_pool_free >= debug_objects_pool_min_level))
11182 + if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
11183 return;
11184
11185 /*
11186 * Reuse objs from the global free list; they will be reinitialized
11187 * when allocating.
11188 + *
11189 + * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
11190 + * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
11191 + * sections.
11192 */
11193 - while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
11194 + while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
11195 raw_spin_lock_irqsave(&pool_lock, flags);
11196 /*
11197 * Recheck with the lock held as the worker thread might have
11198 @@ -148,9 +152,9 @@ static void fill_pool(void)
11199 while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
11200 obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
11201 hlist_del(&obj->node);
11202 - obj_nr_tofree--;
11203 + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
11204 hlist_add_head(&obj->node, &obj_pool);
11205 - obj_pool_free++;
11206 + WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
11207 }
11208 raw_spin_unlock_irqrestore(&pool_lock, flags);
11209 }
11210 @@ -158,7 +162,7 @@ static void fill_pool(void)
11211 if (unlikely(!obj_cache))
11212 return;
11213
11214 - while (obj_pool_free < debug_objects_pool_min_level) {
11215 + while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
11216 struct debug_obj *new[ODEBUG_BATCH_SIZE];
11217 int cnt;
11218
11219 @@ -174,7 +178,7 @@ static void fill_pool(void)
11220 while (cnt) {
11221 hlist_add_head(&new[--cnt]->node, &obj_pool);
11222 debug_objects_allocated++;
11223 - obj_pool_free++;
11224 + WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
11225 }
11226 raw_spin_unlock_irqrestore(&pool_lock, flags);
11227 }
11228 @@ -236,7 +240,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
11229 obj = __alloc_object(&obj_pool);
11230 if (obj) {
11231 obj_pool_used++;
11232 - obj_pool_free--;
11233 + WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
11234
11235 /*
11236 * Looking ahead, allocate one batch of debug objects and
11237 @@ -255,7 +259,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
11238 &percpu_pool->free_objs);
11239 percpu_pool->obj_free++;
11240 obj_pool_used++;
11241 - obj_pool_free--;
11242 + WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
11243 }
11244 }
11245
11246 @@ -309,8 +313,8 @@ static void free_obj_work(struct work_struct *work)
11247 obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
11248 hlist_del(&obj->node);
11249 hlist_add_head(&obj->node, &obj_pool);
11250 - obj_pool_free++;
11251 - obj_nr_tofree--;
11252 + WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
11253 + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
11254 }
11255 raw_spin_unlock_irqrestore(&pool_lock, flags);
11256 return;
11257 @@ -324,7 +328,7 @@ free_objs:
11258 if (obj_nr_tofree) {
11259 hlist_move_list(&obj_to_free, &tofree);
11260 debug_objects_freed += obj_nr_tofree;
11261 - obj_nr_tofree = 0;
11262 + WRITE_ONCE(obj_nr_tofree, 0);
11263 }
11264 raw_spin_unlock_irqrestore(&pool_lock, flags);
11265
11266 @@ -375,10 +379,10 @@ free_to_obj_pool:
11267 obj_pool_used--;
11268
11269 if (work) {
11270 - obj_nr_tofree++;
11271 + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
11272 hlist_add_head(&obj->node, &obj_to_free);
11273 if (lookahead_count) {
11274 - obj_nr_tofree += lookahead_count;
11275 + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
11276 obj_pool_used -= lookahead_count;
11277 while (lookahead_count) {
11278 hlist_add_head(&objs[--lookahead_count]->node,
11279 @@ -396,15 +400,15 @@ free_to_obj_pool:
11280 for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
11281 obj = __alloc_object(&obj_pool);
11282 hlist_add_head(&obj->node, &obj_to_free);
11283 - obj_pool_free--;
11284 - obj_nr_tofree++;
11285 + WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
11286 + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
11287 }
11288 }
11289 } else {
11290 - obj_pool_free++;
11291 + WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
11292 hlist_add_head(&obj->node, &obj_pool);
11293 if (lookahead_count) {
11294 - obj_pool_free += lookahead_count;
11295 + WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
11296 obj_pool_used -= lookahead_count;
11297 while (lookahead_count) {
11298 hlist_add_head(&objs[--lookahead_count]->node,
11299 @@ -423,7 +427,7 @@ free_to_obj_pool:
11300 static void free_object(struct debug_obj *obj)
11301 {
11302 __free_object(obj);
11303 - if (!obj_freeing && obj_nr_tofree) {
11304 + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
11305 WRITE_ONCE(obj_freeing, true);
11306 schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
11307 }
11308 @@ -982,7 +986,7 @@ repeat:
11309 debug_objects_maxchecked = objs_checked;
11310
11311 /* Schedule work to actually kmem_cache_free() objects */
11312 - if (!obj_freeing && obj_nr_tofree) {
11313 + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
11314 WRITE_ONCE(obj_freeing, true);
11315 schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
11316 }
11317 @@ -1008,12 +1012,12 @@ static int debug_stats_show(struct seq_file *m, void *v)
11318 seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
11319 seq_printf(m, "warnings :%d\n", debug_objects_warnings);
11320 seq_printf(m, "fixups :%d\n", debug_objects_fixups);
11321 - seq_printf(m, "pool_free :%d\n", obj_pool_free + obj_percpu_free);
11322 + seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
11323 seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
11324 seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
11325 seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
11326 seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
11327 - seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
11328 + seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree));
11329 seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
11330 seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
11331 return 0;
11332 diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
11333 index 9c485df1308f..f02e10fa6238 100644
11334 --- a/lib/raid6/mktables.c
11335 +++ b/lib/raid6/mktables.c
11336 @@ -56,8 +56,8 @@ int main(int argc, char *argv[])
11337 uint8_t v;
11338 uint8_t exptbl[256], invtbl[256];
11339
11340 - printf("#include <linux/raid/pq.h>\n");
11341 printf("#include <linux/export.h>\n");
11342 + printf("#include <linux/raid/pq.h>\n");
11343
11344 /* Compute multiplication table */
11345 printf("\nconst u8 __attribute__((aligned(256)))\n"
11346 diff --git a/lib/scatterlist.c b/lib/scatterlist.c
11347 index c2cf2c311b7d..5813072bc589 100644
11348 --- a/lib/scatterlist.c
11349 +++ b/lib/scatterlist.c
11350 @@ -311,7 +311,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
11351 if (prv)
11352 table->nents = ++table->orig_nents;
11353
11354 - return -ENOMEM;
11355 + return -ENOMEM;
11356 }
11357
11358 sg_init_table(sg, alloc_size);
11359 diff --git a/net/core/dev.c b/net/core/dev.c
11360 index 82325d3d1371..a7e2e57af63a 100644
11361 --- a/net/core/dev.c
11362 +++ b/net/core/dev.c
11363 @@ -4256,14 +4256,14 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
11364 /* Reinjected packets coming from act_mirred or similar should
11365 * not get XDP generic processing.
11366 */
11367 - if (skb_cloned(skb) || skb_is_tc_redirected(skb))
11368 + if (skb_is_tc_redirected(skb))
11369 return XDP_PASS;
11370
11371 /* XDP packets must be linear and must have sufficient headroom
11372 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
11373 * native XDP provides, thus we need to do it here as well.
11374 */
11375 - if (skb_is_nonlinear(skb) ||
11376 + if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
11377 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
11378 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
11379 int troom = skb->tail + skb->data_len - skb->end;
11380 diff --git a/net/core/filter.c b/net/core/filter.c
11381 index 1a78d64096bb..d59dbc88fef5 100644
11382 --- a/net/core/filter.c
11383 +++ b/net/core/filter.c
11384 @@ -3543,7 +3543,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
11385 return err;
11386 }
11387 default:
11388 - break;
11389 + return -EBADRQC;
11390 }
11391 return 0;
11392 }
11393 diff --git a/net/core/sock_map.c b/net/core/sock_map.c
11394 index 085cef5857bb..405397801bb0 100644
11395 --- a/net/core/sock_map.c
11396 +++ b/net/core/sock_map.c
11397 @@ -881,6 +881,9 @@ static void sock_hash_free(struct bpf_map *map)
11398 /* wait for psock readers accessing its map link */
11399 synchronize_rcu();
11400
11401 + /* wait for psock readers accessing its map link */
11402 + synchronize_rcu();
11403 +
11404 bpf_map_area_free(htab->buckets);
11405 kfree(htab);
11406 }
11407 diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
11408 index c8a128c9e5e0..70db7c909f74 100644
11409 --- a/net/dsa/tag_qca.c
11410 +++ b/net/dsa/tag_qca.c
11411 @@ -33,7 +33,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
11412 struct dsa_port *dp = dsa_slave_to_port(dev);
11413 u16 *phdr, hdr;
11414
11415 - if (skb_cow_head(skb, 0) < 0)
11416 + if (skb_cow_head(skb, QCA_HDR_LEN) < 0)
11417 return NULL;
11418
11419 skb_push(skb, QCA_HDR_LEN);
11420 diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
11421 index 5284fcf16be7..f8d2919cf9fd 100644
11422 --- a/net/netfilter/nft_tunnel.c
11423 +++ b/net/netfilter/nft_tunnel.c
11424 @@ -248,8 +248,9 @@ static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
11425 }
11426
11427 static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
11428 + [NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 },
11429 [NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 },
11430 - [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
11431 + [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
11432 [NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 },
11433 };
11434
11435 diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
11436 index 7394e01c0c9c..5eceeee593cf 100644
11437 --- a/net/sched/cls_flower.c
11438 +++ b/net/sched/cls_flower.c
11439 @@ -689,6 +689,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
11440 .len = 128 / BITS_PER_BYTE },
11441 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
11442 .len = 128 / BITS_PER_BYTE },
11443 + [TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
11444 };
11445
11446 static const struct nla_policy
11447 diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
11448 index 039cc86974f4..610a0b728161 100644
11449 --- a/net/sched/cls_matchall.c
11450 +++ b/net/sched/cls_matchall.c
11451 @@ -157,6 +157,7 @@ static void *mall_get(struct tcf_proto *tp, u32 handle)
11452 static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
11453 [TCA_MATCHALL_UNSPEC] = { .type = NLA_UNSPEC },
11454 [TCA_MATCHALL_CLASSID] = { .type = NLA_U32 },
11455 + [TCA_MATCHALL_FLAGS] = { .type = NLA_U32 },
11456 };
11457
11458 static int mall_set_parms(struct net *net, struct tcf_proto *tp,
11459 diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
11460 index f38727ecf8b2..e1f64f4ba236 100644
11461 --- a/net/smc/smc_diag.c
11462 +++ b/net/smc/smc_diag.c
11463 @@ -39,16 +39,15 @@ static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
11464 {
11465 struct smc_sock *smc = smc_sk(sk);
11466
11467 + memset(r, 0, sizeof(*r));
11468 r->diag_family = sk->sk_family;
11469 + sock_diag_save_cookie(sk, r->id.idiag_cookie);
11470 if (!smc->clcsock)
11471 return;
11472 r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
11473 r->id.idiag_dport = smc->clcsock->sk->sk_dport;
11474 r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
11475 - sock_diag_save_cookie(sk, r->id.idiag_cookie);
11476 if (sk->sk_protocol == SMCPROTO_SMC) {
11477 - memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
11478 - memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
11479 r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
11480 r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
11481 #if IS_ENABLED(CONFIG_IPV6)
11482 diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
11483 index f740cb51802a..7ede1e52fd81 100644
11484 --- a/net/sunrpc/cache.c
11485 +++ b/net/sunrpc/cache.c
11486 @@ -1888,7 +1888,9 @@ void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
11487 if (!hlist_unhashed(&h->cache_list)){
11488 hlist_del_init_rcu(&h->cache_list);
11489 cd->entries--;
11490 + set_bit(CACHE_CLEANED, &h->flags);
11491 spin_unlock(&cd->hash_lock);
11492 + cache_fresh_unlocked(h, cd);
11493 cache_put(h, cd);
11494 } else
11495 spin_unlock(&cd->hash_lock);
11496 diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
11497 index e7ad48c605e0..6d1df7117e11 100644
11498 --- a/samples/bpf/Makefile
11499 +++ b/samples/bpf/Makefile
11500 @@ -219,6 +219,7 @@ BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
11501 readelf -S ./llvm_btf_verify.o | grep BTF; \
11502 /bin/rm -f ./llvm_btf_verify.o)
11503
11504 +BPF_EXTRA_CFLAGS += -fno-stack-protector
11505 ifneq ($(BTF_LLVM_PROBE),)
11506 EXTRA_CFLAGS += -g
11507 else
11508 diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
11509 index 10ba926ae292..d1dd4a6b6adb 100644
11510 --- a/scripts/Kbuild.include
11511 +++ b/scripts/Kbuild.include
11512 @@ -55,14 +55,13 @@ kecho := $($(quiet)kecho)
11513 # - stdin is piped in from the first prerequisite ($<) so one has
11514 # to specify a valid file as first prerequisite (often the kbuild file)
11515 define filechk
11516 - $(Q)set -e; \
11517 - mkdir -p $(dir $@); \
11518 - { $(filechk_$(1)); } > $@.tmp; \
11519 - if [ -r $@ ] && cmp -s $@ $@.tmp; then \
11520 - rm -f $@.tmp; \
11521 - else \
11522 - $(kecho) ' UPD $@'; \
11523 - mv -f $@.tmp $@; \
11524 + $(Q)set -e; \
11525 + mkdir -p $(dir $@); \
11526 + trap "rm -f $(dot-target).tmp" EXIT; \
11527 + { $(filechk_$(1)); } > $(dot-target).tmp; \
11528 + if [ ! -r $@ ] || ! cmp -s $@ $(dot-target).tmp; then \
11529 + $(kecho) ' UPD $@'; \
11530 + mv -f $(dot-target).tmp $@; \
11531 fi
11532 endef
11533
11534 diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
11535 index d4adfbe42690..bfb44b265a94 100644
11536 --- a/scripts/Kconfig.include
11537 +++ b/scripts/Kconfig.include
11538 @@ -25,7 +25,7 @@ failure = $(if-success,$(1),n,y)
11539
11540 # $(cc-option,<flag>)
11541 # Return y if the compiler supports <flag>, n otherwise
11542 -cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
11543 +cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -S -x c /dev/null -o /dev/null)
11544
11545 # $(ld-option,<flag>)
11546 # Return y if the linker supports <flag>, n otherwise
11547 diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
11548 index 3569d2dec37c..17298239e363 100644
11549 --- a/scripts/kconfig/confdata.c
11550 +++ b/scripts/kconfig/confdata.c
11551 @@ -1353,7 +1353,7 @@ bool conf_set_all_new_symbols(enum conf_def_mode mode)
11552
11553 sym_calc_value(csym);
11554 if (mode == def_random)
11555 - has_changed = randomize_choice_values(csym);
11556 + has_changed |= randomize_choice_values(csym);
11557 else {
11558 set_all_choice_values(csym);
11559 has_changed = true;
11560 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
11561 index 436379940356..408b5c0b99b1 100755
11562 --- a/scripts/link-vmlinux.sh
11563 +++ b/scripts/link-vmlinux.sh
11564 @@ -108,13 +108,13 @@ gen_btf()
11565 local bin_arch
11566
11567 if ! [ -x "$(command -v ${PAHOLE})" ]; then
11568 - info "BTF" "${1}: pahole (${PAHOLE}) is not available"
11569 + echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available"
11570 return 1
11571 fi
11572
11573 pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/')
11574 if [ "${pahole_ver}" -lt "113" ]; then
11575 - info "BTF" "${1}: pahole version $(${PAHOLE} --version) is too old, need at least v1.13"
11576 + echo >&2 "BTF: ${1}: pahole version $(${PAHOLE} --version) is too old, need at least v1.13"
11577 return 1
11578 fi
11579
11580 diff --git a/security/selinux/avc.c b/security/selinux/avc.c
11581 index 23dc888ae305..d18cb32a242a 100644
11582 --- a/security/selinux/avc.c
11583 +++ b/security/selinux/avc.c
11584 @@ -617,40 +617,37 @@ static struct avc_node *avc_insert(struct selinux_avc *avc,
11585 struct avc_node *pos, *node = NULL;
11586 int hvalue;
11587 unsigned long flag;
11588 + spinlock_t *lock;
11589 + struct hlist_head *head;
11590
11591 if (avc_latest_notif_update(avc, avd->seqno, 1))
11592 - goto out;
11593 + return NULL;
11594
11595 node = avc_alloc_node(avc);
11596 - if (node) {
11597 - struct hlist_head *head;
11598 - spinlock_t *lock;
11599 - int rc = 0;
11600 -
11601 - hvalue = avc_hash(ssid, tsid, tclass);
11602 - avc_node_populate(node, ssid, tsid, tclass, avd);
11603 - rc = avc_xperms_populate(node, xp_node);
11604 - if (rc) {
11605 - kmem_cache_free(avc_node_cachep, node);
11606 - return NULL;
11607 - }
11608 - head = &avc->avc_cache.slots[hvalue];
11609 - lock = &avc->avc_cache.slots_lock[hvalue];
11610 + if (!node)
11611 + return NULL;
11612
11613 - spin_lock_irqsave(lock, flag);
11614 - hlist_for_each_entry(pos, head, list) {
11615 - if (pos->ae.ssid == ssid &&
11616 - pos->ae.tsid == tsid &&
11617 - pos->ae.tclass == tclass) {
11618 - avc_node_replace(avc, node, pos);
11619 - goto found;
11620 - }
11621 + avc_node_populate(node, ssid, tsid, tclass, avd);
11622 + if (avc_xperms_populate(node, xp_node)) {
11623 + avc_node_kill(avc, node);
11624 + return NULL;
11625 + }
11626 +
11627 + hvalue = avc_hash(ssid, tsid, tclass);
11628 + head = &avc->avc_cache.slots[hvalue];
11629 + lock = &avc->avc_cache.slots_lock[hvalue];
11630 + spin_lock_irqsave(lock, flag);
11631 + hlist_for_each_entry(pos, head, list) {
11632 + if (pos->ae.ssid == ssid &&
11633 + pos->ae.tsid == tsid &&
11634 + pos->ae.tclass == tclass) {
11635 + avc_node_replace(avc, node, pos);
11636 + goto found;
11637 }
11638 - hlist_add_head_rcu(&node->list, head);
11639 -found:
11640 - spin_unlock_irqrestore(lock, flag);
11641 }
11642 -out:
11643 + hlist_add_head_rcu(&node->list, head);
11644 +found:
11645 + spin_unlock_irqrestore(lock, flag);
11646 return node;
11647 }
11648
11649 @@ -894,7 +891,7 @@ static int avc_update_node(struct selinux_avc *avc,
11650 if (orig->ae.xp_node) {
11651 rc = avc_xperms_populate(node, orig->ae.xp_node);
11652 if (rc) {
11653 - kmem_cache_free(avc_node_cachep, node);
11654 + avc_node_kill(avc, node);
11655 goto out_unlock;
11656 }
11657 }
11658 diff --git a/sound/core/control.c b/sound/core/control.c
11659 index 7a4d8690ce41..08ca7666e84c 100644
11660 --- a/sound/core/control.c
11661 +++ b/sound/core/control.c
11662 @@ -1430,8 +1430,9 @@ static int call_tlv_handler(struct snd_ctl_file *file, int op_flag,
11663 if (kctl->tlv.c == NULL)
11664 return -ENXIO;
11665
11666 - /* When locked, this is unavailable. */
11667 - if (vd->owner != NULL && vd->owner != file)
11668 + /* Write and command operations are not allowed for locked element. */
11669 + if (op_flag != SNDRV_CTL_TLV_OP_READ &&
11670 + vd->owner != NULL && vd->owner != file)
11671 return -EPERM;
11672
11673 return kctl->tlv.c(kctl, op_flag, size, buf);
11674 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
11675 index 90aa0f400a57..1e20e85e9b46 100644
11676 --- a/sound/pci/hda/patch_conexant.c
11677 +++ b/sound/pci/hda/patch_conexant.c
11678 @@ -922,6 +922,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
11679 SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
11680 SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
11681 SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
11682 + SND_PCI_QUIRK(0x17aa, 0x21d2, "Lenovo T420s", CXT_PINCFG_LENOVO_TP410),
11683 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
11684 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
11685 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
11686 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
11687 index 8ac805a634f4..307ca1f03676 100644
11688 --- a/sound/pci/hda/patch_hdmi.c
11689 +++ b/sound/pci/hda/patch_hdmi.c
11690 @@ -2794,9 +2794,12 @@ static int alloc_intel_hdmi(struct hda_codec *codec)
11691 /* parse and post-process for Intel codecs */
11692 static int parse_intel_hdmi(struct hda_codec *codec)
11693 {
11694 - int err;
11695 + int err, retries = 3;
11696 +
11697 + do {
11698 + err = hdmi_parse_codec(codec);
11699 + } while (err < 0 && retries--);
11700
11701 - err = hdmi_parse_codec(codec);
11702 if (err < 0) {
11703 generic_spec_free(codec);
11704 return err;
11705 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
11706 index a66d4be3516e..f162e607fc6c 100644
11707 --- a/sound/pci/hda/patch_realtek.c
11708 +++ b/sound/pci/hda/patch_realtek.c
11709 @@ -5852,6 +5852,7 @@ enum {
11710 ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
11711 ALC288_FIXUP_DELL_XPS_13,
11712 ALC288_FIXUP_DISABLE_AAMIX,
11713 + ALC292_FIXUP_DELL_E7X_AAMIX,
11714 ALC292_FIXUP_DELL_E7X,
11715 ALC292_FIXUP_DISABLE_AAMIX,
11716 ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
11717 @@ -6547,12 +6548,19 @@ static const struct hda_fixup alc269_fixups[] = {
11718 .chained = true,
11719 .chain_id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE
11720 },
11721 - [ALC292_FIXUP_DELL_E7X] = {
11722 + [ALC292_FIXUP_DELL_E7X_AAMIX] = {
11723 .type = HDA_FIXUP_FUNC,
11724 .v.func = alc_fixup_dell_xps13,
11725 .chained = true,
11726 .chain_id = ALC292_FIXUP_DISABLE_AAMIX
11727 },
11728 + [ALC292_FIXUP_DELL_E7X] = {
11729 + .type = HDA_FIXUP_FUNC,
11730 + .v.func = snd_hda_gen_fixup_micmute_led,
11731 + /* micmute fixup must be applied at last */
11732 + .chained_before = true,
11733 + .chain_id = ALC292_FIXUP_DELL_E7X_AAMIX,
11734 + },
11735 [ALC298_FIXUP_ALIENWARE_MIC_NO_PRESENCE] = {
11736 .type = HDA_FIXUP_PINS,
11737 .v.pins = (const struct hda_pintbl[]) {
11738 diff --git a/sound/sh/aica.c b/sound/sh/aica.c
11739 index 52e9cfb4f819..8421b2f9c9f3 100644
11740 --- a/sound/sh/aica.c
11741 +++ b/sound/sh/aica.c
11742 @@ -101,10 +101,10 @@ static void spu_memset(u32 toi, u32 what, int length)
11743 }
11744
11745 /* spu_memload - write to SPU address space */
11746 -static void spu_memload(u32 toi, void *from, int length)
11747 +static void spu_memload(u32 toi, const void *from, int length)
11748 {
11749 unsigned long flags;
11750 - u32 *froml = from;
11751 + const u32 *froml = from;
11752 u32 __iomem *to = (u32 __iomem *) (SPU_MEMORY_BASE + toi);
11753 int i;
11754 u32 val;
11755 diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c
11756 index ed877a138965..7c46494466ff 100644
11757 --- a/sound/sh/sh_dac_audio.c
11758 +++ b/sound/sh/sh_dac_audio.c
11759 @@ -175,7 +175,6 @@ static int snd_sh_dac_pcm_copy(struct snd_pcm_substream *substream,
11760 {
11761 /* channel is not used (interleaved data) */
11762 struct snd_sh_dac *chip = snd_pcm_substream_chip(substream);
11763 - struct snd_pcm_runtime *runtime = substream->runtime;
11764
11765 if (copy_from_user_toio(chip->data_buffer + pos, src, count))
11766 return -EFAULT;
11767 @@ -195,7 +194,6 @@ static int snd_sh_dac_pcm_copy_kernel(struct snd_pcm_substream *substream,
11768 {
11769 /* channel is not used (interleaved data) */
11770 struct snd_sh_dac *chip = snd_pcm_substream_chip(substream);
11771 - struct snd_pcm_runtime *runtime = substream->runtime;
11772
11773 memcpy_toio(chip->data_buffer + pos, src, count);
11774 chip->buffer_end = chip->data_buffer + pos + count;
11775 @@ -214,7 +212,6 @@ static int snd_sh_dac_pcm_silence(struct snd_pcm_substream *substream,
11776 {
11777 /* channel is not used (interleaved data) */
11778 struct snd_sh_dac *chip = snd_pcm_substream_chip(substream);
11779 - struct snd_pcm_runtime *runtime = substream->runtime;
11780
11781 memset_io(chip->data_buffer + pos, 0, count);
11782 chip->buffer_end = chip->data_buffer + pos + count;
11783 diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
11784 index f118c229ed82..d1dc8e6366dc 100644
11785 --- a/sound/soc/atmel/Kconfig
11786 +++ b/sound/soc/atmel/Kconfig
11787 @@ -19,6 +19,8 @@ config SND_ATMEL_SOC_DMA
11788
11789 config SND_ATMEL_SOC_SSC
11790 tristate
11791 + select SND_ATMEL_SOC_DMA
11792 + select SND_ATMEL_SOC_PDC
11793
11794 config SND_ATMEL_SOC_SSC_PDC
11795 tristate "SoC PCM DAI support for AT91 SSC controller using PDC"
11796 diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
11797 index 4f6e58c3954a..06b7d6c6c9a0 100644
11798 --- a/sound/soc/intel/boards/sof_rt5682.c
11799 +++ b/sound/soc/intel/boards/sof_rt5682.c
11800 @@ -34,6 +34,10 @@
11801 #define SOF_RT5682_SSP_AMP(quirk) \
11802 (((quirk) << SOF_RT5682_SSP_AMP_SHIFT) & SOF_RT5682_SSP_AMP_MASK)
11803 #define SOF_RT5682_MCLK_BYTCHT_EN BIT(9)
11804 +#define SOF_RT5682_NUM_HDMIDEV_SHIFT 10
11805 +#define SOF_RT5682_NUM_HDMIDEV_MASK (GENMASK(12, 10))
11806 +#define SOF_RT5682_NUM_HDMIDEV(quirk) \
11807 + ((quirk << SOF_RT5682_NUM_HDMIDEV_SHIFT) & SOF_RT5682_NUM_HDMIDEV_MASK)
11808
11809 /* Default: MCLK on, MCLK 19.2M, SSP0 */
11810 static unsigned long sof_rt5682_quirk = SOF_RT5682_MCLK_EN |
11811 @@ -585,6 +589,19 @@ static int sof_audio_probe(struct platform_device *pdev)
11812 if (!ctx)
11813 return -ENOMEM;
11814
11815 + if (pdev->id_entry && pdev->id_entry->driver_data)
11816 + sof_rt5682_quirk = (unsigned long)pdev->id_entry->driver_data;
11817 +
11818 + dmi_check_system(sof_rt5682_quirk_table);
11819 +
11820 + mach = (&pdev->dev)->platform_data;
11821 +
11822 + /* A speaker amp might not be present when the quirk claims one is.
11823 + * Detect this via whether the machine driver match includes quirk_data.
11824 + */
11825 + if ((sof_rt5682_quirk & SOF_SPEAKER_AMP_PRESENT) && !mach->quirk_data)
11826 + sof_rt5682_quirk &= ~SOF_SPEAKER_AMP_PRESENT;
11827 +
11828 if (soc_intel_is_byt() || soc_intel_is_cht()) {
11829 is_legacy_cpu = 1;
11830 dmic_be_num = 0;
11831 @@ -595,11 +612,13 @@ static int sof_audio_probe(struct platform_device *pdev)
11832 SOF_RT5682_SSP_CODEC(2);
11833 } else {
11834 dmic_be_num = 2;
11835 - hdmi_num = 3;
11836 + hdmi_num = (sof_rt5682_quirk & SOF_RT5682_NUM_HDMIDEV_MASK) >>
11837 + SOF_RT5682_NUM_HDMIDEV_SHIFT;
11838 + /* default number of HDMI DAI's */
11839 + if (!hdmi_num)
11840 + hdmi_num = 3;
11841 }
11842
11843 - dmi_check_system(sof_rt5682_quirk_table);
11844 -
11845 /* need to get main clock from pmc */
11846 if (sof_rt5682_quirk & SOF_RT5682_MCLK_BYTCHT_EN) {
11847 ctx->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
11848 @@ -643,7 +662,6 @@ static int sof_audio_probe(struct platform_device *pdev)
11849 INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
11850
11851 sof_audio_card_rt5682.dev = &pdev->dev;
11852 - mach = (&pdev->dev)->platform_data;
11853
11854 /* set platform name for each dailink */
11855 ret = snd_soc_fixup_dai_links_platform_name(&sof_audio_card_rt5682,
11856 @@ -672,6 +690,21 @@ static int sof_rt5682_remove(struct platform_device *pdev)
11857 return 0;
11858 }
11859
11860 +static const struct platform_device_id board_ids[] = {
11861 + {
11862 + .name = "sof_rt5682",
11863 + },
11864 + {
11865 + .name = "tgl_max98357a_rt5682",
11866 + .driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
11867 + SOF_RT5682_SSP_CODEC(0) |
11868 + SOF_SPEAKER_AMP_PRESENT |
11869 + SOF_RT5682_SSP_AMP(1) |
11870 + SOF_RT5682_NUM_HDMIDEV(4)),
11871 + },
11872 + { }
11873 +};
11874 +
11875 static struct platform_driver sof_audio = {
11876 .probe = sof_audio_probe,
11877 .remove = sof_rt5682_remove,
11878 @@ -679,6 +712,7 @@ static struct platform_driver sof_audio = {
11879 .name = "sof_rt5682",
11880 .pm = &snd_soc_pm_ops,
11881 },
11882 + .id_table = board_ids,
11883 };
11884 module_platform_driver(sof_audio)
11885
11886 @@ -688,3 +722,4 @@ MODULE_AUTHOR("Bard Liao <bard.liao@intel.com>");
11887 MODULE_AUTHOR("Sathya Prakash M R <sathya.prakash.m.r@intel.com>");
11888 MODULE_LICENSE("GPL v2");
11889 MODULE_ALIAS("platform:sof_rt5682");
11890 +MODULE_ALIAS("platform:tgl_max98357a_rt5682");
11891 diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
11892 index fef01e1dd15c..d00203ef8305 100644
11893 --- a/sound/soc/soc-topology.c
11894 +++ b/sound/soc/soc-topology.c
11895 @@ -604,9 +604,11 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
11896 ext_ops = tplg->bytes_ext_ops;
11897 num_ops = tplg->bytes_ext_ops_count;
11898 for (i = 0; i < num_ops; i++) {
11899 - if (!sbe->put && ext_ops[i].id == be->ext_ops.put)
11900 + if (!sbe->put &&
11901 + ext_ops[i].id == le32_to_cpu(be->ext_ops.put))
11902 sbe->put = ext_ops[i].put;
11903 - if (!sbe->get && ext_ops[i].id == be->ext_ops.get)
11904 + if (!sbe->get &&
11905 + ext_ops[i].id == le32_to_cpu(be->ext_ops.get))
11906 sbe->get = ext_ops[i].get;
11907 }
11908
11909 @@ -621,11 +623,11 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
11910 num_ops = tplg->io_ops_count;
11911 for (i = 0; i < num_ops; i++) {
11912
11913 - if (k->put == NULL && ops[i].id == hdr->ops.put)
11914 + if (k->put == NULL && ops[i].id == le32_to_cpu(hdr->ops.put))
11915 k->put = ops[i].put;
11916 - if (k->get == NULL && ops[i].id == hdr->ops.get)
11917 + if (k->get == NULL && ops[i].id == le32_to_cpu(hdr->ops.get))
11918 k->get = ops[i].get;
11919 - if (k->info == NULL && ops[i].id == hdr->ops.info)
11920 + if (k->info == NULL && ops[i].id == le32_to_cpu(hdr->ops.info))
11921 k->info = ops[i].info;
11922 }
11923
11924 @@ -638,11 +640,11 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
11925 num_ops = ARRAY_SIZE(io_ops);
11926 for (i = 0; i < num_ops; i++) {
11927
11928 - if (k->put == NULL && ops[i].id == hdr->ops.put)
11929 + if (k->put == NULL && ops[i].id == le32_to_cpu(hdr->ops.put))
11930 k->put = ops[i].put;
11931 - if (k->get == NULL && ops[i].id == hdr->ops.get)
11932 + if (k->get == NULL && ops[i].id == le32_to_cpu(hdr->ops.get))
11933 k->get = ops[i].get;
11934 - if (k->info == NULL && ops[i].id == hdr->ops.info)
11935 + if (k->info == NULL && ops[i].id == le32_to_cpu(hdr->ops.info))
11936 k->info = ops[i].info;
11937 }
11938
11939 @@ -931,7 +933,7 @@ static int soc_tplg_denum_create_texts(struct soc_enum *se,
11940 if (se->dobj.control.dtexts == NULL)
11941 return -ENOMEM;
11942
11943 - for (i = 0; i < ec->items; i++) {
11944 + for (i = 0; i < le32_to_cpu(ec->items); i++) {
11945
11946 if (strnlen(ec->texts[i], SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
11947 SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
11948 @@ -1325,7 +1327,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
11949 if (kc[i].name == NULL)
11950 goto err_sm;
11951 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
11952 - kc[i].access = mc->hdr.access;
11953 + kc[i].access = le32_to_cpu(mc->hdr.access);
11954
11955 /* we only support FL/FR channel mapping atm */
11956 sm->reg = tplc_chan_get_reg(tplg, mc->channel,
11957 @@ -1337,10 +1339,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
11958 sm->rshift = tplc_chan_get_shift(tplg, mc->channel,
11959 SNDRV_CHMAP_FR);
11960
11961 - sm->max = mc->max;
11962 - sm->min = mc->min;
11963 - sm->invert = mc->invert;
11964 - sm->platform_max = mc->platform_max;
11965 + sm->max = le32_to_cpu(mc->max);
11966 + sm->min = le32_to_cpu(mc->min);
11967 + sm->invert = le32_to_cpu(mc->invert);
11968 + sm->platform_max = le32_to_cpu(mc->platform_max);
11969 sm->dobj.index = tplg->index;
11970 INIT_LIST_HEAD(&sm->dobj.list);
11971
11972 @@ -1401,7 +1403,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
11973 goto err_se;
11974
11975 tplg->pos += (sizeof(struct snd_soc_tplg_enum_control) +
11976 - ec->priv.size);
11977 + le32_to_cpu(ec->priv.size));
11978
11979 dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
11980 ec->hdr.name);
11981 @@ -1411,7 +1413,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
11982 if (kc[i].name == NULL)
11983 goto err_se;
11984 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
11985 - kc[i].access = ec->hdr.access;
11986 + kc[i].access = le32_to_cpu(ec->hdr.access);
11987
11988 /* we only support FL/FR channel mapping atm */
11989 se->reg = tplc_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
11990 @@ -1420,8 +1422,8 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
11991 se->shift_r = tplc_chan_get_shift(tplg, ec->channel,
11992 SNDRV_CHMAP_FR);
11993
11994 - se->items = ec->items;
11995 - se->mask = ec->mask;
11996 + se->items = le32_to_cpu(ec->items);
11997 + se->mask = le32_to_cpu(ec->mask);
11998 se->dobj.index = tplg->index;
11999
12000 switch (le32_to_cpu(ec->hdr.ops.info)) {
12001 @@ -1523,9 +1525,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create(
12002 if (kc[i].name == NULL)
12003 goto err_sbe;
12004 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
12005 - kc[i].access = be->hdr.access;
12006 + kc[i].access = le32_to_cpu(be->hdr.access);
12007
12008 - sbe->max = be->max;
12009 + sbe->max = le32_to_cpu(be->max);
12010 INIT_LIST_HEAD(&sbe->dobj.list);
12011
12012 /* map standard io handlers and check for external handlers */
12013 diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
12014 index 896d21984b73..1923b0c36bce 100644
12015 --- a/sound/soc/sof/intel/hda-dai.c
12016 +++ b/sound/soc/sof/intel/hda-dai.c
12017 @@ -261,14 +261,11 @@ static int hda_link_pcm_prepare(struct snd_pcm_substream *substream,
12018 {
12019 struct hdac_ext_stream *link_dev =
12020 snd_soc_dai_get_dma_data(dai, substream);
12021 - struct sof_intel_hda_stream *hda_stream;
12022 struct snd_sof_dev *sdev =
12023 snd_soc_component_get_drvdata(dai->component);
12024 struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
12025 int stream = substream->stream;
12026
12027 - hda_stream = hstream_to_sof_hda_stream(link_dev);
12028 -
12029 if (link_dev->link_prepared)
12030 return 0;
12031
12032 diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
12033 index 23e430d3e056..4be53ef2eab6 100644
12034 --- a/sound/soc/sof/intel/hda.h
12035 +++ b/sound/soc/sof/intel/hda.h
12036 @@ -336,7 +336,7 @@
12037
12038 /* Number of DAIs */
12039 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
12040 -#define SOF_SKL_NUM_DAIS 14
12041 +#define SOF_SKL_NUM_DAIS 15
12042 #else
12043 #define SOF_SKL_NUM_DAIS 8
12044 #endif
12045 diff --git a/sound/usb/card.c b/sound/usb/card.c
12046 index db91dc76cc91..54f9ce38471e 100644
12047 --- a/sound/usb/card.c
12048 +++ b/sound/usb/card.c
12049 @@ -597,6 +597,10 @@ static int usb_audio_probe(struct usb_interface *intf,
12050 }
12051 }
12052 if (! chip) {
12053 + err = snd_usb_apply_boot_quirk_once(dev, intf, quirk, id);
12054 + if (err < 0)
12055 + goto __error;
12056 +
12057 /* it's a fresh one.
12058 * now look for an empty slot and create a new card instance
12059 */
12060 diff --git a/sound/usb/format.c b/sound/usb/format.c
12061 index 25668ba5e68e..f4f0cf3deaf0 100644
12062 --- a/sound/usb/format.c
12063 +++ b/sound/usb/format.c
12064 @@ -296,6 +296,9 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
12065 case USB_ID(0x0E41, 0x4242): /* Line6 Helix Rack */
12066 case USB_ID(0x0E41, 0x4244): /* Line6 Helix LT */
12067 case USB_ID(0x0E41, 0x4246): /* Line6 HX-Stomp */
12068 + case USB_ID(0x0E41, 0x4248): /* Line6 Helix >= fw 2.82 */
12069 + case USB_ID(0x0E41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
12070 + case USB_ID(0x0E41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
12071 /* supported rates: 48Khz */
12072 kfree(fp->rate_table);
12073 fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
12074 diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
12075 index fa24bd491cf6..ad8f38380aa3 100644
12076 --- a/sound/usb/pcm.c
12077 +++ b/sound/usb/pcm.c
12078 @@ -348,6 +348,10 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
12079 ep = 0x84;
12080 ifnum = 0;
12081 goto add_sync_ep_from_ifnum;
12082 + case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
12083 + ep = 0x81;
12084 + ifnum = 2;
12085 + goto add_sync_ep_from_ifnum;
12086 case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
12087 /* BOSS Katana amplifiers do not need quirks */
12088 return 0;
12089 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
12090 index 1ed25b1d2a6a..7448ab07bd36 100644
12091 --- a/sound/usb/quirks.c
12092 +++ b/sound/usb/quirks.c
12093 @@ -1113,6 +1113,31 @@ free_buf:
12094 return err;
12095 }
12096
12097 +static int snd_usb_motu_m_series_boot_quirk(struct usb_device *dev)
12098 +{
12099 + int ret;
12100 +
12101 + if (snd_usb_pipe_sanity_check(dev, usb_sndctrlpipe(dev, 0)))
12102 + return -EINVAL;
12103 + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
12104 + 1, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
12105 + 0x0, 0, NULL, 0, 1000);
12106 +
12107 + if (ret < 0)
12108 + return ret;
12109 +
12110 + msleep(2000);
12111 +
12112 + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
12113 + 1, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
12114 + 0x20, 0, NULL, 0, 1000);
12115 +
12116 + if (ret < 0)
12117 + return ret;
12118 +
12119 + return 0;
12120 +}
12121 +
12122 /*
12123 * Setup quirks
12124 */
12125 @@ -1297,6 +1322,19 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
12126 return 0;
12127 }
12128
12129 +int snd_usb_apply_boot_quirk_once(struct usb_device *dev,
12130 + struct usb_interface *intf,
12131 + const struct snd_usb_audio_quirk *quirk,
12132 + unsigned int id)
12133 +{
12134 + switch (id) {
12135 + case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
12136 + return snd_usb_motu_m_series_boot_quirk(dev);
12137 + }
12138 +
12139 + return 0;
12140 +}
12141 +
12142 /*
12143 * check if the device uses big-endian samples
12144 */
12145 diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h
12146 index a80e0ddd0736..df0355843a4c 100644
12147 --- a/sound/usb/quirks.h
12148 +++ b/sound/usb/quirks.h
12149 @@ -20,6 +20,11 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
12150 const struct snd_usb_audio_quirk *quirk,
12151 unsigned int usb_id);
12152
12153 +int snd_usb_apply_boot_quirk_once(struct usb_device *dev,
12154 + struct usb_interface *intf,
12155 + const struct snd_usb_audio_quirk *quirk,
12156 + unsigned int usb_id);
12157 +
12158 void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
12159 struct audioformat *fmt);
12160
12161 diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
12162 index d1caa8ed9e68..9985fc139487 100644
12163 --- a/sound/usb/usx2y/usX2Yhwdep.c
12164 +++ b/sound/usb/usx2y/usX2Yhwdep.c
12165 @@ -119,7 +119,7 @@ static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw,
12166 info->num_dsps = 2; // 0: Prepad Data, 1: FPGA Code
12167 if (us428->chip_status & USX2Y_STAT_CHIP_INIT)
12168 info->chip_ready = 1;
12169 - info->version = USX2Y_DRIVER_VERSION;
12170 + info->version = USX2Y_DRIVER_VERSION;
12171 return 0;
12172 }
12173
12174 diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
12175 index 0a0e9112f284..5cb9f009f2be 100644
12176 --- a/tools/arch/x86/lib/x86-opcode-map.txt
12177 +++ b/tools/arch/x86/lib/x86-opcode-map.txt
12178 @@ -909,7 +909,7 @@ EndTable
12179
12180 GrpTable: Grp3_2
12181 0: TEST Ev,Iz
12182 -1:
12183 +1: TEST Ev,Iz
12184 2: NOT Ev
12185 3: NEG Ev
12186 4: MUL rAX,Ev
12187 diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
12188 index 1ef45e55039e..2f017caa678d 100644
12189 --- a/tools/bpf/bpftool/cgroup.c
12190 +++ b/tools/bpf/bpftool/cgroup.c
12191 @@ -117,6 +117,25 @@ static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
12192 return prog_cnt;
12193 }
12194
12195 +static int cgroup_has_attached_progs(int cgroup_fd)
12196 +{
12197 + enum bpf_attach_type type;
12198 + bool no_prog = true;
12199 +
12200 + for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
12201 + int count = count_attached_bpf_progs(cgroup_fd, type);
12202 +
12203 + if (count < 0 && errno != EINVAL)
12204 + return -1;
12205 +
12206 + if (count > 0) {
12207 + no_prog = false;
12208 + break;
12209 + }
12210 + }
12211 +
12212 + return no_prog ? 0 : 1;
12213 +}
12214 static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
12215 int level)
12216 {
12217 @@ -161,6 +180,7 @@ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
12218 static int do_show(int argc, char **argv)
12219 {
12220 enum bpf_attach_type type;
12221 + int has_attached_progs;
12222 const char *path;
12223 int cgroup_fd;
12224 int ret = -1;
12225 @@ -192,6 +212,16 @@ static int do_show(int argc, char **argv)
12226 goto exit;
12227 }
12228
12229 + has_attached_progs = cgroup_has_attached_progs(cgroup_fd);
12230 + if (has_attached_progs < 0) {
12231 + p_err("can't query bpf programs attached to %s: %s",
12232 + path, strerror(errno));
12233 + goto exit_cgroup;
12234 + } else if (!has_attached_progs) {
12235 + ret = 0;
12236 + goto exit_cgroup;
12237 + }
12238 +
12239 if (json_output)
12240 jsonw_start_array(json_wtr);
12241 else
12242 @@ -212,6 +242,7 @@ static int do_show(int argc, char **argv)
12243 if (json_output)
12244 jsonw_end_array(json_wtr);
12245
12246 +exit_cgroup:
12247 close(cgroup_fd);
12248 exit:
12249 return ret;
12250 @@ -228,7 +259,7 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
12251 int typeflag, struct FTW *ftw)
12252 {
12253 enum bpf_attach_type type;
12254 - bool skip = true;
12255 + int has_attached_progs;
12256 int cgroup_fd;
12257
12258 if (typeflag != FTW_D)
12259 @@ -240,22 +271,13 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
12260 return SHOW_TREE_FN_ERR;
12261 }
12262
12263 - for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
12264 - int count = count_attached_bpf_progs(cgroup_fd, type);
12265 -
12266 - if (count < 0 && errno != EINVAL) {
12267 - p_err("can't query bpf programs attached to %s: %s",
12268 - fpath, strerror(errno));
12269 - close(cgroup_fd);
12270 - return SHOW_TREE_FN_ERR;
12271 - }
12272 - if (count > 0) {
12273 - skip = false;
12274 - break;
12275 - }
12276 - }
12277 -
12278 - if (skip) {
12279 + has_attached_progs = cgroup_has_attached_progs(cgroup_fd);
12280 + if (has_attached_progs < 0) {
12281 + p_err("can't query bpf programs attached to %s: %s",
12282 + fpath, strerror(errno));
12283 + close(cgroup_fd);
12284 + return SHOW_TREE_FN_ERR;
12285 + } else if (!has_attached_progs) {
12286 close(cgroup_fd);
12287 return 0;
12288 }
12289 diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
12290 index 7aba8243a0e7..bd021a0eeef8 100644
12291 --- a/tools/lib/api/fs/fs.c
12292 +++ b/tools/lib/api/fs/fs.c
12293 @@ -210,6 +210,7 @@ static bool fs__env_override(struct fs *fs)
12294 size_t name_len = strlen(fs->name);
12295 /* name + "_PATH" + '\0' */
12296 char upper_name[name_len + 5 + 1];
12297 +
12298 memcpy(upper_name, fs->name, name_len);
12299 mem_toupper(upper_name, name_len);
12300 strcpy(&upper_name[name_len], "_PATH");
12301 @@ -219,7 +220,8 @@ static bool fs__env_override(struct fs *fs)
12302 return false;
12303
12304 fs->found = true;
12305 - strncpy(fs->path, override_path, sizeof(fs->path));
12306 + strncpy(fs->path, override_path, sizeof(fs->path) - 1);
12307 + fs->path[sizeof(fs->path) - 1] = '\0';
12308 return true;
12309 }
12310
12311 diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
12312 index d2a19b0bc05a..ee08aeff30a1 100644
12313 --- a/tools/objtool/Makefile
12314 +++ b/tools/objtool/Makefile
12315 @@ -2,10 +2,6 @@
12316 include ../scripts/Makefile.include
12317 include ../scripts/Makefile.arch
12318
12319 -ifeq ($(ARCH),x86_64)
12320 -ARCH := x86
12321 -endif
12322 -
12323 # always use the host compiler
12324 HOSTAR ?= ar
12325 HOSTCC ?= gcc
12326 @@ -33,7 +29,7 @@ all: $(OBJTOOL)
12327
12328 INCLUDES := -I$(srctree)/tools/include \
12329 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
12330 - -I$(srctree)/tools/arch/$(ARCH)/include
12331 + -I$(srctree)/tools/arch/$(SRCARCH)/include
12332 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
12333 CFLAGS := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
12334 LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
12335 diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c
12336 index 7566c13eb51a..079d0f5a2909 100644
12337 --- a/tools/testing/selftests/bpf/test_select_reuseport.c
12338 +++ b/tools/testing/selftests/bpf/test_select_reuseport.c
12339 @@ -30,7 +30,7 @@
12340 #define REUSEPORT_ARRAY_SIZE 32
12341
12342 static int result_map, tmp_index_ovr_map, linum_map, data_check_map;
12343 -static enum result expected_results[NR_RESULTS];
12344 +static __u32 expected_results[NR_RESULTS];
12345 static int sk_fds[REUSEPORT_ARRAY_SIZE];
12346 static int reuseport_array, outer_map;
12347 static int select_by_skb_data_prog;
12348 @@ -662,7 +662,19 @@ static void setup_per_test(int type, unsigned short family, bool inany)
12349
12350 static void cleanup_per_test(void)
12351 {
12352 - int i, err;
12353 + int i, err, zero = 0;
12354 +
12355 + memset(expected_results, 0, sizeof(expected_results));
12356 +
12357 + for (i = 0; i < NR_RESULTS; i++) {
12358 + err = bpf_map_update_elem(result_map, &i, &zero, BPF_ANY);
12359 + RET_IF(err, "reset elem in result_map",
12360 + "i:%u err:%d errno:%d\n", i, err, errno);
12361 + }
12362 +
12363 + err = bpf_map_update_elem(linum_map, &zero, &zero, BPF_ANY);
12364 + RET_IF(err, "reset line number in linum_map", "err:%d errno:%d\n",
12365 + err, errno);
12366
12367 for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++)
12368 close(sk_fds[i]);
12369 diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
12370 index a8d20cbb711c..e84d901f8567 100644
12371 --- a/tools/testing/selftests/kselftest/runner.sh
12372 +++ b/tools/testing/selftests/kselftest/runner.sh
12373 @@ -91,7 +91,7 @@ run_one()
12374 run_many()
12375 {
12376 echo "TAP version 13"
12377 - DIR=$(basename "$PWD")
12378 + DIR="${PWD#${BASE_DIR}/}"
12379 test_num=0
12380 total=$(echo "$@" | wc -w)
12381 echo "1..$total"
12382 diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
12383 index 34df4c8882af..383bac05ac32 100644
12384 --- a/tools/testing/selftests/net/so_txtime.c
12385 +++ b/tools/testing/selftests/net/so_txtime.c
12386 @@ -12,7 +12,11 @@
12387 #include <arpa/inet.h>
12388 #include <error.h>
12389 #include <errno.h>
12390 +#include <inttypes.h>
12391 #include <linux/net_tstamp.h>
12392 +#include <linux/errqueue.h>
12393 +#include <linux/ipv6.h>
12394 +#include <linux/tcp.h>
12395 #include <stdbool.h>
12396 #include <stdlib.h>
12397 #include <stdio.h>
12398 @@ -28,7 +32,7 @@ static int cfg_clockid = CLOCK_TAI;
12399 static bool cfg_do_ipv4;
12400 static bool cfg_do_ipv6;
12401 static uint16_t cfg_port = 8000;
12402 -static int cfg_variance_us = 2000;
12403 +static int cfg_variance_us = 4000;
12404
12405 static uint64_t glob_tstart;
12406
12407 @@ -43,6 +47,9 @@ static struct timed_send cfg_in[MAX_NUM_PKT];
12408 static struct timed_send cfg_out[MAX_NUM_PKT];
12409 static int cfg_num_pkt;
12410
12411 +static int cfg_errq_level;
12412 +static int cfg_errq_type;
12413 +
12414 static uint64_t gettime_ns(void)
12415 {
12416 struct timespec ts;
12417 @@ -90,13 +97,15 @@ static void do_send_one(int fdt, struct timed_send *ts)
12418
12419 }
12420
12421 -static void do_recv_one(int fdr, struct timed_send *ts)
12422 +static bool do_recv_one(int fdr, struct timed_send *ts)
12423 {
12424 int64_t tstop, texpect;
12425 char rbuf[2];
12426 int ret;
12427
12428 ret = recv(fdr, rbuf, sizeof(rbuf), 0);
12429 + if (ret == -1 && errno == EAGAIN)
12430 + return true;
12431 if (ret == -1)
12432 error(1, errno, "read");
12433 if (ret != 1)
12434 @@ -113,6 +122,8 @@ static void do_recv_one(int fdr, struct timed_send *ts)
12435
12436 if (labs(tstop - texpect) > cfg_variance_us)
12437 error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
12438 +
12439 + return false;
12440 }
12441
12442 static void do_recv_verify_empty(int fdr)
12443 @@ -125,12 +136,70 @@ static void do_recv_verify_empty(int fdr)
12444 error(1, 0, "recv: not empty as expected (%d, %d)", ret, errno);
12445 }
12446
12447 +static void do_recv_errqueue_timeout(int fdt)
12448 +{
12449 + char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
12450 + CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
12451 + char data[sizeof(struct ipv6hdr) +
12452 + sizeof(struct tcphdr) + 1];
12453 + struct sock_extended_err *err;
12454 + struct msghdr msg = {0};
12455 + struct iovec iov = {0};
12456 + struct cmsghdr *cm;
12457 + int64_t tstamp = 0;
12458 + int ret;
12459 +
12460 + iov.iov_base = data;
12461 + iov.iov_len = sizeof(data);
12462 +
12463 + msg.msg_iov = &iov;
12464 + msg.msg_iovlen = 1;
12465 +
12466 + msg.msg_control = control;
12467 + msg.msg_controllen = sizeof(control);
12468 +
12469 + while (1) {
12470 + ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
12471 + if (ret == -1 && errno == EAGAIN)
12472 + break;
12473 + if (ret == -1)
12474 + error(1, errno, "errqueue");
12475 + if (msg.msg_flags != MSG_ERRQUEUE)
12476 + error(1, 0, "errqueue: flags 0x%x\n", msg.msg_flags);
12477 +
12478 + cm = CMSG_FIRSTHDR(&msg);
12479 + if (cm->cmsg_level != cfg_errq_level ||
12480 + cm->cmsg_type != cfg_errq_type)
12481 + error(1, 0, "errqueue: type 0x%x.0x%x\n",
12482 + cm->cmsg_level, cm->cmsg_type);
12483 +
12484 + err = (struct sock_extended_err *)CMSG_DATA(cm);
12485 + if (err->ee_origin != SO_EE_ORIGIN_TXTIME)
12486 + error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin);
12487 + if (err->ee_code != ECANCELED)
12488 + error(1, 0, "errqueue: code 0x%x\n", err->ee_code);
12489 +
12490 + tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info;
12491 + tstamp -= (int64_t) glob_tstart;
12492 + tstamp /= 1000 * 1000;
12493 + fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n",
12494 + data[ret - 1], tstamp);
12495 +
12496 + msg.msg_flags = 0;
12497 + msg.msg_controllen = sizeof(control);
12498 + }
12499 +
12500 + error(1, 0, "recv: timeout");
12501 +}
12502 +
12503 static void setsockopt_txtime(int fd)
12504 {
12505 struct sock_txtime so_txtime_val = { .clockid = cfg_clockid };
12506 struct sock_txtime so_txtime_val_read = { 0 };
12507 socklen_t vallen = sizeof(so_txtime_val);
12508
12509 + so_txtime_val.flags = SOF_TXTIME_REPORT_ERRORS;
12510 +
12511 if (setsockopt(fd, SOL_SOCKET, SO_TXTIME,
12512 &so_txtime_val, sizeof(so_txtime_val)))
12513 error(1, errno, "setsockopt txtime");
12514 @@ -194,7 +263,8 @@ static void do_test(struct sockaddr *addr, socklen_t alen)
12515 for (i = 0; i < cfg_num_pkt; i++)
12516 do_send_one(fdt, &cfg_in[i]);
12517 for (i = 0; i < cfg_num_pkt; i++)
12518 - do_recv_one(fdr, &cfg_out[i]);
12519 + if (do_recv_one(fdr, &cfg_out[i]))
12520 + do_recv_errqueue_timeout(fdt);
12521
12522 do_recv_verify_empty(fdr);
12523
12524 @@ -280,6 +350,10 @@ int main(int argc, char **argv)
12525 addr6.sin6_family = AF_INET6;
12526 addr6.sin6_port = htons(cfg_port);
12527 addr6.sin6_addr = in6addr_loopback;
12528 +
12529 + cfg_errq_level = SOL_IPV6;
12530 + cfg_errq_type = IPV6_RECVERR;
12531 +
12532 do_test((void *)&addr6, sizeof(addr6));
12533 }
12534
12535 @@ -289,6 +363,10 @@ int main(int argc, char **argv)
12536 addr4.sin_family = AF_INET;
12537 addr4.sin_port = htons(cfg_port);
12538 addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
12539 +
12540 + cfg_errq_level = SOL_IP;
12541 + cfg_errq_type = IP_RECVERR;
12542 +
12543 do_test((void *)&addr4, sizeof(addr4));
12544 }
12545
12546 diff --git a/tools/testing/selftests/net/so_txtime.sh b/tools/testing/selftests/net/so_txtime.sh
12547 index 5aa519328a5b..3f7800eaecb1 100755
12548 --- a/tools/testing/selftests/net/so_txtime.sh
12549 +++ b/tools/testing/selftests/net/so_txtime.sh
12550 @@ -5,7 +5,12 @@
12551
12552 # Run in network namespace
12553 if [[ $# -eq 0 ]]; then
12554 - ./in_netns.sh $0 __subprocess
12555 + if ! ./in_netns.sh $0 __subprocess; then
12556 + # test is time sensitive, can be flaky
12557 + echo "test failed: retry once"
12558 + ./in_netns.sh $0 __subprocess
12559 + fi
12560 +
12561 exit $?
12562 fi
12563
12564 @@ -18,7 +23,7 @@ tc qdisc add dev lo root fq
12565 ./so_txtime -4 -6 -c mono a,10,b,20 a,10,b,20
12566 ./so_txtime -4 -6 -c mono a,20,b,10 b,20,a,20
12567
12568 -if tc qdisc replace dev lo root etf clockid CLOCK_TAI delta 200000; then
12569 +if tc qdisc replace dev lo root etf clockid CLOCK_TAI delta 400000; then
12570 ! ./so_txtime -4 -6 -c tai a,-1 a,-1
12571 ! ./so_txtime -4 -6 -c tai a,0 a,0
12572 ./so_txtime -4 -6 -c tai a,10 a,10
12573 diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
12574 index 26112ab5cdf4..f52ed92b53e7 100755
12575 --- a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
12576 +++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
12577 @@ -53,9 +53,13 @@ eeh_one_dev() {
12578 # is a no-op.
12579 echo $dev >/sys/kernel/debug/powerpc/eeh_dev_check
12580
12581 - # Enforce a 30s timeout for recovery. Even the IPR, which is infamously
12582 - # slow to reset, should recover within 30s.
12583 - max_wait=30
12584 + # Default to a 60s timeout when waiting for a device to recover. This
12585 + # is an arbitrary default which can be overridden by setting the
12586 + # EEH_MAX_WAIT environmental variable when required.
12587 +
12588 + # The current record holder for longest recovery time is:
12589 + # "Adaptec Series 8 12G SAS/PCIe 3" at 39 seconds
12590 + max_wait=${EEH_MAX_WAIT:=60}
12591
12592 for i in `seq 0 ${max_wait}` ; do
12593 if pe_ok $dev ; then
12594 diff --git a/tools/testing/selftests/size/get_size.c b/tools/testing/selftests/size/get_size.c
12595 index 2ad45b944355..2980b1a63366 100644
12596 --- a/tools/testing/selftests/size/get_size.c
12597 +++ b/tools/testing/selftests/size/get_size.c
12598 @@ -11,23 +11,35 @@
12599 * own execution. It also attempts to have as few dependencies
12600 * on kernel features as possible.
12601 *
12602 - * It should be statically linked, with startup libs avoided.
12603 - * It uses no library calls, and only the following 3 syscalls:
12604 + * It should be statically linked, with startup libs avoided. It uses
12605 + * no library calls except the syscall() function for the following 3
12606 + * syscalls:
12607 * sysinfo(), write(), and _exit()
12608 *
12609 * For output, it avoids printf (which in some C libraries
12610 * has large external dependencies) by implementing it's own
12611 * number output and print routines, and using __builtin_strlen()
12612 + *
12613 + * The test may crash if any of the above syscalls fails because in some
12614 + * libc implementations (e.g. the GNU C Library) errno is saved in
12615 + * thread-local storage, which does not get initialized due to avoiding
12616 + * startup libs.
12617 */
12618
12619 #include <sys/sysinfo.h>
12620 #include <unistd.h>
12621 +#include <sys/syscall.h>
12622
12623 #define STDOUT_FILENO 1
12624
12625 static int print(const char *s)
12626 {
12627 - return write(STDOUT_FILENO, s, __builtin_strlen(s));
12628 + size_t len = 0;
12629 +
12630 + while (s[len] != '\0')
12631 + len++;
12632 +
12633 + return syscall(SYS_write, STDOUT_FILENO, s, len);
12634 }
12635
12636 static inline char *num_to_str(unsigned long num, char *buf, int len)
12637 @@ -79,12 +91,12 @@ void _start(void)
12638 print("TAP version 13\n");
12639 print("# Testing system size.\n");
12640
12641 - ccode = sysinfo(&info);
12642 + ccode = syscall(SYS_sysinfo, &info);
12643 if (ccode < 0) {
12644 print("not ok 1");
12645 print(test_name);
12646 print(" ---\n reason: \"could not get sysinfo\"\n ...\n");
12647 - _exit(ccode);
12648 + syscall(SYS_exit, ccode);
12649 }
12650 print("ok 1");
12651 print(test_name);
12652 @@ -100,5 +112,5 @@ void _start(void)
12653 print(" ...\n");
12654 print("1..1\n");
12655
12656 - _exit(0);
12657 + syscall(SYS_exit, 0);
12658 }
12659 diff --git a/tools/usb/usbip/src/usbip_network.c b/tools/usb/usbip/src/usbip_network.c
12660 index d595d72693fb..ed4dc8c14269 100644
12661 --- a/tools/usb/usbip/src/usbip_network.c
12662 +++ b/tools/usb/usbip/src/usbip_network.c
12663 @@ -50,39 +50,39 @@ void usbip_setup_port_number(char *arg)
12664 info("using port %d (\"%s\")", usbip_port, usbip_port_string);
12665 }
12666
12667 -void usbip_net_pack_uint32_t(int pack, uint32_t *num)
12668 +uint32_t usbip_net_pack_uint32_t(int pack, uint32_t num)
12669 {
12670 uint32_t i;
12671
12672 if (pack)
12673 - i = htonl(*num);
12674 + i = htonl(num);
12675 else
12676 - i = ntohl(*num);
12677 + i = ntohl(num);
12678
12679 - *num = i;
12680 + return i;
12681 }
12682
12683 -void usbip_net_pack_uint16_t(int pack, uint16_t *num)
12684 +uint16_t usbip_net_pack_uint16_t(int pack, uint16_t num)
12685 {
12686 uint16_t i;
12687
12688 if (pack)
12689 - i = htons(*num);
12690 + i = htons(num);
12691 else
12692 - i = ntohs(*num);
12693 + i = ntohs(num);
12694
12695 - *num = i;
12696 + return i;
12697 }
12698
12699 void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev)
12700 {
12701 - usbip_net_pack_uint32_t(pack, &udev->busnum);
12702 - usbip_net_pack_uint32_t(pack, &udev->devnum);
12703 - usbip_net_pack_uint32_t(pack, &udev->speed);
12704 + udev->busnum = usbip_net_pack_uint32_t(pack, udev->busnum);
12705 + udev->devnum = usbip_net_pack_uint32_t(pack, udev->devnum);
12706 + udev->speed = usbip_net_pack_uint32_t(pack, udev->speed);
12707
12708 - usbip_net_pack_uint16_t(pack, &udev->idVendor);
12709 - usbip_net_pack_uint16_t(pack, &udev->idProduct);
12710 - usbip_net_pack_uint16_t(pack, &udev->bcdDevice);
12711 + udev->idVendor = usbip_net_pack_uint16_t(pack, udev->idVendor);
12712 + udev->idProduct = usbip_net_pack_uint16_t(pack, udev->idProduct);
12713 + udev->bcdDevice = usbip_net_pack_uint16_t(pack, udev->bcdDevice);
12714 }
12715
12716 void usbip_net_pack_usb_interface(int pack __attribute__((unused)),
12717 @@ -129,6 +129,14 @@ ssize_t usbip_net_send(int sockfd, void *buff, size_t bufflen)
12718 return usbip_net_xmit(sockfd, buff, bufflen, 1);
12719 }
12720
12721 +static inline void usbip_net_pack_op_common(int pack,
12722 + struct op_common *op_common)
12723 +{
12724 + op_common->version = usbip_net_pack_uint16_t(pack, op_common->version);
12725 + op_common->code = usbip_net_pack_uint16_t(pack, op_common->code);
12726 + op_common->status = usbip_net_pack_uint32_t(pack, op_common->status);
12727 +}
12728 +
12729 int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status)
12730 {
12731 struct op_common op_common;
12732 @@ -140,7 +148,7 @@ int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status)
12733 op_common.code = code;
12734 op_common.status = status;
12735
12736 - PACK_OP_COMMON(1, &op_common);
12737 + usbip_net_pack_op_common(1, &op_common);
12738
12739 rc = usbip_net_send(sockfd, &op_common, sizeof(op_common));
12740 if (rc < 0) {
12741 @@ -164,7 +172,7 @@ int usbip_net_recv_op_common(int sockfd, uint16_t *code, int *status)
12742 goto err;
12743 }
12744
12745 - PACK_OP_COMMON(0, &op_common);
12746 + usbip_net_pack_op_common(0, &op_common);
12747
12748 if (op_common.version != USBIP_VERSION) {
12749 err("USBIP Kernel and tool version mismatch: %d %d:",
12750 diff --git a/tools/usb/usbip/src/usbip_network.h b/tools/usb/usbip/src/usbip_network.h
12751 index 555215eae43e..83b4c5344f72 100644
12752 --- a/tools/usb/usbip/src/usbip_network.h
12753 +++ b/tools/usb/usbip/src/usbip_network.h
12754 @@ -32,12 +32,6 @@ struct op_common {
12755
12756 } __attribute__((packed));
12757
12758 -#define PACK_OP_COMMON(pack, op_common) do {\
12759 - usbip_net_pack_uint16_t(pack, &(op_common)->version);\
12760 - usbip_net_pack_uint16_t(pack, &(op_common)->code);\
12761 - usbip_net_pack_uint32_t(pack, &(op_common)->status);\
12762 -} while (0)
12763 -
12764 /* ---------------------------------------------------------------------- */
12765 /* Dummy Code */
12766 #define OP_UNSPEC 0x00
12767 @@ -163,11 +157,11 @@ struct op_devlist_reply_extra {
12768 } while (0)
12769
12770 #define PACK_OP_DEVLIST_REPLY(pack, reply) do {\
12771 - usbip_net_pack_uint32_t(pack, &(reply)->ndev);\
12772 + (reply)->ndev = usbip_net_pack_uint32_t(pack, (reply)->ndev);\
12773 } while (0)
12774
12775 -void usbip_net_pack_uint32_t(int pack, uint32_t *num);
12776 -void usbip_net_pack_uint16_t(int pack, uint16_t *num);
12777 +uint32_t usbip_net_pack_uint32_t(int pack, uint32_t num);
12778 +uint16_t usbip_net_pack_uint16_t(int pack, uint16_t num);
12779 void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev);
12780 void usbip_net_pack_usb_interface(int pack, struct usbip_usb_interface *uinf);
12781