Magellan Linux

Contents of /trunk/kernel-magellan/patches-5.3/0103-5.3.4-all-fixes.patch

Parent Directory | Revision Log


Revision 3457 - (show annotations) (download)
Thu Oct 17 06:32:34 2019 UTC (4 years, 6 months ago) by niro
File size: 459944 byte(s)
-linux-5.3.4
1 diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml
2 index e0284d8c3b63..38d4cede0860 100644
3 --- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml
4 +++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml
5 @@ -70,7 +70,9 @@ allOf:
6 properties:
7 compatible:
8 contains:
9 - const: allwinner,sun8i-h3-spdif
10 + enum:
11 + - allwinner,sun8i-h3-spdif
12 + - allwinner,sun50i-h6-spdif
13
14 then:
15 properties:
16 diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
17 index 7d7c191102a7..11298f0ce44d 100644
18 --- a/Documentation/sound/hd-audio/models.rst
19 +++ b/Documentation/sound/hd-audio/models.rst
20 @@ -260,6 +260,9 @@ alc295-hp-x360
21 HP Spectre X360 fixups
22 alc-sense-combo
23 Headset button support for Chrome platform
24 +huawei-mbx-stereo
25 + Enable initialization verbs for Huawei MBX stereo speakers;
26 + might be risky, try this at your own risk
27
28 ALC66x/67x/892
29 ==============
30 diff --git a/Makefile b/Makefile
31 index a5f4e184b552..fa11c1d89acf 100644
32 --- a/Makefile
33 +++ b/Makefile
34 @@ -1,7 +1,7 @@
35 # SPDX-License-Identifier: GPL-2.0
36 VERSION = 5
37 PATCHLEVEL = 3
38 -SUBLEVEL = 3
39 +SUBLEVEL = 4
40 EXTRAVERSION =
41 NAME = Bobtail Squid
42
43 diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
44 index ebfe28c2f544..a1fd3e63e86e 100644
45 --- a/arch/arm/boot/dts/am3517-evm.dts
46 +++ b/arch/arm/boot/dts/am3517-evm.dts
47 @@ -124,10 +124,11 @@
48 };
49
50 lcd0: display@0 {
51 - compatible = "panel-dpi";
52 + /* This isn't the exact LCD, but the timings meet spec */
53 + /* To make it work, set CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4 */
54 + compatible = "newhaven,nhd-4.3-480272ef-atxl";
55 label = "15";
56 - status = "okay";
57 - pinctrl-names = "default";
58 + backlight = <&bl>;
59 enable-gpios = <&gpio6 16 GPIO_ACTIVE_HIGH>; /* gpio176, lcd INI */
60 vcc-supply = <&vdd_io_reg>;
61
62 @@ -136,22 +137,6 @@
63 remote-endpoint = <&dpi_out>;
64 };
65 };
66 -
67 - panel-timing {
68 - clock-frequency = <9000000>;
69 - hactive = <480>;
70 - vactive = <272>;
71 - hfront-porch = <3>;
72 - hback-porch = <2>;
73 - hsync-len = <42>;
74 - vback-porch = <3>;
75 - vfront-porch = <4>;
76 - vsync-len = <11>;
77 - hsync-active = <0>;
78 - vsync-active = <0>;
79 - de-active = <1>;
80 - pixelclk-active = <1>;
81 - };
82 };
83
84 bl: backlight {
85 diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
86 index f78db6809cca..9eb48cabcca4 100644
87 --- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
88 +++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
89 @@ -440,6 +440,7 @@
90 regulator-name = "vdd_ldo10";
91 regulator-min-microvolt = <1800000>;
92 regulator-max-microvolt = <1800000>;
93 + regulator-always-on;
94 regulator-state-mem {
95 regulator-off-in-suspend;
96 };
97 diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
98 index e0f470fe54c8..4398f2d1fe88 100644
99 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
100 +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
101 @@ -440,6 +440,7 @@
102 regulator-name = "vdd_ldo10";
103 regulator-min-microvolt = <1800000>;
104 regulator-max-microvolt = <1800000>;
105 + regulator-always-on;
106 regulator-state-mem {
107 regulator-off-in-suspend;
108 };
109 diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
110 index 895fbde4d433..c1ed83131b49 100644
111 --- a/arch/arm/boot/dts/imx7-colibri.dtsi
112 +++ b/arch/arm/boot/dts/imx7-colibri.dtsi
113 @@ -323,6 +323,7 @@
114 vmmc-supply = <&reg_module_3v3>;
115 vqmmc-supply = <&reg_DCDC3>;
116 non-removable;
117 + sdhci-caps-mask = <0x80000000 0x0>;
118 };
119
120 &iomuxc {
121 diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
122 index e61567437d73..62d5e9a4a781 100644
123 --- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
124 +++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
125 @@ -44,7 +44,7 @@
126 <&clks IMX7D_ENET1_TIME_ROOT_CLK>;
127 assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
128 assigned-clock-rates = <0>, <100000000>;
129 - phy-mode = "rgmii";
130 + phy-mode = "rgmii-id";
131 phy-handle = <&ethphy0>;
132 fsl,magic-packet;
133 status = "okay";
134 @@ -70,7 +70,7 @@
135 <&clks IMX7D_ENET2_TIME_ROOT_CLK>;
136 assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
137 assigned-clock-rates = <0>, <100000000>;
138 - phy-mode = "rgmii";
139 + phy-mode = "rgmii-id";
140 phy-handle = <&ethphy1>;
141 fsl,magic-packet;
142 status = "okay";
143 diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
144 index 642e809e757a..449cc7616da6 100644
145 --- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
146 +++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
147 @@ -108,7 +108,6 @@
148 &dss {
149 status = "ok";
150 vdds_dsi-supply = <&vpll2>;
151 - vdda_video-supply = <&video_reg>;
152 pinctrl-names = "default";
153 pinctrl-0 = <&dss_dpi_pins1>;
154 port {
155 @@ -124,44 +123,20 @@
156 display0 = &lcd0;
157 };
158
159 - video_reg: video_reg {
160 - pinctrl-names = "default";
161 - pinctrl-0 = <&panel_pwr_pins>;
162 - compatible = "regulator-fixed";
163 - regulator-name = "fixed-supply";
164 - regulator-min-microvolt = <3300000>;
165 - regulator-max-microvolt = <3300000>;
166 - gpio = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */
167 - };
168 -
169 lcd0: display {
170 - compatible = "panel-dpi";
171 + /* This isn't the exact LCD, but the timings meet spec */
172 + /* To make it work, set CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4 */
173 + compatible = "newhaven,nhd-4.3-480272ef-atxl";
174 label = "15";
175 - status = "okay";
176 - /* default-on; */
177 pinctrl-names = "default";
178 -
179 + pinctrl-0 = <&panel_pwr_pins>;
180 + backlight = <&bl>;
181 + enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;
182 port {
183 lcd_in: endpoint {
184 remote-endpoint = <&dpi_out>;
185 };
186 };
187 -
188 - panel-timing {
189 - clock-frequency = <9000000>;
190 - hactive = <480>;
191 - vactive = <272>;
192 - hfront-porch = <3>;
193 - hback-porch = <2>;
194 - hsync-len = <42>;
195 - vback-porch = <3>;
196 - vfront-porch = <4>;
197 - vsync-len = <11>;
198 - hsync-active = <0>;
199 - vsync-active = <0>;
200 - de-active = <1>;
201 - pixelclk-active = <1>;
202 - };
203 };
204
205 bl: backlight {
206 diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
207 index c7bf9c493646..64eb896907bf 100644
208 --- a/arch/arm/configs/omap2plus_defconfig
209 +++ b/arch/arm/configs/omap2plus_defconfig
210 @@ -363,6 +363,7 @@ CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1=m
211 CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1=m
212 CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11=m
213 CONFIG_DRM_TILCDC=m
214 +CONFIG_DRM_PANEL_SIMPLE=m
215 CONFIG_FB=y
216 CONFIG_FIRMWARE_EDID=y
217 CONFIG_FB_MODE_HELPERS=y
218 diff --git a/arch/arm/mach-at91/.gitignore b/arch/arm/mach-at91/.gitignore
219 new file mode 100644
220 index 000000000000..2ecd6f51c8a9
221 --- /dev/null
222 +++ b/arch/arm/mach-at91/.gitignore
223 @@ -0,0 +1 @@
224 +pm_data-offsets.h
225 diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
226 index 31b61f0e1c07..de64301dcff2 100644
227 --- a/arch/arm/mach-at91/Makefile
228 +++ b/arch/arm/mach-at91/Makefile
229 @@ -19,9 +19,10 @@ ifeq ($(CONFIG_PM_DEBUG),y)
230 CFLAGS_pm.o += -DDEBUG
231 endif
232
233 -include/generated/at91_pm_data-offsets.h: arch/arm/mach-at91/pm_data-offsets.s FORCE
234 +$(obj)/pm_data-offsets.h: $(obj)/pm_data-offsets.s FORCE
235 $(call filechk,offsets,__PM_DATA_OFFSETS_H__)
236
237 -arch/arm/mach-at91/pm_suspend.o: include/generated/at91_pm_data-offsets.h
238 +$(obj)/pm_suspend.o: $(obj)/pm_data-offsets.h
239
240 targets += pm_data-offsets.s
241 +clean-files += pm_data-offsets.h
242 diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
243 index c751f047b116..ed57c879d4e1 100644
244 --- a/arch/arm/mach-at91/pm_suspend.S
245 +++ b/arch/arm/mach-at91/pm_suspend.S
246 @@ -10,7 +10,7 @@
247 #include <linux/linkage.h>
248 #include <linux/clk/at91_pmc.h>
249 #include "pm.h"
250 -#include "generated/at91_pm_data-offsets.h"
251 +#include "pm_data-offsets.h"
252
253 #define SRAMC_SELF_FRESH_ACTIVE 0x01
254 #define SRAMC_SELF_FRESH_EXIT 0x00
255 diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
256 index 1f0da76a39de..7b7280c21ee0 100644
257 --- a/arch/arm/mach-ep93xx/edb93xx.c
258 +++ b/arch/arm/mach-ep93xx/edb93xx.c
259 @@ -103,7 +103,7 @@ static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
260 };
261
262 static struct gpiod_lookup_table edb93xx_spi_cs_gpio_table = {
263 - .dev_id = "ep93xx-spi.0",
264 + .dev_id = "spi0",
265 .table = {
266 GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW),
267 { },
268 diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
269 index e2658e22bba1..8a53b74dc4b2 100644
270 --- a/arch/arm/mach-ep93xx/simone.c
271 +++ b/arch/arm/mach-ep93xx/simone.c
272 @@ -73,7 +73,7 @@ static struct spi_board_info simone_spi_devices[] __initdata = {
273 * v1.3 parts will still work, since the signal on SFRMOUT is automatic.
274 */
275 static struct gpiod_lookup_table simone_spi_cs_gpio_table = {
276 - .dev_id = "ep93xx-spi.0",
277 + .dev_id = "spi0",
278 .table = {
279 GPIO_LOOKUP("A", 1, "cs", GPIO_ACTIVE_LOW),
280 { },
281 diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
282 index 582e06e104fd..e0e1b11032f1 100644
283 --- a/arch/arm/mach-ep93xx/ts72xx.c
284 +++ b/arch/arm/mach-ep93xx/ts72xx.c
285 @@ -267,7 +267,7 @@ static struct spi_board_info bk3_spi_board_info[] __initdata = {
286 * goes through CPLD
287 */
288 static struct gpiod_lookup_table bk3_spi_cs_gpio_table = {
289 - .dev_id = "ep93xx-spi.0",
290 + .dev_id = "spi0",
291 .table = {
292 GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW),
293 { },
294 @@ -316,7 +316,7 @@ static struct spi_board_info ts72xx_spi_devices[] __initdata = {
295 };
296
297 static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = {
298 - .dev_id = "ep93xx-spi.0",
299 + .dev_id = "spi0",
300 .table = {
301 /* DIO_17 */
302 GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW),
303 diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
304 index a88a1d807b32..cbcba3136d74 100644
305 --- a/arch/arm/mach-ep93xx/vision_ep9307.c
306 +++ b/arch/arm/mach-ep93xx/vision_ep9307.c
307 @@ -242,7 +242,7 @@ static struct spi_board_info vision_spi_board_info[] __initdata = {
308 };
309
310 static struct gpiod_lookup_table vision_spi_cs_gpio_table = {
311 - .dev_id = "ep93xx-spi.0",
312 + .dev_id = "spi0",
313 .table = {
314 GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW),
315 GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW),
316 diff --git a/arch/arm/mach-omap2/.gitignore b/arch/arm/mach-omap2/.gitignore
317 new file mode 100644
318 index 000000000000..79a8d6ea7152
319 --- /dev/null
320 +++ b/arch/arm/mach-omap2/.gitignore
321 @@ -0,0 +1 @@
322 +pm-asm-offsets.h
323 diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
324 index 600650551621..21c6d4bca3c0 100644
325 --- a/arch/arm/mach-omap2/Makefile
326 +++ b/arch/arm/mach-omap2/Makefile
327 @@ -223,9 +223,10 @@ obj-y += omap_phy_internal.o
328
329 obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
330
331 -include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE
332 +$(obj)/pm-asm-offsets.h: $(obj)/pm-asm-offsets.s FORCE
333 $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__)
334
335 -$(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h
336 +$(obj)/sleep33xx.o $(obj)/sleep43xx.o: $(obj)/pm-asm-offsets.h
337
338 targets += pm-asm-offsets.s
339 +clean-files += pm-asm-offsets.h
340 diff --git a/arch/arm/mach-omap2/sleep33xx.S b/arch/arm/mach-omap2/sleep33xx.S
341 index 68fee339d3f1..dc221249bc22 100644
342 --- a/arch/arm/mach-omap2/sleep33xx.S
343 +++ b/arch/arm/mach-omap2/sleep33xx.S
344 @@ -6,7 +6,6 @@
345 * Dave Gerlach, Vaibhav Bedia
346 */
347
348 -#include <generated/ti-pm-asm-offsets.h>
349 #include <linux/linkage.h>
350 #include <linux/platform_data/pm33xx.h>
351 #include <linux/ti-emif-sram.h>
352 @@ -15,6 +14,7 @@
353
354 #include "iomap.h"
355 #include "cm33xx.h"
356 +#include "pm-asm-offsets.h"
357
358 #define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED 0x00030000
359 #define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE 0x0003
360 diff --git a/arch/arm/mach-omap2/sleep43xx.S b/arch/arm/mach-omap2/sleep43xx.S
361 index c1f4e4852644..90d2907a2eb2 100644
362 --- a/arch/arm/mach-omap2/sleep43xx.S
363 +++ b/arch/arm/mach-omap2/sleep43xx.S
364 @@ -6,7 +6,6 @@
365 * Dave Gerlach, Vaibhav Bedia
366 */
367
368 -#include <generated/ti-pm-asm-offsets.h>
369 #include <linux/linkage.h>
370 #include <linux/ti-emif-sram.h>
371 #include <linux/platform_data/pm33xx.h>
372 @@ -19,6 +18,7 @@
373 #include "iomap.h"
374 #include "omap-secure.h"
375 #include "omap44xx.h"
376 +#include "pm-asm-offsets.h"
377 #include "prm33xx.h"
378 #include "prcm43xx.h"
379
380 diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
381 index a7cfe07156f4..e65ee8180c35 100644
382 --- a/arch/arm/mach-zynq/platsmp.c
383 +++ b/arch/arm/mach-zynq/platsmp.c
384 @@ -57,7 +57,7 @@ int zynq_cpun_start(u32 address, int cpu)
385 * 0x4: Jump by mov instruction
386 * 0x8: Jumping address
387 */
388 - memcpy((__force void *)zero, &zynq_secondary_trampoline,
389 + memcpy_toio(zero, &zynq_secondary_trampoline,
390 trampoline_size);
391 writel(address, zero + trampoline_size);
392
393 diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
394 index 61d834157bc0..382e1c2855e8 100644
395 --- a/arch/arm/mm/copypage-xscale.c
396 +++ b/arch/arm/mm/copypage-xscale.c
397 @@ -42,6 +42,7 @@ static void mc_copy_user_page(void *from, void *to)
398 * when prefetching destination as well. (NP)
399 */
400 asm volatile ("\
401 +.arch xscale \n\
402 pld [%0, #0] \n\
403 pld [%0, #32] \n\
404 pld [%1, #0] \n\
405 @@ -106,8 +107,9 @@ void
406 xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
407 {
408 void *ptr, *kaddr = kmap_atomic(page);
409 - asm volatile(
410 - "mov r1, %2 \n\
411 + asm volatile("\
412 +.arch xscale \n\
413 + mov r1, %2 \n\
414 mov r2, #0 \n\
415 mov r3, #0 \n\
416 1: mov ip, %0 \n\
417 diff --git a/arch/arm/plat-samsung/watchdog-reset.c b/arch/arm/plat-samsung/watchdog-reset.c
418 index ce42cc640a61..71d85ff323f7 100644
419 --- a/arch/arm/plat-samsung/watchdog-reset.c
420 +++ b/arch/arm/plat-samsung/watchdog-reset.c
421 @@ -62,6 +62,7 @@ void samsung_wdt_reset(void)
422 #ifdef CONFIG_OF
423 static const struct of_device_id s3c2410_wdt_match[] = {
424 { .compatible = "samsung,s3c2410-wdt" },
425 + { .compatible = "samsung,s3c6410-wdt" },
426 {},
427 };
428
429 diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
430 index 4e916e1f71f7..1c2a9ca491c0 100644
431 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
432 +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
433 @@ -66,8 +66,8 @@
434 gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
435 gpios-states = <0>;
436
437 - states = <3300000 0
438 - 1800000 1>;
439 + states = <3300000 0>,
440 + <1800000 1>;
441 };
442
443 flash_1v8: regulator-flash_1v8 {
444 diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
445 index b636912a2715..afcf8a9f667b 100644
446 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
447 +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
448 @@ -75,8 +75,8 @@
449 gpios-states = <1>;
450
451 /* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */
452 - states = <1800000 0
453 - 3300000 1>;
454 + states = <1800000 0>,
455 + <3300000 1>;
456 };
457
458 vddio_boot: regulator-vddio_boot {
459 diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
460 index 9972b1515da6..6039adda12ee 100644
461 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
462 +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
463 @@ -77,8 +77,8 @@
464 gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
465 gpios-states = <0>;
466
467 - states = <3300000 0
468 - 1800000 1>;
469 + states = <3300000 0>,
470 + <1800000 1>;
471 };
472
473 vcc1v8: regulator-vcc1v8 {
474 diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
475 index e8f925871edf..89f7b41b0e9e 100644
476 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
477 +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
478 @@ -46,8 +46,8 @@
479 gpios-states = <1>;
480
481 /* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */
482 - states = <1800000 0
483 - 3300000 1>;
484 + states = <1800000 0>,
485 + <3300000 1>;
486
487 regulator-settling-time-up-us = <10000>;
488 regulator-settling-time-down-us = <150000>;
489 diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
490 index 796baea7a0bf..c8d74e61dec1 100644
491 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
492 +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
493 @@ -38,8 +38,8 @@
494 gpios-states = <1>;
495
496 /* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */
497 - states = <1800000 0
498 - 3300000 1>;
499 + states = <1800000 0>,
500 + <3300000 1>;
501 };
502
503 vddio_boot: regulator-vddio_boot {
504 diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
505 index 26907ac82930..c433a031841f 100644
506 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
507 +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
508 @@ -38,8 +38,8 @@
509 gpios-states = <1>;
510
511 /* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */
512 - states = <1800000 0
513 - 3300000 1>;
514 + states = <1800000 0>,
515 + <3300000 1>;
516 };
517
518 vddio_boot: regulator-vddio_boot {
519 diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
520 index 52aae341d0da..d1f4eb197af2 100644
521 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
522 +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
523 @@ -169,15 +169,14 @@
524 opp-1300000000 {
525 opp-hz = /bits/ 64 <1300000000>;
526 opp-microvolt = <1000000>;
527 - opp-supported-hw = <0xc>, <0x7>;
528 + opp-supported-hw = <0xc>, <0x4>;
529 clock-latency-ns = <150000>;
530 };
531
532 opp-1500000000 {
533 opp-hz = /bits/ 64 <1500000000>;
534 opp-microvolt = <1000000>;
535 - /* Consumer only but rely on speed grading */
536 - opp-supported-hw = <0x8>, <0x7>;
537 + opp-supported-hw = <0x8>, <0x3>;
538 clock-latency-ns = <150000>;
539 };
540 };
541 diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
542 index 11c0a7137823..db6df76e97a1 100644
543 --- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
544 +++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
545 @@ -61,7 +61,9 @@
546 protected-clocks = <GCC_BIMC_CDSP_CLK>,
547 <GCC_CDSP_CFG_AHB_CLK>,
548 <GCC_CDSP_BIMC_CLK_SRC>,
549 - <GCC_CDSP_TBU_CLK>;
550 + <GCC_CDSP_TBU_CLK>,
551 + <141>, /* GCC_WCSS_Q6_AHB_CLK */
552 + <142>; /* GCC_WCSS_Q6_AXIM_CLK */
553 };
554
555 &pms405_spmi_regulators {
556 diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
557 index e9fefd8a7e02..f0f2c555033b 100644
558 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
559 +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
560 @@ -801,6 +801,7 @@
561 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
562 clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
563 fifo-depth = <0x100>;
564 + max-frequency = <150000000>;
565 status = "disabled";
566 };
567
568 @@ -812,6 +813,7 @@
569 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
570 clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
571 fifo-depth = <0x100>;
572 + max-frequency = <150000000>;
573 status = "disabled";
574 };
575
576 @@ -823,6 +825,7 @@
577 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
578 clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
579 fifo-depth = <0x100>;
580 + max-frequency = <150000000>;
581 status = "disabled";
582 };
583
584 diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
585 index c8c850bc3dfb..6dd011e0b434 100644
586 --- a/arch/arm64/include/asm/atomic_ll_sc.h
587 +++ b/arch/arm64/include/asm/atomic_ll_sc.h
588 @@ -26,7 +26,7 @@
589 * (the optimize attribute silently ignores these options).
590 */
591
592 -#define ATOMIC_OP(op, asm_op) \
593 +#define ATOMIC_OP(op, asm_op, constraint) \
594 __LL_SC_INLINE void \
595 __LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \
596 { \
597 @@ -40,11 +40,11 @@ __LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \
598 " stxr %w1, %w0, %2\n" \
599 " cbnz %w1, 1b" \
600 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
601 - : "Ir" (i)); \
602 + : #constraint "r" (i)); \
603 } \
604 __LL_SC_EXPORT(arch_atomic_##op);
605
606 -#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
607 +#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
608 __LL_SC_INLINE int \
609 __LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \
610 { \
611 @@ -59,14 +59,14 @@ __LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \
612 " cbnz %w1, 1b\n" \
613 " " #mb \
614 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
615 - : "Ir" (i) \
616 + : #constraint "r" (i) \
617 : cl); \
618 \
619 return result; \
620 } \
621 __LL_SC_EXPORT(arch_atomic_##op##_return##name);
622
623 -#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
624 +#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
625 __LL_SC_INLINE int \
626 __LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \
627 { \
628 @@ -81,7 +81,7 @@ __LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \
629 " cbnz %w2, 1b\n" \
630 " " #mb \
631 : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
632 - : "Ir" (i) \
633 + : #constraint "r" (i) \
634 : cl); \
635 \
636 return result; \
637 @@ -99,8 +99,8 @@ __LL_SC_EXPORT(arch_atomic_fetch_##op##name);
638 ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
639 ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
640
641 -ATOMIC_OPS(add, add)
642 -ATOMIC_OPS(sub, sub)
643 +ATOMIC_OPS(add, add, I)
644 +ATOMIC_OPS(sub, sub, J)
645
646 #undef ATOMIC_OPS
647 #define ATOMIC_OPS(...) \
648 @@ -110,17 +110,17 @@ ATOMIC_OPS(sub, sub)
649 ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
650 ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
651
652 -ATOMIC_OPS(and, and)
653 -ATOMIC_OPS(andnot, bic)
654 -ATOMIC_OPS(or, orr)
655 -ATOMIC_OPS(xor, eor)
656 +ATOMIC_OPS(and, and, )
657 +ATOMIC_OPS(andnot, bic, )
658 +ATOMIC_OPS(or, orr, )
659 +ATOMIC_OPS(xor, eor, )
660
661 #undef ATOMIC_OPS
662 #undef ATOMIC_FETCH_OP
663 #undef ATOMIC_OP_RETURN
664 #undef ATOMIC_OP
665
666 -#define ATOMIC64_OP(op, asm_op) \
667 +#define ATOMIC64_OP(op, asm_op, constraint) \
668 __LL_SC_INLINE void \
669 __LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v)) \
670 { \
671 @@ -134,11 +134,11 @@ __LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v)) \
672 " stxr %w1, %0, %2\n" \
673 " cbnz %w1, 1b" \
674 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
675 - : "Ir" (i)); \
676 + : #constraint "r" (i)); \
677 } \
678 __LL_SC_EXPORT(arch_atomic64_##op);
679
680 -#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
681 +#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
682 __LL_SC_INLINE s64 \
683 __LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\
684 { \
685 @@ -153,14 +153,14 @@ __LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\
686 " cbnz %w1, 1b\n" \
687 " " #mb \
688 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
689 - : "Ir" (i) \
690 + : #constraint "r" (i) \
691 : cl); \
692 \
693 return result; \
694 } \
695 __LL_SC_EXPORT(arch_atomic64_##op##_return##name);
696
697 -#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
698 +#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
699 __LL_SC_INLINE s64 \
700 __LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)) \
701 { \
702 @@ -175,7 +175,7 @@ __LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)) \
703 " cbnz %w2, 1b\n" \
704 " " #mb \
705 : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
706 - : "Ir" (i) \
707 + : #constraint "r" (i) \
708 : cl); \
709 \
710 return result; \
711 @@ -193,8 +193,8 @@ __LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
712 ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
713 ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
714
715 -ATOMIC64_OPS(add, add)
716 -ATOMIC64_OPS(sub, sub)
717 +ATOMIC64_OPS(add, add, I)
718 +ATOMIC64_OPS(sub, sub, J)
719
720 #undef ATOMIC64_OPS
721 #define ATOMIC64_OPS(...) \
722 @@ -204,10 +204,10 @@ ATOMIC64_OPS(sub, sub)
723 ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
724 ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
725
726 -ATOMIC64_OPS(and, and)
727 -ATOMIC64_OPS(andnot, bic)
728 -ATOMIC64_OPS(or, orr)
729 -ATOMIC64_OPS(xor, eor)
730 +ATOMIC64_OPS(and, and, L)
731 +ATOMIC64_OPS(andnot, bic, )
732 +ATOMIC64_OPS(or, orr, L)
733 +ATOMIC64_OPS(xor, eor, L)
734
735 #undef ATOMIC64_OPS
736 #undef ATOMIC64_FETCH_OP
737 @@ -237,7 +237,7 @@ __LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
738 }
739 __LL_SC_EXPORT(arch_atomic64_dec_if_positive);
740
741 -#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
742 +#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
743 __LL_SC_INLINE u##sz \
744 __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
745 unsigned long old, \
746 @@ -265,29 +265,34 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
747 "2:" \
748 : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
749 [v] "+Q" (*(u##sz *)ptr) \
750 - : [old] "Kr" (old), [new] "r" (new) \
751 + : [old] #constraint "r" (old), [new] "r" (new) \
752 : cl); \
753 \
754 return oldval; \
755 } \
756 __LL_SC_EXPORT(__cmpxchg_case_##name##sz);
757
758 -__CMPXCHG_CASE(w, b, , 8, , , , )
759 -__CMPXCHG_CASE(w, h, , 16, , , , )
760 -__CMPXCHG_CASE(w, , , 32, , , , )
761 -__CMPXCHG_CASE( , , , 64, , , , )
762 -__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory")
763 -__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory")
764 -__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory")
765 -__CMPXCHG_CASE( , , acq_, 64, , a, , "memory")
766 -__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory")
767 -__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory")
768 -__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory")
769 -__CMPXCHG_CASE( , , rel_, 64, , , l, "memory")
770 -__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory")
771 -__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory")
772 -__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory")
773 -__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory")
774 +/*
775 + * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
776 + * handle the 'K' constraint for the value 4294967295 - thus we use no
777 + * constraint for 32 bit operations.
778 + */
779 +__CMPXCHG_CASE(w, b, , 8, , , , , )
780 +__CMPXCHG_CASE(w, h, , 16, , , , , )
781 +__CMPXCHG_CASE(w, , , 32, , , , , )
782 +__CMPXCHG_CASE( , , , 64, , , , , L)
783 +__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", )
784 +__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", )
785 +__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", )
786 +__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
787 +__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", )
788 +__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", )
789 +__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", )
790 +__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
791 +__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", )
792 +__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", )
793 +__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", )
794 +__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
795
796 #undef __CMPXCHG_CASE
797
798 diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
799 index e7d46631cc42..b1454d117cd2 100644
800 --- a/arch/arm64/include/asm/cputype.h
801 +++ b/arch/arm64/include/asm/cputype.h
802 @@ -51,14 +51,6 @@
803 #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
804 MIDR_ARCHITECTURE_MASK)
805
806 -#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
807 -({ \
808 - u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
809 - u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
810 - \
811 - _model == (model) && rv >= (rv_min) && rv <= (rv_max); \
812 - })
813 -
814 #define ARM_CPU_IMP_ARM 0x41
815 #define ARM_CPU_IMP_APM 0x50
816 #define ARM_CPU_IMP_CAVIUM 0x43
817 @@ -159,10 +151,19 @@ struct midr_range {
818 #define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r)
819 #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
820
821 +static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
822 + u32 rv_max)
823 +{
824 + u32 _model = midr & MIDR_CPU_MODEL_MASK;
825 + u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
826 +
827 + return _model == model && rv >= rv_min && rv <= rv_max;
828 +}
829 +
830 static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
831 {
832 - return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
833 - range->rv_min, range->rv_max);
834 + return midr_is_cpu_model_range(midr, range->model,
835 + range->rv_min, range->rv_max);
836 }
837
838 static inline bool
839 diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
840 index ed57b760f38c..a17393ff6677 100644
841 --- a/arch/arm64/include/asm/exception.h
842 +++ b/arch/arm64/include/asm/exception.h
843 @@ -30,4 +30,6 @@ static inline u32 disr_to_esr(u64 disr)
844 return esr;
845 }
846
847 +asmlinkage void enter_from_user_mode(void);
848 +
849 #endif /* __ASM_EXCEPTION_H */
850 diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
851 index 8af7a85f76bd..bc3949064725 100644
852 --- a/arch/arm64/include/asm/tlbflush.h
853 +++ b/arch/arm64/include/asm/tlbflush.h
854 @@ -251,6 +251,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
855 dsb(ishst);
856 __tlbi(vaae1is, addr);
857 dsb(ish);
858 + isb();
859 }
860 #endif
861
862 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
863 index b1fdc486aed8..9323bcc40a58 100644
864 --- a/arch/arm64/kernel/cpufeature.c
865 +++ b/arch/arm64/kernel/cpufeature.c
866 @@ -894,7 +894,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
867 u32 midr = read_cpuid_id();
868
869 /* Cavium ThunderX pass 1.x and 2.x */
870 - return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
871 + return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
872 MIDR_CPU_VAR_REV(0, 0),
873 MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
874 }
875 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
876 index 320a30dbe35e..84a822748c84 100644
877 --- a/arch/arm64/kernel/entry.S
878 +++ b/arch/arm64/kernel/entry.S
879 @@ -30,9 +30,9 @@
880 * Context tracking subsystem. Used to instrument transitions
881 * between user and kernel mode.
882 */
883 - .macro ct_user_exit
884 + .macro ct_user_exit_irqoff
885 #ifdef CONFIG_CONTEXT_TRACKING
886 - bl context_tracking_user_exit
887 + bl enter_from_user_mode
888 #endif
889 .endm
890
891 @@ -792,8 +792,8 @@ el0_cp15:
892 /*
893 * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
894 */
895 + ct_user_exit_irqoff
896 enable_daif
897 - ct_user_exit
898 mov x0, x25
899 mov x1, sp
900 bl do_cp15instr
901 @@ -805,8 +805,8 @@ el0_da:
902 * Data abort handling
903 */
904 mrs x26, far_el1
905 + ct_user_exit_irqoff
906 enable_daif
907 - ct_user_exit
908 clear_address_tag x0, x26
909 mov x1, x25
910 mov x2, sp
911 @@ -818,11 +818,11 @@ el0_ia:
912 */
913 mrs x26, far_el1
914 gic_prio_kentry_setup tmp=x0
915 + ct_user_exit_irqoff
916 enable_da_f
917 #ifdef CONFIG_TRACE_IRQFLAGS
918 bl trace_hardirqs_off
919 #endif
920 - ct_user_exit
921 mov x0, x26
922 mov x1, x25
923 mov x2, sp
924 @@ -832,8 +832,8 @@ el0_fpsimd_acc:
925 /*
926 * Floating Point or Advanced SIMD access
927 */
928 + ct_user_exit_irqoff
929 enable_daif
930 - ct_user_exit
931 mov x0, x25
932 mov x1, sp
933 bl do_fpsimd_acc
934 @@ -842,8 +842,8 @@ el0_sve_acc:
935 /*
936 * Scalable Vector Extension access
937 */
938 + ct_user_exit_irqoff
939 enable_daif
940 - ct_user_exit
941 mov x0, x25
942 mov x1, sp
943 bl do_sve_acc
944 @@ -852,8 +852,8 @@ el0_fpsimd_exc:
945 /*
946 * Floating Point, Advanced SIMD or SVE exception
947 */
948 + ct_user_exit_irqoff
949 enable_daif
950 - ct_user_exit
951 mov x0, x25
952 mov x1, sp
953 bl do_fpsimd_exc
954 @@ -868,11 +868,11 @@ el0_sp_pc:
955 * Stack or PC alignment exception handling
956 */
957 gic_prio_kentry_setup tmp=x0
958 + ct_user_exit_irqoff
959 enable_da_f
960 #ifdef CONFIG_TRACE_IRQFLAGS
961 bl trace_hardirqs_off
962 #endif
963 - ct_user_exit
964 mov x0, x26
965 mov x1, x25
966 mov x2, sp
967 @@ -882,8 +882,8 @@ el0_undef:
968 /*
969 * Undefined instruction
970 */
971 + ct_user_exit_irqoff
972 enable_daif
973 - ct_user_exit
974 mov x0, sp
975 bl do_undefinstr
976 b ret_to_user
977 @@ -891,8 +891,8 @@ el0_sys:
978 /*
979 * System instructions, for trapped cache maintenance instructions
980 */
981 + ct_user_exit_irqoff
982 enable_daif
983 - ct_user_exit
984 mov x0, x25
985 mov x1, sp
986 bl do_sysinstr
987 @@ -902,17 +902,18 @@ el0_dbg:
988 * Debug exception handling
989 */
990 tbnz x24, #0, el0_inv // EL0 only
991 + mrs x24, far_el1
992 gic_prio_kentry_setup tmp=x3
993 - mrs x0, far_el1
994 + ct_user_exit_irqoff
995 + mov x0, x24
996 mov x1, x25
997 mov x2, sp
998 bl do_debug_exception
999 enable_da_f
1000 - ct_user_exit
1001 b ret_to_user
1002 el0_inv:
1003 + ct_user_exit_irqoff
1004 enable_daif
1005 - ct_user_exit
1006 mov x0, sp
1007 mov x1, #BAD_SYNC
1008 mov x2, x25
1009 @@ -925,13 +926,13 @@ el0_irq:
1010 kernel_entry 0
1011 el0_irq_naked:
1012 gic_prio_irq_setup pmr=x20, tmp=x0
1013 + ct_user_exit_irqoff
1014 enable_da_f
1015
1016 #ifdef CONFIG_TRACE_IRQFLAGS
1017 bl trace_hardirqs_off
1018 #endif
1019
1020 - ct_user_exit
1021 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1022 tbz x22, #55, 1f
1023 bl do_el0_irq_bp_hardening
1024 @@ -958,13 +959,14 @@ ENDPROC(el1_error)
1025 el0_error:
1026 kernel_entry 0
1027 el0_error_naked:
1028 - mrs x1, esr_el1
1029 + mrs x25, esr_el1
1030 gic_prio_kentry_setup tmp=x2
1031 + ct_user_exit_irqoff
1032 enable_dbg
1033 mov x0, sp
1034 + mov x1, x25
1035 bl do_serror
1036 enable_da_f
1037 - ct_user_exit
1038 b ret_to_user
1039 ENDPROC(el0_error)
1040
1041 diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
1042 new file mode 100644
1043 index 000000000000..25a2a9b479c2
1044 --- /dev/null
1045 +++ b/arch/arm64/kernel/image-vars.h
1046 @@ -0,0 +1,51 @@
1047 +/* SPDX-License-Identifier: GPL-2.0-only */
1048 +/*
1049 + * Linker script variables to be set after section resolution, as
1050 + * ld.lld does not like variables assigned before SECTIONS is processed.
1051 + */
1052 +#ifndef __ARM64_KERNEL_IMAGE_VARS_H
1053 +#define __ARM64_KERNEL_IMAGE_VARS_H
1054 +
1055 +#ifndef LINKER_SCRIPT
1056 +#error This file should only be included in vmlinux.lds.S
1057 +#endif
1058 +
1059 +#ifdef CONFIG_EFI
1060 +
1061 +__efistub_stext_offset = stext - _text;
1062 +
1063 +/*
1064 + * The EFI stub has its own symbol namespace prefixed by __efistub_, to
1065 + * isolate it from the kernel proper. The following symbols are legally
1066 + * accessed by the stub, so provide some aliases to make them accessible.
1067 + * Only include data symbols here, or text symbols of functions that are
1068 + * guaranteed to be safe when executed at another offset than they were
1069 + * linked at. The routines below are all implemented in assembler in a
1070 + * position independent manner
1071 + */
1072 +__efistub_memcmp = __pi_memcmp;
1073 +__efistub_memchr = __pi_memchr;
1074 +__efistub_memcpy = __pi_memcpy;
1075 +__efistub_memmove = __pi_memmove;
1076 +__efistub_memset = __pi_memset;
1077 +__efistub_strlen = __pi_strlen;
1078 +__efistub_strnlen = __pi_strnlen;
1079 +__efistub_strcmp = __pi_strcmp;
1080 +__efistub_strncmp = __pi_strncmp;
1081 +__efistub_strrchr = __pi_strrchr;
1082 +__efistub___flush_dcache_area = __pi___flush_dcache_area;
1083 +
1084 +#ifdef CONFIG_KASAN
1085 +__efistub___memcpy = __pi_memcpy;
1086 +__efistub___memmove = __pi_memmove;
1087 +__efistub___memset = __pi_memset;
1088 +#endif
1089 +
1090 +__efistub__text = _text;
1091 +__efistub__end = _end;
1092 +__efistub__edata = _edata;
1093 +__efistub_screen_info = screen_info;
1094 +
1095 +#endif
1096 +
1097 +#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
1098 diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
1099 index 2b85c0d6fa3d..c7d38c660372 100644
1100 --- a/arch/arm64/kernel/image.h
1101 +++ b/arch/arm64/kernel/image.h
1102 @@ -65,46 +65,4 @@
1103 DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \
1104 DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
1105
1106 -#ifdef CONFIG_EFI
1107 -
1108 -/*
1109 - * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
1110 - * https://github.com/ClangBuiltLinux/linux/issues/561
1111 - */
1112 -__efistub_stext_offset = ABSOLUTE(stext - _text);
1113 -
1114 -/*
1115 - * The EFI stub has its own symbol namespace prefixed by __efistub_, to
1116 - * isolate it from the kernel proper. The following symbols are legally
1117 - * accessed by the stub, so provide some aliases to make them accessible.
1118 - * Only include data symbols here, or text symbols of functions that are
1119 - * guaranteed to be safe when executed at another offset than they were
1120 - * linked at. The routines below are all implemented in assembler in a
1121 - * position independent manner
1122 - */
1123 -__efistub_memcmp = __pi_memcmp;
1124 -__efistub_memchr = __pi_memchr;
1125 -__efistub_memcpy = __pi_memcpy;
1126 -__efistub_memmove = __pi_memmove;
1127 -__efistub_memset = __pi_memset;
1128 -__efistub_strlen = __pi_strlen;
1129 -__efistub_strnlen = __pi_strnlen;
1130 -__efistub_strcmp = __pi_strcmp;
1131 -__efistub_strncmp = __pi_strncmp;
1132 -__efistub_strrchr = __pi_strrchr;
1133 -__efistub___flush_dcache_area = __pi___flush_dcache_area;
1134 -
1135 -#ifdef CONFIG_KASAN
1136 -__efistub___memcpy = __pi_memcpy;
1137 -__efistub___memmove = __pi_memmove;
1138 -__efistub___memset = __pi_memset;
1139 -#endif
1140 -
1141 -__efistub__text = _text;
1142 -__efistub__end = _end;
1143 -__efistub__edata = _edata;
1144 -__efistub_screen_info = screen_info;
1145 -
1146 -#endif
1147 -
1148 #endif /* __ARM64_KERNEL_IMAGE_H */
1149 diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
1150 index 32893b3d9164..742a636861e7 100644
1151 --- a/arch/arm64/kernel/traps.c
1152 +++ b/arch/arm64/kernel/traps.c
1153 @@ -7,9 +7,11 @@
1154 */
1155
1156 #include <linux/bug.h>
1157 +#include <linux/context_tracking.h>
1158 #include <linux/signal.h>
1159 #include <linux/personality.h>
1160 #include <linux/kallsyms.h>
1161 +#include <linux/kprobes.h>
1162 #include <linux/spinlock.h>
1163 #include <linux/uaccess.h>
1164 #include <linux/hardirq.h>
1165 @@ -900,6 +902,13 @@ asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
1166 nmi_exit();
1167 }
1168
1169 +asmlinkage void enter_from_user_mode(void)
1170 +{
1171 + CT_WARN_ON(ct_state() != CONTEXT_USER);
1172 + user_exit_irqoff();
1173 +}
1174 +NOKPROBE_SYMBOL(enter_from_user_mode);
1175 +
1176 void __pte_error(const char *file, int line, unsigned long val)
1177 {
1178 pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
1179 diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
1180 index 7fa008374907..803b24d2464a 100644
1181 --- a/arch/arm64/kernel/vmlinux.lds.S
1182 +++ b/arch/arm64/kernel/vmlinux.lds.S
1183 @@ -245,6 +245,8 @@ SECTIONS
1184 HEAD_SYMBOLS
1185 }
1186
1187 +#include "image-vars.h"
1188 +
1189 /*
1190 * The HYP init code and ID map text can't be longer than a page each,
1191 * and should not cross a page boundary.
1192 diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
1193 index f3c795278def..b1ee6cb4b17f 100644
1194 --- a/arch/arm64/mm/init.c
1195 +++ b/arch/arm64/mm/init.c
1196 @@ -570,8 +570,12 @@ void free_initmem(void)
1197 #ifdef CONFIG_BLK_DEV_INITRD
1198 void __init free_initrd_mem(unsigned long start, unsigned long end)
1199 {
1200 + unsigned long aligned_start, aligned_end;
1201 +
1202 + aligned_start = __virt_to_phys(start) & PAGE_MASK;
1203 + aligned_end = PAGE_ALIGN(__virt_to_phys(end));
1204 + memblock_free(aligned_start, aligned_end - aligned_start);
1205 free_reserved_area((void *)start, (void *)end, 0, "initrd");
1206 - memblock_free(__virt_to_phys(start), end - start);
1207 }
1208 #endif
1209
1210 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
1211 index 7dbf2be470f6..28a8f7b87ff0 100644
1212 --- a/arch/arm64/mm/proc.S
1213 +++ b/arch/arm64/mm/proc.S
1214 @@ -286,6 +286,15 @@ skip_pgd:
1215 msr sctlr_el1, x18
1216 isb
1217
1218 + /*
1219 + * Invalidate the local I-cache so that any instructions fetched
1220 + * speculatively from the PoC are discarded, since they may have
1221 + * been dynamically patched at the PoU.
1222 + */
1223 + ic iallu
1224 + dsb nsh
1225 + isb
1226 +
1227 /* Set the flag to zero to indicate that we're all done */
1228 str wzr, [flag_ptr]
1229 ret
1230 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1231 index 326448f9df16..1a42ba885188 100644
1232 --- a/arch/ia64/kernel/module.c
1233 +++ b/arch/ia64/kernel/module.c
1234 @@ -914,10 +914,14 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
1235 void
1236 module_arch_cleanup (struct module *mod)
1237 {
1238 - if (mod->arch.init_unw_table)
1239 + if (mod->arch.init_unw_table) {
1240 unw_remove_unwind_table(mod->arch.init_unw_table);
1241 - if (mod->arch.core_unw_table)
1242 + mod->arch.init_unw_table = NULL;
1243 + }
1244 + if (mod->arch.core_unw_table) {
1245 unw_remove_unwind_table(mod->arch.core_unw_table);
1246 + mod->arch.core_unw_table = NULL;
1247 + }
1248 }
1249
1250 void *dereference_module_function_descriptor(struct module *mod, void *ptr)
1251 diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
1252 index 533008262b69..5e5601c382b8 100644
1253 --- a/arch/m68k/include/asm/atarihw.h
1254 +++ b/arch/m68k/include/asm/atarihw.h
1255 @@ -22,7 +22,6 @@
1256
1257 #include <linux/types.h>
1258 #include <asm/bootinfo-atari.h>
1259 -#include <asm/raw_io.h>
1260 #include <asm/kmap.h>
1261
1262 extern u_long atari_mch_cookie;
1263 @@ -132,14 +131,6 @@ extern struct atari_hw_present atari_hw_present;
1264 */
1265
1266
1267 -#define atari_readb raw_inb
1268 -#define atari_writeb raw_outb
1269 -
1270 -#define atari_inb_p raw_inb
1271 -#define atari_outb_p raw_outb
1272 -
1273 -
1274 -
1275 #include <linux/mm.h>
1276 #include <asm/cacheflush.h>
1277
1278 diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
1279 index 6c03ca5bc436..819f611dccf2 100644
1280 --- a/arch/m68k/include/asm/io_mm.h
1281 +++ b/arch/m68k/include/asm/io_mm.h
1282 @@ -29,7 +29,11 @@
1283 #include <asm-generic/iomap.h>
1284
1285 #ifdef CONFIG_ATARI
1286 -#include <asm/atarihw.h>
1287 +#define atari_readb raw_inb
1288 +#define atari_writeb raw_outb
1289 +
1290 +#define atari_inb_p raw_inb
1291 +#define atari_outb_p raw_outb
1292 #endif
1293
1294
1295 diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
1296 index d9a08bed4b12..f653b60f2afc 100644
1297 --- a/arch/m68k/include/asm/macintosh.h
1298 +++ b/arch/m68k/include/asm/macintosh.h
1299 @@ -4,6 +4,7 @@
1300
1301 #include <linux/seq_file.h>
1302 #include <linux/interrupt.h>
1303 +#include <linux/irq.h>
1304
1305 #include <asm/bootinfo-mac.h>
1306
1307 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
1308 index c345b79414a9..403f7e193833 100644
1309 --- a/arch/powerpc/Makefile
1310 +++ b/arch/powerpc/Makefile
1311 @@ -39,13 +39,11 @@ endif
1312 uname := $(shell uname -m)
1313 KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig
1314
1315 -ifdef CONFIG_PPC64
1316 new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
1317
1318 ifeq ($(new_nm),y)
1319 NM := $(NM) --synthetic
1320 endif
1321 -endif
1322
1323 # BITS is used as extension for files which are available in a 32 bit
1324 # and a 64 bit version to simplify shared Makefiles.
1325 diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
1326 index 186109bdd41b..e04b20625cb9 100644
1327 --- a/arch/powerpc/platforms/powernv/opal-imc.c
1328 +++ b/arch/powerpc/platforms/powernv/opal-imc.c
1329 @@ -53,9 +53,9 @@ static void export_imc_mode_and_cmd(struct device_node *node,
1330 struct imc_pmu *pmu_ptr)
1331 {
1332 static u64 loc, *imc_mode_addr, *imc_cmd_addr;
1333 - int chip = 0, nid;
1334 char mode[16], cmd[16];
1335 u32 cb_offset;
1336 + struct imc_mem_info *ptr = pmu_ptr->mem_info;
1337
1338 imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
1339
1340 @@ -69,20 +69,20 @@ static void export_imc_mode_and_cmd(struct device_node *node,
1341 if (of_property_read_u32(node, "cb_offset", &cb_offset))
1342 cb_offset = IMC_CNTL_BLK_OFFSET;
1343
1344 - for_each_node(nid) {
1345 - loc = (u64)(pmu_ptr->mem_info[chip].vbase) + cb_offset;
1346 + while (ptr->vbase != NULL) {
1347 + loc = (u64)(ptr->vbase) + cb_offset;
1348 imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
1349 - sprintf(mode, "imc_mode_%d", nid);
1350 + sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
1351 if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
1352 imc_mode_addr))
1353 goto err;
1354
1355 imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
1356 - sprintf(cmd, "imc_cmd_%d", nid);
1357 + sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
1358 if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
1359 imc_cmd_addr))
1360 goto err;
1361 - chip++;
1362 + ptr++;
1363 }
1364 return;
1365
1366 diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
1367 index d00f84add5f4..6d2dbb5089d5 100644
1368 --- a/arch/s390/crypto/aes_s390.c
1369 +++ b/arch/s390/crypto/aes_s390.c
1370 @@ -586,6 +586,9 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
1371 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
1372 struct blkcipher_walk walk;
1373
1374 + if (!nbytes)
1375 + return -EINVAL;
1376 +
1377 if (unlikely(!xts_ctx->fc))
1378 return xts_fallback_encrypt(desc, dst, src, nbytes);
1379
1380 @@ -600,6 +603,9 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
1381 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
1382 struct blkcipher_walk walk;
1383
1384 + if (!nbytes)
1385 + return -EINVAL;
1386 +
1387 if (unlikely(!xts_ctx->fc))
1388 return xts_fallback_decrypt(desc, dst, src, nbytes);
1389
1390 diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
1391 index 70d87db54e62..4c0690fc5167 100644
1392 --- a/arch/s390/include/asm/string.h
1393 +++ b/arch/s390/include/asm/string.h
1394 @@ -71,11 +71,16 @@ extern void *__memmove(void *dest, const void *src, size_t n);
1395 #define memcpy(dst, src, len) __memcpy(dst, src, len)
1396 #define memmove(dst, src, len) __memmove(dst, src, len)
1397 #define memset(s, c, n) __memset(s, c, n)
1398 +#define strlen(s) __strlen(s)
1399 +
1400 +#define __no_sanitize_prefix_strfunc(x) __##x
1401
1402 #ifndef __NO_FORTIFY
1403 #define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
1404 #endif
1405
1406 +#else
1407 +#define __no_sanitize_prefix_strfunc(x) x
1408 #endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */
1409
1410 void *__memset16(uint16_t *s, uint16_t v, size_t count);
1411 @@ -163,8 +168,8 @@ static inline char *strcpy(char *dst, const char *src)
1412 }
1413 #endif
1414
1415 -#ifdef __HAVE_ARCH_STRLEN
1416 -static inline size_t strlen(const char *s)
1417 +#if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__))
1418 +static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s)
1419 {
1420 register unsigned long r0 asm("0") = 0;
1421 const char *tmp = s;
1422 diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
1423 index fe7c205233f1..9ae1c0f05fd2 100644
1424 --- a/arch/x86/include/asm/intel-family.h
1425 +++ b/arch/x86/include/asm/intel-family.h
1426 @@ -73,6 +73,9 @@
1427 #define INTEL_FAM6_ICELAKE_MOBILE 0x7E
1428 #define INTEL_FAM6_ICELAKE_NNPI 0x9D
1429
1430 +#define INTEL_FAM6_TIGERLAKE_L 0x8C
1431 +#define INTEL_FAM6_TIGERLAKE 0x8D
1432 +
1433 /* "Small Core" Processors (Atom) */
1434
1435 #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
1436 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
1437 index bdc16b0aa7c6..dd0ca154a958 100644
1438 --- a/arch/x86/include/asm/kvm_host.h
1439 +++ b/arch/x86/include/asm/kvm_host.h
1440 @@ -1583,6 +1583,13 @@ bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
1441 void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
1442 struct kvm_lapic_irq *irq);
1443
1444 +static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
1445 +{
1446 + /* We can only post Fixed and LowPrio IRQs */
1447 + return (irq->delivery_mode == dest_Fixed ||
1448 + irq->delivery_mode == dest_LowestPrio);
1449 +}
1450 +
1451 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
1452 {
1453 if (kvm_x86_ops->vcpu_blocking)
1454 diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
1455 index d63e63b7d1d9..251c795b4eb3 100644
1456 --- a/arch/x86/kernel/amd_nb.c
1457 +++ b/arch/x86/kernel/amd_nb.c
1458 @@ -21,6 +21,7 @@
1459 #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
1460 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
1461 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
1462 +#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
1463
1464 /* Protect the PCI config register pairs used for SMN and DF indirect access. */
1465 static DEFINE_MUTEX(smn_mutex);
1466 @@ -50,6 +51,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
1467 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
1468 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
1469 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
1470 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
1471 {}
1472 };
1473 EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
1474 @@ -63,6 +65,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
1475 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
1476 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
1477 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
1478 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
1479 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
1480 {}
1481 };
1482 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
1483 index 08fb79f37793..ad0d5ced82b3 100644
1484 --- a/arch/x86/kernel/apic/apic.c
1485 +++ b/arch/x86/kernel/apic/apic.c
1486 @@ -1495,54 +1495,72 @@ static void lapic_setup_esr(void)
1487 oldvalue, value);
1488 }
1489
1490 -static void apic_pending_intr_clear(void)
1491 +#define APIC_IR_REGS APIC_ISR_NR
1492 +#define APIC_IR_BITS (APIC_IR_REGS * 32)
1493 +#define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG)
1494 +
1495 +union apic_ir {
1496 + unsigned long map[APIC_IR_MAPSIZE];
1497 + u32 regs[APIC_IR_REGS];
1498 +};
1499 +
1500 +static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
1501 {
1502 - long long max_loops = cpu_khz ? cpu_khz : 1000000;
1503 - unsigned long long tsc = 0, ntsc;
1504 - unsigned int queued;
1505 - unsigned long value;
1506 - int i, j, acked = 0;
1507 + int i, bit;
1508 +
1509 + /* Read the IRRs */
1510 + for (i = 0; i < APIC_IR_REGS; i++)
1511 + irr->regs[i] = apic_read(APIC_IRR + i * 0x10);
1512 +
1513 + /* Read the ISRs */
1514 + for (i = 0; i < APIC_IR_REGS; i++)
1515 + isr->regs[i] = apic_read(APIC_ISR + i * 0x10);
1516
1517 - if (boot_cpu_has(X86_FEATURE_TSC))
1518 - tsc = rdtsc();
1519 /*
1520 - * After a crash, we no longer service the interrupts and a pending
1521 - * interrupt from previous kernel might still have ISR bit set.
1522 - *
1523 - * Most probably by now CPU has serviced that pending interrupt and
1524 - * it might not have done the ack_APIC_irq() because it thought,
1525 - * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
1526 - * does not clear the ISR bit and cpu thinks it has already serivced
1527 - * the interrupt. Hence a vector might get locked. It was noticed
1528 - * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
1529 + * If the ISR map is not empty. ACK the APIC and run another round
1530 + * to verify whether a pending IRR has been unblocked and turned
1531 + * into a ISR.
1532 */
1533 - do {
1534 - queued = 0;
1535 - for (i = APIC_ISR_NR - 1; i >= 0; i--)
1536 - queued |= apic_read(APIC_IRR + i*0x10);
1537 -
1538 - for (i = APIC_ISR_NR - 1; i >= 0; i--) {
1539 - value = apic_read(APIC_ISR + i*0x10);
1540 - for_each_set_bit(j, &value, 32) {
1541 - ack_APIC_irq();
1542 - acked++;
1543 - }
1544 - }
1545 - if (acked > 256) {
1546 - pr_err("LAPIC pending interrupts after %d EOI\n", acked);
1547 - break;
1548 - }
1549 - if (queued) {
1550 - if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
1551 - ntsc = rdtsc();
1552 - max_loops = (long long)cpu_khz << 10;
1553 - max_loops -= ntsc - tsc;
1554 - } else {
1555 - max_loops--;
1556 - }
1557 - }
1558 - } while (queued && max_loops > 0);
1559 - WARN_ON(max_loops <= 0);
1560 + if (!bitmap_empty(isr->map, APIC_IR_BITS)) {
1561 + /*
1562 + * There can be multiple ISR bits set when a high priority
1563 + * interrupt preempted a lower priority one. Issue an ACK
1564 + * per set bit.
1565 + */
1566 + for_each_set_bit(bit, isr->map, APIC_IR_BITS)
1567 + ack_APIC_irq();
1568 + return true;
1569 + }
1570 +
1571 + return !bitmap_empty(irr->map, APIC_IR_BITS);
1572 +}
1573 +
1574 +/*
1575 + * After a crash, we no longer service the interrupts and a pending
1576 + * interrupt from previous kernel might still have ISR bit set.
1577 + *
1578 + * Most probably by now the CPU has serviced that pending interrupt and it
1579 + * might not have done the ack_APIC_irq() because it thought, interrupt
1580 + * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear
1581 + * the ISR bit and cpu thinks it has already serivced the interrupt. Hence
1582 + * a vector might get locked. It was noticed for timer irq (vector
1583 + * 0x31). Issue an extra EOI to clear ISR.
1584 + *
1585 + * If there are pending IRR bits they turn into ISR bits after a higher
1586 + * priority ISR bit has been acked.
1587 + */
1588 +static void apic_pending_intr_clear(void)
1589 +{
1590 + union apic_ir irr, isr;
1591 + unsigned int i;
1592 +
1593 + /* 512 loops are way oversized and give the APIC a chance to obey. */
1594 + for (i = 0; i < 512; i++) {
1595 + if (!apic_check_and_ack(&irr, &isr))
1596 + return;
1597 + }
1598 + /* Dump the IRR/ISR content if that failed */
1599 + pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map);
1600 }
1601
1602 /**
1603 @@ -1565,6 +1583,14 @@ static void setup_local_APIC(void)
1604 return;
1605 }
1606
1607 + /*
1608 + * If this comes from kexec/kcrash the APIC might be enabled in
1609 + * SPIV. Soft disable it before doing further initialization.
1610 + */
1611 + value = apic_read(APIC_SPIV);
1612 + value &= ~APIC_SPIV_APIC_ENABLED;
1613 + apic_write(APIC_SPIV, value);
1614 +
1615 #ifdef CONFIG_X86_32
1616 /* Pound the ESR really hard over the head with a big hammer - mbligh */
1617 if (lapic_is_integrated() && apic->disable_esr) {
1618 @@ -1610,6 +1636,7 @@ static void setup_local_APIC(void)
1619 value &= ~APIC_TPRI_MASK;
1620 apic_write(APIC_TASKPRI, value);
1621
1622 + /* Clear eventually stale ISR/IRR bits */
1623 apic_pending_intr_clear();
1624
1625 /*
1626 diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
1627 index fdacb864c3dd..2c5676b0a6e7 100644
1628 --- a/arch/x86/kernel/apic/vector.c
1629 +++ b/arch/x86/kernel/apic/vector.c
1630 @@ -398,6 +398,17 @@ static int activate_reserved(struct irq_data *irqd)
1631 if (!irqd_can_reserve(irqd))
1632 apicd->can_reserve = false;
1633 }
1634 +
1635 + /*
1636 + * Check to ensure that the effective affinity mask is a subset
1637 + * the user supplied affinity mask, and warn the user if it is not
1638 + */
1639 + if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
1640 + irq_data_get_affinity_mask(irqd))) {
1641 + pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
1642 + irqd->irq);
1643 + }
1644 +
1645 return ret;
1646 }
1647
1648 diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
1649 index 96421f97e75c..231fa230ebc7 100644
1650 --- a/arch/x86/kernel/smp.c
1651 +++ b/arch/x86/kernel/smp.c
1652 @@ -179,6 +179,12 @@ asmlinkage __visible void smp_reboot_interrupt(void)
1653 irq_exit();
1654 }
1655
1656 +static int register_stop_handler(void)
1657 +{
1658 + return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
1659 + NMI_FLAG_FIRST, "smp_stop");
1660 +}
1661 +
1662 static void native_stop_other_cpus(int wait)
1663 {
1664 unsigned long flags;
1665 @@ -212,39 +218,41 @@ static void native_stop_other_cpus(int wait)
1666 apic->send_IPI_allbutself(REBOOT_VECTOR);
1667
1668 /*
1669 - * Don't wait longer than a second if the caller
1670 - * didn't ask us to wait.
1671 + * Don't wait longer than a second for IPI completion. The
1672 + * wait request is not checked here because that would
1673 + * prevent an NMI shutdown attempt in case that not all
1674 + * CPUs reach shutdown state.
1675 */
1676 timeout = USEC_PER_SEC;
1677 - while (num_online_cpus() > 1 && (wait || timeout--))
1678 + while (num_online_cpus() > 1 && timeout--)
1679 udelay(1);
1680 }
1681 -
1682 - /* if the REBOOT_VECTOR didn't work, try with the NMI */
1683 - if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
1684 - if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
1685 - NMI_FLAG_FIRST, "smp_stop"))
1686 - /* Note: we ignore failures here */
1687 - /* Hope the REBOOT_IRQ is good enough */
1688 - goto finish;
1689 -
1690 - /* sync above data before sending IRQ */
1691 - wmb();
1692
1693 - pr_emerg("Shutting down cpus with NMI\n");
1694 + /* if the REBOOT_VECTOR didn't work, try with the NMI */
1695 + if (num_online_cpus() > 1) {
1696 + /*
1697 + * If NMI IPI is enabled, try to register the stop handler
1698 + * and send the IPI. In any case try to wait for the other
1699 + * CPUs to stop.
1700 + */
1701 + if (!smp_no_nmi_ipi && !register_stop_handler()) {
1702 + /* Sync above data before sending IRQ */
1703 + wmb();
1704
1705 - apic->send_IPI_allbutself(NMI_VECTOR);
1706 + pr_emerg("Shutting down cpus with NMI\n");
1707
1708 + apic->send_IPI_allbutself(NMI_VECTOR);
1709 + }
1710 /*
1711 - * Don't wait longer than a 10 ms if the caller
1712 - * didn't ask us to wait.
1713 + * Don't wait longer than 10 ms if the caller didn't
1714 + * reqeust it. If wait is true, the machine hangs here if
1715 + * one or more CPUs do not reach shutdown state.
1716 */
1717 timeout = USEC_PER_MSEC * 10;
1718 while (num_online_cpus() > 1 && (wait || timeout--))
1719 udelay(1);
1720 }
1721
1722 -finish:
1723 local_irq_save(flags);
1724 disable_local_APIC();
1725 mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
1726 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
1727 index 22c2720cd948..e7d25f436466 100644
1728 --- a/arch/x86/kvm/cpuid.c
1729 +++ b/arch/x86/kvm/cpuid.c
1730 @@ -304,7 +304,13 @@ static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function,
1731 case 7:
1732 case 0xb:
1733 case 0xd:
1734 + case 0xf:
1735 + case 0x10:
1736 + case 0x12:
1737 case 0x14:
1738 + case 0x17:
1739 + case 0x18:
1740 + case 0x1f:
1741 case 0x8000001d:
1742 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1743 break;
1744 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
1745 index 718f7d9afedc..3b971026a653 100644
1746 --- a/arch/x86/kvm/emulate.c
1747 +++ b/arch/x86/kvm/emulate.c
1748 @@ -5395,6 +5395,8 @@ done_prefixes:
1749 ctxt->memopp->addr.mem.ea + ctxt->_eip);
1750
1751 done:
1752 + if (rc == X86EMUL_PROPAGATE_FAULT)
1753 + ctxt->have_exception = true;
1754 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
1755 }
1756
1757 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1758 index a63964e7cec7..94aa6102010d 100644
1759 --- a/arch/x86/kvm/mmu.c
1760 +++ b/arch/x86/kvm/mmu.c
1761 @@ -395,8 +395,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
1762 mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
1763 << shadow_nonpresent_or_rsvd_mask_len;
1764
1765 - page_header(__pa(sptep))->mmio_cached = true;
1766 -
1767 trace_mark_mmio_spte(sptep, gfn, access, gen);
1768 mmu_spte_set(sptep, mask);
1769 }
1770 @@ -5611,13 +5609,13 @@ slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
1771 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
1772 }
1773
1774 -static void free_mmu_pages(struct kvm_vcpu *vcpu)
1775 +static void free_mmu_pages(struct kvm_mmu *mmu)
1776 {
1777 - free_page((unsigned long)vcpu->arch.mmu->pae_root);
1778 - free_page((unsigned long)vcpu->arch.mmu->lm_root);
1779 + free_page((unsigned long)mmu->pae_root);
1780 + free_page((unsigned long)mmu->lm_root);
1781 }
1782
1783 -static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1784 +static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
1785 {
1786 struct page *page;
1787 int i;
1788 @@ -5638,9 +5636,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1789 if (!page)
1790 return -ENOMEM;
1791
1792 - vcpu->arch.mmu->pae_root = page_address(page);
1793 + mmu->pae_root = page_address(page);
1794 for (i = 0; i < 4; ++i)
1795 - vcpu->arch.mmu->pae_root[i] = INVALID_PAGE;
1796 + mmu->pae_root[i] = INVALID_PAGE;
1797
1798 return 0;
1799 }
1800 @@ -5648,6 +5646,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1801 int kvm_mmu_create(struct kvm_vcpu *vcpu)
1802 {
1803 uint i;
1804 + int ret;
1805
1806 vcpu->arch.mmu = &vcpu->arch.root_mmu;
1807 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
1808 @@ -5665,7 +5664,19 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
1809 vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
1810
1811 vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
1812 - return alloc_mmu_pages(vcpu);
1813 +
1814 + ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu);
1815 + if (ret)
1816 + return ret;
1817 +
1818 + ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu);
1819 + if (ret)
1820 + goto fail_allocate_root;
1821 +
1822 + return ret;
1823 + fail_allocate_root:
1824 + free_mmu_pages(&vcpu->arch.guest_mmu);
1825 + return ret;
1826 }
1827
1828
1829 @@ -5943,7 +5954,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
1830 }
1831 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
1832
1833 -static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
1834 +void kvm_mmu_zap_all(struct kvm *kvm)
1835 {
1836 struct kvm_mmu_page *sp, *node;
1837 LIST_HEAD(invalid_list);
1838 @@ -5952,14 +5963,10 @@ static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
1839 spin_lock(&kvm->mmu_lock);
1840 restart:
1841 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
1842 - if (mmio_only && !sp->mmio_cached)
1843 - continue;
1844 if (sp->role.invalid && sp->root_count)
1845 continue;
1846 - if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
1847 - WARN_ON_ONCE(mmio_only);
1848 + if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
1849 goto restart;
1850 - }
1851 if (cond_resched_lock(&kvm->mmu_lock))
1852 goto restart;
1853 }
1854 @@ -5968,11 +5975,6 @@ restart:
1855 spin_unlock(&kvm->mmu_lock);
1856 }
1857
1858 -void kvm_mmu_zap_all(struct kvm *kvm)
1859 -{
1860 - return __kvm_mmu_zap_all(kvm, false);
1861 -}
1862 -
1863 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
1864 {
1865 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1866 @@ -5994,7 +5996,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
1867 */
1868 if (unlikely(gen == 0)) {
1869 kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
1870 - __kvm_mmu_zap_all(kvm, true);
1871 + kvm_mmu_zap_all_fast(kvm);
1872 }
1873 }
1874
1875 @@ -6168,7 +6170,8 @@ unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
1876 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1877 {
1878 kvm_mmu_unload(vcpu);
1879 - free_mmu_pages(vcpu);
1880 + free_mmu_pages(&vcpu->arch.root_mmu);
1881 + free_mmu_pages(&vcpu->arch.guest_mmu);
1882 mmu_free_memory_caches(vcpu);
1883 }
1884
1885 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1886 index e0368076a1ef..45e425c5e6f5 100644
1887 --- a/arch/x86/kvm/svm.c
1888 +++ b/arch/x86/kvm/svm.c
1889 @@ -5274,7 +5274,8 @@ get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
1890
1891 kvm_set_msi_irq(kvm, e, &irq);
1892
1893 - if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
1894 + if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
1895 + !kvm_irq_is_postable(&irq)) {
1896 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
1897 __func__, irq.vector);
1898 return -1;
1899 @@ -5328,6 +5329,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
1900 * 1. When cannot target interrupt to a specific vcpu.
1901 * 2. Unsetting posted interrupt.
1902 * 3. APIC virtialization is disabled for the vcpu.
1903 + * 4. IRQ has incompatible delivery mode (SMI, INIT, etc)
1904 */
1905 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
1906 kvm_vcpu_apicv_active(&svm->vcpu)) {
1907 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
1908 index c030c96fc81a..1d11bf4bab8b 100644
1909 --- a/arch/x86/kvm/vmx/vmx.c
1910 +++ b/arch/x86/kvm/vmx/vmx.c
1911 @@ -7369,10 +7369,14 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
1912 * irqbalance to make the interrupts single-CPU.
1913 *
1914 * We will support full lowest-priority interrupt later.
1915 + *
1916 + * In addition, we can only inject generic interrupts using
1917 + * the PI mechanism, refuse to route others through it.
1918 */
1919
1920 kvm_set_msi_irq(kvm, e, &irq);
1921 - if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
1922 + if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
1923 + !kvm_irq_is_postable(&irq)) {
1924 /*
1925 * Make sure the IRTE is in remapped mode if
1926 * we don't handle it in posted mode.
1927 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1928 index 91602d310a3f..350adc83eb50 100644
1929 --- a/arch/x86/kvm/x86.c
1930 +++ b/arch/x86/kvm/x86.c
1931 @@ -674,8 +674,14 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
1932 data, offset, len, access);
1933 }
1934
1935 +static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
1936 +{
1937 + return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
1938 + rsvd_bits(1, 2);
1939 +}
1940 +
1941 /*
1942 - * Load the pae pdptrs. Return true is they are all valid.
1943 + * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise.
1944 */
1945 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
1946 {
1947 @@ -694,8 +700,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
1948 }
1949 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
1950 if ((pdpte[i] & PT_PRESENT_MASK) &&
1951 - (pdpte[i] &
1952 - vcpu->arch.mmu->guest_rsvd_check.rsvd_bits_mask[0][2])) {
1953 + (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
1954 ret = 0;
1955 goto out;
1956 }
1957 @@ -6528,8 +6533,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
1958 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
1959 emulation_type))
1960 return EMULATE_DONE;
1961 - if (ctxt->have_exception && inject_emulated_exception(vcpu))
1962 + if (ctxt->have_exception) {
1963 + /*
1964 + * #UD should result in just EMULATION_FAILED, and trap-like
1965 + * exception should not be encountered during decode.
1966 + */
1967 + WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
1968 + exception_type(ctxt->exception.vector) == EXCPT_TRAP);
1969 + inject_emulated_exception(vcpu);
1970 return EMULATE_DONE;
1971 + }
1972 if (emulation_type & EMULTYPE_SKIP)
1973 return EMULATE_FAIL;
1974 return handle_emulation_failure(vcpu, emulation_type);
1975 diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
1976 index e6dad600614c..4123100e0eaf 100644
1977 --- a/arch/x86/mm/numa.c
1978 +++ b/arch/x86/mm/numa.c
1979 @@ -861,9 +861,9 @@ void numa_remove_cpu(int cpu)
1980 */
1981 const struct cpumask *cpumask_of_node(int node)
1982 {
1983 - if (node >= nr_node_ids) {
1984 + if ((unsigned)node >= nr_node_ids) {
1985 printk(KERN_WARNING
1986 - "cpumask_of_node(%d): node > nr_node_ids(%u)\n",
1987 + "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
1988 node, nr_node_ids);
1989 dump_stack();
1990 return cpu_none_mask;
1991 diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
1992 index b196524759ec..7f2140414440 100644
1993 --- a/arch/x86/mm/pti.c
1994 +++ b/arch/x86/mm/pti.c
1995 @@ -330,13 +330,15 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
1996
1997 pud = pud_offset(p4d, addr);
1998 if (pud_none(*pud)) {
1999 - addr += PUD_SIZE;
2000 + WARN_ON_ONCE(addr & ~PUD_MASK);
2001 + addr = round_up(addr + 1, PUD_SIZE);
2002 continue;
2003 }
2004
2005 pmd = pmd_offset(pud, addr);
2006 if (pmd_none(*pmd)) {
2007 - addr += PMD_SIZE;
2008 + WARN_ON_ONCE(addr & ~PMD_MASK);
2009 + addr = round_up(addr + 1, PMD_SIZE);
2010 continue;
2011 }
2012
2013 @@ -666,6 +668,8 @@ void __init pti_init(void)
2014 */
2015 void pti_finalize(void)
2016 {
2017 + if (!boot_cpu_has(X86_FEATURE_PTI))
2018 + return;
2019 /*
2020 * We need to clone everything (again) that maps parts of the
2021 * kernel image.
2022 diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
2023 index 2e796b54cbde..9e2444500428 100644
2024 --- a/arch/x86/platform/intel/iosf_mbi.c
2025 +++ b/arch/x86/platform/intel/iosf_mbi.c
2026 @@ -17,6 +17,7 @@
2027 #include <linux/debugfs.h>
2028 #include <linux/capability.h>
2029 #include <linux/pm_qos.h>
2030 +#include <linux/wait.h>
2031
2032 #include <asm/iosf_mbi.h>
2033
2034 @@ -201,23 +202,45 @@ EXPORT_SYMBOL(iosf_mbi_available);
2035 #define PUNIT_SEMAPHORE_BIT BIT(0)
2036 #define PUNIT_SEMAPHORE_ACQUIRE BIT(1)
2037
2038 -static DEFINE_MUTEX(iosf_mbi_punit_mutex);
2039 -static DEFINE_MUTEX(iosf_mbi_block_punit_i2c_access_count_mutex);
2040 +static DEFINE_MUTEX(iosf_mbi_pmic_access_mutex);
2041 static BLOCKING_NOTIFIER_HEAD(iosf_mbi_pmic_bus_access_notifier);
2042 -static u32 iosf_mbi_block_punit_i2c_access_count;
2043 +static DECLARE_WAIT_QUEUE_HEAD(iosf_mbi_pmic_access_waitq);
2044 +static u32 iosf_mbi_pmic_punit_access_count;
2045 +static u32 iosf_mbi_pmic_i2c_access_count;
2046 static u32 iosf_mbi_sem_address;
2047 static unsigned long iosf_mbi_sem_acquired;
2048 static struct pm_qos_request iosf_mbi_pm_qos;
2049
2050 void iosf_mbi_punit_acquire(void)
2051 {
2052 - mutex_lock(&iosf_mbi_punit_mutex);
2053 + /* Wait for any I2C PMIC accesses from in kernel drivers to finish. */
2054 + mutex_lock(&iosf_mbi_pmic_access_mutex);
2055 + while (iosf_mbi_pmic_i2c_access_count != 0) {
2056 + mutex_unlock(&iosf_mbi_pmic_access_mutex);
2057 + wait_event(iosf_mbi_pmic_access_waitq,
2058 + iosf_mbi_pmic_i2c_access_count == 0);
2059 + mutex_lock(&iosf_mbi_pmic_access_mutex);
2060 + }
2061 + /*
2062 + * We do not need to do anything to allow the PUNIT to safely access
2063 + * the PMIC, other then block in kernel accesses to the PMIC.
2064 + */
2065 + iosf_mbi_pmic_punit_access_count++;
2066 + mutex_unlock(&iosf_mbi_pmic_access_mutex);
2067 }
2068 EXPORT_SYMBOL(iosf_mbi_punit_acquire);
2069
2070 void iosf_mbi_punit_release(void)
2071 {
2072 - mutex_unlock(&iosf_mbi_punit_mutex);
2073 + bool do_wakeup;
2074 +
2075 + mutex_lock(&iosf_mbi_pmic_access_mutex);
2076 + iosf_mbi_pmic_punit_access_count--;
2077 + do_wakeup = iosf_mbi_pmic_punit_access_count == 0;
2078 + mutex_unlock(&iosf_mbi_pmic_access_mutex);
2079 +
2080 + if (do_wakeup)
2081 + wake_up(&iosf_mbi_pmic_access_waitq);
2082 }
2083 EXPORT_SYMBOL(iosf_mbi_punit_release);
2084
2085 @@ -256,34 +279,32 @@ static void iosf_mbi_reset_semaphore(void)
2086 * already blocked P-Unit accesses because it wants them blocked over multiple
2087 * i2c-transfers, for e.g. read-modify-write of an I2C client register.
2088 *
2089 - * The P-Unit accesses already being blocked is tracked through the
2090 - * iosf_mbi_block_punit_i2c_access_count variable which is protected by the
2091 - * iosf_mbi_block_punit_i2c_access_count_mutex this mutex is hold for the
2092 - * entire duration of the function.
2093 - *
2094 - * If access is not blocked yet, this function takes the following steps:
2095 + * To allow safe PMIC i2c bus accesses this function takes the following steps:
2096 *
2097 * 1) Some code sends request to the P-Unit which make it access the PMIC
2098 * I2C bus. Testing has shown that the P-Unit does not check its internal
2099 * PMIC bus semaphore for these requests. Callers of these requests call
2100 * iosf_mbi_punit_acquire()/_release() around their P-Unit accesses, these
2101 - * functions lock/unlock the iosf_mbi_punit_mutex.
2102 - * As the first step we lock the iosf_mbi_punit_mutex, to wait for any in
2103 - * flight requests to finish and to block any new requests.
2104 + * functions increase/decrease iosf_mbi_pmic_punit_access_count, so first
2105 + * we wait for iosf_mbi_pmic_punit_access_count to become 0.
2106 + *
2107 + * 2) Check iosf_mbi_pmic_i2c_access_count, if access has already
2108 + * been blocked by another caller, we only need to increment
2109 + * iosf_mbi_pmic_i2c_access_count and we can skip the other steps.
2110 *
2111 - * 2) Some code makes such P-Unit requests from atomic contexts where it
2112 + * 3) Some code makes such P-Unit requests from atomic contexts where it
2113 * cannot call iosf_mbi_punit_acquire() as that may sleep.
2114 * As the second step we call a notifier chain which allows any code
2115 * needing P-Unit resources from atomic context to acquire them before
2116 * we take control over the PMIC I2C bus.
2117 *
2118 - * 3) When CPU cores enter C6 or C7 the P-Unit needs to talk to the PMIC
2119 + * 4) When CPU cores enter C6 or C7 the P-Unit needs to talk to the PMIC
2120 * if this happens while the kernel itself is accessing the PMIC I2C bus
2121 * the SoC hangs.
2122 * As the third step we call pm_qos_update_request() to disallow the CPU
2123 * to enter C6 or C7.
2124 *
2125 - * 4) The P-Unit has a PMIC bus semaphore which we can request to stop
2126 + * 5) The P-Unit has a PMIC bus semaphore which we can request to stop
2127 * autonomous P-Unit tasks from accessing the PMIC I2C bus while we hold it.
2128 * As the fourth and final step we request this semaphore and wait for our
2129 * request to be acknowledged.
2130 @@ -297,12 +318,18 @@ int iosf_mbi_block_punit_i2c_access(void)
2131 if (WARN_ON(!mbi_pdev || !iosf_mbi_sem_address))
2132 return -ENXIO;
2133
2134 - mutex_lock(&iosf_mbi_block_punit_i2c_access_count_mutex);
2135 + mutex_lock(&iosf_mbi_pmic_access_mutex);
2136
2137 - if (iosf_mbi_block_punit_i2c_access_count > 0)
2138 + while (iosf_mbi_pmic_punit_access_count != 0) {
2139 + mutex_unlock(&iosf_mbi_pmic_access_mutex);
2140 + wait_event(iosf_mbi_pmic_access_waitq,
2141 + iosf_mbi_pmic_punit_access_count == 0);
2142 + mutex_lock(&iosf_mbi_pmic_access_mutex);
2143 + }
2144 +
2145 + if (iosf_mbi_pmic_i2c_access_count > 0)
2146 goto success;
2147
2148 - mutex_lock(&iosf_mbi_punit_mutex);
2149 blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier,
2150 MBI_PMIC_BUS_ACCESS_BEGIN, NULL);
2151
2152 @@ -330,10 +357,6 @@ int iosf_mbi_block_punit_i2c_access(void)
2153 iosf_mbi_sem_acquired = jiffies;
2154 dev_dbg(&mbi_pdev->dev, "P-Unit semaphore acquired after %ums\n",
2155 jiffies_to_msecs(jiffies - start));
2156 - /*
2157 - * Success, keep iosf_mbi_punit_mutex locked till
2158 - * iosf_mbi_unblock_punit_i2c_access() gets called.
2159 - */
2160 goto success;
2161 }
2162
2163 @@ -344,15 +367,13 @@ int iosf_mbi_block_punit_i2c_access(void)
2164 dev_err(&mbi_pdev->dev, "Error P-Unit semaphore timed out, resetting\n");
2165 error:
2166 iosf_mbi_reset_semaphore();
2167 - mutex_unlock(&iosf_mbi_punit_mutex);
2168 -
2169 if (!iosf_mbi_get_sem(&sem))
2170 dev_err(&mbi_pdev->dev, "P-Unit semaphore: %d\n", sem);
2171 success:
2172 if (!WARN_ON(ret))
2173 - iosf_mbi_block_punit_i2c_access_count++;
2174 + iosf_mbi_pmic_i2c_access_count++;
2175
2176 - mutex_unlock(&iosf_mbi_block_punit_i2c_access_count_mutex);
2177 + mutex_unlock(&iosf_mbi_pmic_access_mutex);
2178
2179 return ret;
2180 }
2181 @@ -360,17 +381,20 @@ EXPORT_SYMBOL(iosf_mbi_block_punit_i2c_access);
2182
2183 void iosf_mbi_unblock_punit_i2c_access(void)
2184 {
2185 - mutex_lock(&iosf_mbi_block_punit_i2c_access_count_mutex);
2186 + bool do_wakeup = false;
2187
2188 - iosf_mbi_block_punit_i2c_access_count--;
2189 - if (iosf_mbi_block_punit_i2c_access_count == 0) {
2190 + mutex_lock(&iosf_mbi_pmic_access_mutex);
2191 + iosf_mbi_pmic_i2c_access_count--;
2192 + if (iosf_mbi_pmic_i2c_access_count == 0) {
2193 iosf_mbi_reset_semaphore();
2194 - mutex_unlock(&iosf_mbi_punit_mutex);
2195 dev_dbg(&mbi_pdev->dev, "punit semaphore held for %ums\n",
2196 jiffies_to_msecs(jiffies - iosf_mbi_sem_acquired));
2197 + do_wakeup = true;
2198 }
2199 + mutex_unlock(&iosf_mbi_pmic_access_mutex);
2200
2201 - mutex_unlock(&iosf_mbi_block_punit_i2c_access_count_mutex);
2202 + if (do_wakeup)
2203 + wake_up(&iosf_mbi_pmic_access_waitq);
2204 }
2205 EXPORT_SYMBOL(iosf_mbi_unblock_punit_i2c_access);
2206
2207 @@ -379,10 +403,10 @@ int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
2208 int ret;
2209
2210 /* Wait for the bus to go inactive before registering */
2211 - mutex_lock(&iosf_mbi_punit_mutex);
2212 + iosf_mbi_punit_acquire();
2213 ret = blocking_notifier_chain_register(
2214 &iosf_mbi_pmic_bus_access_notifier, nb);
2215 - mutex_unlock(&iosf_mbi_punit_mutex);
2216 + iosf_mbi_punit_release();
2217
2218 return ret;
2219 }
2220 @@ -403,9 +427,9 @@ int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
2221 int ret;
2222
2223 /* Wait for the bus to go inactive before unregistering */
2224 - mutex_lock(&iosf_mbi_punit_mutex);
2225 + iosf_mbi_punit_acquire();
2226 ret = iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(nb);
2227 - mutex_unlock(&iosf_mbi_punit_mutex);
2228 + iosf_mbi_punit_release();
2229
2230 return ret;
2231 }
2232 @@ -413,7 +437,7 @@ EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier);
2233
2234 void iosf_mbi_assert_punit_acquired(void)
2235 {
2236 - WARN_ON(!mutex_is_locked(&iosf_mbi_punit_mutex));
2237 + WARN_ON(iosf_mbi_pmic_punit_access_count == 0);
2238 }
2239 EXPORT_SYMBOL(iosf_mbi_assert_punit_acquired);
2240
2241 diff --git a/block/blk-flush.c b/block/blk-flush.c
2242 index aedd9320e605..1eec9cbe5a0a 100644
2243 --- a/block/blk-flush.c
2244 +++ b/block/blk-flush.c
2245 @@ -214,6 +214,16 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
2246
2247 /* release the tag's ownership to the req cloned from */
2248 spin_lock_irqsave(&fq->mq_flush_lock, flags);
2249 +
2250 + if (!refcount_dec_and_test(&flush_rq->ref)) {
2251 + fq->rq_status = error;
2252 + spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
2253 + return;
2254 + }
2255 +
2256 + if (fq->rq_status != BLK_STS_OK)
2257 + error = fq->rq_status;
2258 +
2259 hctx = flush_rq->mq_hctx;
2260 if (!q->elevator) {
2261 blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
2262 diff --git a/block/blk-mq.c b/block/blk-mq.c
2263 index 0835f4d8d42e..a79b9ad1aba1 100644
2264 --- a/block/blk-mq.c
2265 +++ b/block/blk-mq.c
2266 @@ -44,12 +44,12 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
2267
2268 static int blk_mq_poll_stats_bkt(const struct request *rq)
2269 {
2270 - int ddir, bytes, bucket;
2271 + int ddir, sectors, bucket;
2272
2273 ddir = rq_data_dir(rq);
2274 - bytes = blk_rq_bytes(rq);
2275 + sectors = blk_rq_stats_sectors(rq);
2276
2277 - bucket = ddir + 2*(ilog2(bytes) - 9);
2278 + bucket = ddir + 2 * ilog2(sectors);
2279
2280 if (bucket < 0)
2281 return -1;
2282 @@ -330,6 +330,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
2283 else
2284 rq->start_time_ns = 0;
2285 rq->io_start_time_ns = 0;
2286 + rq->stats_sectors = 0;
2287 rq->nr_phys_segments = 0;
2288 #if defined(CONFIG_BLK_DEV_INTEGRITY)
2289 rq->nr_integrity_segments = 0;
2290 @@ -673,9 +674,7 @@ void blk_mq_start_request(struct request *rq)
2291
2292 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
2293 rq->io_start_time_ns = ktime_get_ns();
2294 -#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2295 - rq->throtl_size = blk_rq_sectors(rq);
2296 -#endif
2297 + rq->stats_sectors = blk_rq_sectors(rq);
2298 rq->rq_flags |= RQF_STATS;
2299 rq_qos_issue(q, rq);
2300 }
2301 @@ -905,7 +904,10 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
2302 */
2303 if (blk_mq_req_expired(rq, next))
2304 blk_mq_rq_timed_out(rq, reserved);
2305 - if (refcount_dec_and_test(&rq->ref))
2306 +
2307 + if (is_flush_rq(rq, hctx))
2308 + rq->end_io(rq, 0);
2309 + else if (refcount_dec_and_test(&rq->ref))
2310 __blk_mq_free_request(rq);
2311
2312 return true;
2313 @@ -2841,6 +2843,8 @@ static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
2314 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2315 struct request_queue *q)
2316 {
2317 + int ret = -ENOMEM;
2318 +
2319 /* mark the queue as mq asap */
2320 q->mq_ops = set->ops;
2321
2322 @@ -2902,17 +2906,18 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2323 blk_mq_map_swqueue(q);
2324
2325 if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2326 - int ret;
2327 -
2328 ret = elevator_init_mq(q);
2329 if (ret)
2330 - return ERR_PTR(ret);
2331 + goto err_tag_set;
2332 }
2333
2334 return q;
2335
2336 +err_tag_set:
2337 + blk_mq_del_queue_tag_set(q);
2338 err_hctxs:
2339 kfree(q->queue_hw_ctx);
2340 + q->nr_hw_queues = 0;
2341 err_sys_init:
2342 blk_mq_sysfs_deinit(q);
2343 err_poll:
2344 diff --git a/block/blk-throttle.c b/block/blk-throttle.c
2345 index 8ab6c8153223..ee74bffe3504 100644
2346 --- a/block/blk-throttle.c
2347 +++ b/block/blk-throttle.c
2348 @@ -2246,7 +2246,8 @@ void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2349 struct request_queue *q = rq->q;
2350 struct throtl_data *td = q->td;
2351
2352 - throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
2353 + throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
2354 + time_ns >> 10);
2355 }
2356
2357 void blk_throtl_bio_endio(struct bio *bio)
2358 diff --git a/block/blk.h b/block/blk.h
2359 index de6b2e146d6e..d5edfd73d45e 100644
2360 --- a/block/blk.h
2361 +++ b/block/blk.h
2362 @@ -19,6 +19,7 @@ struct blk_flush_queue {
2363 unsigned int flush_queue_delayed:1;
2364 unsigned int flush_pending_idx:1;
2365 unsigned int flush_running_idx:1;
2366 + blk_status_t rq_status;
2367 unsigned long flush_pending_since;
2368 struct list_head flush_queue[2];
2369 struct list_head flush_data_in_flight;
2370 @@ -47,6 +48,12 @@ static inline void __blk_get_queue(struct request_queue *q)
2371 kobject_get(&q->kobj);
2372 }
2373
2374 +static inline bool
2375 +is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
2376 +{
2377 + return hctx->fq->flush_rq == req;
2378 +}
2379 +
2380 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
2381 int node, int cmd_size, gfp_t flags);
2382 void blk_free_flush_queue(struct blk_flush_queue *q);
2383 diff --git a/block/mq-deadline.c b/block/mq-deadline.c
2384 index 2a2a2e82832e..35e84bc0ec8c 100644
2385 --- a/block/mq-deadline.c
2386 +++ b/block/mq-deadline.c
2387 @@ -377,13 +377,6 @@ done:
2388 * hardware queue, but we may return a request that is for a
2389 * different hardware queue. This is because mq-deadline has shared
2390 * state for all hardware queues, in terms of sorting, FIFOs, etc.
2391 - *
2392 - * For a zoned block device, __dd_dispatch_request() may return NULL
2393 - * if all the queued write requests are directed at zones that are already
2394 - * locked due to on-going write requests. In this case, make sure to mark
2395 - * the queue as needing a restart to ensure that the queue is run again
2396 - * and the pending writes dispatched once the target zones for the ongoing
2397 - * write requests are unlocked in dd_finish_request().
2398 */
2399 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
2400 {
2401 @@ -392,9 +385,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
2402
2403 spin_lock(&dd->lock);
2404 rq = __dd_dispatch_request(dd);
2405 - if (!rq && blk_queue_is_zoned(hctx->queue) &&
2406 - !list_empty(&dd->fifo_list[WRITE]))
2407 - blk_mq_sched_mark_restart_hctx(hctx);
2408 spin_unlock(&dd->lock);
2409
2410 return rq;
2411 @@ -561,6 +551,13 @@ static void dd_prepare_request(struct request *rq, struct bio *bio)
2412 * spinlock so that the zone is never unlocked while deadline_fifo_request()
2413 * or deadline_next_request() are executing. This function is called for
2414 * all requests, whether or not these requests complete successfully.
2415 + *
2416 + * For a zoned block device, __dd_dispatch_request() may have stopped
2417 + * dispatching requests if all the queued requests are write requests directed
2418 + * at zones that are already locked due to on-going write requests. To ensure
2419 + * write request dispatch progress in this case, mark the queue as needing a
2420 + * restart to ensure that the queue is run again after completion of the
2421 + * request and zones being unlocked.
2422 */
2423 static void dd_finish_request(struct request *rq)
2424 {
2425 @@ -572,6 +569,8 @@ static void dd_finish_request(struct request *rq)
2426
2427 spin_lock_irqsave(&dd->zone_lock, flags);
2428 blk_req_zone_write_unlock(rq);
2429 + if (!list_empty(&dd->fifo_list[WRITE]))
2430 + blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
2431 spin_unlock_irqrestore(&dd->zone_lock, flags);
2432 }
2433 }
2434 diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
2435 index d696f165a50e..60bbc5090abe 100644
2436 --- a/drivers/acpi/acpi_lpss.c
2437 +++ b/drivers/acpi/acpi_lpss.c
2438 @@ -219,12 +219,13 @@ static void bsw_pwm_setup(struct lpss_private_data *pdata)
2439 }
2440
2441 static const struct lpss_device_desc lpt_dev_desc = {
2442 - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
2443 + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
2444 + | LPSS_SAVE_CTX,
2445 .prv_offset = 0x800,
2446 };
2447
2448 static const struct lpss_device_desc lpt_i2c_dev_desc = {
2449 - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
2450 + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
2451 .prv_offset = 0x800,
2452 };
2453
2454 @@ -236,7 +237,8 @@ static struct property_entry uart_properties[] = {
2455 };
2456
2457 static const struct lpss_device_desc lpt_uart_dev_desc = {
2458 - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
2459 + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
2460 + | LPSS_SAVE_CTX,
2461 .clk_con_id = "baudclk",
2462 .prv_offset = 0x800,
2463 .setup = lpss_uart_setup,
2464 diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
2465 index 24f065114d42..2c4dda0787e8 100644
2466 --- a/drivers/acpi/acpi_processor.c
2467 +++ b/drivers/acpi/acpi_processor.c
2468 @@ -279,9 +279,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
2469 }
2470
2471 if (acpi_duplicate_processor_id(pr->acpi_id)) {
2472 - dev_err(&device->dev,
2473 - "Failed to get unique processor _UID (0x%x)\n",
2474 - pr->acpi_id);
2475 + if (pr->acpi_id == 0xff)
2476 + dev_info_once(&device->dev,
2477 + "Entry not well-defined, consider updating BIOS\n");
2478 + else
2479 + dev_err(&device->dev,
2480 + "Failed to get unique processor _UID (0x%x)\n",
2481 + pr->acpi_id);
2482 return -ENODEV;
2483 }
2484
2485 diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
2486 index a66e00fe31fe..66205ec54555 100644
2487 --- a/drivers/acpi/apei/ghes.c
2488 +++ b/drivers/acpi/apei/ghes.c
2489 @@ -153,6 +153,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
2490 int ghes_estatus_pool_init(int num_ghes)
2491 {
2492 unsigned long addr, len;
2493 + int rc;
2494
2495 ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
2496 if (!ghes_estatus_pool)
2497 @@ -164,7 +165,7 @@ int ghes_estatus_pool_init(int num_ghes)
2498 ghes_estatus_pool_size_request = PAGE_ALIGN(len);
2499 addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
2500 if (!addr)
2501 - return -ENOMEM;
2502 + goto err_pool_alloc;
2503
2504 /*
2505 * New allocation must be visible in all pgd before it can be found by
2506 @@ -172,7 +173,19 @@ int ghes_estatus_pool_init(int num_ghes)
2507 */
2508 vmalloc_sync_all();
2509
2510 - return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
2511 + rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
2512 + if (rc)
2513 + goto err_pool_add;
2514 +
2515 + return 0;
2516 +
2517 +err_pool_add:
2518 + vfree((void *)addr);
2519 +
2520 +err_pool_alloc:
2521 + gen_pool_destroy(ghes_estatus_pool);
2522 +
2523 + return -ENOMEM;
2524 }
2525
2526 static int map_gen_v2(struct ghes *ghes)
2527 diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
2528 index 15f103d7532b..3b2525908dd8 100644
2529 --- a/drivers/acpi/cppc_acpi.c
2530 +++ b/drivers/acpi/cppc_acpi.c
2531 @@ -365,8 +365,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
2532 union acpi_object *psd = NULL;
2533 struct acpi_psd_package *pdomain;
2534
2535 - status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
2536 - ACPI_TYPE_PACKAGE);
2537 + status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
2538 + &buffer, ACPI_TYPE_PACKAGE);
2539 + if (status == AE_NOT_FOUND) /* _PSD is optional */
2540 + return 0;
2541 if (ACPI_FAILURE(status))
2542 return -ENODEV;
2543
2544 diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
2545 index b2ef4c2ec955..fd66a736621c 100644
2546 --- a/drivers/acpi/custom_method.c
2547 +++ b/drivers/acpi/custom_method.c
2548 @@ -49,8 +49,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
2549 if ((*ppos > max_size) ||
2550 (*ppos + count > max_size) ||
2551 (*ppos + count < count) ||
2552 - (count > uncopied_bytes))
2553 + (count > uncopied_bytes)) {
2554 + kfree(buf);
2555 return -EINVAL;
2556 + }
2557
2558 if (copy_from_user(buf + (*ppos), user_buf, count)) {
2559 kfree(buf);
2560 @@ -70,6 +72,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
2561 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
2562 }
2563
2564 + kfree(buf);
2565 return count;
2566 }
2567
2568 diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
2569 index d2549ae65e1b..dea8a60e18a4 100644
2570 --- a/drivers/acpi/pci_irq.c
2571 +++ b/drivers/acpi/pci_irq.c
2572 @@ -449,8 +449,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
2573 * No IRQ known to the ACPI subsystem - maybe the BIOS /
2574 * driver reported one, then use it. Exit in any case.
2575 */
2576 - if (!acpi_pci_irq_valid(dev, pin))
2577 + if (!acpi_pci_irq_valid(dev, pin)) {
2578 + kfree(entry);
2579 return 0;
2580 + }
2581
2582 if (acpi_isa_register_gsi(dev))
2583 dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
2584 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
2585 index f7652baa6337..3e63294304c7 100644
2586 --- a/drivers/ata/ahci.c
2587 +++ b/drivers/ata/ahci.c
2588 @@ -65,6 +65,12 @@ enum board_ids {
2589 board_ahci_sb700, /* for SB700 and SB800 */
2590 board_ahci_vt8251,
2591
2592 + /*
2593 + * board IDs for Intel chipsets that support more than 6 ports
2594 + * *and* end up needing the PCS quirk.
2595 + */
2596 + board_ahci_pcs7,
2597 +
2598 /* aliases */
2599 board_ahci_mcp_linux = board_ahci_mcp65,
2600 board_ahci_mcp67 = board_ahci_mcp65,
2601 @@ -220,6 +226,12 @@ static const struct ata_port_info ahci_port_info[] = {
2602 .udma_mask = ATA_UDMA6,
2603 .port_ops = &ahci_vt8251_ops,
2604 },
2605 + [board_ahci_pcs7] = {
2606 + .flags = AHCI_FLAG_COMMON,
2607 + .pio_mask = ATA_PIO4,
2608 + .udma_mask = ATA_UDMA6,
2609 + .port_ops = &ahci_ops,
2610 + },
2611 };
2612
2613 static const struct pci_device_id ahci_pci_tbl[] = {
2614 @@ -264,26 +276,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
2615 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
2616 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
2617 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
2618 - { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
2619 - { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
2620 - { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
2621 - { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
2622 - { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
2623 - { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
2624 - { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
2625 - { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
2626 - { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
2627 - { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
2628 - { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
2629 - { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
2630 - { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
2631 - { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
2632 - { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
2633 - { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
2634 - { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
2635 - { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
2636 - { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
2637 - { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
2638 + { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
2639 + { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
2640 + { PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
2641 + { PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
2642 + { PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
2643 + { PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
2644 + { PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
2645 + { PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
2646 + { PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
2647 + { PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
2648 + { PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
2649 + { PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
2650 + { PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
2651 + { PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
2652 + { PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
2653 + { PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
2654 + { PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
2655 + { PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
2656 + { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
2657 + { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
2658 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
2659 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
2660 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
2661 @@ -623,30 +635,6 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
2662 ahci_save_initial_config(&pdev->dev, hpriv);
2663 }
2664
2665 -static int ahci_pci_reset_controller(struct ata_host *host)
2666 -{
2667 - struct pci_dev *pdev = to_pci_dev(host->dev);
2668 - int rc;
2669 -
2670 - rc = ahci_reset_controller(host);
2671 - if (rc)
2672 - return rc;
2673 -
2674 - if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
2675 - struct ahci_host_priv *hpriv = host->private_data;
2676 - u16 tmp16;
2677 -
2678 - /* configure PCS */
2679 - pci_read_config_word(pdev, 0x92, &tmp16);
2680 - if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
2681 - tmp16 |= hpriv->port_map;
2682 - pci_write_config_word(pdev, 0x92, tmp16);
2683 - }
2684 - }
2685 -
2686 - return 0;
2687 -}
2688 -
2689 static void ahci_pci_init_controller(struct ata_host *host)
2690 {
2691 struct ahci_host_priv *hpriv = host->private_data;
2692 @@ -849,7 +837,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
2693 struct ata_host *host = pci_get_drvdata(pdev);
2694 int rc;
2695
2696 - rc = ahci_pci_reset_controller(host);
2697 + rc = ahci_reset_controller(host);
2698 if (rc)
2699 return rc;
2700 ahci_pci_init_controller(host);
2701 @@ -884,7 +872,7 @@ static int ahci_pci_device_resume(struct device *dev)
2702 ahci_mcp89_apple_enable(pdev);
2703
2704 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2705 - rc = ahci_pci_reset_controller(host);
2706 + rc = ahci_reset_controller(host);
2707 if (rc)
2708 return rc;
2709
2710 @@ -1619,6 +1607,34 @@ update_policy:
2711 ap->target_lpm_policy = policy;
2712 }
2713
2714 +static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
2715 +{
2716 + const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
2717 + u16 tmp16;
2718 +
2719 + /*
2720 + * Only apply the 6-port PCS quirk for known legacy platforms.
2721 + */
2722 + if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
2723 + return;
2724 + if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
2725 + return;
2726 +
2727 + /*
2728 + * port_map is determined from PORTS_IMPL PCI register which is
2729 + * implemented as write or write-once register. If the register
2730 + * isn't programmed, ahci automatically generates it from number
2731 + * of ports, which is good enough for PCS programming. It is
2732 + * otherwise expected that platform firmware enables the ports
2733 + * before the OS boots.
2734 + */
2735 + pci_read_config_word(pdev, PCS_6, &tmp16);
2736 + if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
2737 + tmp16 |= hpriv->port_map;
2738 + pci_write_config_word(pdev, PCS_6, tmp16);
2739 + }
2740 +}
2741 +
2742 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2743 {
2744 unsigned int board_id = ent->driver_data;
2745 @@ -1731,6 +1747,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2746 /* save initial config */
2747 ahci_pci_save_initial_config(pdev, hpriv);
2748
2749 + /*
2750 + * If platform firmware failed to enable ports, try to enable
2751 + * them here.
2752 + */
2753 + ahci_intel_pcs_quirk(pdev, hpriv);
2754 +
2755 /* prepare host */
2756 if (hpriv->cap & HOST_CAP_NCQ) {
2757 pi.flags |= ATA_FLAG_NCQ;
2758 @@ -1840,7 +1862,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2759 if (rc)
2760 return rc;
2761
2762 - rc = ahci_pci_reset_controller(host);
2763 + rc = ahci_reset_controller(host);
2764 if (rc)
2765 return rc;
2766
2767 diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
2768 index 0570629d719d..3dbf398c92ea 100644
2769 --- a/drivers/ata/ahci.h
2770 +++ b/drivers/ata/ahci.h
2771 @@ -247,6 +247,8 @@ enum {
2772 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
2773
2774 ICH_MAP = 0x90, /* ICH MAP register */
2775 + PCS_6 = 0x92, /* 6 port PCS */
2776 + PCS_7 = 0x94, /* 7+ port PCS (Denverton) */
2777
2778 /* em constants */
2779 EM_MAX_SLOTS = 8,
2780 diff --git a/drivers/base/soc.c b/drivers/base/soc.c
2781 index 10b280f30217..7e91894a380b 100644
2782 --- a/drivers/base/soc.c
2783 +++ b/drivers/base/soc.c
2784 @@ -157,6 +157,7 @@ out2:
2785 out1:
2786 return ERR_PTR(ret);
2787 }
2788 +EXPORT_SYMBOL_GPL(soc_device_register);
2789
2790 /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
2791 void soc_device_unregister(struct soc_device *soc_dev)
2792 @@ -166,6 +167,7 @@ void soc_device_unregister(struct soc_device *soc_dev)
2793 device_unregister(&soc_dev->dev);
2794 early_soc_dev_attr = NULL;
2795 }
2796 +EXPORT_SYMBOL_GPL(soc_device_unregister);
2797
2798 static int __init soc_bus_register(void)
2799 {
2800 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
2801 index ab7ca5989097..1410fa893653 100644
2802 --- a/drivers/block/loop.c
2803 +++ b/drivers/block/loop.c
2804 @@ -1755,6 +1755,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
2805 case LOOP_SET_FD:
2806 case LOOP_CHANGE_FD:
2807 case LOOP_SET_BLOCK_SIZE:
2808 + case LOOP_SET_DIRECT_IO:
2809 err = lo_ioctl(bdev, mode, cmd, arg);
2810 break;
2811 default:
2812 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
2813 index e21d2ded732b..a69a90ad9208 100644
2814 --- a/drivers/block/nbd.c
2815 +++ b/drivers/block/nbd.c
2816 @@ -357,8 +357,10 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
2817 }
2818 config = nbd->config;
2819
2820 - if (!mutex_trylock(&cmd->lock))
2821 + if (!mutex_trylock(&cmd->lock)) {
2822 + nbd_config_put(nbd);
2823 return BLK_EH_RESET_TIMER;
2824 + }
2825
2826 if (config->num_connections > 1) {
2827 dev_err_ratelimited(nbd_to_dev(nbd),
2828 diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
2829 index 9044d31ab1a1..8d53b8ef545c 100644
2830 --- a/drivers/char/hw_random/core.c
2831 +++ b/drivers/char/hw_random/core.c
2832 @@ -67,7 +67,7 @@ static void add_early_randomness(struct hwrng *rng)
2833 size_t size = min_t(size_t, 16, rng_buffer_size());
2834
2835 mutex_lock(&reading_mutex);
2836 - bytes_read = rng_get_data(rng, rng_buffer, size, 1);
2837 + bytes_read = rng_get_data(rng, rng_buffer, size, 0);
2838 mutex_unlock(&reading_mutex);
2839 if (bytes_read > 0)
2840 add_device_randomness(rng_buffer, bytes_read);
2841 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
2842 index 6707659cffd6..44bd3dda01c2 100644
2843 --- a/drivers/char/ipmi/ipmi_msghandler.c
2844 +++ b/drivers/char/ipmi/ipmi_msghandler.c
2845 @@ -4215,7 +4215,53 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
2846 int chan;
2847
2848 ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
2849 - if (msg->rsp_size < 2) {
2850 +
2851 + if ((msg->data_size >= 2)
2852 + && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
2853 + && (msg->data[1] == IPMI_SEND_MSG_CMD)
2854 + && (msg->user_data == NULL)) {
2855 +
2856 + if (intf->in_shutdown)
2857 + goto free_msg;
2858 +
2859 + /*
2860 + * This is the local response to a command send, start
2861 + * the timer for these. The user_data will not be
2862 + * NULL if this is a response send, and we will let
2863 + * response sends just go through.
2864 + */
2865 +
2866 + /*
2867 + * Check for errors, if we get certain errors (ones
2868 + * that mean basically we can try again later), we
2869 + * ignore them and start the timer. Otherwise we
2870 + * report the error immediately.
2871 + */
2872 + if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
2873 + && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
2874 + && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
2875 + && (msg->rsp[2] != IPMI_BUS_ERR)
2876 + && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
2877 + int ch = msg->rsp[3] & 0xf;
2878 + struct ipmi_channel *chans;
2879 +
2880 + /* Got an error sending the message, handle it. */
2881 +
2882 + chans = READ_ONCE(intf->channel_list)->c;
2883 + if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
2884 + || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
2885 + ipmi_inc_stat(intf, sent_lan_command_errs);
2886 + else
2887 + ipmi_inc_stat(intf, sent_ipmb_command_errs);
2888 + intf_err_seq(intf, msg->msgid, msg->rsp[2]);
2889 + } else
2890 + /* The message was sent, start the timer. */
2891 + intf_start_seq_timer(intf, msg->msgid);
2892 +free_msg:
2893 + requeue = 0;
2894 + goto out;
2895 +
2896 + } else if (msg->rsp_size < 2) {
2897 /* Message is too small to be correct. */
2898 dev_warn(intf->si_dev,
2899 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
2900 @@ -4472,62 +4518,16 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
2901 unsigned long flags = 0; /* keep us warning-free. */
2902 int run_to_completion = intf->run_to_completion;
2903
2904 - if ((msg->data_size >= 2)
2905 - && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
2906 - && (msg->data[1] == IPMI_SEND_MSG_CMD)
2907 - && (msg->user_data == NULL)) {
2908 -
2909 - if (intf->in_shutdown)
2910 - goto free_msg;
2911 -
2912 - /*
2913 - * This is the local response to a command send, start
2914 - * the timer for these. The user_data will not be
2915 - * NULL if this is a response send, and we will let
2916 - * response sends just go through.
2917 - */
2918 -
2919 - /*
2920 - * Check for errors, if we get certain errors (ones
2921 - * that mean basically we can try again later), we
2922 - * ignore them and start the timer. Otherwise we
2923 - * report the error immediately.
2924 - */
2925 - if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
2926 - && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
2927 - && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
2928 - && (msg->rsp[2] != IPMI_BUS_ERR)
2929 - && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
2930 - int ch = msg->rsp[3] & 0xf;
2931 - struct ipmi_channel *chans;
2932 -
2933 - /* Got an error sending the message, handle it. */
2934 -
2935 - chans = READ_ONCE(intf->channel_list)->c;
2936 - if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
2937 - || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
2938 - ipmi_inc_stat(intf, sent_lan_command_errs);
2939 - else
2940 - ipmi_inc_stat(intf, sent_ipmb_command_errs);
2941 - intf_err_seq(intf, msg->msgid, msg->rsp[2]);
2942 - } else
2943 - /* The message was sent, start the timer. */
2944 - intf_start_seq_timer(intf, msg->msgid);
2945 -
2946 -free_msg:
2947 - ipmi_free_smi_msg(msg);
2948 - } else {
2949 - /*
2950 - * To preserve message order, we keep a queue and deliver from
2951 - * a tasklet.
2952 - */
2953 - if (!run_to_completion)
2954 - spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
2955 - list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
2956 - if (!run_to_completion)
2957 - spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
2958 - flags);
2959 - }
2960 + /*
2961 + * To preserve message order, we keep a queue and deliver from
2962 + * a tasklet.
2963 + */
2964 + if (!run_to_completion)
2965 + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
2966 + list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
2967 + if (!run_to_completion)
2968 + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
2969 + flags);
2970
2971 if (!run_to_completion)
2972 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
2973 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
2974 index b08dc50f9f26..9eb564c002f6 100644
2975 --- a/drivers/char/mem.c
2976 +++ b/drivers/char/mem.c
2977 @@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
2978 }
2979 #endif
2980
2981 +static inline bool should_stop_iteration(void)
2982 +{
2983 + if (need_resched())
2984 + cond_resched();
2985 + return fatal_signal_pending(current);
2986 +}
2987 +
2988 /*
2989 * This funcion reads the *physical* memory. The f_pos points directly to the
2990 * memory location.
2991 @@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
2992 p += sz;
2993 count -= sz;
2994 read += sz;
2995 + if (should_stop_iteration())
2996 + break;
2997 }
2998 kfree(bounce);
2999
3000 @@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
3001 p += sz;
3002 count -= sz;
3003 written += sz;
3004 + if (should_stop_iteration())
3005 + break;
3006 }
3007
3008 *ppos += written;
3009 @@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
3010 read += sz;
3011 low_count -= sz;
3012 count -= sz;
3013 + if (should_stop_iteration()) {
3014 + count = 0;
3015 + break;
3016 + }
3017 }
3018 }
3019
3020 @@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
3021 buf += sz;
3022 read += sz;
3023 p += sz;
3024 + if (should_stop_iteration())
3025 + break;
3026 }
3027 free_page((unsigned long)kbuf);
3028 }
3029 @@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
3030 p += sz;
3031 count -= sz;
3032 written += sz;
3033 + if (should_stop_iteration())
3034 + break;
3035 }
3036
3037 *ppos += written;
3038 @@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
3039 buf += sz;
3040 virtr += sz;
3041 p += sz;
3042 + if (should_stop_iteration())
3043 + break;
3044 }
3045 free_page((unsigned long)kbuf);
3046 }
3047 diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
3048 index 1b4f95c13e00..d7a3888ad80f 100644
3049 --- a/drivers/char/tpm/tpm-interface.c
3050 +++ b/drivers/char/tpm/tpm-interface.c
3051 @@ -320,18 +320,22 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
3052 if (!chip)
3053 return -ENODEV;
3054
3055 - for (i = 0; i < chip->nr_allocated_banks; i++)
3056 - if (digests[i].alg_id != chip->allocated_banks[i].alg_id)
3057 - return -EINVAL;
3058 + for (i = 0; i < chip->nr_allocated_banks; i++) {
3059 + if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
3060 + rc = EINVAL;
3061 + goto out;
3062 + }
3063 + }
3064
3065 if (chip->flags & TPM_CHIP_FLAG_TPM2) {
3066 rc = tpm2_pcr_extend(chip, pcr_idx, digests);
3067 - tpm_put_ops(chip);
3068 - return rc;
3069 + goto out;
3070 }
3071
3072 rc = tpm1_pcr_extend(chip, pcr_idx, digests[0].digest,
3073 "attempting extend a PCR value");
3074 +
3075 +out:
3076 tpm_put_ops(chip);
3077 return rc;
3078 }
3079 @@ -354,14 +358,9 @@ int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
3080 if (!chip)
3081 return -ENODEV;
3082
3083 - rc = tpm_buf_init(&buf, 0, 0);
3084 - if (rc)
3085 - goto out;
3086 -
3087 - memcpy(buf.data, cmd, buflen);
3088 + buf.data = cmd;
3089 rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to a send a command");
3090 - tpm_buf_destroy(&buf);
3091 -out:
3092 +
3093 tpm_put_ops(chip);
3094 return rc;
3095 }
3096 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
3097 index c3181ea9f271..270f43acbb77 100644
3098 --- a/drivers/char/tpm/tpm_tis_core.c
3099 +++ b/drivers/char/tpm/tpm_tis_core.c
3100 @@ -980,6 +980,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
3101 goto out_err;
3102 }
3103
3104 + tpm_chip_start(chip);
3105 + chip->flags |= TPM_CHIP_FLAG_IRQ;
3106 if (irq) {
3107 tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
3108 irq);
3109 @@ -989,6 +991,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
3110 } else {
3111 tpm_tis_probe_irq(chip, intmask);
3112 }
3113 + tpm_chip_stop(chip);
3114 }
3115
3116 rc = tpm_chip_register(chip);
3117 diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
3118 index 988ebc326bdb..39e34f5066d3 100644
3119 --- a/drivers/cpufreq/armada-8k-cpufreq.c
3120 +++ b/drivers/cpufreq/armada-8k-cpufreq.c
3121 @@ -136,6 +136,8 @@ static int __init armada_8k_cpufreq_init(void)
3122
3123 nb_cpus = num_possible_cpus();
3124 freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
3125 + if (!freq_tables)
3126 + return -ENOMEM;
3127 cpumask_copy(&cpus, cpu_possible_mask);
3128
3129 /*
3130 diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
3131 index 4f85f3112784..35db14cf3102 100644
3132 --- a/drivers/cpufreq/imx-cpufreq-dt.c
3133 +++ b/drivers/cpufreq/imx-cpufreq-dt.c
3134 @@ -16,6 +16,7 @@
3135
3136 #define OCOTP_CFG3_SPEED_GRADE_SHIFT 8
3137 #define OCOTP_CFG3_SPEED_GRADE_MASK (0x3 << 8)
3138 +#define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK (0xf << 8)
3139 #define OCOTP_CFG3_MKT_SEGMENT_SHIFT 6
3140 #define OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 6)
3141
3142 @@ -34,7 +35,12 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
3143 if (ret)
3144 return ret;
3145
3146 - speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK) >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
3147 + if (of_machine_is_compatible("fsl,imx8mn"))
3148 + speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
3149 + >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
3150 + else
3151 + speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK)
3152 + >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
3153 mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
3154
3155 /*
3156 diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
3157 index 7d05efdbd3c6..12d9e6cecf1d 100644
3158 --- a/drivers/cpuidle/governors/teo.c
3159 +++ b/drivers/cpuidle/governors/teo.c
3160 @@ -242,7 +242,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
3161 struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
3162 int latency_req = cpuidle_governor_latency_req(dev->cpu);
3163 unsigned int duration_us, count;
3164 - int max_early_idx, idx, i;
3165 + int max_early_idx, constraint_idx, idx, i;
3166 ktime_t delta_tick;
3167
3168 if (cpu_data->last_state >= 0) {
3169 @@ -257,6 +257,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
3170
3171 count = 0;
3172 max_early_idx = -1;
3173 + constraint_idx = drv->state_count;
3174 idx = -1;
3175
3176 for (i = 0; i < drv->state_count; i++) {
3177 @@ -286,16 +287,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
3178 if (s->target_residency > duration_us)
3179 break;
3180
3181 - if (s->exit_latency > latency_req) {
3182 - /*
3183 - * If we break out of the loop for latency reasons, use
3184 - * the target residency of the selected state as the
3185 - * expected idle duration to avoid stopping the tick
3186 - * as long as that target residency is low enough.
3187 - */
3188 - duration_us = drv->states[idx].target_residency;
3189 - goto refine;
3190 - }
3191 + if (s->exit_latency > latency_req && constraint_idx > i)
3192 + constraint_idx = i;
3193
3194 idx = i;
3195
3196 @@ -321,7 +314,13 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
3197 duration_us = drv->states[idx].target_residency;
3198 }
3199
3200 -refine:
3201 + /*
3202 + * If there is a latency constraint, it may be necessary to use a
3203 + * shallower idle state than the one selected so far.
3204 + */
3205 + if (constraint_idx < idx)
3206 + idx = constraint_idx;
3207 +
3208 if (idx < 0) {
3209 idx = 0; /* No states enabled. Must use 0. */
3210 } else if (idx > 0) {
3211 @@ -331,13 +330,12 @@ refine:
3212
3213 /*
3214 * Count and sum the most recent idle duration values less than
3215 - * the target residency of the state selected so far, find the
3216 - * max.
3217 + * the current expected idle duration value.
3218 */
3219 for (i = 0; i < INTERVALS; i++) {
3220 unsigned int val = cpu_data->intervals[i];
3221
3222 - if (val >= drv->states[idx].target_residency)
3223 + if (val >= duration_us)
3224 continue;
3225
3226 count++;
3227 @@ -356,8 +354,10 @@ refine:
3228 * would be too shallow.
3229 */
3230 if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
3231 - idx = teo_find_shallower_state(drv, dev, idx, avg_us);
3232 duration_us = avg_us;
3233 + if (drv->states[idx].target_residency > avg_us)
3234 + idx = teo_find_shallower_state(drv, dev,
3235 + idx, avg_us);
3236 }
3237 }
3238 }
3239 diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
3240 index ab22bf8a12d6..a0e19802149f 100644
3241 --- a/drivers/devfreq/devfreq.c
3242 +++ b/drivers/devfreq/devfreq.c
3243 @@ -254,7 +254,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
3244 /* Restore previous state before return */
3245 mutex_lock(&devfreq_list_lock);
3246 if (err)
3247 - return ERR_PTR(err);
3248 + return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);
3249
3250 governor = find_devfreq_governor(name);
3251 }
3252 diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
3253 index d9f377912c10..7c06df8bd74f 100644
3254 --- a/drivers/devfreq/exynos-bus.c
3255 +++ b/drivers/devfreq/exynos-bus.c
3256 @@ -191,11 +191,10 @@ static void exynos_bus_exit(struct device *dev)
3257 if (ret < 0)
3258 dev_warn(dev, "failed to disable the devfreq-event devices\n");
3259
3260 - if (bus->regulator)
3261 - regulator_disable(bus->regulator);
3262 -
3263 dev_pm_opp_of_remove_table(dev);
3264 clk_disable_unprepare(bus->clk);
3265 + if (bus->regulator)
3266 + regulator_disable(bus->regulator);
3267 }
3268
3269 /*
3270 @@ -383,6 +382,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
3271 struct exynos_bus *bus;
3272 int ret, max_state;
3273 unsigned long min_freq, max_freq;
3274 + bool passive = false;
3275
3276 if (!np) {
3277 dev_err(dev, "failed to find devicetree node\n");
3278 @@ -396,27 +396,27 @@ static int exynos_bus_probe(struct platform_device *pdev)
3279 bus->dev = &pdev->dev;
3280 platform_set_drvdata(pdev, bus);
3281
3282 - /* Parse the device-tree to get the resource information */
3283 - ret = exynos_bus_parse_of(np, bus);
3284 - if (ret < 0)
3285 - return ret;
3286 -
3287 profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
3288 - if (!profile) {
3289 - ret = -ENOMEM;
3290 - goto err;
3291 - }
3292 + if (!profile)
3293 + return -ENOMEM;
3294
3295 node = of_parse_phandle(dev->of_node, "devfreq", 0);
3296 if (node) {
3297 of_node_put(node);
3298 - goto passive;
3299 + passive = true;
3300 } else {
3301 ret = exynos_bus_parent_parse_of(np, bus);
3302 + if (ret < 0)
3303 + return ret;
3304 }
3305
3306 + /* Parse the device-tree to get the resource information */
3307 + ret = exynos_bus_parse_of(np, bus);
3308 if (ret < 0)
3309 - goto err;
3310 + goto err_reg;
3311 +
3312 + if (passive)
3313 + goto passive;
3314
3315 /* Initialize the struct profile and governor data for parent device */
3316 profile->polling_ms = 50;
3317 @@ -507,6 +507,9 @@ out:
3318 err:
3319 dev_pm_opp_of_remove_table(dev);
3320 clk_disable_unprepare(bus->clk);
3321 +err_reg:
3322 + if (!passive)
3323 + regulator_disable(bus->regulator);
3324
3325 return ret;
3326 }
3327 diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
3328 index 58308948b863..be6eeab9c814 100644
3329 --- a/drivers/devfreq/governor_passive.c
3330 +++ b/drivers/devfreq/governor_passive.c
3331 @@ -149,7 +149,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb,
3332 static int devfreq_passive_event_handler(struct devfreq *devfreq,
3333 unsigned int event, void *data)
3334 {
3335 - struct device *dev = devfreq->dev.parent;
3336 struct devfreq_passive_data *p_data
3337 = (struct devfreq_passive_data *)devfreq->data;
3338 struct devfreq *parent = (struct devfreq *)p_data->parent;
3339 @@ -165,12 +164,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
3340 p_data->this = devfreq;
3341
3342 nb->notifier_call = devfreq_passive_notifier_call;
3343 - ret = devm_devfreq_register_notifier(dev, parent, nb,
3344 + ret = devfreq_register_notifier(parent, nb,
3345 DEVFREQ_TRANSITION_NOTIFIER);
3346 break;
3347 case DEVFREQ_GOV_STOP:
3348 - devm_devfreq_unregister_notifier(dev, parent, nb,
3349 - DEVFREQ_TRANSITION_NOTIFIER);
3350 + WARN_ON(devfreq_unregister_notifier(parent, nb,
3351 + DEVFREQ_TRANSITION_NOTIFIER));
3352 break;
3353 default:
3354 break;
3355 diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
3356 index 8101ff2f05c1..970f654611bd 100644
3357 --- a/drivers/dma/bcm2835-dma.c
3358 +++ b/drivers/dma/bcm2835-dma.c
3359 @@ -871,8 +871,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
3360 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
3361
3362 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3363 - if (rc)
3364 + if (rc) {
3365 + dev_err(&pdev->dev, "Unable to set DMA mask\n");
3366 return rc;
3367 + }
3368
3369 od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
3370 if (!od)
3371 diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
3372 index c6c0143670d9..a776857d89c8 100644
3373 --- a/drivers/dma/iop-adma.c
3374 +++ b/drivers/dma/iop-adma.c
3375 @@ -116,9 +116,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
3376 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
3377 chain_node) {
3378 pr_debug("\tcookie: %d slot: %d busy: %d "
3379 - "this_desc: %#x next_desc: %#x ack: %d\n",
3380 + "this_desc: %#x next_desc: %#llx ack: %d\n",
3381 iter->async_tx.cookie, iter->idx, busy,
3382 - iter->async_tx.phys, iop_desc_get_next_desc(iter),
3383 + iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
3384 async_tx_test_ack(&iter->async_tx));
3385 prefetch(_iter);
3386 prefetch(&_iter->async_tx);
3387 @@ -306,9 +306,9 @@ retry:
3388 int i;
3389 dev_dbg(iop_chan->device->common.dev,
3390 "allocated slot: %d "
3391 - "(desc %p phys: %#x) slots_per_op %d\n",
3392 + "(desc %p phys: %#llx) slots_per_op %d\n",
3393 iter->idx, iter->hw_desc,
3394 - iter->async_tx.phys, slots_per_op);
3395 + (u64)iter->async_tx.phys, slots_per_op);
3396
3397 /* pre-ack all but the last descriptor */
3398 if (num_slots != slots_per_op)
3399 @@ -516,7 +516,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
3400 return NULL;
3401 BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
3402
3403 - dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
3404 + dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
3405 __func__, len);
3406
3407 spin_lock_bh(&iop_chan->lock);
3408 @@ -549,7 +549,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
3409 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
3410
3411 dev_dbg(iop_chan->device->common.dev,
3412 - "%s src_cnt: %d len: %u flags: %lx\n",
3413 + "%s src_cnt: %d len: %zu flags: %lx\n",
3414 __func__, src_cnt, len, flags);
3415
3416 spin_lock_bh(&iop_chan->lock);
3417 @@ -582,7 +582,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
3418 if (unlikely(!len))
3419 return NULL;
3420
3421 - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
3422 + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
3423 __func__, src_cnt, len);
3424
3425 spin_lock_bh(&iop_chan->lock);
3426 @@ -620,7 +620,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
3427 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
3428
3429 dev_dbg(iop_chan->device->common.dev,
3430 - "%s src_cnt: %d len: %u flags: %lx\n",
3431 + "%s src_cnt: %d len: %zu flags: %lx\n",
3432 __func__, src_cnt, len, flags);
3433
3434 if (dmaf_p_disabled_continue(flags))
3435 @@ -683,7 +683,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
3436 return NULL;
3437 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
3438
3439 - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
3440 + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
3441 __func__, src_cnt, len);
3442
3443 spin_lock_bh(&iop_chan->lock);
3444 diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
3445 index ceabdea40ae0..982631d4e1f8 100644
3446 --- a/drivers/dma/ti/edma.c
3447 +++ b/drivers/dma/ti/edma.c
3448 @@ -2273,9 +2273,6 @@ static int edma_probe(struct platform_device *pdev)
3449
3450 ecc->default_queue = info->default_queue;
3451
3452 - for (i = 0; i < ecc->num_slots; i++)
3453 - edma_write_slot(ecc, i, &dummy_paramset);
3454 -
3455 if (info->rsv) {
3456 /* Set the reserved slots in inuse list */
3457 rsv_slots = info->rsv->rsv_slots;
3458 @@ -2288,6 +2285,12 @@ static int edma_probe(struct platform_device *pdev)
3459 }
3460 }
3461
3462 + for (i = 0; i < ecc->num_slots; i++) {
3463 + /* Reset only unused - not reserved - paRAM slots */
3464 + if (!test_bit(i, ecc->slot_inuse))
3465 + edma_write_slot(ecc, i, &dummy_paramset);
3466 + }
3467 +
3468 /* Clear the xbar mapped channels in unused list */
3469 xbar_chans = info->xbar_chans;
3470 if (xbar_chans) {
3471 diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
3472 index c2e693e34d43..bf024ec0116c 100644
3473 --- a/drivers/edac/altera_edac.c
3474 +++ b/drivers/edac/altera_edac.c
3475 @@ -1866,6 +1866,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
3476 struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
3477 struct irq_chip *chip = irq_desc_get_chip(desc);
3478 int irq = irq_desc_get_irq(desc);
3479 + unsigned long bits;
3480
3481 dberr = (irq == edac->db_irq) ? 1 : 0;
3482 sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
3483 @@ -1875,7 +1876,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
3484
3485 regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
3486
3487 - for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
3488 + bits = irq_status;
3489 + for_each_set_bit(bit, &bits, 32) {
3490 irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
3491 if (irq)
3492 generic_handle_irq(irq);
3493 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
3494 index 873437be86d9..608fdab566b3 100644
3495 --- a/drivers/edac/amd64_edac.c
3496 +++ b/drivers/edac/amd64_edac.c
3497 @@ -810,7 +810,7 @@ static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
3498
3499 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
3500
3501 - for (dimm = 0; dimm < 4; dimm++) {
3502 + for (dimm = 0; dimm < 2; dimm++) {
3503 size0 = 0;
3504 cs0 = dimm * 2;
3505
3506 @@ -942,89 +942,102 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
3507 } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
3508 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
3509 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
3510 + } else if (pvt->fam >= 0x17) {
3511 + int umc;
3512 +
3513 + for_each_umc(umc) {
3514 + pvt->csels[umc].b_cnt = 4;
3515 + pvt->csels[umc].m_cnt = 2;
3516 + }
3517 +
3518 } else {
3519 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
3520 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
3521 }
3522 }
3523
3524 +static void read_umc_base_mask(struct amd64_pvt *pvt)
3525 +{
3526 + u32 umc_base_reg, umc_mask_reg;
3527 + u32 base_reg, mask_reg;
3528 + u32 *base, *mask;
3529 + int cs, umc;
3530 +
3531 + for_each_umc(umc) {
3532 + umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
3533 +
3534 + for_each_chip_select(cs, umc, pvt) {
3535 + base = &pvt->csels[umc].csbases[cs];
3536 +
3537 + base_reg = umc_base_reg + (cs * 4);
3538 +
3539 + if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
3540 + edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
3541 + umc, cs, *base, base_reg);
3542 + }
3543 +
3544 + umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
3545 +
3546 + for_each_chip_select_mask(cs, umc, pvt) {
3547 + mask = &pvt->csels[umc].csmasks[cs];
3548 +
3549 + mask_reg = umc_mask_reg + (cs * 4);
3550 +
3551 + if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
3552 + edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
3553 + umc, cs, *mask, mask_reg);
3554 + }
3555 + }
3556 +}
3557 +
3558 /*
3559 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
3560 */
3561 static void read_dct_base_mask(struct amd64_pvt *pvt)
3562 {
3563 - int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
3564 + int cs;
3565
3566 prep_chip_selects(pvt);
3567
3568 - if (pvt->umc) {
3569 - base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
3570 - base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
3571 - mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
3572 - mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
3573 - } else {
3574 - base_reg0 = DCSB0;
3575 - base_reg1 = DCSB1;
3576 - mask_reg0 = DCSM0;
3577 - mask_reg1 = DCSM1;
3578 - }
3579 + if (pvt->umc)
3580 + return read_umc_base_mask(pvt);
3581
3582 for_each_chip_select(cs, 0, pvt) {
3583 - int reg0 = base_reg0 + (cs * 4);
3584 - int reg1 = base_reg1 + (cs * 4);
3585 + int reg0 = DCSB0 + (cs * 4);
3586 + int reg1 = DCSB1 + (cs * 4);
3587 u32 *base0 = &pvt->csels[0].csbases[cs];
3588 u32 *base1 = &pvt->csels[1].csbases[cs];
3589
3590 - if (pvt->umc) {
3591 - if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
3592 - edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n",
3593 - cs, *base0, reg0);
3594 -
3595 - if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
3596 - edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n",
3597 - cs, *base1, reg1);
3598 - } else {
3599 - if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
3600 - edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
3601 - cs, *base0, reg0);
3602 + if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
3603 + edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
3604 + cs, *base0, reg0);
3605
3606 - if (pvt->fam == 0xf)
3607 - continue;
3608 + if (pvt->fam == 0xf)
3609 + continue;
3610
3611 - if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
3612 - edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
3613 - cs, *base1, (pvt->fam == 0x10) ? reg1
3614 - : reg0);
3615 - }
3616 + if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
3617 + edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
3618 + cs, *base1, (pvt->fam == 0x10) ? reg1
3619 + : reg0);
3620 }
3621
3622 for_each_chip_select_mask(cs, 0, pvt) {
3623 - int reg0 = mask_reg0 + (cs * 4);
3624 - int reg1 = mask_reg1 + (cs * 4);
3625 + int reg0 = DCSM0 + (cs * 4);
3626 + int reg1 = DCSM1 + (cs * 4);
3627 u32 *mask0 = &pvt->csels[0].csmasks[cs];
3628 u32 *mask1 = &pvt->csels[1].csmasks[cs];
3629
3630 - if (pvt->umc) {
3631 - if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
3632 - edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n",
3633 - cs, *mask0, reg0);
3634 + if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
3635 + edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
3636 + cs, *mask0, reg0);
3637
3638 - if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
3639 - edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n",
3640 - cs, *mask1, reg1);
3641 - } else {
3642 - if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
3643 - edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
3644 - cs, *mask0, reg0);
3645 -
3646 - if (pvt->fam == 0xf)
3647 - continue;
3648 + if (pvt->fam == 0xf)
3649 + continue;
3650
3651 - if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
3652 - edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
3653 - cs, *mask1, (pvt->fam == 0x10) ? reg1
3654 - : reg0);
3655 - }
3656 + if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
3657 + edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
3658 + cs, *mask1, (pvt->fam == 0x10) ? reg1
3659 + : reg0);
3660 }
3661 }
3662
3663 @@ -2537,13 +2550,6 @@ static void decode_umc_error(int node_id, struct mce *m)
3664
3665 err.channel = find_umc_channel(m);
3666
3667 - if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
3668 - err.err_code = ERR_NORM_ADDR;
3669 - goto log_error;
3670 - }
3671 -
3672 - error_address_to_page_and_offset(sys_addr, &err);
3673 -
3674 if (!(m->status & MCI_STATUS_SYNDV)) {
3675 err.err_code = ERR_SYND;
3676 goto log_error;
3677 @@ -2560,6 +2566,13 @@ static void decode_umc_error(int node_id, struct mce *m)
3678
3679 err.csrow = m->synd & 0x7;
3680
3681 + if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
3682 + err.err_code = ERR_NORM_ADDR;
3683 + goto log_error;
3684 + }
3685 +
3686 + error_address_to_page_and_offset(sys_addr, &err);
3687 +
3688 log_error:
3689 __log_ecc_error(mci, &err, ecc_type);
3690 }
3691 @@ -3137,12 +3150,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
3692 static inline void
3693 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3694 {
3695 - u8 i, ecc_en = 1, cpk_en = 1;
3696 + u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3697
3698 for_each_umc(i) {
3699 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3700 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3701 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3702 +
3703 + dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3704 + dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3705 }
3706 }
3707
3708 @@ -3150,8 +3166,15 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3709 if (ecc_en) {
3710 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3711
3712 - if (cpk_en)
3713 + if (!cpk_en)
3714 + return;
3715 +
3716 + if (dev_x4)
3717 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3718 + else if (dev_x16)
3719 + mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3720 + else
3721 + mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3722 }
3723 }
3724
3725 diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
3726 index 8f66472f7adc..4dce6a2ac75f 100644
3727 --- a/drivers/edac/amd64_edac.h
3728 +++ b/drivers/edac/amd64_edac.h
3729 @@ -96,6 +96,7 @@
3730 /* Hardware limit on ChipSelect rows per MC and processors per system */
3731 #define NUM_CHIPSELECTS 8
3732 #define DRAM_RANGES 8
3733 +#define NUM_CONTROLLERS 8
3734
3735 #define ON true
3736 #define OFF false
3737 @@ -351,8 +352,8 @@ struct amd64_pvt {
3738 u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
3739 u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
3740
3741 - /* one for each DCT */
3742 - struct chip_select csels[2];
3743 + /* one for each DCT/UMC */
3744 + struct chip_select csels[NUM_CONTROLLERS];
3745
3746 /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
3747 struct dram_range ranges[DRAM_RANGES];
3748 diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
3749 index 64922c8fa7e3..d899d86897d0 100644
3750 --- a/drivers/edac/edac_mc.c
3751 +++ b/drivers/edac/edac_mc.c
3752 @@ -1235,9 +1235,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
3753 if (p > e->location)
3754 *(p - 1) = '\0';
3755
3756 - /* Report the error via the trace interface */
3757 - grain_bits = fls_long(e->grain) + 1;
3758 + /* Sanity-check driver-supplied grain value. */
3759 + if (WARN_ON_ONCE(!e->grain))
3760 + e->grain = 1;
3761 +
3762 + grain_bits = fls_long(e->grain - 1);
3763
3764 + /* Report the error via the trace interface */
3765 if (IS_ENABLED(CONFIG_RAS))
3766 trace_mc_event(type, e->msg, e->label, e->error_count,
3767 mci->mc_idx, e->top_layer, e->mid_layer,
3768 diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
3769 index ca25f8fe57ef..1ad538baaa4a 100644
3770 --- a/drivers/edac/pnd2_edac.c
3771 +++ b/drivers/edac/pnd2_edac.c
3772 @@ -260,11 +260,14 @@ static u64 get_sideband_reg_base_addr(void)
3773 }
3774 }
3775
3776 +#define DNV_MCHBAR_SIZE 0x8000
3777 +#define DNV_SB_PORT_SIZE 0x10000
3778 static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
3779 {
3780 struct pci_dev *pdev;
3781 char *base;
3782 u64 addr;
3783 + unsigned long size;
3784
3785 if (op == 4) {
3786 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
3787 @@ -279,15 +282,17 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
3788 addr = get_mem_ctrl_hub_base_addr();
3789 if (!addr)
3790 return -ENODEV;
3791 + size = DNV_MCHBAR_SIZE;
3792 } else {
3793 /* MMIO via sideband register base address */
3794 addr = get_sideband_reg_base_addr();
3795 if (!addr)
3796 return -ENODEV;
3797 addr += (port << 16);
3798 + size = DNV_SB_PORT_SIZE;
3799 }
3800
3801 - base = ioremap((resource_size_t)addr, 0x10000);
3802 + base = ioremap((resource_size_t)addr, size);
3803 if (!base)
3804 return -ENODEV;
3805
3806 diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
3807 index b5bc4c7a8fab..b49c9e6f4bf1 100644
3808 --- a/drivers/firmware/arm_scmi/driver.c
3809 +++ b/drivers/firmware/arm_scmi/driver.c
3810 @@ -271,6 +271,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
3811 struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
3812 struct scmi_shared_mem __iomem *mem = cinfo->payload;
3813
3814 + /*
3815 + * Ideally channel must be free by now unless OS timeout last
3816 + * request and platform continued to process the same, wait
3817 + * until it releases the shared memory, otherwise we may endup
3818 + * overwriting its response with new message payload or vice-versa
3819 + */
3820 + spin_until_cond(ioread32(&mem->channel_status) &
3821 + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
3822 /* Mark channel busy + clear error */
3823 iowrite32(0x0, &mem->channel_status);
3824 iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
3825 diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
3826 index 8fa977c7861f..addf0749dd8b 100644
3827 --- a/drivers/firmware/efi/cper.c
3828 +++ b/drivers/firmware/efi/cper.c
3829 @@ -390,6 +390,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
3830 printk(
3831 "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
3832 pfx, pcie->bridge.secondary_status, pcie->bridge.control);
3833 +
3834 + /* Fatal errors call __ghes_panic() before AER handler prints this */
3835 + if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
3836 + (gdata->error_severity & CPER_SEV_FATAL)) {
3837 + struct aer_capability_regs *aer;
3838 +
3839 + aer = (struct aer_capability_regs *)pcie->aer_info;
3840 + printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
3841 + pfx, aer->uncor_status, aer->uncor_mask);
3842 + printk("%saer_uncor_severity: 0x%08x\n",
3843 + pfx, aer->uncor_severity);
3844 + printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
3845 + aer->header_log.dw0, aer->header_log.dw1,
3846 + aer->header_log.dw2, aer->header_log.dw3);
3847 + }
3848 }
3849
3850 static void cper_print_tstamp(const char *pfx,
3851 diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
3852 index 2ddc118dba1b..74b84244a0db 100644
3853 --- a/drivers/firmware/qcom_scm.c
3854 +++ b/drivers/firmware/qcom_scm.c
3855 @@ -9,6 +9,7 @@
3856 #include <linux/init.h>
3857 #include <linux/cpumask.h>
3858 #include <linux/export.h>
3859 +#include <linux/dma-direct.h>
3860 #include <linux/dma-mapping.h>
3861 #include <linux/module.h>
3862 #include <linux/types.h>
3863 @@ -440,6 +441,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
3864 phys_addr_t mem_to_map_phys;
3865 phys_addr_t dest_phys;
3866 phys_addr_t ptr_phys;
3867 + dma_addr_t ptr_dma;
3868 size_t mem_to_map_sz;
3869 size_t dest_sz;
3870 size_t src_sz;
3871 @@ -457,9 +459,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
3872 ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
3873 ALIGN(dest_sz, SZ_64);
3874
3875 - ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
3876 + ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
3877 if (!ptr)
3878 return -ENOMEM;
3879 + ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
3880
3881 /* Fill source vmid detail */
3882 src = ptr;
3883 @@ -489,7 +492,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
3884
3885 ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
3886 ptr_phys, src_sz, dest_phys, dest_sz);
3887 - dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
3888 + dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
3889 if (ret) {
3890 dev_err(__scm->dev,
3891 "Assign memory protection call failed %d.\n", ret);
3892 diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
3893 index 4dbc837d1215..be963113f672 100644
3894 --- a/drivers/gpio/gpio-madera.c
3895 +++ b/drivers/gpio/gpio-madera.c
3896 @@ -136,6 +136,9 @@ static int madera_gpio_probe(struct platform_device *pdev)
3897 madera_gpio->gpio_chip.parent = pdev->dev.parent;
3898
3899 switch (madera->type) {
3900 + case CS47L15:
3901 + madera_gpio->gpio_chip.ngpio = CS47L15_NUM_GPIOS;
3902 + break;
3903 case CS47L35:
3904 madera_gpio->gpio_chip.ngpio = CS47L35_NUM_GPIOS;
3905 break;
3906 @@ -147,6 +150,11 @@ static int madera_gpio_probe(struct platform_device *pdev)
3907 case CS47L91:
3908 madera_gpio->gpio_chip.ngpio = CS47L90_NUM_GPIOS;
3909 break;
3910 + case CS42L92:
3911 + case CS47L92:
3912 + case CS47L93:
3913 + madera_gpio->gpio_chip.ngpio = CS47L92_NUM_GPIOS;
3914 + break;
3915 default:
3916 dev_err(&pdev->dev, "Unknown chip variant %d\n", madera->type);
3917 return -EINVAL;
3918 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3919 index beb2d268d1ef..421ca93a8ab8 100644
3920 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3921 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3922 @@ -2107,6 +2107,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3923 }
3924
3925 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3926 + .options = BL_CORE_SUSPENDRESUME,
3927 .get_brightness = amdgpu_dm_backlight_get_brightness,
3928 .update_status = amdgpu_dm_backlight_update_status,
3929 };
3930 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
3931 index 5cc3acccda2a..b1e657e137a9 100644
3932 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
3933 +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
3934 @@ -98,11 +98,14 @@ uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
3935 struct dc_stream_state *stream = context->streams[j];
3936 uint32_t vertical_blank_in_pixels = 0;
3937 uint32_t vertical_blank_time = 0;
3938 + uint32_t vertical_total_min = stream->timing.v_total;
3939 + struct dc_crtc_timing_adjust adjust = stream->adjust;
3940 + if (adjust.v_total_max != adjust.v_total_min)
3941 + vertical_total_min = adjust.v_total_min;
3942
3943 vertical_blank_in_pixels = stream->timing.h_total *
3944 - (stream->timing.v_total
3945 + (vertical_total_min
3946 - stream->timing.v_addressable);
3947 -
3948 vertical_blank_time = vertical_blank_in_pixels
3949 * 10000 / stream->timing.pix_clk_100hz;
3950
3951 @@ -171,6 +174,10 @@ void dce11_pplib_apply_display_requirements(
3952 struct dc_state *context)
3953 {
3954 struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
3955 + int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
3956 +
3957 + if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm)
3958 + memory_type_multiplier = MEMORY_TYPE_HBM;
3959
3960 pp_display_cfg->all_displays_in_sync =
3961 context->bw_ctx.bw.dce.all_displays_in_sync;
3962 @@ -183,8 +190,20 @@ void dce11_pplib_apply_display_requirements(
3963 pp_display_cfg->cpu_pstate_separation_time =
3964 context->bw_ctx.bw.dce.blackout_recovery_time_us;
3965
3966 - pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
3967 - / MEMORY_TYPE_MULTIPLIER_CZ;
3968 + /*
3969 + * TODO: determine whether the bandwidth has reached memory's limitation
3970 + * , then change minimum memory clock based on real-time bandwidth
3971 + * limitation.
3972 + */
3973 + if (ASICREV_IS_VEGA20_P(dc->ctx->asic_id.hw_internal_rev) && (context->stream_count >= 2)) {
3974 + pp_display_cfg->min_memory_clock_khz = max(pp_display_cfg->min_memory_clock_khz,
3975 + (uint32_t) div64_s64(
3976 + div64_s64(dc->bw_vbios->high_yclk.value,
3977 + memory_type_multiplier), 10000));
3978 + } else {
3979 + pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
3980 + / memory_type_multiplier;
3981 + }
3982
3983 pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
3984 dc,
3985 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
3986 index a24a2bda8656..1596ddcb26e6 100644
3987 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
3988 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
3989 @@ -148,7 +148,7 @@ static void dce_mi_program_pte_vm(
3990 pte->min_pte_before_flip_horiz_scan;
3991
3992 REG_UPDATE(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT,
3993 - GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0xff);
3994 + GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0x7f);
3995
3996 REG_UPDATE_3(DVMM_PTE_CONTROL,
3997 DVMM_PAGE_WIDTH, page_width,
3998 @@ -157,7 +157,7 @@ static void dce_mi_program_pte_vm(
3999
4000 REG_UPDATE_2(DVMM_PTE_ARB_CONTROL,
4001 DVMM_PTE_REQ_PER_CHUNK, pte->pte_req_per_chunk,
4002 - DVMM_MAX_PTE_REQ_OUTSTANDING, 0xff);
4003 + DVMM_MAX_PTE_REQ_OUTSTANDING, 0x7f);
4004 }
4005
4006 static void program_urgency_watermark(
4007 diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
4008 index c6136e0ed1a4..7a04be74c9cf 100644
4009 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
4010 +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
4011 @@ -987,6 +987,10 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
4012 struct dm_pp_clock_levels_with_latency mem_clks = {0};
4013 struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
4014 struct dm_pp_clock_levels clks = {0};
4015 + int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
4016 +
4017 + if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm)
4018 + memory_type_multiplier = MEMORY_TYPE_HBM;
4019
4020 /*do system clock TODO PPLIB: after PPLIB implement,
4021 * then remove old way
4022 @@ -1026,12 +1030,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
4023 &clks);
4024
4025 dc->bw_vbios->low_yclk = bw_frc_to_fixed(
4026 - clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
4027 + clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
4028 dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
4029 - clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
4030 + clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
4031 1000);
4032 dc->bw_vbios->high_yclk = bw_frc_to_fixed(
4033 - clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
4034 + clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
4035 1000);
4036
4037 return;
4038 @@ -1067,12 +1071,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
4039 * YCLK = UMACLK*m_memoryTypeMultiplier
4040 */
4041 dc->bw_vbios->low_yclk = bw_frc_to_fixed(
4042 - mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
4043 + mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
4044 dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
4045 - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
4046 + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
4047 1000);
4048 dc->bw_vbios->high_yclk = bw_frc_to_fixed(
4049 - mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
4050 + mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
4051 1000);
4052
4053 /* Now notify PPLib/SMU about which Watermarks sets they should select
4054 diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
4055 index 4a6ba3173a5a..ae38c9c7277c 100644
4056 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
4057 +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
4058 @@ -847,6 +847,8 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
4059 int i;
4060 unsigned int clk;
4061 unsigned int latency;
4062 + /*original logic in dal3*/
4063 + int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
4064
4065 /*do system clock*/
4066 if (!dm_pp_get_clock_levels_by_type_with_latency(
4067 @@ -905,13 +907,16 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
4068 * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula):
4069 * YCLK = UMACLK*m_memoryTypeMultiplier
4070 */
4071 + if (dc->bw_vbios->memory_type == bw_def_hbm)
4072 + memory_type_multiplier = MEMORY_TYPE_HBM;
4073 +
4074 dc->bw_vbios->low_yclk = bw_frc_to_fixed(
4075 - mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
4076 + mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
4077 dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
4078 - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
4079 + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
4080 1000);
4081 dc->bw_vbios->high_yclk = bw_frc_to_fixed(
4082 - mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
4083 + mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
4084 1000);
4085
4086 /* Now notify PPLib/SMU about which Watermarks sets they should select
4087 diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
4088 index 47f81072d7e9..c0424b4035a5 100644
4089 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
4090 +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
4091 @@ -31,6 +31,8 @@
4092 #include "dm_pp_smu.h"
4093
4094 #define MEMORY_TYPE_MULTIPLIER_CZ 4
4095 +#define MEMORY_TYPE_HBM 2
4096 +
4097
4098 enum dce_version resource_parse_asic_id(
4099 struct hw_asic_id asic_id);
4100 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
4101 index 487aeee1cf8a..3c1084de5d59 100644
4102 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
4103 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
4104 @@ -4068,6 +4068,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
4105
4106 data->frame_time_x2 = frame_time_in_us * 2 / 100;
4107
4108 + if (data->frame_time_x2 < 280) {
4109 + pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
4110 + data->frame_time_x2 = 280;
4111 + }
4112 +
4113 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4114
4115 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
4116 diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
4117 index d9a5ac81949e..221a8528c993 100644
4118 --- a/drivers/gpu/drm/drm_kms_helper_common.c
4119 +++ b/drivers/gpu/drm/drm_kms_helper_common.c
4120 @@ -40,7 +40,7 @@ MODULE_LICENSE("GPL and additional rights");
4121 /* Backward compatibility for drm_kms_helper.edid_firmware */
4122 static int edid_firmware_set(const char *val, const struct kernel_param *kp)
4123 {
4124 - DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
4125 + DRM_NOTE("drm_kms_helper.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
4126
4127 return __drm_set_edid_firmware_path(val);
4128 }
4129 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
4130 index 6ba1a08253f0..4cf25458f0b9 100644
4131 --- a/drivers/hwmon/acpi_power_meter.c
4132 +++ b/drivers/hwmon/acpi_power_meter.c
4133 @@ -681,8 +681,8 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
4134
4135 if (resource->caps.flags & POWER_METER_CAN_CAP) {
4136 if (!can_cap_in_hardware()) {
4137 - dev_err(&resource->acpi_dev->dev,
4138 - "Ignoring unsafe software power cap!\n");
4139 + dev_warn(&resource->acpi_dev->dev,
4140 + "Ignoring unsafe software power cap!\n");
4141 goto skip_unsafe_cap;
4142 }
4143
4144 diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
4145 index c77e89239dcd..5c1dddde193c 100644
4146 --- a/drivers/hwmon/k10temp.c
4147 +++ b/drivers/hwmon/k10temp.c
4148 @@ -349,6 +349,7 @@ static const struct pci_device_id k10temp_id_table[] = {
4149 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
4150 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
4151 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
4152 + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
4153 { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
4154 {}
4155 };
4156 diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
4157 index f31413fd9521..800414886f6b 100644
4158 --- a/drivers/i2c/busses/i2c-riic.c
4159 +++ b/drivers/i2c/busses/i2c-riic.c
4160 @@ -202,6 +202,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
4161 if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
4162 /* We got a NACKIE */
4163 readb(riic->base + RIIC_ICDRR); /* dummy read */
4164 + riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
4165 riic->err = -ENXIO;
4166 } else if (riic->bytes_left) {
4167 return IRQ_NONE;
4168 diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
4169 index 9b76a8fcdd24..bf539c34ccd3 100644
4170 --- a/drivers/infiniband/core/addr.c
4171 +++ b/drivers/infiniband/core/addr.c
4172 @@ -352,7 +352,7 @@ static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
4173
4174 if (family == AF_INET) {
4175 rt = container_of(dst, struct rtable, dst);
4176 - return rt->rt_gw_family == AF_INET;
4177 + return rt->rt_uses_gateway;
4178 }
4179
4180 rt6 = container_of(dst, struct rt6_info, dst);
4181 diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
4182 index 7ddd0e5bc6b3..bb8b71cc3821 100644
4183 --- a/drivers/infiniband/core/uverbs_cmd.c
4184 +++ b/drivers/infiniband/core/uverbs_cmd.c
4185 @@ -3484,7 +3484,8 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
4186
4187 err_copy:
4188 ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
4189 -
4190 + /* It was released in ib_destroy_srq_user */
4191 + srq = NULL;
4192 err_free:
4193 kfree(srq);
4194 err_put:
4195 diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
4196 index 184dba3c2828..d8ff063a5419 100644
4197 --- a/drivers/infiniband/hw/hfi1/mad.c
4198 +++ b/drivers/infiniband/hw/hfi1/mad.c
4199 @@ -2326,7 +2326,7 @@ struct opa_port_status_req {
4200 __be32 vl_select_mask;
4201 };
4202
4203 -#define VL_MASK_ALL 0x000080ff
4204 +#define VL_MASK_ALL 0x00000000000080ffUL
4205
4206 struct opa_port_status_rsp {
4207 __u8 port_num;
4208 @@ -2625,15 +2625,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
4209 }
4210
4211 static void a0_portstatus(struct hfi1_pportdata *ppd,
4212 - struct opa_port_status_rsp *rsp, u32 vl_select_mask)
4213 + struct opa_port_status_rsp *rsp)
4214 {
4215 if (!is_bx(ppd->dd)) {
4216 unsigned long vl;
4217 u64 sum_vl_xmit_wait = 0;
4218 - u32 vl_all_mask = VL_MASK_ALL;
4219 + unsigned long vl_all_mask = VL_MASK_ALL;
4220
4221 - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
4222 - 8 * sizeof(vl_all_mask)) {
4223 + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
4224 u64 tmp = sum_vl_xmit_wait +
4225 read_port_cntr(ppd, C_TX_WAIT_VL,
4226 idx_from_vl(vl));
4227 @@ -2730,12 +2729,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
4228 (struct opa_port_status_req *)pmp->data;
4229 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
4230 struct opa_port_status_rsp *rsp;
4231 - u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
4232 + unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
4233 unsigned long vl;
4234 size_t response_data_size;
4235 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
4236 u8 port_num = req->port_num;
4237 - u8 num_vls = hweight32(vl_select_mask);
4238 + u8 num_vls = hweight64(vl_select_mask);
4239 struct _vls_pctrs *vlinfo;
4240 struct hfi1_ibport *ibp = to_iport(ibdev, port);
4241 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
4242 @@ -2770,7 +2769,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
4243
4244 hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
4245
4246 - rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
4247 + rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
4248 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
4249 CNTR_INVALID_VL));
4250 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
4251 @@ -2841,8 +2840,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
4252 * So in the for_each_set_bit() loop below, we don't need
4253 * any additional checks for vl.
4254 */
4255 - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
4256 - 8 * sizeof(vl_select_mask)) {
4257 + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
4258 memset(vlinfo, 0, sizeof(*vlinfo));
4259
4260 tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
4261 @@ -2883,7 +2881,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
4262 vfi++;
4263 }
4264
4265 - a0_portstatus(ppd, rsp, vl_select_mask);
4266 + a0_portstatus(ppd, rsp);
4267
4268 if (resp_len)
4269 *resp_len += response_data_size;
4270 @@ -2930,16 +2928,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
4271 return error_counter_summary;
4272 }
4273
4274 -static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
4275 - u32 vl_select_mask)
4276 +static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
4277 {
4278 if (!is_bx(ppd->dd)) {
4279 unsigned long vl;
4280 u64 sum_vl_xmit_wait = 0;
4281 - u32 vl_all_mask = VL_MASK_ALL;
4282 + unsigned long vl_all_mask = VL_MASK_ALL;
4283
4284 - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
4285 - 8 * sizeof(vl_all_mask)) {
4286 + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
4287 u64 tmp = sum_vl_xmit_wait +
4288 read_port_cntr(ppd, C_TX_WAIT_VL,
4289 idx_from_vl(vl));
4290 @@ -2994,7 +2990,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
4291 u64 port_mask;
4292 u8 port_num;
4293 unsigned long vl;
4294 - u32 vl_select_mask;
4295 + unsigned long vl_select_mask;
4296 int vfi;
4297 u16 link_width;
4298 u16 link_speed;
4299 @@ -3071,8 +3067,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
4300 * So in the for_each_set_bit() loop below, we don't need
4301 * any additional checks for vl.
4302 */
4303 - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
4304 - 8 * sizeof(req->vl_select_mask)) {
4305 + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
4306 memset(vlinfo, 0, sizeof(*vlinfo));
4307
4308 rsp->vls[vfi].port_vl_xmit_data =
4309 @@ -3120,7 +3115,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
4310 vfi++;
4311 }
4312
4313 - a0_datacounters(ppd, rsp, vl_select_mask);
4314 + a0_datacounters(ppd, rsp);
4315
4316 if (resp_len)
4317 *resp_len += response_data_size;
4318 @@ -3215,7 +3210,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
4319 struct _vls_ectrs *vlinfo;
4320 unsigned long vl;
4321 u64 port_mask, tmp;
4322 - u32 vl_select_mask;
4323 + unsigned long vl_select_mask;
4324 int vfi;
4325
4326 req = (struct opa_port_error_counters64_msg *)pmp->data;
4327 @@ -3273,8 +3268,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
4328 vlinfo = &rsp->vls[0];
4329 vfi = 0;
4330 vl_select_mask = be32_to_cpu(req->vl_select_mask);
4331 - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
4332 - 8 * sizeof(req->vl_select_mask)) {
4333 + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
4334 memset(vlinfo, 0, sizeof(*vlinfo));
4335 rsp->vls[vfi].port_vl_xmit_discards =
4336 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
4337 @@ -3485,7 +3479,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
4338 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
4339 u64 portn = be64_to_cpu(req->port_select_mask[3]);
4340 u32 counter_select = be32_to_cpu(req->counter_select_mask);
4341 - u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
4342 + unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
4343 unsigned long vl;
4344
4345 if ((nports != 1) || (portn != 1 << port)) {
4346 @@ -3579,8 +3573,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
4347 if (counter_select & CS_UNCORRECTABLE_ERRORS)
4348 write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
4349
4350 - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
4351 - 8 * sizeof(vl_select_mask)) {
4352 + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
4353 if (counter_select & CS_PORT_XMIT_DATA)
4354 write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
4355
4356 diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
4357 index 646f61545ed6..9f53f63b1453 100644
4358 --- a/drivers/infiniband/hw/hfi1/verbs.c
4359 +++ b/drivers/infiniband/hw/hfi1/verbs.c
4360 @@ -874,16 +874,17 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
4361 else
4362 pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
4363
4364 - if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
4365 - pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
4366 pbc = create_pbc(ppd,
4367 pbc,
4368 qp->srate_mbps,
4369 vl,
4370 plen);
4371
4372 - /* Update HCRC based on packet opcode */
4373 - pbc = update_hcrc(ps->opcode, pbc);
4374 + if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
4375 + pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
4376 + else
4377 + /* Update HCRC based on packet opcode */
4378 + pbc = update_hcrc(ps->opcode, pbc);
4379 }
4380 tx->wqe = qp->s_wqe;
4381 ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
4382 @@ -1030,12 +1031,12 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
4383 else
4384 pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
4385
4386 + pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
4387 if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
4388 pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
4389 - pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
4390 -
4391 - /* Update HCRC based on packet opcode */
4392 - pbc = update_hcrc(ps->opcode, pbc);
4393 + else
4394 + /* Update HCRC based on packet opcode */
4395 + pbc = update_hcrc(ps->opcode, pbc);
4396 }
4397 if (cb)
4398 iowait_pio_inc(&priv->s_iowait);
4399 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
4400 index 0569bcab02d4..14807ea8dc3f 100644
4401 --- a/drivers/infiniband/hw/mlx5/main.c
4402 +++ b/drivers/infiniband/hw/mlx5/main.c
4403 @@ -6959,6 +6959,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
4404 mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
4405 list_del(&mpi->list);
4406 mutex_unlock(&mlx5_ib_multiport_mutex);
4407 + kfree(mpi);
4408 return;
4409 }
4410
4411 diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
4412 index f13f36ae1af6..c6a277e69848 100644
4413 --- a/drivers/iommu/Makefile
4414 +++ b/drivers/iommu/Makefile
4415 @@ -10,7 +10,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
4416 obj-$(CONFIG_IOMMU_IOVA) += iova.o
4417 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
4418 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
4419 -obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
4420 +obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
4421 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
4422 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
4423 obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
4424 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
4425 index 61de81965c44..e1259429ded2 100644
4426 --- a/drivers/iommu/amd_iommu.c
4427 +++ b/drivers/iommu/amd_iommu.c
4428 @@ -2577,7 +2577,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
4429
4430 bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
4431 phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
4432 - ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
4433 + ret = iommu_map_page(domain, bus_addr, phys_addr,
4434 + PAGE_SIZE, prot,
4435 + GFP_ATOMIC | __GFP_NOWARN);
4436 if (ret)
4437 goto out_unmap;
4438
4439 diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
4440 new file mode 100644
4441 index 000000000000..12d540d9b59b
4442 --- /dev/null
4443 +++ b/drivers/iommu/amd_iommu.h
4444 @@ -0,0 +1,14 @@
4445 +/* SPDX-License-Identifier: GPL-2.0-only */
4446 +
4447 +#ifndef AMD_IOMMU_H
4448 +#define AMD_IOMMU_H
4449 +
4450 +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
4451 +
4452 +#ifdef CONFIG_DMI
4453 +void amd_iommu_apply_ivrs_quirks(void);
4454 +#else
4455 +static void amd_iommu_apply_ivrs_quirks(void) { }
4456 +#endif
4457 +
4458 +#endif
4459 diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
4460 index 4413aa67000e..568c52317757 100644
4461 --- a/drivers/iommu/amd_iommu_init.c
4462 +++ b/drivers/iommu/amd_iommu_init.c
4463 @@ -32,6 +32,7 @@
4464 #include <asm/irq_remapping.h>
4465
4466 #include <linux/crash_dump.h>
4467 +#include "amd_iommu.h"
4468 #include "amd_iommu_proto.h"
4469 #include "amd_iommu_types.h"
4470 #include "irq_remapping.h"
4471 @@ -1002,7 +1003,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
4472 set_iommu_for_device(iommu, devid);
4473 }
4474
4475 -static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
4476 +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
4477 {
4478 struct devid_map *entry;
4479 struct list_head *list;
4480 @@ -1153,6 +1154,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
4481 if (ret)
4482 return ret;
4483
4484 + amd_iommu_apply_ivrs_quirks();
4485 +
4486 /*
4487 * First save the recommended feature enable bits from ACPI
4488 */
4489 diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c
4490 new file mode 100644
4491 index 000000000000..c235f79b7a20
4492 --- /dev/null
4493 +++ b/drivers/iommu/amd_iommu_quirks.c
4494 @@ -0,0 +1,92 @@
4495 +/* SPDX-License-Identifier: GPL-2.0-only */
4496 +
4497 +/*
4498 + * Quirks for AMD IOMMU
4499 + *
4500 + * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
4501 + */
4502 +
4503 +#ifdef CONFIG_DMI
4504 +#include <linux/dmi.h>
4505 +
4506 +#include "amd_iommu.h"
4507 +
4508 +#define IVHD_SPECIAL_IOAPIC 1
4509 +
4510 +struct ivrs_quirk_entry {
4511 + u8 id;
4512 + u16 devid;
4513 +};
4514 +
4515 +enum {
4516 + DELL_INSPIRON_7375 = 0,
4517 + DELL_LATITUDE_5495,
4518 + LENOVO_IDEAPAD_330S_15ARR,
4519 +};
4520 +
4521 +static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
4522 + /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
4523 + [DELL_INSPIRON_7375] = {
4524 + { .id = 4, .devid = 0xa0 },
4525 + { .id = 5, .devid = 0x2 },
4526 + {}
4527 + },
4528 + /* ivrs_ioapic[4]=00:14.0 */
4529 + [DELL_LATITUDE_5495] = {
4530 + { .id = 4, .devid = 0xa0 },
4531 + {}
4532 + },
4533 + /* ivrs_ioapic[32]=00:14.0 */
4534 + [LENOVO_IDEAPAD_330S_15ARR] = {
4535 + { .id = 32, .devid = 0xa0 },
4536 + {}
4537 + },
4538 + {}
4539 +};
4540 +
4541 +static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
4542 +{
4543 + const struct ivrs_quirk_entry *i;
4544 +
4545 + for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
4546 + add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
4547 +
4548 + return 0;
4549 +}
4550 +
4551 +static const struct dmi_system_id ivrs_quirks[] __initconst = {
4552 + {
4553 + .callback = ivrs_ioapic_quirk_cb,
4554 + .ident = "Dell Inspiron 7375",
4555 + .matches = {
4556 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
4557 + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
4558 + },
4559 + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
4560 + },
4561 + {
4562 + .callback = ivrs_ioapic_quirk_cb,
4563 + .ident = "Dell Latitude 5495",
4564 + .matches = {
4565 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
4566 + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
4567 + },
4568 + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
4569 + },
4570 + {
4571 + .callback = ivrs_ioapic_quirk_cb,
4572 + .ident = "Lenovo ideapad 330S-15ARR",
4573 + .matches = {
4574 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4575 + DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
4576 + },
4577 + .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
4578 + },
4579 + {}
4580 +};
4581 +
4582 +void __init amd_iommu_apply_ivrs_quirks(void)
4583 +{
4584 + dmi_check_system(ivrs_quirks);
4585 +}
4586 +#endif
4587 diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
4588 index c5c93e48b4db..d1ebe5ce3e47 100644
4589 --- a/drivers/iommu/arm-smmu-v3.c
4590 +++ b/drivers/iommu/arm-smmu-v3.c
4591 @@ -2843,11 +2843,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
4592 }
4593
4594 /* Boolean feature flags */
4595 +#if 0 /* ATS invalidation is slow and broken */
4596 if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
4597 smmu->features |= ARM_SMMU_FEAT_PRI;
4598
4599 if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
4600 smmu->features |= ARM_SMMU_FEAT_ATS;
4601 +#endif
4602
4603 if (reg & IDR0_SEV)
4604 smmu->features |= ARM_SMMU_FEAT_SEV;
4605 diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
4606 index 4786ca061e31..81e43c1df7ec 100644
4607 --- a/drivers/iommu/intel_irq_remapping.c
4608 +++ b/drivers/iommu/intel_irq_remapping.c
4609 @@ -376,13 +376,13 @@ static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4610 {
4611 struct set_msi_sid_data *data = opaque;
4612
4613 + if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias))
4614 + data->busmatch_count++;
4615 +
4616 data->pdev = pdev;
4617 data->alias = alias;
4618 data->count++;
4619
4620 - if (PCI_BUS_NUM(alias) == pdev->bus->number)
4621 - data->busmatch_count++;
4622 -
4623 return 0;
4624 }
4625
4626 diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
4627 index 3e1a8a675572..41c605b0058f 100644
4628 --- a/drivers/iommu/iova.c
4629 +++ b/drivers/iommu/iova.c
4630 @@ -577,7 +577,9 @@ void queue_iova(struct iova_domain *iovad,
4631
4632 spin_unlock_irqrestore(&fq->lock, flags);
4633
4634 - if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
4635 + /* Avoid false sharing as much as possible. */
4636 + if (!atomic_read(&iovad->fq_timer_on) &&
4637 + !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
4638 mod_timer(&iovad->fq_timer,
4639 jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
4640 }
4641 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
4642 index 1b5c3672aea2..c3a8d732805f 100644
4643 --- a/drivers/irqchip/irq-gic-v3-its.c
4644 +++ b/drivers/irqchip/irq-gic-v3-its.c
4645 @@ -2641,14 +2641,13 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
4646 struct its_node *its = its_dev->its;
4647 int i;
4648
4649 + bitmap_release_region(its_dev->event_map.lpi_map,
4650 + its_get_event_id(irq_domain_get_irq_data(domain, virq)),
4651 + get_count_order(nr_irqs));
4652 +
4653 for (i = 0; i < nr_irqs; i++) {
4654 struct irq_data *data = irq_domain_get_irq_data(domain,
4655 virq + i);
4656 - u32 event = its_get_event_id(data);
4657 -
4658 - /* Mark interrupt index as unused */
4659 - clear_bit(event, its_dev->event_map.lpi_map);
4660 -
4661 /* Nuke the entry in the domain */
4662 irq_domain_reset_irq_data(data);
4663 }
4664 diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
4665 index cf755964f2f8..c72c036aea76 100644
4666 --- a/drivers/irqchip/irq-sifive-plic.c
4667 +++ b/drivers/irqchip/irq-sifive-plic.c
4668 @@ -244,6 +244,7 @@ static int __init plic_init(struct device_node *node,
4669 struct plic_handler *handler;
4670 irq_hw_number_t hwirq;
4671 int cpu, hartid;
4672 + u32 threshold = 0;
4673
4674 if (of_irq_parse_one(node, i, &parent)) {
4675 pr_err("failed to parse parent for context %d.\n", i);
4676 @@ -266,10 +267,16 @@ static int __init plic_init(struct device_node *node,
4677 continue;
4678 }
4679
4680 + /*
4681 + * When running in M-mode we need to ignore the S-mode handler.
4682 + * Here we assume it always comes later, but that might be a
4683 + * little fragile.
4684 + */
4685 handler = per_cpu_ptr(&plic_handlers, cpu);
4686 if (handler->present) {
4687 pr_warn("handler already present for context %d.\n", i);
4688 - continue;
4689 + threshold = 0xffffffff;
4690 + goto done;
4691 }
4692
4693 handler->present = true;
4694 @@ -279,8 +286,9 @@ static int __init plic_init(struct device_node *node,
4695 handler->enable_base =
4696 plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
4697
4698 +done:
4699 /* priority must be > threshold to trigger an interrupt */
4700 - writel(0, handler->hart_base + CONTEXT_THRESHOLD);
4701 + writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
4702 for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
4703 plic_toggle(handler, hwirq, 0);
4704 nr_handlers++;
4705 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
4706 index c6ba37df4b9d..dff4132b3702 100644
4707 --- a/drivers/isdn/mISDN/socket.c
4708 +++ b/drivers/isdn/mISDN/socket.c
4709 @@ -754,6 +754,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
4710
4711 if (sock->type != SOCK_RAW)
4712 return -ESOCKTNOSUPPORT;
4713 + if (!capable(CAP_NET_RAW))
4714 + return -EPERM;
4715
4716 sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
4717 if (!sk)
4718 diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
4719 index 8d11a5e23227..eff1bda8b520 100644
4720 --- a/drivers/leds/led-triggers.c
4721 +++ b/drivers/leds/led-triggers.c
4722 @@ -173,6 +173,7 @@ err_activate:
4723 list_del(&led_cdev->trig_list);
4724 write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
4725 led_set_brightness(led_cdev, LED_OFF);
4726 + kfree(event);
4727
4728 return ret;
4729 }
4730 diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
4731 index 180895b83b88..e55a64847fe2 100644
4732 --- a/drivers/leds/leds-lm3532.c
4733 +++ b/drivers/leds/leds-lm3532.c
4734 @@ -40,7 +40,7 @@
4735 #define LM3532_REG_ZN_3_LO 0x67
4736 #define LM3532_REG_MAX 0x7e
4737
4738 -/* Contorl Enable */
4739 +/* Control Enable */
4740 #define LM3532_CTRL_A_ENABLE BIT(0)
4741 #define LM3532_CTRL_B_ENABLE BIT(1)
4742 #define LM3532_CTRL_C_ENABLE BIT(2)
4743 @@ -302,7 +302,7 @@ static int lm3532_led_disable(struct lm3532_led *led_data)
4744 int ret;
4745
4746 ret = regmap_update_bits(led_data->priv->regmap, LM3532_REG_ENABLE,
4747 - ctrl_en_val, ~ctrl_en_val);
4748 + ctrl_en_val, 0);
4749 if (ret) {
4750 dev_err(led_data->priv->dev, "Failed to set ctrl:%d\n", ret);
4751 return ret;
4752 @@ -321,7 +321,7 @@ static int lm3532_brightness_set(struct led_classdev *led_cdev,
4753
4754 mutex_lock(&led->priv->lock);
4755
4756 - if (led->mode == LM3532_BL_MODE_ALS) {
4757 + if (led->mode == LM3532_ALS_CTRL) {
4758 if (brt_val > LED_OFF)
4759 ret = lm3532_led_enable(led);
4760 else
4761 @@ -542,11 +542,14 @@ static int lm3532_parse_node(struct lm3532_data *priv)
4762 }
4763
4764 if (led->mode == LM3532_BL_MODE_ALS) {
4765 + led->mode = LM3532_ALS_CTRL;
4766 ret = lm3532_parse_als(priv);
4767 if (ret)
4768 dev_err(&priv->client->dev, "Failed to parse als\n");
4769 else
4770 lm3532_als_configure(priv, led);
4771 + } else {
4772 + led->mode = LM3532_I2C_CTRL;
4773 }
4774
4775 led->num_leds = fwnode_property_read_u32_array(child,
4776 @@ -590,7 +593,13 @@ static int lm3532_parse_node(struct lm3532_data *priv)
4777 goto child_out;
4778 }
4779
4780 - lm3532_init_registers(led);
4781 + ret = lm3532_init_registers(led);
4782 + if (ret) {
4783 + dev_err(&priv->client->dev, "register init err: %d\n",
4784 + ret);
4785 + fwnode_handle_put(child);
4786 + goto child_out;
4787 + }
4788
4789 i++;
4790 }
4791 diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
4792 index 37632fc63741..edb57c42e8b1 100644
4793 --- a/drivers/leds/leds-lp5562.c
4794 +++ b/drivers/leds/leds-lp5562.c
4795 @@ -260,7 +260,11 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
4796 {
4797 const struct firmware *fw = chip->fw;
4798
4799 - if (fw->size > LP5562_PROGRAM_LENGTH) {
4800 + /*
4801 + * the firmware is encoded in ascii hex character, with 2 chars
4802 + * per byte
4803 + */
4804 + if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) {
4805 dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
4806 fw->size);
4807 return;
4808 diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
4809 index 73f5319295bc..c12cd809ab19 100644
4810 --- a/drivers/md/bcache/closure.c
4811 +++ b/drivers/md/bcache/closure.c
4812 @@ -105,8 +105,14 @@ struct closure_syncer {
4813
4814 static void closure_sync_fn(struct closure *cl)
4815 {
4816 - cl->s->done = 1;
4817 - wake_up_process(cl->s->task);
4818 + struct closure_syncer *s = cl->s;
4819 + struct task_struct *p;
4820 +
4821 + rcu_read_lock();
4822 + p = READ_ONCE(s->task);
4823 + s->done = 1;
4824 + wake_up_process(p);
4825 + rcu_read_unlock();
4826 }
4827
4828 void __sched __closure_sync(struct closure *cl)
4829 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
4830 index c9e44ac1f9a6..21d5c1784d0c 100644
4831 --- a/drivers/md/dm-rq.c
4832 +++ b/drivers/md/dm-rq.c
4833 @@ -408,6 +408,7 @@ static int map_request(struct dm_rq_target_io *tio)
4834 ret = dm_dispatch_clone_request(clone, rq);
4835 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
4836 blk_rq_unprep_clone(clone);
4837 + blk_mq_cleanup_rq(clone);
4838 tio->ti->type->release_clone_rq(clone, &tio->info);
4839 tio->clone = NULL;
4840 return DM_MAPIO_REQUEUE;
4841 diff --git a/drivers/md/md.c b/drivers/md/md.c
4842 index 24638ccedce4..3100dd53c64c 100644
4843 --- a/drivers/md/md.c
4844 +++ b/drivers/md/md.c
4845 @@ -1826,8 +1826,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
4846 if (!(le32_to_cpu(sb->feature_map) &
4847 MD_FEATURE_RECOVERY_BITMAP))
4848 rdev->saved_raid_disk = -1;
4849 - } else
4850 - set_bit(In_sync, &rdev->flags);
4851 + } else {
4852 + /*
4853 + * If the array is FROZEN, then the device can't
4854 + * be in_sync with rest of array.
4855 + */
4856 + if (!test_bit(MD_RECOVERY_FROZEN,
4857 + &mddev->recovery))
4858 + set_bit(In_sync, &rdev->flags);
4859 + }
4860 rdev->raid_disk = role;
4861 break;
4862 }
4863 @@ -4176,7 +4183,7 @@ array_state_show(struct mddev *mddev, char *page)
4864 {
4865 enum array_state st = inactive;
4866
4867 - if (mddev->pers)
4868 + if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags))
4869 switch(mddev->ro) {
4870 case 1:
4871 st = readonly;
4872 @@ -5744,9 +5751,6 @@ int md_run(struct mddev *mddev)
4873 md_update_sb(mddev, 0);
4874
4875 md_new_event(mddev);
4876 - sysfs_notify_dirent_safe(mddev->sysfs_state);
4877 - sysfs_notify_dirent_safe(mddev->sysfs_action);
4878 - sysfs_notify(&mddev->kobj, NULL, "degraded");
4879 return 0;
4880
4881 bitmap_abort:
4882 @@ -5767,6 +5771,7 @@ static int do_md_run(struct mddev *mddev)
4883 {
4884 int err;
4885
4886 + set_bit(MD_NOT_READY, &mddev->flags);
4887 err = md_run(mddev);
4888 if (err)
4889 goto out;
4890 @@ -5787,9 +5792,14 @@ static int do_md_run(struct mddev *mddev)
4891
4892 set_capacity(mddev->gendisk, mddev->array_sectors);
4893 revalidate_disk(mddev->gendisk);
4894 + clear_bit(MD_NOT_READY, &mddev->flags);
4895 mddev->changed = 1;
4896 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4897 + sysfs_notify_dirent_safe(mddev->sysfs_state);
4898 + sysfs_notify_dirent_safe(mddev->sysfs_action);
4899 + sysfs_notify(&mddev->kobj, NULL, "degraded");
4900 out:
4901 + clear_bit(MD_NOT_READY, &mddev->flags);
4902 return err;
4903 }
4904
4905 @@ -8900,6 +8910,7 @@ void md_check_recovery(struct mddev *mddev)
4906
4907 if (mddev_trylock(mddev)) {
4908 int spares = 0;
4909 + bool try_set_sync = mddev->safemode != 0;
4910
4911 if (!mddev->external && mddev->safemode == 1)
4912 mddev->safemode = 0;
4913 @@ -8945,7 +8956,7 @@ void md_check_recovery(struct mddev *mddev)
4914 }
4915 }
4916
4917 - if (!mddev->external && !mddev->in_sync) {
4918 + if (try_set_sync && !mddev->external && !mddev->in_sync) {
4919 spin_lock(&mddev->lock);
4920 set_in_sync(mddev);
4921 spin_unlock(&mddev->lock);
4922 @@ -9043,7 +9054,8 @@ void md_reap_sync_thread(struct mddev *mddev)
4923 /* resync has finished, collect result */
4924 md_unregister_thread(&mddev->sync_thread);
4925 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
4926 - !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
4927 + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
4928 + mddev->degraded != mddev->raid_disks) {
4929 /* success...*/
4930 /* activate any spares */
4931 if (mddev->pers->spare_active(mddev)) {
4932 diff --git a/drivers/md/md.h b/drivers/md/md.h
4933 index 10f98200e2f8..08f2aee383e8 100644
4934 --- a/drivers/md/md.h
4935 +++ b/drivers/md/md.h
4936 @@ -248,6 +248,9 @@ enum mddev_flags {
4937 MD_UPDATING_SB, /* md_check_recovery is updating the metadata
4938 * without explicitly holding reconfig_mutex.
4939 */
4940 + MD_NOT_READY, /* do_md_run() is active, so 'array_state'
4941 + * must not report that array is ready yet
4942 + */
4943 };
4944
4945 enum mddev_sb_flags {
4946 diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
4947 index bf5cf184a260..297bbc0f41f0 100644
4948 --- a/drivers/md/raid0.c
4949 +++ b/drivers/md/raid0.c
4950 @@ -19,6 +19,9 @@
4951 #include "raid0.h"
4952 #include "raid5.h"
4953
4954 +static int default_layout = 0;
4955 +module_param(default_layout, int, 0644);
4956 +
4957 #define UNSUPPORTED_MDDEV_FLAGS \
4958 ((1L << MD_HAS_JOURNAL) | \
4959 (1L << MD_JOURNAL_CLEAN) | \
4960 @@ -139,6 +142,19 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
4961 }
4962 pr_debug("md/raid0:%s: FINAL %d zones\n",
4963 mdname(mddev), conf->nr_strip_zones);
4964 +
4965 + if (conf->nr_strip_zones == 1) {
4966 + conf->layout = RAID0_ORIG_LAYOUT;
4967 + } else if (default_layout == RAID0_ORIG_LAYOUT ||
4968 + default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
4969 + conf->layout = default_layout;
4970 + } else {
4971 + pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
4972 + mdname(mddev));
4973 + pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
4974 + err = -ENOTSUPP;
4975 + goto abort;
4976 + }
4977 /*
4978 * now since we have the hard sector sizes, we can make sure
4979 * chunk size is a multiple of that sector size
4980 @@ -547,10 +563,12 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
4981
4982 static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
4983 {
4984 + struct r0conf *conf = mddev->private;
4985 struct strip_zone *zone;
4986 struct md_rdev *tmp_dev;
4987 sector_t bio_sector;
4988 sector_t sector;
4989 + sector_t orig_sector;
4990 unsigned chunk_sects;
4991 unsigned sectors;
4992
4993 @@ -584,8 +602,21 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
4994 bio = split;
4995 }
4996
4997 + orig_sector = sector;
4998 zone = find_zone(mddev->private, &sector);
4999 - tmp_dev = map_sector(mddev, zone, sector, &sector);
5000 + switch (conf->layout) {
5001 + case RAID0_ORIG_LAYOUT:
5002 + tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
5003 + break;
5004 + case RAID0_ALT_MULTIZONE_LAYOUT:
5005 + tmp_dev = map_sector(mddev, zone, sector, &sector);
5006 + break;
5007 + default:
5008 + WARN("md/raid0:%s: Invalid layout\n", mdname(mddev));
5009 + bio_io_error(bio);
5010 + return true;
5011 + }
5012 +
5013 bio_set_dev(bio, tmp_dev->bdev);
5014 bio->bi_iter.bi_sector = sector + zone->dev_start +
5015 tmp_dev->data_offset;
5016 diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
5017 index 540e65d92642..3816e5477db1 100644
5018 --- a/drivers/md/raid0.h
5019 +++ b/drivers/md/raid0.h
5020 @@ -8,11 +8,25 @@ struct strip_zone {
5021 int nb_dev; /* # of devices attached to the zone */
5022 };
5023
5024 +/* Linux 3.14 (20d0189b101) made an unintended change to
5025 + * the RAID0 layout for multi-zone arrays (where devices aren't all
5026 + * the same size.
5027 + * RAID0_ORIG_LAYOUT restores the original layout
5028 + * RAID0_ALT_MULTIZONE_LAYOUT uses the altered layout
5029 + * The layouts are identical when there is only one zone (all
5030 + * devices the same size).
5031 + */
5032 +
5033 +enum r0layout {
5034 + RAID0_ORIG_LAYOUT = 1,
5035 + RAID0_ALT_MULTIZONE_LAYOUT = 2,
5036 +};
5037 struct r0conf {
5038 struct strip_zone *strip_zone;
5039 struct md_rdev **devlist; /* lists of rdevs, pointed to
5040 * by strip_zone->dev */
5041 int nr_strip_zones;
5042 + enum r0layout layout;
5043 };
5044
5045 #endif
5046 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
5047 index 34e26834ad28..5afbb7df06e7 100644
5048 --- a/drivers/md/raid1.c
5049 +++ b/drivers/md/raid1.c
5050 @@ -447,19 +447,21 @@ static void raid1_end_write_request(struct bio *bio)
5051 /* We never try FailFast to WriteMostly devices */
5052 !test_bit(WriteMostly, &rdev->flags)) {
5053 md_error(r1_bio->mddev, rdev);
5054 - if (!test_bit(Faulty, &rdev->flags))
5055 - /* This is the only remaining device,
5056 - * We need to retry the write without
5057 - * FailFast
5058 - */
5059 - set_bit(R1BIO_WriteError, &r1_bio->state);
5060 - else {
5061 - /* Finished with this branch */
5062 - r1_bio->bios[mirror] = NULL;
5063 - to_put = bio;
5064 - }
5065 - } else
5066 + }
5067 +
5068 + /*
5069 + * When the device is faulty, it is not necessary to
5070 + * handle write error.
5071 + * For failfast, this is the only remaining device,
5072 + * We need to retry the write without FailFast.
5073 + */
5074 + if (!test_bit(Faulty, &rdev->flags))
5075 set_bit(R1BIO_WriteError, &r1_bio->state);
5076 + else {
5077 + /* Finished with this branch */
5078 + r1_bio->bios[mirror] = NULL;
5079 + to_put = bio;
5080 + }
5081 } else {
5082 /*
5083 * Set R1BIO_Uptodate in our master bio, so that we
5084 @@ -3127,6 +3129,13 @@ static int raid1_run(struct mddev *mddev)
5085 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
5086 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
5087 mddev->degraded++;
5088 + /*
5089 + * RAID1 needs at least one disk in active
5090 + */
5091 + if (conf->raid_disks - mddev->degraded < 1) {
5092 + ret = -EINVAL;
5093 + goto abort;
5094 + }
5095
5096 if (conf->raid_disks - mddev->degraded == 1)
5097 mddev->recovery_cp = MaxSector;
5098 @@ -3160,8 +3169,12 @@ static int raid1_run(struct mddev *mddev)
5099 ret = md_integrity_register(mddev);
5100 if (ret) {
5101 md_unregister_thread(&mddev->thread);
5102 - raid1_free(mddev, conf);
5103 + goto abort;
5104 }
5105 + return 0;
5106 +
5107 +abort:
5108 + raid1_free(mddev, conf);
5109 return ret;
5110 }
5111
5112 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
5113 index 3de4e13bde98..39f8ef6ee59c 100644
5114 --- a/drivers/md/raid5.c
5115 +++ b/drivers/md/raid5.c
5116 @@ -2526,7 +2526,8 @@ static void raid5_end_read_request(struct bio * bi)
5117 int set_bad = 0;
5118
5119 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
5120 - atomic_inc(&rdev->read_errors);
5121 + if (!(bi->bi_status == BLK_STS_PROTECTION))
5122 + atomic_inc(&rdev->read_errors);
5123 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
5124 pr_warn_ratelimited(
5125 "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
5126 @@ -2558,7 +2559,9 @@ static void raid5_end_read_request(struct bio * bi)
5127 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
5128 retry = 1;
5129 if (retry)
5130 - if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
5131 + if (sh->qd_idx >= 0 && sh->pd_idx == i)
5132 + set_bit(R5_ReadError, &sh->dev[i].flags);
5133 + else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
5134 set_bit(R5_ReadError, &sh->dev[i].flags);
5135 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
5136 } else
5137 @@ -5718,7 +5721,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
5138 do_flush = false;
5139 }
5140
5141 - set_bit(STRIPE_HANDLE, &sh->state);
5142 + if (!sh->batch_head)
5143 + set_bit(STRIPE_HANDLE, &sh->state);
5144 clear_bit(STRIPE_DELAYED, &sh->state);
5145 if ((!sh->batch_head || sh == sh->batch_head) &&
5146 (bi->bi_opf & REQ_SYNC) &&
5147 diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
5148 index 52a867bde15f..4d82a5522072 100644
5149 --- a/drivers/media/cec/cec-notifier.c
5150 +++ b/drivers/media/cec/cec-notifier.c
5151 @@ -218,6 +218,8 @@ void cec_notifier_unregister(struct cec_notifier *n)
5152
5153 mutex_lock(&n->lock);
5154 n->callback = NULL;
5155 + n->cec_adap->notifier = NULL;
5156 + n->cec_adap = NULL;
5157 mutex_unlock(&n->lock);
5158 cec_notifier_put(n);
5159 }
5160 diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
5161 index 40d76eb4c2fe..5a9ba3846f0a 100644
5162 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
5163 +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
5164 @@ -872,17 +872,19 @@ EXPORT_SYMBOL_GPL(vb2_queue_release);
5165 __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
5166 {
5167 struct video_device *vfd = video_devdata(file);
5168 - __poll_t res = 0;
5169 + __poll_t res;
5170 +
5171 + res = vb2_core_poll(q, file, wait);
5172
5173 if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
5174 struct v4l2_fh *fh = file->private_data;
5175
5176 poll_wait(file, &fh->wait, wait);
5177 if (v4l2_event_pending(fh))
5178 - res = EPOLLPRI;
5179 + res |= EPOLLPRI;
5180 }
5181
5182 - return res | vb2_core_poll(q, file, wait);
5183 + return res;
5184 }
5185 EXPORT_SYMBOL_GPL(vb2_poll);
5186
5187 diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
5188 index 209186c5cd9b..06ea30a689d7 100644
5189 --- a/drivers/media/dvb-core/dvb_frontend.c
5190 +++ b/drivers/media/dvb-core/dvb_frontend.c
5191 @@ -152,6 +152,9 @@ static void dvb_frontend_free(struct kref *ref)
5192
5193 static void dvb_frontend_put(struct dvb_frontend *fe)
5194 {
5195 + /* call detach before dropping the reference count */
5196 + if (fe->ops.detach)
5197 + fe->ops.detach(fe);
5198 /*
5199 * Check if the frontend was registered, as otherwise
5200 * kref was not initialized yet.
5201 @@ -3040,7 +3043,6 @@ void dvb_frontend_detach(struct dvb_frontend *fe)
5202 dvb_frontend_invoke_release(fe, fe->ops.release_sec);
5203 dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release);
5204 dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release);
5205 - dvb_frontend_invoke_release(fe, fe->ops.detach);
5206 dvb_frontend_put(fe);
5207 }
5208 EXPORT_SYMBOL(dvb_frontend_detach);
5209 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
5210 index a3393cd4e584..7557fbf9d306 100644
5211 --- a/drivers/media/dvb-core/dvbdev.c
5212 +++ b/drivers/media/dvb-core/dvbdev.c
5213 @@ -339,8 +339,10 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
5214 if (npads) {
5215 dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads),
5216 GFP_KERNEL);
5217 - if (!dvbdev->pads)
5218 + if (!dvbdev->pads) {
5219 + kfree(dvbdev->entity);
5220 return -ENOMEM;
5221 + }
5222 }
5223
5224 switch (type) {
5225 diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
5226 index ba0c49107bd2..d45b4ddc8f91 100644
5227 --- a/drivers/media/dvb-frontends/dvb-pll.c
5228 +++ b/drivers/media/dvb-frontends/dvb-pll.c
5229 @@ -9,6 +9,7 @@
5230
5231 #include <linux/slab.h>
5232 #include <linux/module.h>
5233 +#include <linux/idr.h>
5234 #include <linux/dvb/frontend.h>
5235 #include <asm/types.h>
5236
5237 @@ -34,8 +35,7 @@ struct dvb_pll_priv {
5238 };
5239
5240 #define DVB_PLL_MAX 64
5241 -
5242 -static unsigned int dvb_pll_devcount;
5243 +static DEFINE_IDA(pll_ida);
5244
5245 static int debug;
5246 module_param(debug, int, 0644);
5247 @@ -787,6 +787,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
5248 struct dvb_pll_priv *priv = NULL;
5249 int ret;
5250 const struct dvb_pll_desc *desc;
5251 + int nr;
5252
5253 b1 = kmalloc(1, GFP_KERNEL);
5254 if (!b1)
5255 @@ -795,9 +796,14 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
5256 b1[0] = 0;
5257 msg.buf = b1;
5258
5259 - if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
5260 - (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
5261 - pll_desc_id = id[dvb_pll_devcount];
5262 + nr = ida_simple_get(&pll_ida, 0, DVB_PLL_MAX, GFP_KERNEL);
5263 + if (nr < 0) {
5264 + kfree(b1);
5265 + return NULL;
5266 + }
5267 +
5268 + if (id[nr] > DVB_PLL_UNDEFINED && id[nr] < ARRAY_SIZE(pll_list))
5269 + pll_desc_id = id[nr];
5270
5271 BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list));
5272
5273 @@ -808,24 +814,20 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
5274 fe->ops.i2c_gate_ctrl(fe, 1);
5275
5276 ret = i2c_transfer (i2c, &msg, 1);
5277 - if (ret != 1) {
5278 - kfree(b1);
5279 - return NULL;
5280 - }
5281 + if (ret != 1)
5282 + goto out;
5283 if (fe->ops.i2c_gate_ctrl)
5284 fe->ops.i2c_gate_ctrl(fe, 0);
5285 }
5286
5287 priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
5288 - if (!priv) {
5289 - kfree(b1);
5290 - return NULL;
5291 - }
5292 + if (!priv)
5293 + goto out;
5294
5295 priv->pll_i2c_address = pll_addr;
5296 priv->i2c = i2c;
5297 priv->pll_desc = desc;
5298 - priv->nr = dvb_pll_devcount++;
5299 + priv->nr = nr;
5300
5301 memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops,
5302 sizeof(struct dvb_tuner_ops));
5303 @@ -858,6 +860,11 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
5304 kfree(b1);
5305
5306 return fe;
5307 +out:
5308 + kfree(b1);
5309 + ida_simple_remove(&pll_ida, nr);
5310 +
5311 + return NULL;
5312 }
5313 EXPORT_SYMBOL(dvb_pll_attach);
5314
5315 @@ -894,9 +901,10 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id)
5316
5317 static int dvb_pll_remove(struct i2c_client *client)
5318 {
5319 - struct dvb_frontend *fe;
5320 + struct dvb_frontend *fe = i2c_get_clientdata(client);
5321 + struct dvb_pll_priv *priv = fe->tuner_priv;
5322
5323 - fe = i2c_get_clientdata(client);
5324 + ida_simple_remove(&pll_ida, priv->nr);
5325 dvb_pll_release(fe);
5326 return 0;
5327 }
5328 diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
5329 index 759d60c6d630..afe7920557a8 100644
5330 --- a/drivers/media/i2c/ov5640.c
5331 +++ b/drivers/media/i2c/ov5640.c
5332 @@ -3022,9 +3022,14 @@ static int ov5640_probe(struct i2c_client *client,
5333 /* request optional power down pin */
5334 sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
5335 GPIOD_OUT_HIGH);
5336 + if (IS_ERR(sensor->pwdn_gpio))
5337 + return PTR_ERR(sensor->pwdn_gpio);
5338 +
5339 /* request optional reset pin */
5340 sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
5341 GPIOD_OUT_HIGH);
5342 + if (IS_ERR(sensor->reset_gpio))
5343 + return PTR_ERR(sensor->reset_gpio);
5344
5345 v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
5346
5347 diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
5348 index 124c8df04633..58972c884705 100644
5349 --- a/drivers/media/i2c/ov5645.c
5350 +++ b/drivers/media/i2c/ov5645.c
5351 @@ -45,6 +45,8 @@
5352 #define OV5645_CHIP_ID_HIGH_BYTE 0x56
5353 #define OV5645_CHIP_ID_LOW 0x300b
5354 #define OV5645_CHIP_ID_LOW_BYTE 0x45
5355 +#define OV5645_IO_MIPI_CTRL00 0x300e
5356 +#define OV5645_PAD_OUTPUT00 0x3019
5357 #define OV5645_AWB_MANUAL_CONTROL 0x3406
5358 #define OV5645_AWB_MANUAL_ENABLE BIT(0)
5359 #define OV5645_AEC_PK_MANUAL 0x3503
5360 @@ -55,6 +57,7 @@
5361 #define OV5645_ISP_VFLIP BIT(2)
5362 #define OV5645_TIMING_TC_REG21 0x3821
5363 #define OV5645_SENSOR_MIRROR BIT(1)
5364 +#define OV5645_MIPI_CTRL00 0x4800
5365 #define OV5645_PRE_ISP_TEST_SETTING_1 0x503d
5366 #define OV5645_TEST_PATTERN_MASK 0x3
5367 #define OV5645_SET_TEST_PATTERN(x) ((x) & OV5645_TEST_PATTERN_MASK)
5368 @@ -121,7 +124,6 @@ static const struct reg_value ov5645_global_init_setting[] = {
5369 { 0x3503, 0x07 },
5370 { 0x3002, 0x1c },
5371 { 0x3006, 0xc3 },
5372 - { 0x300e, 0x45 },
5373 { 0x3017, 0x00 },
5374 { 0x3018, 0x00 },
5375 { 0x302e, 0x0b },
5376 @@ -350,7 +352,10 @@ static const struct reg_value ov5645_global_init_setting[] = {
5377 { 0x3a1f, 0x14 },
5378 { 0x0601, 0x02 },
5379 { 0x3008, 0x42 },
5380 - { 0x3008, 0x02 }
5381 + { 0x3008, 0x02 },
5382 + { OV5645_IO_MIPI_CTRL00, 0x40 },
5383 + { OV5645_MIPI_CTRL00, 0x24 },
5384 + { OV5645_PAD_OUTPUT00, 0x70 }
5385 };
5386
5387 static const struct reg_value ov5645_setting_sxga[] = {
5388 @@ -737,13 +742,9 @@ static int ov5645_s_power(struct v4l2_subdev *sd, int on)
5389 goto exit;
5390 }
5391
5392 - ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
5393 - OV5645_SYSTEM_CTRL0_STOP);
5394 - if (ret < 0) {
5395 - ov5645_set_power_off(ov5645);
5396 - goto exit;
5397 - }
5398 + usleep_range(500, 1000);
5399 } else {
5400 + ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58);
5401 ov5645_set_power_off(ov5645);
5402 }
5403 }
5404 @@ -1049,11 +1050,20 @@ static int ov5645_s_stream(struct v4l2_subdev *subdev, int enable)
5405 dev_err(ov5645->dev, "could not sync v4l2 controls\n");
5406 return ret;
5407 }
5408 +
5409 + ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x45);
5410 + if (ret < 0)
5411 + return ret;
5412 +
5413 ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
5414 OV5645_SYSTEM_CTRL0_START);
5415 if (ret < 0)
5416 return ret;
5417 } else {
5418 + ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x40);
5419 + if (ret < 0)
5420 + return ret;
5421 +
5422 ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
5423 OV5645_SYSTEM_CTRL0_STOP);
5424 if (ret < 0)
5425 diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
5426 index 30ab2225fbd0..b350f5c1a989 100644
5427 --- a/drivers/media/i2c/ov9650.c
5428 +++ b/drivers/media/i2c/ov9650.c
5429 @@ -703,6 +703,11 @@ static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain)
5430 for (m = 6; m >= 0; m--)
5431 if (gain >= (1 << m) * 16)
5432 break;
5433 +
5434 + /* Sanity check: don't adjust the gain with a negative value */
5435 + if (m < 0)
5436 + return -EINVAL;
5437 +
5438 rgain = (gain - ((1 << m) * 16)) / (1 << m);
5439 rgain |= (((1 << m) - 1) << 4);
5440
5441 diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
5442 index a62ede096636..5e68182001ec 100644
5443 --- a/drivers/media/i2c/tda1997x.c
5444 +++ b/drivers/media/i2c/tda1997x.c
5445 @@ -2691,7 +2691,13 @@ static int tda1997x_probe(struct i2c_client *client,
5446 }
5447
5448 ret = 0x34 + ((io_read(sd, REG_SLAVE_ADDR)>>4) & 0x03);
5449 - state->client_cec = i2c_new_dummy(client->adapter, ret);
5450 + state->client_cec = devm_i2c_new_dummy_device(&client->dev,
5451 + client->adapter, ret);
5452 + if (IS_ERR(state->client_cec)) {
5453 + ret = PTR_ERR(state->client_cec);
5454 + goto err_free_mutex;
5455 + }
5456 +
5457 v4l_info(client, "CEC slave address 0x%02x\n", ret);
5458
5459 ret = tda1997x_core_init(sd);
5460 @@ -2798,7 +2804,6 @@ static int tda1997x_remove(struct i2c_client *client)
5461 media_entity_cleanup(&sd->entity);
5462 v4l2_ctrl_handler_free(&state->hdl);
5463 regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies);
5464 - i2c_unregister_device(state->client_cec);
5465 cancel_delayed_work(&state->delayed_work_enable_hpd);
5466 mutex_destroy(&state->page_lock);
5467 mutex_destroy(&state->lock);
5468 diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
5469 index 493b1858815f..04e85765373e 100644
5470 --- a/drivers/media/pci/saa7134/saa7134-i2c.c
5471 +++ b/drivers/media/pci/saa7134/saa7134-i2c.c
5472 @@ -342,7 +342,11 @@ static const struct i2c_client saa7134_client_template = {
5473
5474 /* ----------------------------------------------------------- */
5475
5476 -/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
5477 +/*
5478 + * On Medion 7134 reading the SAA7134 chip config EEPROM needs DVB-T
5479 + * demod i2c gate closed due to an address clash between this EEPROM
5480 + * and the demod one.
5481 + */
5482 static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
5483 {
5484 u8 subaddr = 0x7, dmdregval;
5485 @@ -359,14 +363,14 @@ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
5486
5487 ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
5488 if ((ret == 2) && (dmdregval & 0x2)) {
5489 - pr_debug("%s: DVB-T demod i2c gate was left closed\n",
5490 + pr_debug("%s: DVB-T demod i2c gate was left open\n",
5491 dev->name);
5492
5493 data[0] = subaddr;
5494 data[1] = (dmdregval & ~0x2);
5495 if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
5496 - pr_err("%s: EEPROM i2c gate open failure\n",
5497 - dev->name);
5498 + pr_err("%s: EEPROM i2c gate close failure\n",
5499 + dev->name);
5500 }
5501 }
5502
5503 diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
5504 index dca20a3d98e2..f96226930670 100644
5505 --- a/drivers/media/pci/saa7146/hexium_gemini.c
5506 +++ b/drivers/media/pci/saa7146/hexium_gemini.c
5507 @@ -292,6 +292,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
5508 ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
5509 if (ret < 0) {
5510 pr_err("cannot register capture v4l2 device. skipping.\n");
5511 + saa7146_vv_release(dev);
5512 + i2c_del_adapter(&hexium->i2c_adapter);
5513 + kfree(hexium);
5514 return ret;
5515 }
5516
5517 diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
5518 index f899ac3b4a61..4ef37cfc8446 100644
5519 --- a/drivers/media/platform/aspeed-video.c
5520 +++ b/drivers/media/platform/aspeed-video.c
5521 @@ -630,7 +630,7 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
5522 }
5523
5524 if (hsync_counter < 0 || vsync_counter < 0) {
5525 - u32 ctrl;
5526 + u32 ctrl = 0;
5527
5528 if (hsync_counter < 0) {
5529 ctrl = VE_CTRL_HSYNC_POL;
5530 @@ -650,7 +650,8 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
5531 V4L2_DV_VSYNC_POS_POL;
5532 }
5533
5534 - aspeed_video_update(video, VE_CTRL, 0, ctrl);
5535 + if (ctrl)
5536 + aspeed_video_update(video, VE_CTRL, 0, ctrl);
5537 }
5538 }
5539
5540 diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
5541 index e043d55133a3..b7cc8e651e32 100644
5542 --- a/drivers/media/platform/exynos4-is/fimc-is.c
5543 +++ b/drivers/media/platform/exynos4-is/fimc-is.c
5544 @@ -806,6 +806,7 @@ static int fimc_is_probe(struct platform_device *pdev)
5545 return -ENODEV;
5546
5547 is->pmu_regs = of_iomap(node, 0);
5548 + of_node_put(node);
5549 if (!is->pmu_regs)
5550 return -ENOMEM;
5551
5552 diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
5553 index d53427a8db11..a838189d4490 100644
5554 --- a/drivers/media/platform/exynos4-is/media-dev.c
5555 +++ b/drivers/media/platform/exynos4-is/media-dev.c
5556 @@ -501,6 +501,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
5557 continue;
5558
5559 ret = fimc_md_parse_port_node(fmd, port, index);
5560 + of_node_put(port);
5561 if (ret < 0) {
5562 of_node_put(node);
5563 goto cleanup;
5564 @@ -542,6 +543,7 @@ static int __of_get_csis_id(struct device_node *np)
5565 if (!np)
5566 return -EINVAL;
5567 of_property_read_u32(np, "reg", &reg);
5568 + of_node_put(np);
5569 return reg - FIMC_INPUT_MIPI_CSI2_0;
5570 }
5571
5572 diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
5573 index 691be788e38b..b74e4f50d7d9 100644
5574 --- a/drivers/media/platform/fsl-viu.c
5575 +++ b/drivers/media/platform/fsl-viu.c
5576 @@ -32,7 +32,7 @@
5577 #define VIU_VERSION "0.5.1"
5578
5579 /* Allow building this driver with COMPILE_TEST */
5580 -#ifndef CONFIG_PPC
5581 +#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE)
5582 #define out_be32(v, a) iowrite32be(a, (void __iomem *)v)
5583 #define in_be32(a) ioread32be((void __iomem *)a)
5584 #endif
5585 diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
5586 index fc9faec85edb..5d44f2e92dd5 100644
5587 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
5588 +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
5589 @@ -110,7 +110,9 @@ static int mtk_mdp_probe(struct platform_device *pdev)
5590 mutex_init(&mdp->vpulock);
5591
5592 /* Old dts had the components as child nodes */
5593 - if (of_get_next_child(dev->of_node, NULL)) {
5594 + node = of_get_next_child(dev->of_node, NULL);
5595 + if (node) {
5596 + of_node_put(node);
5597 parent = dev->of_node;
5598 dev_warn(dev, "device tree is out of date\n");
5599 } else {
5600 diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
5601 index 83216fc7156b..9cdb43859ae0 100644
5602 --- a/drivers/media/platform/omap3isp/isp.c
5603 +++ b/drivers/media/platform/omap3isp/isp.c
5604 @@ -719,6 +719,10 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
5605 s_stream, mode);
5606 pipe->do_propagation = true;
5607 }
5608 +
5609 + /* Stop at the first external sub-device. */
5610 + if (subdev->dev != isp->dev)
5611 + break;
5612 }
5613
5614 return 0;
5615 @@ -833,6 +837,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
5616 &subdev->entity);
5617 failure = -ETIMEDOUT;
5618 }
5619 +
5620 + /* Stop at the first external sub-device. */
5621 + if (subdev->dev != isp->dev)
5622 + break;
5623 }
5624
5625 return failure;
5626 diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
5627 index 1ba8a5ba343f..e2f336c715a4 100644
5628 --- a/drivers/media/platform/omap3isp/ispccdc.c
5629 +++ b/drivers/media/platform/omap3isp/ispccdc.c
5630 @@ -2602,6 +2602,7 @@ int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
5631 int ret;
5632
5633 /* Register the subdev and video node. */
5634 + ccdc->subdev.dev = vdev->mdev->dev;
5635 ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
5636 if (ret < 0)
5637 goto error;
5638 diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
5639 index efca45bb02c8..d0a49cdfd22d 100644
5640 --- a/drivers/media/platform/omap3isp/ispccp2.c
5641 +++ b/drivers/media/platform/omap3isp/ispccp2.c
5642 @@ -1031,6 +1031,7 @@ int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
5643 int ret;
5644
5645 /* Register the subdev and video nodes. */
5646 + ccp2->subdev.dev = vdev->mdev->dev;
5647 ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
5648 if (ret < 0)
5649 goto error;
5650 diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
5651 index e85917f4a50c..fd493c5e4e24 100644
5652 --- a/drivers/media/platform/omap3isp/ispcsi2.c
5653 +++ b/drivers/media/platform/omap3isp/ispcsi2.c
5654 @@ -1198,6 +1198,7 @@ int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
5655 int ret;
5656
5657 /* Register the subdev and video nodes. */
5658 + csi2->subdev.dev = vdev->mdev->dev;
5659 ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
5660 if (ret < 0)
5661 goto error;
5662 diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
5663 index 40e22400cf5e..97d660606d98 100644
5664 --- a/drivers/media/platform/omap3isp/isppreview.c
5665 +++ b/drivers/media/platform/omap3isp/isppreview.c
5666 @@ -2225,6 +2225,7 @@ int omap3isp_preview_register_entities(struct isp_prev_device *prev,
5667 int ret;
5668
5669 /* Register the subdev and video nodes. */
5670 + prev->subdev.dev = vdev->mdev->dev;
5671 ret = v4l2_device_register_subdev(vdev, &prev->subdev);
5672 if (ret < 0)
5673 goto error;
5674 diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
5675 index 21ca6954df72..78d9dd7ea2da 100644
5676 --- a/drivers/media/platform/omap3isp/ispresizer.c
5677 +++ b/drivers/media/platform/omap3isp/ispresizer.c
5678 @@ -1681,6 +1681,7 @@ int omap3isp_resizer_register_entities(struct isp_res_device *res,
5679 int ret;
5680
5681 /* Register the subdev and video nodes. */
5682 + res->subdev.dev = vdev->mdev->dev;
5683 ret = v4l2_device_register_subdev(vdev, &res->subdev);
5684 if (ret < 0)
5685 goto error;
5686 diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
5687 index 62b2eacb96fd..5b9b57f4d9bf 100644
5688 --- a/drivers/media/platform/omap3isp/ispstat.c
5689 +++ b/drivers/media/platform/omap3isp/ispstat.c
5690 @@ -1026,6 +1026,8 @@ void omap3isp_stat_unregister_entities(struct ispstat *stat)
5691 int omap3isp_stat_register_entities(struct ispstat *stat,
5692 struct v4l2_device *vdev)
5693 {
5694 + stat->subdev.dev = vdev->mdev->dev;
5695 +
5696 return v4l2_device_register_subdev(vdev, &stat->subdev);
5697 }
5698
5699 diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
5700 index 43aae9b6bb20..c23ec127c277 100644
5701 --- a/drivers/media/platform/rcar_fdp1.c
5702 +++ b/drivers/media/platform/rcar_fdp1.c
5703 @@ -2306,7 +2306,7 @@ static int fdp1_probe(struct platform_device *pdev)
5704 fdp1->fcp = rcar_fcp_get(fcp_node);
5705 of_node_put(fcp_node);
5706 if (IS_ERR(fdp1->fcp)) {
5707 - dev_err(&pdev->dev, "FCP not found (%ld)\n",
5708 + dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
5709 PTR_ERR(fdp1->fcp));
5710 return PTR_ERR(fdp1->fcp);
5711 }
5712 diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
5713 index 3e916c8befb7..7a52f585cab7 100644
5714 --- a/drivers/media/platform/vivid/vivid-ctrls.c
5715 +++ b/drivers/media/platform/vivid/vivid-ctrls.c
5716 @@ -1473,7 +1473,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
5717 v4l2_ctrl_handler_init(hdl_vid_cap, 55);
5718 v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_class, NULL);
5719 v4l2_ctrl_handler_init(hdl_vid_out, 26);
5720 - if (!no_error_inj || dev->has_fb)
5721 + if (!no_error_inj || dev->has_fb || dev->num_hdmi_outputs)
5722 v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_class, NULL);
5723 v4l2_ctrl_handler_init(hdl_vbi_cap, 21);
5724 v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_class, NULL);
5725 diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
5726 index 6cf495a7d5cc..003319d7816d 100644
5727 --- a/drivers/media/platform/vivid/vivid-kthread-cap.c
5728 +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
5729 @@ -232,8 +232,8 @@ static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
5730 return vbuf;
5731 }
5732
5733 -static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
5734 - struct vivid_buffer *vid_cap_buf)
5735 +static noinline_for_stack int vivid_copy_buffer(struct vivid_dev *dev, unsigned p,
5736 + u8 *vcapbuf, struct vivid_buffer *vid_cap_buf)
5737 {
5738 bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index];
5739 struct tpg_data *tpg = &dev->tpg;
5740 @@ -658,6 +658,8 @@ static void vivid_cap_update_frame_period(struct vivid_dev *dev)
5741 u64 f_period;
5742
5743 f_period = (u64)dev->timeperframe_vid_cap.numerator * 1000000000;
5744 + if (WARN_ON(dev->timeperframe_vid_cap.denominator == 0))
5745 + dev->timeperframe_vid_cap.denominator = 1;
5746 do_div(f_period, dev->timeperframe_vid_cap.denominator);
5747 if (dev->field_cap == V4L2_FIELD_ALTERNATE)
5748 f_period >>= 1;
5749 @@ -670,7 +672,8 @@ static void vivid_cap_update_frame_period(struct vivid_dev *dev)
5750 dev->cap_frame_period = f_period;
5751 }
5752
5753 -static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
5754 +static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
5755 + int dropped_bufs)
5756 {
5757 struct vivid_buffer *vid_cap_buf = NULL;
5758 struct vivid_buffer *vbi_cap_buf = NULL;
5759 diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
5760 index 104b6f514536..d7b43037e500 100644
5761 --- a/drivers/media/platform/vsp1/vsp1_dl.c
5762 +++ b/drivers/media/platform/vsp1/vsp1_dl.c
5763 @@ -557,8 +557,10 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
5764
5765 /* Get a default body for our list. */
5766 dl->body0 = vsp1_dl_body_get(dlm->pool);
5767 - if (!dl->body0)
5768 + if (!dl->body0) {
5769 + kfree(dl);
5770 return NULL;
5771 + }
5772
5773 header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
5774
5775 diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
5776 index 49073747b1e7..fedff68d8c49 100644
5777 --- a/drivers/media/radio/si470x/radio-si470x-usb.c
5778 +++ b/drivers/media/radio/si470x/radio-si470x-usb.c
5779 @@ -734,7 +734,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
5780 /* start radio */
5781 retval = si470x_start_usb(radio);
5782 if (retval < 0)
5783 - goto err_all;
5784 + goto err_buf;
5785
5786 /* set initial frequency */
5787 si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
5788 @@ -749,6 +749,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
5789
5790 return 0;
5791 err_all:
5792 + usb_kill_urb(radio->int_in_urb);
5793 +err_buf:
5794 kfree(radio->buffer);
5795 err_ctrl:
5796 v4l2_ctrl_handler_free(&radio->hdl);
5797 @@ -822,6 +824,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
5798 mutex_lock(&radio->lock);
5799 v4l2_device_disconnect(&radio->v4l2_dev);
5800 video_unregister_device(&radio->videodev);
5801 + usb_kill_urb(radio->int_in_urb);
5802 usb_set_intfdata(intf, NULL);
5803 mutex_unlock(&radio->lock);
5804 v4l2_device_put(&radio->v4l2_dev);
5805 diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
5806 index ea05e125016a..872d6441e512 100644
5807 --- a/drivers/media/rc/iguanair.c
5808 +++ b/drivers/media/rc/iguanair.c
5809 @@ -413,6 +413,10 @@ static int iguanair_probe(struct usb_interface *intf,
5810 int ret, pipein, pipeout;
5811 struct usb_host_interface *idesc;
5812
5813 + idesc = intf->altsetting;
5814 + if (idesc->desc.bNumEndpoints < 2)
5815 + return -ENODEV;
5816 +
5817 ir = kzalloc(sizeof(*ir), GFP_KERNEL);
5818 rc = rc_allocate_device(RC_DRIVER_IR_RAW);
5819 if (!ir || !rc) {
5820 @@ -427,18 +431,13 @@ static int iguanair_probe(struct usb_interface *intf,
5821 ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
5822 ir->urb_out = usb_alloc_urb(0, GFP_KERNEL);
5823
5824 - if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) {
5825 + if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out ||
5826 + !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) ||
5827 + !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) {
5828 ret = -ENOMEM;
5829 goto out;
5830 }
5831
5832 - idesc = intf->altsetting;
5833 -
5834 - if (idesc->desc.bNumEndpoints < 2) {
5835 - ret = -ENODEV;
5836 - goto out;
5837 - }
5838 -
5839 ir->rc = rc;
5840 ir->dev = &intf->dev;
5841 ir->udev = udev;
5842 diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
5843 index 7bee72108b0e..37a850421fbb 100644
5844 --- a/drivers/media/rc/imon.c
5845 +++ b/drivers/media/rc/imon.c
5846 @@ -1826,12 +1826,17 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
5847 break;
5848 /* iMON VFD, MCE IR */
5849 case 0x46:
5850 - case 0x7e:
5851 case 0x9e:
5852 dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
5853 detected_display_type = IMON_DISPLAY_TYPE_VFD;
5854 allowed_protos = RC_PROTO_BIT_RC6_MCE;
5855 break;
5856 + /* iMON VFD, iMON or MCE IR */
5857 + case 0x7e:
5858 + dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR");
5859 + detected_display_type = IMON_DISPLAY_TYPE_VFD;
5860 + allowed_protos |= RC_PROTO_BIT_RC6_MCE;
5861 + break;
5862 /* iMON LCD, MCE IR */
5863 case 0x9f:
5864 dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
5865 diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
5866 index 4d5351ebb940..9929fcdec74d 100644
5867 --- a/drivers/media/rc/mceusb.c
5868 +++ b/drivers/media/rc/mceusb.c
5869 @@ -31,21 +31,22 @@
5870 #include <linux/pm_wakeup.h>
5871 #include <media/rc-core.h>
5872
5873 -#define DRIVER_VERSION "1.94"
5874 +#define DRIVER_VERSION "1.95"
5875 #define DRIVER_AUTHOR "Jarod Wilson <jarod@redhat.com>"
5876 #define DRIVER_DESC "Windows Media Center Ed. eHome Infrared Transceiver " \
5877 "device driver"
5878 #define DRIVER_NAME "mceusb"
5879
5880 +#define USB_TX_TIMEOUT 1000 /* in milliseconds */
5881 #define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
5882 #define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
5883
5884 /* MCE constants */
5885 -#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
5886 +#define MCE_IRBUF_SIZE 128 /* TX IR buffer length */
5887 #define MCE_TIME_UNIT 50 /* Approx 50us resolution */
5888 -#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */
5889 -#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
5890 -#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
5891 +#define MCE_PACKET_SIZE 31 /* Max length of packet (with header) */
5892 +#define MCE_IRDATA_HEADER (0x80 + MCE_PACKET_SIZE - 1)
5893 + /* Actual format is 0x80 + num_bytes */
5894 #define MCE_IRDATA_TRAILER 0x80 /* End of IR data */
5895 #define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
5896 #define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
5897 @@ -607,9 +608,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
5898 if (len <= skip)
5899 return;
5900
5901 - dev_dbg(dev, "%cx data: %*ph (length=%d)",
5902 - (out ? 't' : 'r'),
5903 - min(len, buf_len - offset), buf + offset, len);
5904 + dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
5905 + (out ? 't' : 'r'), offset,
5906 + min(len, buf_len - offset), buf + offset, len, buf_len);
5907
5908 inout = out ? "Request" : "Got";
5909
5910 @@ -731,6 +732,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
5911 case MCE_RSP_CMD_ILLEGAL:
5912 dev_dbg(dev, "Illegal PORT_IR command");
5913 break;
5914 + case MCE_RSP_TX_TIMEOUT:
5915 + dev_dbg(dev, "IR TX timeout (TX buffer underrun)");
5916 + break;
5917 default:
5918 dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
5919 cmd, subcmd);
5920 @@ -745,13 +749,14 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
5921 dev_dbg(dev, "End of raw IR data");
5922 else if ((cmd != MCE_CMD_PORT_IR) &&
5923 ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
5924 - dev_dbg(dev, "Raw IR data, %d pulse/space samples", ir->rem);
5925 + dev_dbg(dev, "Raw IR data, %d pulse/space samples",
5926 + cmd & MCE_PACKET_LENGTH_MASK);
5927 #endif
5928 }
5929
5930 /*
5931 * Schedule work that can't be done in interrupt handlers
5932 - * (mceusb_dev_recv() and mce_async_callback()) nor tasklets.
5933 + * (mceusb_dev_recv() and mce_write_callback()) nor tasklets.
5934 * Invokes mceusb_deferred_kevent() for recovering from
5935 * error events specified by the kevent bit field.
5936 */
5937 @@ -764,23 +769,80 @@ static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent)
5938 dev_dbg(ir->dev, "kevent %d scheduled", kevent);
5939 }
5940
5941 -static void mce_async_callback(struct urb *urb)
5942 +static void mce_write_callback(struct urb *urb)
5943 {
5944 - struct mceusb_dev *ir;
5945 - int len;
5946 -
5947 if (!urb)
5948 return;
5949
5950 - ir = urb->context;
5951 + complete(urb->context);
5952 +}
5953 +
5954 +/*
5955 + * Write (TX/send) data to MCE device USB endpoint out.
5956 + * Used for IR blaster TX and MCE device commands.
5957 + *
5958 + * Return: The number of bytes written (> 0) or errno (< 0).
5959 + */
5960 +static int mce_write(struct mceusb_dev *ir, u8 *data, int size)
5961 +{
5962 + int ret;
5963 + struct urb *urb;
5964 + struct device *dev = ir->dev;
5965 + unsigned char *buf_out;
5966 + struct completion tx_done;
5967 + unsigned long expire;
5968 + unsigned long ret_wait;
5969 +
5970 + mceusb_dev_printdata(ir, data, size, 0, size, true);
5971 +
5972 + urb = usb_alloc_urb(0, GFP_KERNEL);
5973 + if (unlikely(!urb)) {
5974 + dev_err(dev, "Error: mce write couldn't allocate urb");
5975 + return -ENOMEM;
5976 + }
5977 +
5978 + buf_out = kmalloc(size, GFP_KERNEL);
5979 + if (!buf_out) {
5980 + usb_free_urb(urb);
5981 + return -ENOMEM;
5982 + }
5983 +
5984 + init_completion(&tx_done);
5985 +
5986 + /* outbound data */
5987 + if (usb_endpoint_xfer_int(ir->usb_ep_out))
5988 + usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out,
5989 + buf_out, size, mce_write_callback, &tx_done,
5990 + ir->usb_ep_out->bInterval);
5991 + else
5992 + usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out,
5993 + buf_out, size, mce_write_callback, &tx_done);
5994 + memcpy(buf_out, data, size);
5995 +
5996 + ret = usb_submit_urb(urb, GFP_KERNEL);
5997 + if (ret) {
5998 + dev_err(dev, "Error: mce write submit urb error = %d", ret);
5999 + kfree(buf_out);
6000 + usb_free_urb(urb);
6001 + return ret;
6002 + }
6003 +
6004 + expire = msecs_to_jiffies(USB_TX_TIMEOUT);
6005 + ret_wait = wait_for_completion_timeout(&tx_done, expire);
6006 + if (!ret_wait) {
6007 + dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))",
6008 + expire, USB_TX_TIMEOUT);
6009 + usb_kill_urb(urb);
6010 + ret = (urb->status == -ENOENT ? -ETIMEDOUT : urb->status);
6011 + } else {
6012 + ret = urb->status;
6013 + }
6014 + if (ret >= 0)
6015 + ret = urb->actual_length; /* bytes written */
6016
6017 switch (urb->status) {
6018 /* success */
6019 case 0:
6020 - len = urb->actual_length;
6021 -
6022 - mceusb_dev_printdata(ir, urb->transfer_buffer, len,
6023 - 0, len, true);
6024 break;
6025
6026 case -ECONNRESET:
6027 @@ -790,140 +852,135 @@ static void mce_async_callback(struct urb *urb)
6028 break;
6029
6030 case -EPIPE:
6031 - dev_err(ir->dev, "Error: request urb status = %d (TX HALT)",
6032 + dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)",
6033 urb->status);
6034 mceusb_defer_kevent(ir, EVENT_TX_HALT);
6035 break;
6036
6037 default:
6038 - dev_err(ir->dev, "Error: request urb status = %d", urb->status);
6039 + dev_err(ir->dev, "Error: mce write urb status = %d",
6040 + urb->status);
6041 break;
6042 }
6043
6044 - /* the transfer buffer and urb were allocated in mce_request_packet */
6045 - kfree(urb->transfer_buffer);
6046 - usb_free_urb(urb);
6047 -}
6048 -
6049 -/* request outgoing (send) usb packet - used to initialize remote */
6050 -static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
6051 - int size)
6052 -{
6053 - int res;
6054 - struct urb *async_urb;
6055 - struct device *dev = ir->dev;
6056 - unsigned char *async_buf;
6057 + dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)",
6058 + ret, ret_wait, expire, USB_TX_TIMEOUT,
6059 + urb->actual_length, urb->status);
6060
6061 - async_urb = usb_alloc_urb(0, GFP_KERNEL);
6062 - if (unlikely(!async_urb)) {
6063 - dev_err(dev, "Error, couldn't allocate urb!");
6064 - return;
6065 - }
6066 -
6067 - async_buf = kmalloc(size, GFP_KERNEL);
6068 - if (!async_buf) {
6069 - usb_free_urb(async_urb);
6070 - return;
6071 - }
6072 -
6073 - /* outbound data */
6074 - if (usb_endpoint_xfer_int(ir->usb_ep_out))
6075 - usb_fill_int_urb(async_urb, ir->usbdev, ir->pipe_out,
6076 - async_buf, size, mce_async_callback, ir,
6077 - ir->usb_ep_out->bInterval);
6078 - else
6079 - usb_fill_bulk_urb(async_urb, ir->usbdev, ir->pipe_out,
6080 - async_buf, size, mce_async_callback, ir);
6081 -
6082 - memcpy(async_buf, data, size);
6083 -
6084 - dev_dbg(dev, "send request called (size=%#x)", size);
6085 + kfree(buf_out);
6086 + usb_free_urb(urb);
6087
6088 - res = usb_submit_urb(async_urb, GFP_ATOMIC);
6089 - if (res) {
6090 - dev_err(dev, "send request FAILED! (res=%d)", res);
6091 - kfree(async_buf);
6092 - usb_free_urb(async_urb);
6093 - return;
6094 - }
6095 - dev_dbg(dev, "send request complete (res=%d)", res);
6096 + return ret;
6097 }
6098
6099 -static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
6100 +static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size)
6101 {
6102 int rsize = sizeof(DEVICE_RESUME);
6103
6104 if (ir->need_reset) {
6105 ir->need_reset = false;
6106 - mce_request_packet(ir, DEVICE_RESUME, rsize);
6107 + mce_write(ir, DEVICE_RESUME, rsize);
6108 msleep(10);
6109 }
6110
6111 - mce_request_packet(ir, data, size);
6112 + mce_write(ir, data, size);
6113 msleep(10);
6114 }
6115
6116 -/* Send data out the IR blaster port(s) */
6117 +/*
6118 + * Transmit IR out the MCE device IR blaster port(s).
6119 + *
6120 + * Convert IR pulse/space sequence from LIRC to MCE format.
6121 + * Break up a long IR sequence into multiple parts (MCE IR data packets).
6122 + *
6123 + * u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec.
6124 + * Pulses and spaces are implicit by their position.
6125 + * The first IR sample, txbuf[0], is always a pulse.
6126 + *
6127 + * u8 irbuf[] consists of multiple IR data packets for the MCE device.
6128 + * A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples.
6129 + * An IR sample is 1-bit pulse/space flag with 7-bit time
6130 + * in MCE time units (50usec).
6131 + *
6132 + * Return: The number of IR samples sent (> 0) or errno (< 0).
6133 + */
6134 static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
6135 {
6136 struct mceusb_dev *ir = dev->priv;
6137 - int i, length, ret = 0;
6138 - int cmdcount = 0;
6139 - unsigned char cmdbuf[MCE_CMDBUF_SIZE];
6140 -
6141 - /* MCE tx init header */
6142 - cmdbuf[cmdcount++] = MCE_CMD_PORT_IR;
6143 - cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS;
6144 - cmdbuf[cmdcount++] = ir->tx_mask;
6145 + u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 };
6146 + u8 irbuf[MCE_IRBUF_SIZE];
6147 + int ircount = 0;
6148 + unsigned int irsample;
6149 + int i, length, ret;
6150
6151 /* Send the set TX ports command */
6152 - mce_async_out(ir, cmdbuf, cmdcount);
6153 - cmdcount = 0;
6154 -
6155 - /* Generate mce packet data */
6156 - for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) {
6157 - txbuf[i] = txbuf[i] / MCE_TIME_UNIT;
6158 -
6159 - do { /* loop to support long pulses/spaces > 127*50us=6.35ms */
6160 -
6161 - /* Insert mce packet header every 4th entry */
6162 - if ((cmdcount < MCE_CMDBUF_SIZE) &&
6163 - (cmdcount % MCE_CODE_LENGTH) == 0)
6164 - cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
6165 -
6166 - /* Insert mce packet data */
6167 - if (cmdcount < MCE_CMDBUF_SIZE)
6168 - cmdbuf[cmdcount++] =
6169 - (txbuf[i] < MCE_PULSE_BIT ?
6170 - txbuf[i] : MCE_MAX_PULSE_LENGTH) |
6171 - (i & 1 ? 0x00 : MCE_PULSE_BIT);
6172 - else {
6173 - ret = -EINVAL;
6174 - goto out;
6175 + cmdbuf[2] = ir->tx_mask;
6176 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
6177 +
6178 + /* Generate mce IR data packet */
6179 + for (i = 0; i < count; i++) {
6180 + irsample = txbuf[i] / MCE_TIME_UNIT;
6181 +
6182 + /* loop to support long pulses/spaces > 6350us (127*50us) */
6183 + while (irsample > 0) {
6184 + /* Insert IR header every 30th entry */
6185 + if (ircount % MCE_PACKET_SIZE == 0) {
6186 + /* Room for IR header and one IR sample? */
6187 + if (ircount >= MCE_IRBUF_SIZE - 1) {
6188 + /* Send near full buffer */
6189 + ret = mce_write(ir, irbuf, ircount);
6190 + if (ret < 0)
6191 + return ret;
6192 + ircount = 0;
6193 + }
6194 + irbuf[ircount++] = MCE_IRDATA_HEADER;
6195 }
6196
6197 - } while ((txbuf[i] > MCE_MAX_PULSE_LENGTH) &&
6198 - (txbuf[i] -= MCE_MAX_PULSE_LENGTH));
6199 - }
6200 -
6201 - /* Check if we have room for the empty packet at the end */
6202 - if (cmdcount >= MCE_CMDBUF_SIZE) {
6203 - ret = -EINVAL;
6204 - goto out;
6205 - }
6206 + /* Insert IR sample */
6207 + if (irsample <= MCE_MAX_PULSE_LENGTH) {
6208 + irbuf[ircount] = irsample;
6209 + irsample = 0;
6210 + } else {
6211 + irbuf[ircount] = MCE_MAX_PULSE_LENGTH;
6212 + irsample -= MCE_MAX_PULSE_LENGTH;
6213 + }
6214 + /*
6215 + * Even i = IR pulse
6216 + * Odd i = IR space
6217 + */
6218 + irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT);
6219 + ircount++;
6220 +
6221 + /* IR buffer full? */
6222 + if (ircount >= MCE_IRBUF_SIZE) {
6223 + /* Fix packet length in last header */
6224 + length = ircount % MCE_PACKET_SIZE;
6225 + if (length > 0)
6226 + irbuf[ircount - length] -=
6227 + MCE_PACKET_SIZE - length;
6228 + /* Send full buffer */
6229 + ret = mce_write(ir, irbuf, ircount);
6230 + if (ret < 0)
6231 + return ret;
6232 + ircount = 0;
6233 + }
6234 + }
6235 + } /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */
6236
6237 /* Fix packet length in last header */
6238 - length = cmdcount % MCE_CODE_LENGTH;
6239 - cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length;
6240 + length = ircount % MCE_PACKET_SIZE;
6241 + if (length > 0)
6242 + irbuf[ircount - length] -= MCE_PACKET_SIZE - length;
6243
6244 - /* All mce commands end with an empty packet (0x80) */
6245 - cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
6246 + /* Append IR trailer (0x80) to final partial (or empty) IR buffer */
6247 + irbuf[ircount++] = MCE_IRDATA_TRAILER;
6248
6249 - /* Transmit the command to the mce device */
6250 - mce_async_out(ir, cmdbuf, cmdcount);
6251 + /* Send final buffer */
6252 + ret = mce_write(ir, irbuf, ircount);
6253 + if (ret < 0)
6254 + return ret;
6255
6256 -out:
6257 - return ret ? ret : count;
6258 + return count;
6259 }
6260
6261 /* Sets active IR outputs -- mce devices typically have two */
6262 @@ -963,7 +1020,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
6263 cmdbuf[2] = MCE_CMD_SIG_END;
6264 cmdbuf[3] = MCE_IRDATA_TRAILER;
6265 dev_dbg(ir->dev, "disabling carrier modulation");
6266 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
6267 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
6268 return 0;
6269 }
6270
6271 @@ -977,7 +1034,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
6272 carrier);
6273
6274 /* Transmit new carrier to mce device */
6275 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
6276 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
6277 return 0;
6278 }
6279 }
6280 @@ -1000,10 +1057,10 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
6281 cmdbuf[2] = units >> 8;
6282 cmdbuf[3] = units;
6283
6284 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
6285 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
6286
6287 /* get receiver timeout value */
6288 - mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
6289 + mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
6290
6291 return 0;
6292 }
6293 @@ -1028,7 +1085,7 @@ static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable)
6294 ir->wideband_rx_enabled = false;
6295 cmdbuf[2] = 1; /* port 1 is long range receiver */
6296 }
6297 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
6298 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
6299 /* response from device sets ir->learning_active */
6300
6301 return 0;
6302 @@ -1051,7 +1108,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
6303 ir->carrier_report_enabled = true;
6304 if (!ir->learning_active) {
6305 cmdbuf[2] = 2; /* port 2 is short range receiver */
6306 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
6307 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
6308 }
6309 } else {
6310 ir->carrier_report_enabled = false;
6311 @@ -1062,7 +1119,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
6312 */
6313 if (ir->learning_active && !ir->wideband_rx_enabled) {
6314 cmdbuf[2] = 1; /* port 1 is long range receiver */
6315 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
6316 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
6317 }
6318 }
6319
6320 @@ -1141,6 +1198,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
6321 }
6322 break;
6323 case MCE_RSP_CMD_ILLEGAL:
6324 + case MCE_RSP_TX_TIMEOUT:
6325 ir->need_reset = true;
6326 break;
6327 default:
6328 @@ -1279,7 +1337,7 @@ static void mceusb_get_emulator_version(struct mceusb_dev *ir)
6329 {
6330 /* If we get no reply or an illegal command reply, its ver 1, says MS */
6331 ir->emver = 1;
6332 - mce_async_out(ir, GET_EMVER, sizeof(GET_EMVER));
6333 + mce_command_out(ir, GET_EMVER, sizeof(GET_EMVER));
6334 }
6335
6336 static void mceusb_gen1_init(struct mceusb_dev *ir)
6337 @@ -1325,10 +1383,10 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
6338 dev_dbg(dev, "set handshake - retC = %d", ret);
6339
6340 /* device resume */
6341 - mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
6342 + mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
6343
6344 /* get hw/sw revision? */
6345 - mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
6346 + mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
6347
6348 kfree(data);
6349 }
6350 @@ -1336,13 +1394,13 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
6351 static void mceusb_gen2_init(struct mceusb_dev *ir)
6352 {
6353 /* device resume */
6354 - mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
6355 + mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
6356
6357 /* get wake version (protocol, key, address) */
6358 - mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
6359 + mce_command_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
6360
6361 /* unknown what this one actually returns... */
6362 - mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
6363 + mce_command_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
6364 }
6365
6366 static void mceusb_get_parameters(struct mceusb_dev *ir)
6367 @@ -1356,24 +1414,24 @@ static void mceusb_get_parameters(struct mceusb_dev *ir)
6368 ir->num_rxports = 2;
6369
6370 /* get number of tx and rx ports */
6371 - mce_async_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
6372 + mce_command_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
6373
6374 /* get the carrier and frequency */
6375 - mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
6376 + mce_command_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
6377
6378 if (ir->num_txports && !ir->flags.no_tx)
6379 /* get the transmitter bitmask */
6380 - mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
6381 + mce_command_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
6382
6383 /* get receiver timeout value */
6384 - mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
6385 + mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
6386
6387 /* get receiver sensor setting */
6388 - mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
6389 + mce_command_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
6390
6391 for (i = 0; i < ir->num_txports; i++) {
6392 cmdbuf[2] = i;
6393 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
6394 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
6395 }
6396 }
6397
6398 @@ -1382,7 +1440,7 @@ static void mceusb_flash_led(struct mceusb_dev *ir)
6399 if (ir->emver < 2)
6400 return;
6401
6402 - mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED));
6403 + mce_command_out(ir, FLASH_LED, sizeof(FLASH_LED));
6404 }
6405
6406 /*
6407 diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
6408 index 50fb0aebb8d4..f2259082e3d8 100644
6409 --- a/drivers/media/rc/mtk-cir.c
6410 +++ b/drivers/media/rc/mtk-cir.c
6411 @@ -35,6 +35,11 @@
6412 /* Fields containing pulse width data */
6413 #define MTK_WIDTH_MASK (GENMASK(7, 0))
6414
6415 +/* IR threshold */
6416 +#define MTK_IRTHD 0x14
6417 +#define MTK_DG_CNT_MASK (GENMASK(12, 8))
6418 +#define MTK_DG_CNT(x) ((x) << 8)
6419 +
6420 /* Bit to enable interrupt */
6421 #define MTK_IRINT_EN BIT(0)
6422
6423 @@ -398,6 +403,9 @@ static int mtk_ir_probe(struct platform_device *pdev)
6424 mtk_w32_mask(ir, val, ir->data->fields[MTK_HW_PERIOD].mask,
6425 ir->data->fields[MTK_HW_PERIOD].reg);
6426
6427 + /* Set de-glitch counter */
6428 + mtk_w32_mask(ir, MTK_DG_CNT(1), MTK_DG_CNT_MASK, MTK_IRTHD);
6429 +
6430 /* Enable IR and PWM */
6431 val = mtk_r32(ir, MTK_CONFIG_HIGH_REG);
6432 val |= MTK_OK_COUNT(ir->data->ok_count) | MTK_PWM_EN | MTK_IR_EN;
6433 diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
6434 index 17468f7d78ed..3ab80a7b4498 100644
6435 --- a/drivers/media/usb/cpia2/cpia2_usb.c
6436 +++ b/drivers/media/usb/cpia2/cpia2_usb.c
6437 @@ -676,6 +676,10 @@ static int submit_urbs(struct camera_data *cam)
6438 if (!urb) {
6439 for (j = 0; j < i; j++)
6440 usb_free_urb(cam->sbuf[j].urb);
6441 + for (j = 0; j < NUM_SBUF; j++) {
6442 + kfree(cam->sbuf[j].data);
6443 + cam->sbuf[j].data = NULL;
6444 + }
6445 return -ENOMEM;
6446 }
6447
6448 diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
6449 index 66d685065e06..ab7a100ec84f 100644
6450 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c
6451 +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
6452 @@ -2439,9 +2439,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
6453 8, 0x0486,
6454 };
6455
6456 + if (!IS_ENABLED(CONFIG_DVB_DIB9000))
6457 + return -ENODEV;
6458 if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL)
6459 return -ENODEV;
6460 i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
6461 + if (!i2c)
6462 + return -ENODEV;
6463 if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0)
6464 return -ENODEV;
6465 dib0700_set_i2c_speed(adap->dev, 1500);
6466 @@ -2517,10 +2521,14 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
6467 0, 0x00ef,
6468 8, 0x0406,
6469 };
6470 + if (!IS_ENABLED(CONFIG_DVB_DIB9000))
6471 + return -ENODEV;
6472 i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe);
6473 if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL)
6474 return -ENODEV;
6475 i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
6476 + if (!i2c)
6477 + return -ENODEV;
6478 if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0)
6479 return -ENODEV;
6480
6481 diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
6482 index d6b36e4f33d2..441d878fc22c 100644
6483 --- a/drivers/media/usb/dvb-usb/pctv452e.c
6484 +++ b/drivers/media/usb/dvb-usb/pctv452e.c
6485 @@ -909,14 +909,6 @@ static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
6486 &a->dev->i2c_adap);
6487 if (!a->fe_adap[0].fe)
6488 return -ENODEV;
6489 -
6490 - /*
6491 - * dvb_frontend will call dvb_detach for both stb0899_detach
6492 - * and stb0899_release but we only do dvb_attach(stb0899_attach).
6493 - * Increment the module refcount instead.
6494 - */
6495 - symbol_get(stb0899_attach);
6496 -
6497 if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe,
6498 &a->dev->i2c_adap)) == NULL)
6499 err("Cannot attach lnbp22\n");
6500 diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
6501 index 1283c7ca9ad5..1de835a591a0 100644
6502 --- a/drivers/media/usb/em28xx/em28xx-cards.c
6503 +++ b/drivers/media/usb/em28xx/em28xx-cards.c
6504 @@ -4020,7 +4020,6 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
6505 dev->dev_next->disconnected = 1;
6506 dev_info(&dev->intf->dev, "Disconnecting %s\n",
6507 dev->dev_next->name);
6508 - flush_request_modules(dev->dev_next);
6509 }
6510
6511 dev->disconnected = 1;
6512 diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
6513 index d8e40137a204..53db9a2895ea 100644
6514 --- a/drivers/media/usb/gspca/konica.c
6515 +++ b/drivers/media/usb/gspca/konica.c
6516 @@ -114,6 +114,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
6517 if (ret < 0) {
6518 pr_err("reg_r err %d\n", ret);
6519 gspca_dev->usb_err = ret;
6520 + /*
6521 + * Make sure the buffer is zeroed to avoid uninitialized
6522 + * values.
6523 + */
6524 + memset(gspca_dev->usb_buf, 0, 2);
6525 }
6526 }
6527
6528 diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c
6529 index 59649704beba..880f569bda30 100644
6530 --- a/drivers/media/usb/gspca/nw80x.c
6531 +++ b/drivers/media/usb/gspca/nw80x.c
6532 @@ -1572,6 +1572,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
6533 if (ret < 0) {
6534 pr_err("reg_r err %d\n", ret);
6535 gspca_dev->usb_err = ret;
6536 + /*
6537 + * Make sure the buffer is zeroed to avoid uninitialized
6538 + * values.
6539 + */
6540 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
6541 return;
6542 }
6543 if (len == 1)
6544 diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
6545 index cfb1f53bc17e..f417dfc0b872 100644
6546 --- a/drivers/media/usb/gspca/ov519.c
6547 +++ b/drivers/media/usb/gspca/ov519.c
6548 @@ -2073,6 +2073,11 @@ static int reg_r(struct sd *sd, u16 index)
6549 } else {
6550 gspca_err(gspca_dev, "reg_r %02x failed %d\n", index, ret);
6551 sd->gspca_dev.usb_err = ret;
6552 + /*
6553 + * Make sure the result is zeroed to avoid uninitialized
6554 + * values.
6555 + */
6556 + gspca_dev->usb_buf[0] = 0;
6557 }
6558
6559 return ret;
6560 @@ -2101,6 +2106,11 @@ static int reg_r8(struct sd *sd,
6561 } else {
6562 gspca_err(gspca_dev, "reg_r8 %02x failed %d\n", index, ret);
6563 sd->gspca_dev.usb_err = ret;
6564 + /*
6565 + * Make sure the buffer is zeroed to avoid uninitialized
6566 + * values.
6567 + */
6568 + memset(gspca_dev->usb_buf, 0, 8);
6569 }
6570
6571 return ret;
6572 diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
6573 index 56521c991db4..185c1f10fb30 100644
6574 --- a/drivers/media/usb/gspca/ov534.c
6575 +++ b/drivers/media/usb/gspca/ov534.c
6576 @@ -693,6 +693,11 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
6577 if (ret < 0) {
6578 pr_err("read failed %d\n", ret);
6579 gspca_dev->usb_err = ret;
6580 + /*
6581 + * Make sure the result is zeroed to avoid uninitialized
6582 + * values.
6583 + */
6584 + gspca_dev->usb_buf[0] = 0;
6585 }
6586 return gspca_dev->usb_buf[0];
6587 }
6588 diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c
6589 index 867f860a9650..91efc650cf76 100644
6590 --- a/drivers/media/usb/gspca/ov534_9.c
6591 +++ b/drivers/media/usb/gspca/ov534_9.c
6592 @@ -1145,6 +1145,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg)
6593 if (ret < 0) {
6594 pr_err("reg_r err %d\n", ret);
6595 gspca_dev->usb_err = ret;
6596 + return 0;
6597 }
6598 return gspca_dev->usb_buf[0];
6599 }
6600 diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
6601 index 061deee138c3..e087cfb5980b 100644
6602 --- a/drivers/media/usb/gspca/se401.c
6603 +++ b/drivers/media/usb/gspca/se401.c
6604 @@ -101,6 +101,11 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
6605 pr_err("read req failed req %#04x error %d\n",
6606 req, err);
6607 gspca_dev->usb_err = err;
6608 + /*
6609 + * Make sure the buffer is zeroed to avoid uninitialized
6610 + * values.
6611 + */
6612 + memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE);
6613 }
6614 }
6615
6616 diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
6617 index b43f89fee6c1..2a6d0a1265a7 100644
6618 --- a/drivers/media/usb/gspca/sn9c20x.c
6619 +++ b/drivers/media/usb/gspca/sn9c20x.c
6620 @@ -123,6 +123,13 @@ static const struct dmi_system_id flip_dmi_table[] = {
6621 DMI_MATCH(DMI_PRODUCT_VERSION, "0341")
6622 }
6623 },
6624 + {
6625 + .ident = "MSI MS-1039",
6626 + .matches = {
6627 + DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."),
6628 + DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"),
6629 + }
6630 + },
6631 {
6632 .ident = "MSI MS-1632",
6633 .matches = {
6634 @@ -909,6 +916,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length)
6635 if (unlikely(result < 0 || result != length)) {
6636 pr_err("Read register %02x failed %d\n", reg, result);
6637 gspca_dev->usb_err = result;
6638 + /*
6639 + * Make sure the buffer is zeroed to avoid uninitialized
6640 + * values.
6641 + */
6642 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
6643 }
6644 }
6645
6646 diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
6647 index 046fc2c2a135..4d655e2da9cb 100644
6648 --- a/drivers/media/usb/gspca/sonixb.c
6649 +++ b/drivers/media/usb/gspca/sonixb.c
6650 @@ -453,6 +453,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
6651 dev_err(gspca_dev->v4l2_dev.dev,
6652 "Error reading register %02x: %d\n", value, res);
6653 gspca_dev->usb_err = res;
6654 + /*
6655 + * Make sure the result is zeroed to avoid uninitialized
6656 + * values.
6657 + */
6658 + gspca_dev->usb_buf[0] = 0;
6659 }
6660 }
6661
6662 diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
6663 index 50a6c8425827..2e1bd2df8304 100644
6664 --- a/drivers/media/usb/gspca/sonixj.c
6665 +++ b/drivers/media/usb/gspca/sonixj.c
6666 @@ -1162,6 +1162,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
6667 if (ret < 0) {
6668 pr_err("reg_r err %d\n", ret);
6669 gspca_dev->usb_err = ret;
6670 + /*
6671 + * Make sure the buffer is zeroed to avoid uninitialized
6672 + * values.
6673 + */
6674 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
6675 }
6676 }
6677
6678 diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c
6679 index 2ae03b60163f..ccc477944ef8 100644
6680 --- a/drivers/media/usb/gspca/spca1528.c
6681 +++ b/drivers/media/usb/gspca/spca1528.c
6682 @@ -71,6 +71,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
6683 if (ret < 0) {
6684 pr_err("reg_r err %d\n", ret);
6685 gspca_dev->usb_err = ret;
6686 + /*
6687 + * Make sure the buffer is zeroed to avoid uninitialized
6688 + * values.
6689 + */
6690 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
6691 }
6692 }
6693
6694 diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c
6695 index d1ba0888d798..c3610247a90e 100644
6696 --- a/drivers/media/usb/gspca/sq930x.c
6697 +++ b/drivers/media/usb/gspca/sq930x.c
6698 @@ -425,6 +425,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
6699 if (ret < 0) {
6700 pr_err("reg_r %04x failed %d\n", value, ret);
6701 gspca_dev->usb_err = ret;
6702 + /*
6703 + * Make sure the buffer is zeroed to avoid uninitialized
6704 + * values.
6705 + */
6706 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
6707 }
6708 }
6709
6710 diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
6711 index d0ddfa957ca9..f4a4222f0d2e 100644
6712 --- a/drivers/media/usb/gspca/sunplus.c
6713 +++ b/drivers/media/usb/gspca/sunplus.c
6714 @@ -255,6 +255,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
6715 if (ret < 0) {
6716 pr_err("reg_r err %d\n", ret);
6717 gspca_dev->usb_err = ret;
6718 + /*
6719 + * Make sure the buffer is zeroed to avoid uninitialized
6720 + * values.
6721 + */
6722 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
6723 }
6724 }
6725
6726 diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c
6727 index 588a847ea483..4cb7c92ea132 100644
6728 --- a/drivers/media/usb/gspca/vc032x.c
6729 +++ b/drivers/media/usb/gspca/vc032x.c
6730 @@ -2906,6 +2906,11 @@ static void reg_r_i(struct gspca_dev *gspca_dev,
6731 if (ret < 0) {
6732 pr_err("reg_r err %d\n", ret);
6733 gspca_dev->usb_err = ret;
6734 + /*
6735 + * Make sure the buffer is zeroed to avoid uninitialized
6736 + * values.
6737 + */
6738 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
6739 }
6740 }
6741 static void reg_r(struct gspca_dev *gspca_dev,
6742 diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
6743 index 16b679c2de21..a8350ee9712f 100644
6744 --- a/drivers/media/usb/gspca/w996Xcf.c
6745 +++ b/drivers/media/usb/gspca/w996Xcf.c
6746 @@ -133,6 +133,11 @@ static int w9968cf_read_sb(struct sd *sd)
6747 } else {
6748 pr_err("Read SB reg [01] failed\n");
6749 sd->gspca_dev.usb_err = ret;
6750 + /*
6751 + * Make sure the buffer is zeroed to avoid uninitialized
6752 + * values.
6753 + */
6754 + memset(sd->gspca_dev.usb_buf, 0, 2);
6755 }
6756
6757 udelay(W9968CF_I2C_BUS_DELAY);
6758 diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
6759 index 9b9d894d29bc..b75c18a012a7 100644
6760 --- a/drivers/media/usb/hdpvr/hdpvr-core.c
6761 +++ b/drivers/media/usb/hdpvr/hdpvr-core.c
6762 @@ -137,6 +137,7 @@ static int device_authorization(struct hdpvr_device *dev)
6763
6764 dev->fw_ver = dev->usbc_buf[1];
6765
6766 + dev->usbc_buf[46] = '\0';
6767 v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n",
6768 dev->fw_ver, &dev->usbc_buf[2]);
6769
6770 @@ -271,6 +272,7 @@ static int hdpvr_probe(struct usb_interface *interface,
6771 #endif
6772 size_t buffer_size;
6773 int i;
6774 + int dev_num;
6775 int retval = -ENOMEM;
6776
6777 /* allocate memory for our device state and initialize it */
6778 @@ -368,8 +370,17 @@ static int hdpvr_probe(struct usb_interface *interface,
6779 }
6780 #endif
6781
6782 + dev_num = atomic_inc_return(&dev_nr);
6783 + if (dev_num >= HDPVR_MAX) {
6784 + v4l2_err(&dev->v4l2_dev,
6785 + "max device number reached, device register failed\n");
6786 + atomic_dec(&dev_nr);
6787 + retval = -ENODEV;
6788 + goto reg_fail;
6789 + }
6790 +
6791 retval = hdpvr_register_videodev(dev, &interface->dev,
6792 - video_nr[atomic_inc_return(&dev_nr)]);
6793 + video_nr[dev_num]);
6794 if (retval < 0) {
6795 v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
6796 goto reg_fail;
6797 diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
6798 index 1d0afa340f47..3198f9624b7c 100644
6799 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
6800 +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
6801 @@ -319,7 +319,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
6802
6803 dprintk("%s\n", __func__);
6804
6805 - b = kmalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
6806 + b = kzalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
6807 if (!b)
6808 return -ENOMEM;
6809
6810 diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
6811 index 7ef3e4d22bf6..939fc11cf080 100644
6812 --- a/drivers/media/v4l2-core/videobuf-core.c
6813 +++ b/drivers/media/v4l2-core/videobuf-core.c
6814 @@ -1123,7 +1123,6 @@ __poll_t videobuf_poll_stream(struct file *file,
6815 struct videobuf_buffer *buf = NULL;
6816 __poll_t rc = 0;
6817
6818 - poll_wait(file, &buf->done, wait);
6819 videobuf_queue_lock(q);
6820 if (q->streaming) {
6821 if (!list_empty(&q->stream))
6822 @@ -1143,7 +1142,9 @@ __poll_t videobuf_poll_stream(struct file *file,
6823 }
6824 buf = q->read_buf;
6825 }
6826 - if (!buf)
6827 + if (buf)
6828 + poll_wait(file, &buf->done, wait);
6829 + else
6830 rc = EPOLLERR;
6831
6832 if (0 == rc) {
6833 diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
6834 index 0bcc5e83bd1a..40109a615922 100644
6835 --- a/drivers/mmc/core/sdio_irq.c
6836 +++ b/drivers/mmc/core/sdio_irq.c
6837 @@ -31,6 +31,7 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
6838 {
6839 struct mmc_card *card = host->card;
6840 int i, ret, count;
6841 + bool sdio_irq_pending = host->sdio_irq_pending;
6842 unsigned char pending;
6843 struct sdio_func *func;
6844
6845 @@ -38,13 +39,16 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
6846 if (mmc_card_suspended(card))
6847 return 0;
6848
6849 + /* Clear the flag to indicate that we have processed the IRQ. */
6850 + host->sdio_irq_pending = false;
6851 +
6852 /*
6853 * Optimization, if there is only 1 function interrupt registered
6854 * and we know an IRQ was signaled then call irq handler directly.
6855 * Otherwise do the full probe.
6856 */
6857 func = card->sdio_single_irq;
6858 - if (func && host->sdio_irq_pending) {
6859 + if (func && sdio_irq_pending) {
6860 func->irq_handler(func);
6861 return 1;
6862 }
6863 @@ -96,7 +100,6 @@ static void sdio_run_irqs(struct mmc_host *host)
6864 {
6865 mmc_claim_host(host);
6866 if (host->sdio_irqs) {
6867 - host->sdio_irq_pending = true;
6868 process_sdio_pending_irqs(host);
6869 if (host->ops->ack_sdio_irq)
6870 host->ops->ack_sdio_irq(host);
6871 @@ -114,6 +117,7 @@ void sdio_irq_work(struct work_struct *work)
6872
6873 void sdio_signal_irq(struct mmc_host *host)
6874 {
6875 + host->sdio_irq_pending = true;
6876 queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
6877 }
6878 EXPORT_SYMBOL_GPL(sdio_signal_irq);
6879 @@ -159,7 +163,6 @@ static int sdio_irq_thread(void *_host)
6880 if (ret)
6881 break;
6882 ret = process_sdio_pending_irqs(host);
6883 - host->sdio_irq_pending = false;
6884 mmc_release_host(host);
6885
6886 /*
6887 diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
6888 index eea52e2c5a0c..79c55c7b4afd 100644
6889 --- a/drivers/mmc/host/dw_mmc.c
6890 +++ b/drivers/mmc/host/dw_mmc.c
6891 @@ -3460,6 +3460,10 @@ int dw_mci_runtime_resume(struct device *dev)
6892 /* Force setup bus to guarantee available clock output */
6893 dw_mci_setup_bus(host->slot, true);
6894
6895 + /* Re-enable SDIO interrupts. */
6896 + if (sdio_irq_claimed(host->slot->mmc))
6897 + __dw_mci_enable_sdio_irq(host->slot, 1);
6898 +
6899 /* Now that slots are all setup, we can enable card detect */
6900 dw_mci_enable_cd(host);
6901
6902 diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
6903 index 33f4b6387ef7..978c8ccce7e3 100644
6904 --- a/drivers/mmc/host/mtk-sd.c
6905 +++ b/drivers/mmc/host/mtk-sd.c
6906 @@ -2421,6 +2421,9 @@ static void msdc_restore_reg(struct msdc_host *host)
6907 } else {
6908 writel(host->save_para.pad_tune, host->base + tune_reg);
6909 }
6910 +
6911 + if (sdio_irq_claimed(host->mmc))
6912 + __msdc_enable_sdio_irq(host, 1);
6913 }
6914
6915 static int msdc_runtime_suspend(struct device *dev)
6916 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
6917 index a5dc5aae973e..c66e66fbaeb4 100644
6918 --- a/drivers/mmc/host/sdhci.c
6919 +++ b/drivers/mmc/host/sdhci.c
6920 @@ -1849,7 +1849,9 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
6921 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
6922 else if (timing == MMC_TIMING_UHS_SDR12)
6923 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
6924 - else if (timing == MMC_TIMING_UHS_SDR25)
6925 + else if (timing == MMC_TIMING_SD_HS ||
6926 + timing == MMC_TIMING_MMC_HS ||
6927 + timing == MMC_TIMING_UHS_SDR25)
6928 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
6929 else if (timing == MMC_TIMING_UHS_SDR50)
6930 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
6931 diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
6932 index e63acc077c18..8cc852dc7d54 100644
6933 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
6934 +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
6935 @@ -1427,21 +1427,16 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
6936 struct stm32_fmc2_timings *tims = &nand->timings;
6937 unsigned long hclk = clk_get_rate(fmc2->clk);
6938 unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
6939 - int tar, tclr, thiz, twait, tset_mem, tset_att, thold_mem, thold_att;
6940 -
6941 - tar = hclkp;
6942 - if (tar < sdrt->tAR_min)
6943 - tar = sdrt->tAR_min;
6944 - tims->tar = DIV_ROUND_UP(tar, hclkp) - 1;
6945 - if (tims->tar > FMC2_PCR_TIMING_MASK)
6946 - tims->tar = FMC2_PCR_TIMING_MASK;
6947 -
6948 - tclr = hclkp;
6949 - if (tclr < sdrt->tCLR_min)
6950 - tclr = sdrt->tCLR_min;
6951 - tims->tclr = DIV_ROUND_UP(tclr, hclkp) - 1;
6952 - if (tims->tclr > FMC2_PCR_TIMING_MASK)
6953 - tims->tclr = FMC2_PCR_TIMING_MASK;
6954 + unsigned long timing, tar, tclr, thiz, twait;
6955 + unsigned long tset_mem, tset_att, thold_mem, thold_att;
6956 +
6957 + tar = max_t(unsigned long, hclkp, sdrt->tAR_min);
6958 + timing = DIV_ROUND_UP(tar, hclkp) - 1;
6959 + tims->tar = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
6960 +
6961 + tclr = max_t(unsigned long, hclkp, sdrt->tCLR_min);
6962 + timing = DIV_ROUND_UP(tclr, hclkp) - 1;
6963 + tims->tclr = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
6964
6965 tims->thiz = FMC2_THIZ;
6966 thiz = (tims->thiz + 1) * hclkp;
6967 @@ -1451,18 +1446,11 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
6968 * tWAIT > tWP
6969 * tWAIT > tREA + tIO
6970 */
6971 - twait = hclkp;
6972 - if (twait < sdrt->tRP_min)
6973 - twait = sdrt->tRP_min;
6974 - if (twait < sdrt->tWP_min)
6975 - twait = sdrt->tWP_min;
6976 - if (twait < sdrt->tREA_max + FMC2_TIO)
6977 - twait = sdrt->tREA_max + FMC2_TIO;
6978 - tims->twait = DIV_ROUND_UP(twait, hclkp);
6979 - if (tims->twait == 0)
6980 - tims->twait = 1;
6981 - else if (tims->twait > FMC2_PMEM_PATT_TIMING_MASK)
6982 - tims->twait = FMC2_PMEM_PATT_TIMING_MASK;
6983 + twait = max_t(unsigned long, hclkp, sdrt->tRP_min);
6984 + twait = max_t(unsigned long, twait, sdrt->tWP_min);
6985 + twait = max_t(unsigned long, twait, sdrt->tREA_max + FMC2_TIO);
6986 + timing = DIV_ROUND_UP(twait, hclkp);
6987 + tims->twait = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
6988
6989 /*
6990 * tSETUP_MEM > tCS - tWAIT
6991 @@ -1477,20 +1465,15 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
6992 if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
6993 (tset_mem < sdrt->tDS_min - (twait - thiz)))
6994 tset_mem = sdrt->tDS_min - (twait - thiz);
6995 - tims->tset_mem = DIV_ROUND_UP(tset_mem, hclkp);
6996 - if (tims->tset_mem == 0)
6997 - tims->tset_mem = 1;
6998 - else if (tims->tset_mem > FMC2_PMEM_PATT_TIMING_MASK)
6999 - tims->tset_mem = FMC2_PMEM_PATT_TIMING_MASK;
7000 + timing = DIV_ROUND_UP(tset_mem, hclkp);
7001 + tims->tset_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
7002
7003 /*
7004 * tHOLD_MEM > tCH
7005 * tHOLD_MEM > tREH - tSETUP_MEM
7006 * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT)
7007 */
7008 - thold_mem = hclkp;
7009 - if (thold_mem < sdrt->tCH_min)
7010 - thold_mem = sdrt->tCH_min;
7011 + thold_mem = max_t(unsigned long, hclkp, sdrt->tCH_min);
7012 if (sdrt->tREH_min > tset_mem &&
7013 (thold_mem < sdrt->tREH_min - tset_mem))
7014 thold_mem = sdrt->tREH_min - tset_mem;
7015 @@ -1500,11 +1483,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
7016 if ((sdrt->tWC_min > tset_mem + twait) &&
7017 (thold_mem < sdrt->tWC_min - (tset_mem + twait)))
7018 thold_mem = sdrt->tWC_min - (tset_mem + twait);
7019 - tims->thold_mem = DIV_ROUND_UP(thold_mem, hclkp);
7020 - if (tims->thold_mem == 0)
7021 - tims->thold_mem = 1;
7022 - else if (tims->thold_mem > FMC2_PMEM_PATT_TIMING_MASK)
7023 - tims->thold_mem = FMC2_PMEM_PATT_TIMING_MASK;
7024 + timing = DIV_ROUND_UP(thold_mem, hclkp);
7025 + tims->thold_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
7026
7027 /*
7028 * tSETUP_ATT > tCS - tWAIT
7029 @@ -1526,11 +1506,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
7030 if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
7031 (tset_att < sdrt->tDS_min - (twait - thiz)))
7032 tset_att = sdrt->tDS_min - (twait - thiz);
7033 - tims->tset_att = DIV_ROUND_UP(tset_att, hclkp);
7034 - if (tims->tset_att == 0)
7035 - tims->tset_att = 1;
7036 - else if (tims->tset_att > FMC2_PMEM_PATT_TIMING_MASK)
7037 - tims->tset_att = FMC2_PMEM_PATT_TIMING_MASK;
7038 + timing = DIV_ROUND_UP(tset_att, hclkp);
7039 + tims->tset_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
7040
7041 /*
7042 * tHOLD_ATT > tALH
7043 @@ -1545,17 +1522,11 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
7044 * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT)
7045 * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT)
7046 */
7047 - thold_att = hclkp;
7048 - if (thold_att < sdrt->tALH_min)
7049 - thold_att = sdrt->tALH_min;
7050 - if (thold_att < sdrt->tCH_min)
7051 - thold_att = sdrt->tCH_min;
7052 - if (thold_att < sdrt->tCLH_min)
7053 - thold_att = sdrt->tCLH_min;
7054 - if (thold_att < sdrt->tCOH_min)
7055 - thold_att = sdrt->tCOH_min;
7056 - if (thold_att < sdrt->tDH_min)
7057 - thold_att = sdrt->tDH_min;
7058 + thold_att = max_t(unsigned long, hclkp, sdrt->tALH_min);
7059 + thold_att = max_t(unsigned long, thold_att, sdrt->tCH_min);
7060 + thold_att = max_t(unsigned long, thold_att, sdrt->tCLH_min);
7061 + thold_att = max_t(unsigned long, thold_att, sdrt->tCOH_min);
7062 + thold_att = max_t(unsigned long, thold_att, sdrt->tDH_min);
7063 if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) &&
7064 (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem))
7065 thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem;
7066 @@ -1574,11 +1545,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
7067 if ((sdrt->tWC_min > tset_att + twait) &&
7068 (thold_att < sdrt->tWC_min - (tset_att + twait)))
7069 thold_att = sdrt->tWC_min - (tset_att + twait);
7070 - tims->thold_att = DIV_ROUND_UP(thold_att, hclkp);
7071 - if (tims->thold_att == 0)
7072 - tims->thold_att = 1;
7073 - else if (tims->thold_att > FMC2_PMEM_PATT_TIMING_MASK)
7074 - tims->thold_att = FMC2_PMEM_PATT_TIMING_MASK;
7075 + timing = DIV_ROUND_UP(thold_att, hclkp);
7076 + tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
7077 }
7078
7079 static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
7080 diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
7081 index 8459115d9d4e..553776cc1d29 100644
7082 --- a/drivers/net/arcnet/arcnet.c
7083 +++ b/drivers/net/arcnet/arcnet.c
7084 @@ -1063,31 +1063,34 @@ EXPORT_SYMBOL(arcnet_interrupt);
7085 static void arcnet_rx(struct net_device *dev, int bufnum)
7086 {
7087 struct arcnet_local *lp = netdev_priv(dev);
7088 - struct archdr pkt;
7089 + union {
7090 + struct archdr pkt;
7091 + char buf[512];
7092 + } rxdata;
7093 struct arc_rfc1201 *soft;
7094 int length, ofs;
7095
7096 - soft = &pkt.soft.rfc1201;
7097 + soft = &rxdata.pkt.soft.rfc1201;
7098
7099 - lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
7100 - if (pkt.hard.offset[0]) {
7101 - ofs = pkt.hard.offset[0];
7102 + lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE);
7103 + if (rxdata.pkt.hard.offset[0]) {
7104 + ofs = rxdata.pkt.hard.offset[0];
7105 length = 256 - ofs;
7106 } else {
7107 - ofs = pkt.hard.offset[1];
7108 + ofs = rxdata.pkt.hard.offset[1];
7109 length = 512 - ofs;
7110 }
7111
7112 /* get the full header, if possible */
7113 - if (sizeof(pkt.soft) <= length) {
7114 - lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
7115 + if (sizeof(rxdata.pkt.soft) <= length) {
7116 + lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft));
7117 } else {
7118 - memset(&pkt.soft, 0, sizeof(pkt.soft));
7119 + memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft));
7120 lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
7121 }
7122
7123 arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n",
7124 - bufnum, pkt.hard.source, pkt.hard.dest, length);
7125 + bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length);
7126
7127 dev->stats.rx_packets++;
7128 dev->stats.rx_bytes += length + ARC_HDR_SIZE;
7129 @@ -1096,13 +1099,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
7130 if (arc_proto_map[soft->proto]->is_ip) {
7131 if (BUGLVL(D_PROTO)) {
7132 struct ArcProto
7133 - *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
7134 + *oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]],
7135 *newp = arc_proto_map[soft->proto];
7136
7137 if (oldp != newp) {
7138 arc_printk(D_PROTO, dev,
7139 "got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n",
7140 - soft->proto, pkt.hard.source,
7141 + soft->proto, rxdata.pkt.hard.source,
7142 newp->suffix, oldp->suffix);
7143 }
7144 }
7145 @@ -1111,10 +1114,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
7146 lp->default_proto[0] = soft->proto;
7147
7148 /* in striking contrast, the following isn't a hack. */
7149 - lp->default_proto[pkt.hard.source] = soft->proto;
7150 + lp->default_proto[rxdata.pkt.hard.source] = soft->proto;
7151 }
7152 /* call the protocol-specific receiver. */
7153 - arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
7154 + arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length);
7155 }
7156
7157 static void null_rx(struct net_device *dev, int bufnum,
7158 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
7159 index 395b05701480..a1fab77b2096 100644
7160 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
7161 +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
7162 @@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
7163 else
7164 phy_reg |= 0xFA;
7165 e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
7166 +
7167 + if (speed == SPEED_1000) {
7168 + hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
7169 + &phy_reg);
7170 +
7171 + phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
7172 +
7173 + hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
7174 + phy_reg);
7175 + }
7176 }
7177 hw->phy.ops.release(hw);
7178
7179 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
7180 index eb09c755fa17..1502895eb45d 100644
7181 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
7182 +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
7183 @@ -210,7 +210,7 @@
7184
7185 /* PHY Power Management Control */
7186 #define HV_PM_CTRL PHY_REG(770, 17)
7187 -#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
7188 +#define HV_PM_CTRL_K1_CLK_REQ 0x200
7189 #define HV_PM_CTRL_K1_ENABLE 0x4000
7190
7191 #define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
7192 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
7193 index 9ebbe3da61bb..d22491ce73e6 100644
7194 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
7195 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
7196 @@ -2583,6 +2583,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
7197 return;
7198 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
7199 return;
7200 + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
7201 + set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
7202 + return;
7203 + }
7204
7205 for (v = 0; v < pf->num_alloc_vsi; v++) {
7206 if (pf->vsi[v] &&
7207 @@ -2597,6 +2601,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
7208 }
7209 }
7210 }
7211 + clear_bit(__I40E_VF_DISABLE, pf->state);
7212 }
7213
7214 /**
7215 diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
7216 index 9ac854c2b371..697321898e84 100644
7217 --- a/drivers/net/ethernet/marvell/skge.c
7218 +++ b/drivers/net/ethernet/marvell/skge.c
7219 @@ -3108,7 +3108,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
7220 skb_put(skb, len);
7221
7222 if (dev->features & NETIF_F_RXCSUM) {
7223 - skb->csum = csum;
7224 + skb->csum = le16_to_cpu(csum);
7225 skb->ip_summed = CHECKSUM_COMPLETE;
7226 }
7227
7228 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
7229 index 94304abc49e9..39e90b873319 100644
7230 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
7231 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
7232 @@ -399,10 +399,10 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
7233 struct mlx5_flow_table *ft,
7234 struct ethtool_rx_flow_spec *fs)
7235 {
7236 + struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
7237 struct mlx5_flow_destination *dst = NULL;
7238 - struct mlx5_flow_act flow_act = {0};
7239 - struct mlx5_flow_spec *spec;
7240 struct mlx5_flow_handle *rule;
7241 + struct mlx5_flow_spec *spec;
7242 int err = 0;
7243
7244 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
7245 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
7246 index 00b2d4a86159..98be5fe33674 100644
7247 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
7248 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
7249 @@ -1369,46 +1369,63 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
7250 return err;
7251 }
7252
7253 - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
7254 - struct flow_match_ipv4_addrs match;
7255 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
7256 + struct flow_match_control match;
7257 + u16 addr_type;
7258
7259 - flow_rule_match_enc_ipv4_addrs(rule, &match);
7260 - MLX5_SET(fte_match_set_lyr_2_4, headers_c,
7261 - src_ipv4_src_ipv6.ipv4_layout.ipv4,
7262 - ntohl(match.mask->src));
7263 - MLX5_SET(fte_match_set_lyr_2_4, headers_v,
7264 - src_ipv4_src_ipv6.ipv4_layout.ipv4,
7265 - ntohl(match.key->src));
7266 -
7267 - MLX5_SET(fte_match_set_lyr_2_4, headers_c,
7268 - dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
7269 - ntohl(match.mask->dst));
7270 - MLX5_SET(fte_match_set_lyr_2_4, headers_v,
7271 - dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
7272 - ntohl(match.key->dst));
7273 -
7274 - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
7275 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
7276 - } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
7277 - struct flow_match_ipv6_addrs match;
7278 + flow_rule_match_enc_control(rule, &match);
7279 + addr_type = match.key->addr_type;
7280
7281 - flow_rule_match_enc_ipv6_addrs(rule, &match);
7282 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
7283 - src_ipv4_src_ipv6.ipv6_layout.ipv6),
7284 - &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
7285 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7286 - src_ipv4_src_ipv6.ipv6_layout.ipv6),
7287 - &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
7288 + /* For tunnel addr_type used same key id`s as for non-tunnel */
7289 + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7290 + struct flow_match_ipv4_addrs match;
7291
7292 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
7293 - dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
7294 - &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
7295 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7296 - dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
7297 - &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
7298 + flow_rule_match_enc_ipv4_addrs(rule, &match);
7299 + MLX5_SET(fte_match_set_lyr_2_4, headers_c,
7300 + src_ipv4_src_ipv6.ipv4_layout.ipv4,
7301 + ntohl(match.mask->src));
7302 + MLX5_SET(fte_match_set_lyr_2_4, headers_v,
7303 + src_ipv4_src_ipv6.ipv4_layout.ipv4,
7304 + ntohl(match.key->src));
7305
7306 - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
7307 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
7308 + MLX5_SET(fte_match_set_lyr_2_4, headers_c,
7309 + dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
7310 + ntohl(match.mask->dst));
7311 + MLX5_SET(fte_match_set_lyr_2_4, headers_v,
7312 + dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
7313 + ntohl(match.key->dst));
7314 +
7315 + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
7316 + ethertype);
7317 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
7318 + ETH_P_IP);
7319 + } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7320 + struct flow_match_ipv6_addrs match;
7321 +
7322 + flow_rule_match_enc_ipv6_addrs(rule, &match);
7323 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
7324 + src_ipv4_src_ipv6.ipv6_layout.ipv6),
7325 + &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
7326 + ipv6));
7327 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7328 + src_ipv4_src_ipv6.ipv6_layout.ipv6),
7329 + &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
7330 + ipv6));
7331 +
7332 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
7333 + dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
7334 + &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
7335 + ipv6));
7336 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7337 + dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
7338 + &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
7339 + ipv6));
7340 +
7341 + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
7342 + ethertype);
7343 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
7344 + ETH_P_IPV6);
7345 + }
7346 }
7347
7348 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
7349 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
7350 index b15b27a497fc..fda4964c5cf4 100644
7351 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
7352 +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
7353 @@ -1554,6 +1554,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
7354 { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
7355 { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
7356 { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
7357 + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
7358 { 0, }
7359 };
7360
7361 diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
7362 index eb846133943b..acb02e1513f2 100644
7363 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c
7364 +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
7365 @@ -400,6 +400,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
7366 repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
7367 if (!repr_priv) {
7368 err = -ENOMEM;
7369 + nfp_repr_free(repr);
7370 goto err_reprs_clean;
7371 }
7372
7373 @@ -413,6 +414,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
7374 port = nfp_port_alloc(app, port_type, repr);
7375 if (IS_ERR(port)) {
7376 err = PTR_ERR(port);
7377 + kfree(repr_priv);
7378 nfp_repr_free(repr);
7379 goto err_reprs_clean;
7380 }
7381 @@ -433,6 +435,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
7382 err = nfp_repr_init(app, repr,
7383 port_id, port, priv->nn->dp.netdev);
7384 if (err) {
7385 + kfree(repr_priv);
7386 nfp_port_free(port);
7387 nfp_repr_free(repr);
7388 goto err_reprs_clean;
7389 @@ -515,6 +518,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
7390 repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
7391 if (!repr_priv) {
7392 err = -ENOMEM;
7393 + nfp_repr_free(repr);
7394 goto err_reprs_clean;
7395 }
7396
7397 @@ -525,11 +529,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
7398 port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
7399 if (IS_ERR(port)) {
7400 err = PTR_ERR(port);
7401 + kfree(repr_priv);
7402 nfp_repr_free(repr);
7403 goto err_reprs_clean;
7404 }
7405 err = nfp_port_init_phy_port(app->pf, app, port, i);
7406 if (err) {
7407 + kfree(repr_priv);
7408 nfp_port_free(port);
7409 nfp_repr_free(repr);
7410 goto err_reprs_clean;
7411 @@ -542,6 +548,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
7412 err = nfp_repr_init(app, repr,
7413 cmsg_port_id, port, priv->nn->dp.netdev);
7414 if (err) {
7415 + kfree(repr_priv);
7416 nfp_port_free(port);
7417 nfp_repr_free(repr);
7418 goto err_reprs_clean;
7419 diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
7420 index f7e11f1b0426..b0c8be127bee 100644
7421 --- a/drivers/net/ethernet/nxp/lpc_eth.c
7422 +++ b/drivers/net/ethernet/nxp/lpc_eth.c
7423 @@ -1344,13 +1344,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
7424 pldat->dma_buff_base_p = dma_handle;
7425
7426 netdev_dbg(ndev, "IO address space :%pR\n", res);
7427 - netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
7428 + netdev_dbg(ndev, "IO address size :%zd\n",
7429 + (size_t)resource_size(res));
7430 netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
7431 pldat->net_base);
7432 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
7433 - netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
7434 - netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
7435 - pldat->dma_buff_base_p);
7436 + netdev_dbg(ndev, "DMA buffer size :%zd\n", pldat->dma_buff_size);
7437 + netdev_dbg(ndev, "DMA buffer P address :%pad\n",
7438 + &pldat->dma_buff_base_p);
7439 netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
7440 pldat->dma_buff_base_v);
7441
7442 @@ -1397,8 +1398,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
7443 if (ret)
7444 goto err_out_unregister_netdev;
7445
7446 - netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
7447 - res->start, ndev->irq);
7448 + netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n",
7449 + (unsigned long)res->start, ndev->irq);
7450
7451 device_init_wakeup(dev, 1);
7452 device_set_wakeup_enable(dev, 0);
7453 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7454 index b19ab09cb18f..5c4408bdc843 100644
7455 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7456 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7457 @@ -1532,13 +1532,15 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
7458 for (queue = 0; queue < rx_count; queue++) {
7459 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
7460 struct page_pool_params pp_params = { 0 };
7461 + unsigned int num_pages;
7462
7463 rx_q->queue_index = queue;
7464 rx_q->priv_data = priv;
7465
7466 pp_params.flags = PP_FLAG_DMA_MAP;
7467 pp_params.pool_size = DMA_RX_SIZE;
7468 - pp_params.order = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
7469 + num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
7470 + pp_params.order = ilog2(num_pages);
7471 pp_params.nid = dev_to_node(priv->device);
7472 pp_params.dev = priv->device;
7473 pp_params.dma_dir = DMA_FROM_DEVICE;
7474 diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
7475 index 8f46aa1ddec0..cb7637364b40 100644
7476 --- a/drivers/net/macsec.c
7477 +++ b/drivers/net/macsec.c
7478 @@ -1235,6 +1235,7 @@ deliver:
7479 macsec_rxsa_put(rx_sa);
7480 macsec_rxsc_put(rx_sc);
7481
7482 + skb_orphan(skb);
7483 ret = gro_cells_receive(&macsec->gro_cells, skb);
7484 if (ret == NET_RX_SUCCESS)
7485 count_rx(dev, skb->len);
7486 diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
7487 index 3c8186f269f9..2fea5541c35a 100644
7488 --- a/drivers/net/phy/micrel.c
7489 +++ b/drivers/net/phy/micrel.c
7490 @@ -763,6 +763,8 @@ static int ksz9031_get_features(struct phy_device *phydev)
7491 * Whenever the device's Asymmetric Pause capability is set to 1,
7492 * link-up may fail after a link-up to link-down transition.
7493 *
7494 + * The Errata Sheet is for ksz9031, but ksz9021 has the same issue
7495 + *
7496 * Workaround:
7497 * Do not enable the Asymmetric Pause capability bit.
7498 */
7499 @@ -1076,6 +1078,7 @@ static struct phy_driver ksphy_driver[] = {
7500 /* PHY_GBIT_FEATURES */
7501 .driver_data = &ksz9021_type,
7502 .probe = kszphy_probe,
7503 + .get_features = ksz9031_get_features,
7504 .config_init = ksz9021_config_init,
7505 .ack_interrupt = kszphy_ack_interrupt,
7506 .config_intr = kszphy_config_intr,
7507 diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
7508 index a221dd552c3c..a5bf0874c7d8 100644
7509 --- a/drivers/net/phy/national.c
7510 +++ b/drivers/net/phy/national.c
7511 @@ -105,14 +105,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
7512
7513 static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
7514 {
7515 + u16 lb_dis = BIT(1);
7516 +
7517 if (disable)
7518 - ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
7519 + ns_exp_write(phydev, 0x1c0,
7520 + ns_exp_read(phydev, 0x1c0) | lb_dis);
7521 else
7522 ns_exp_write(phydev, 0x1c0,
7523 - ns_exp_read(phydev, 0x1c0) & 0xfffe);
7524 + ns_exp_read(phydev, 0x1c0) & ~lb_dis);
7525
7526 pr_debug("10BASE-T HDX loopback %s\n",
7527 - (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
7528 + (ns_exp_read(phydev, 0x1c0) & lb_dis) ? "off" : "on");
7529 }
7530
7531 static int ns_config_init(struct phy_device *phydev)
7532 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
7533 index a30e41a56085..9a1b006904a7 100644
7534 --- a/drivers/net/ppp/ppp_generic.c
7535 +++ b/drivers/net/ppp/ppp_generic.c
7536 @@ -1415,6 +1415,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
7537 netif_wake_queue(ppp->dev);
7538 else
7539 netif_stop_queue(ppp->dev);
7540 + } else {
7541 + kfree_skb(skb);
7542 }
7543 ppp_xmit_unlock(ppp);
7544 }
7545 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
7546 index 50c05d0f44cb..00cab3f43a4c 100644
7547 --- a/drivers/net/usb/cdc_ncm.c
7548 +++ b/drivers/net/usb/cdc_ncm.c
7549 @@ -681,8 +681,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
7550 u8 ep;
7551
7552 for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
7553 -
7554 e = intf->cur_altsetting->endpoint + ep;
7555 +
7556 + /* ignore endpoints which cannot transfer data */
7557 + if (!usb_endpoint_maxp(&e->desc))
7558 + continue;
7559 +
7560 switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
7561 case USB_ENDPOINT_XFER_INT:
7562 if (usb_endpoint_dir_in(&e->desc)) {
7563 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
7564 index 72514c46b478..ef1d667b0108 100644
7565 --- a/drivers/net/usb/usbnet.c
7566 +++ b/drivers/net/usb/usbnet.c
7567 @@ -100,6 +100,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
7568 int intr = 0;
7569
7570 e = alt->endpoint + ep;
7571 +
7572 + /* ignore endpoints which cannot transfer data */
7573 + if (!usb_endpoint_maxp(&e->desc))
7574 + continue;
7575 +
7576 switch (e->desc.bmAttributes) {
7577 case USB_ENDPOINT_XFER_INT:
7578 if (!usb_endpoint_dir_in(&e->desc))
7579 @@ -339,6 +344,8 @@ void usbnet_update_max_qlen(struct usbnet *dev)
7580 {
7581 enum usb_device_speed speed = dev->udev->speed;
7582
7583 + if (!dev->rx_urb_size || !dev->hard_mtu)
7584 + goto insanity;
7585 switch (speed) {
7586 case USB_SPEED_HIGH:
7587 dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
7588 @@ -355,6 +362,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
7589 dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
7590 break;
7591 default:
7592 +insanity:
7593 dev->rx_qlen = dev->tx_qlen = 4;
7594 }
7595 }
7596 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
7597 index 6e84328bdd40..a4b38a980c3c 100644
7598 --- a/drivers/net/vrf.c
7599 +++ b/drivers/net/vrf.c
7600 @@ -1154,7 +1154,8 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
7601 struct sk_buff *skb;
7602 int err;
7603
7604 - if (family == AF_INET6 && !ipv6_mod_enabled())
7605 + if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) &&
7606 + !ipv6_mod_enabled())
7607 return 0;
7608
7609 skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
7610 diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
7611 index 2985bb17decd..4d5d10c01064 100644
7612 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
7613 +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
7614 @@ -841,7 +841,7 @@ static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
7615 struct wmi_ch_info_ev_arg *arg)
7616 {
7617 const void **tb;
7618 - const struct wmi_chan_info_event *ev;
7619 + const struct wmi_tlv_chan_info_event *ev;
7620 int ret;
7621
7622 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
7623 diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
7624 index d691f06e58f2..649b229a41e9 100644
7625 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
7626 +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
7627 @@ -1615,6 +1615,22 @@ struct chan_info_params {
7628
7629 #define WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL BIT(9)
7630
7631 +struct wmi_tlv_chan_info_event {
7632 + __le32 err_code;
7633 + __le32 freq;
7634 + __le32 cmd_flags;
7635 + __le32 noise_floor;
7636 + __le32 rx_clear_count;
7637 + __le32 cycle_count;
7638 + __le32 chan_tx_pwr_range;
7639 + __le32 chan_tx_pwr_tp;
7640 + __le32 rx_frame_count;
7641 + __le32 my_bss_rx_cycle_count;
7642 + __le32 rx_11b_mode_data_duration;
7643 + __le32 tx_frame_cnt;
7644 + __le32 mac_clk_mhz;
7645 +} __packed;
7646 +
7647 struct wmi_tlv_mgmt_tx_compl_ev {
7648 __le32 desc_id;
7649 __le32 status;
7650 diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
7651 index 838768c98adc..e80dbe7e8f4c 100644
7652 --- a/drivers/net/wireless/ath/ath10k/wmi.h
7653 +++ b/drivers/net/wireless/ath/ath10k/wmi.h
7654 @@ -6533,14 +6533,6 @@ struct wmi_chan_info_event {
7655 __le32 noise_floor;
7656 __le32 rx_clear_count;
7657 __le32 cycle_count;
7658 - __le32 chan_tx_pwr_range;
7659 - __le32 chan_tx_pwr_tp;
7660 - __le32 rx_frame_count;
7661 - __le32 my_bss_rx_cycle_count;
7662 - __le32 rx_11b_mode_data_duration;
7663 - __le32 tx_frame_cnt;
7664 - __le32 mac_clk_mhz;
7665 -
7666 } __packed;
7667
7668 struct wmi_10_4_chan_info_event {
7669 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
7670 index 5de54d1559dd..8b0b464a1f21 100644
7671 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
7672 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
7673 @@ -887,11 +887,13 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
7674 * firmware versions. Unfortunately, we don't have a TLV API
7675 * flag to rely on, so rely on the major version which is in
7676 * the first byte of ucode_ver. This was implemented
7677 - * initially on version 38 and then backported to 36, 29 and
7678 - * 17.
 7679 + * initially on version 38 and then backported to 29 and 17.
7680 + * The intention was to have it in 36 as well, but not all
7681 + * 8000 family got this feature enabled. The 8000 family is
7682 + * the only one using version 36, so skip this version
7683 + * entirely.
7684 */
7685 return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
7686 - IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
7687 IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
7688 IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
7689 }
7690 diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
7691 index afac2481909b..20436a289d5c 100644
7692 --- a/drivers/net/wireless/marvell/libertas/if_usb.c
7693 +++ b/drivers/net/wireless/marvell/libertas/if_usb.c
7694 @@ -50,7 +50,8 @@ static const struct lbs_fw_table fw_table[] = {
7695 { MODEL_8388, "libertas/usb8388_v5.bin", NULL },
7696 { MODEL_8388, "libertas/usb8388.bin", NULL },
7697 { MODEL_8388, "usb8388.bin", NULL },
7698 - { MODEL_8682, "libertas/usb8682.bin", NULL }
7699 + { MODEL_8682, "libertas/usb8682.bin", NULL },
7700 + { 0, NULL, NULL }
7701 };
7702
7703 static const struct usb_device_id if_usb_table[] = {
7704 diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
7705 index 38368d19aa6f..83c96a47914f 100644
7706 --- a/drivers/net/wireless/mediatek/mt76/mmio.c
7707 +++ b/drivers/net/wireless/mediatek/mt76/mmio.c
7708 @@ -43,7 +43,7 @@ static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
7709 static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data,
7710 int len)
7711 {
7712 - __iowrite32_copy(dev->mmio.regs + offset, data, len >> 2);
7713 + __iowrite32_copy(dev->mmio.regs + offset, data, DIV_ROUND_UP(len, 4));
7714 }
7715
7716 static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base,
7717 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
7718 index cdad2c8dc297..b941fa4a1bcd 100644
7719 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
7720 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
7721 @@ -257,9 +257,8 @@ static int mt7615_driver_own(struct mt7615_dev *dev)
7722
7723 static int mt7615_load_patch(struct mt7615_dev *dev)
7724 {
7725 - const struct firmware *fw;
7726 const struct mt7615_patch_hdr *hdr;
7727 - const char *firmware = MT7615_ROM_PATCH;
7728 + const struct firmware *fw = NULL;
7729 int len, ret, sem;
7730
7731 sem = mt7615_mcu_patch_sem_ctrl(dev, 1);
7732 @@ -273,9 +272,9 @@ static int mt7615_load_patch(struct mt7615_dev *dev)
7733 return -EAGAIN;
7734 }
7735
7736 - ret = request_firmware(&fw, firmware, dev->mt76.dev);
7737 + ret = request_firmware(&fw, MT7615_ROM_PATCH, dev->mt76.dev);
7738 if (ret)
7739 - return ret;
7740 + goto out;
7741
7742 if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
7743 dev_err(dev->mt76.dev, "Invalid firmware\n");
7744 @@ -339,14 +338,12 @@ static u32 gen_dl_mode(u8 feature_set, bool is_cr4)
7745
7746 static int mt7615_load_ram(struct mt7615_dev *dev)
7747 {
7748 - const struct firmware *fw;
7749 const struct mt7615_fw_trailer *hdr;
7750 - const char *n9_firmware = MT7615_FIRMWARE_N9;
7751 - const char *cr4_firmware = MT7615_FIRMWARE_CR4;
7752 u32 n9_ilm_addr, offset;
7753 int i, ret;
7754 + const struct firmware *fw;
7755
7756 - ret = request_firmware(&fw, n9_firmware, dev->mt76.dev);
7757 + ret = request_firmware(&fw, MT7615_FIRMWARE_N9, dev->mt76.dev);
7758 if (ret)
7759 return ret;
7760
7761 @@ -394,7 +391,7 @@ static int mt7615_load_ram(struct mt7615_dev *dev)
7762
7763 release_firmware(fw);
7764
7765 - ret = request_firmware(&fw, cr4_firmware, dev->mt76.dev);
7766 + ret = request_firmware(&fw, MT7615_FIRMWARE_CR4, dev->mt76.dev);
7767 if (ret)
7768 return ret;
7769
7770 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
7771 index f02ffcffe637..f83615dbe1c5 100644
7772 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
7773 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
7774 @@ -25,9 +25,9 @@
7775 #define MT7615_RX_RING_SIZE 1024
7776 #define MT7615_RX_MCU_RING_SIZE 512
7777
7778 -#define MT7615_FIRMWARE_CR4 "mt7615_cr4.bin"
7779 -#define MT7615_FIRMWARE_N9 "mt7615_n9.bin"
7780 -#define MT7615_ROM_PATCH "mt7615_rom_patch.bin"
7781 +#define MT7615_FIRMWARE_CR4 "mediatek/mt7615_cr4.bin"
7782 +#define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin"
7783 +#define MT7615_ROM_PATCH "mediatek/mt7615_rom_patch.bin"
7784
7785 #define MT7615_EEPROM_SIZE 1024
7786 #define MT7615_TOKEN_SIZE 4096
7787 diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
7788 index fb87ce7fbdf6..185eea83aada 100644
7789 --- a/drivers/net/wireless/mediatek/mt76/usb.c
7790 +++ b/drivers/net/wireless/mediatek/mt76/usb.c
7791 @@ -164,7 +164,7 @@ static void mt76u_copy(struct mt76_dev *dev, u32 offset,
7792 int i, ret;
7793
7794 mutex_lock(&usb->usb_ctrl_mtx);
7795 - for (i = 0; i < (len / 4); i++) {
7796 + for (i = 0; i < DIV_ROUND_UP(len, 4); i++) {
7797 put_unaligned_le32(val[i], usb->data);
7798 ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
7799 USB_DIR_OUT | USB_TYPE_VENDOR,
7800 diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
7801 index 353871c27779..23dd06afef3d 100644
7802 --- a/drivers/net/wireless/realtek/rtw88/pci.c
7803 +++ b/drivers/net/wireless/realtek/rtw88/pci.c
7804 @@ -206,6 +206,23 @@ static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
7805 return 0;
7806 }
7807
7808 +static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
7809 + struct rtw_pci_rx_ring *rx_ring,
7810 + u32 idx, u32 desc_sz)
7811 +{
7812 + struct device *dev = rtwdev->dev;
7813 + struct rtw_pci_rx_buffer_desc *buf_desc;
7814 + int buf_sz = RTK_PCI_RX_BUF_SIZE;
7815 +
7816 + dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
7817 +
7818 + buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
7819 + idx * desc_sz);
7820 + memset(buf_desc, 0, sizeof(*buf_desc));
7821 + buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
7822 + buf_desc->dma = cpu_to_le32(dma);
7823 +}
7824 +
7825 static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
7826 struct rtw_pci_rx_ring *rx_ring,
7827 u8 desc_size, u32 len)
7828 @@ -765,6 +782,7 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
7829 u32 pkt_offset;
7830 u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
7831 u32 buf_desc_sz = chip->rx_buf_desc_sz;
7832 + u32 new_len;
7833 u8 *rx_desc;
7834 dma_addr_t dma;
7835
7836 @@ -783,8 +801,8 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
7837 rtw_pci_dma_check(rtwdev, ring, cur_rp);
7838 skb = ring->buf[cur_rp];
7839 dma = *((dma_addr_t *)skb->cb);
7840 - pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE,
7841 - PCI_DMA_FROMDEVICE);
7842 + dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
7843 + DMA_FROM_DEVICE);
7844 rx_desc = skb->data;
7845 chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
7846
7847 @@ -792,40 +810,35 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
7848 pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
7849 pkt_stat.shift;
7850
7851 - if (pkt_stat.is_c2h) {
7852 - /* keep rx_desc, halmac needs it */
7853 - skb_put(skb, pkt_stat.pkt_len + pkt_offset);
7854 + /* allocate a new skb for this frame,
7855 + * discard the frame if none available
7856 + */
7857 + new_len = pkt_stat.pkt_len + pkt_offset;
7858 + new = dev_alloc_skb(new_len);
7859 + if (WARN_ONCE(!new, "rx routine starvation\n"))
7860 + goto next_rp;
7861
7862 - /* pass offset for further operation */
7863 - *((u32 *)skb->cb) = pkt_offset;
7864 - skb_queue_tail(&rtwdev->c2h_queue, skb);
7865 + /* put the DMA data including rx_desc from phy to new skb */
7866 + skb_put_data(new, skb->data, new_len);
7867 +
7868 + if (pkt_stat.is_c2h) {
7869 + /* pass rx_desc & offset for further operation */
7870 + *((u32 *)new->cb) = pkt_offset;
7871 + skb_queue_tail(&rtwdev->c2h_queue, new);
7872 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
7873 } else {
7874 - /* remove rx_desc, maybe use skb_pull? */
7875 - skb_put(skb, pkt_stat.pkt_len);
7876 - skb_reserve(skb, pkt_offset);
7877 -
7878 - /* alloc a smaller skb to mac80211 */
7879 - new = dev_alloc_skb(pkt_stat.pkt_len);
7880 - if (!new) {
7881 - new = skb;
7882 - } else {
7883 - skb_put_data(new, skb->data, skb->len);
7884 - dev_kfree_skb_any(skb);
7885 - }
7886 - /* TODO: merge into rx.c */
7887 - rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
7888 + /* remove rx_desc */
7889 + skb_pull(new, pkt_offset);
7890 +
7891 + rtw_rx_stats(rtwdev, pkt_stat.vif, new);
7892 memcpy(new->cb, &rx_status, sizeof(rx_status));
7893 ieee80211_rx_irqsafe(rtwdev->hw, new);
7894 }
7895
7896 - /* skb delivered to mac80211, alloc a new one in rx ring */
7897 - new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE);
7898 - if (WARN(!new, "rx routine starvation\n"))
7899 - return;
7900 -
7901 - ring->buf[cur_rp] = new;
7902 - rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz);
7903 +next_rp:
7904 + /* new skb delivered to mac80211, re-enable original skb DMA */
7905 + rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
7906 + buf_desc_sz);
7907
7908 /* host read next element in ring */
7909 if (++cur_rp >= ring->r.len)
7910 diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
7911 index da7e63fca9f5..a9999d10ae81 100644
7912 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
7913 +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
7914 @@ -223,7 +223,6 @@ void zd_mac_clear(struct zd_mac *mac)
7915 {
7916 flush_workqueue(zd_workqueue);
7917 zd_chip_clear(&mac->chip);
7918 - lockdep_assert_held(&mac->lock);
7919 ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
7920 }
7921
7922 diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
7923 index af831d3d15d0..30de7efef003 100644
7924 --- a/drivers/nvme/host/multipath.c
7925 +++ b/drivers/nvme/host/multipath.c
7926 @@ -509,14 +509,16 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
7927
7928 down_write(&ctrl->namespaces_rwsem);
7929 list_for_each_entry(ns, &ctrl->namespaces, list) {
7930 - if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
7931 + unsigned nsid = le32_to_cpu(desc->nsids[n]);
7932 +
7933 + if (ns->head->ns_id < nsid)
7934 continue;
7935 - nvme_update_ns_ana_state(desc, ns);
7936 + if (ns->head->ns_id == nsid)
7937 + nvme_update_ns_ana_state(desc, ns);
7938 if (++n == nr_nsids)
7939 break;
7940 }
7941 up_write(&ctrl->namespaces_rwsem);
7942 - WARN_ON_ONCE(n < nr_nsids);
7943 return 0;
7944 }
7945
7946 diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
7947 index 4dc12ea52f23..51800a9ce9a9 100644
7948 --- a/drivers/nvme/target/admin-cmd.c
7949 +++ b/drivers/nvme/target/admin-cmd.c
7950 @@ -81,9 +81,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
7951 goto out;
7952
7953 host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
7954 - data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
7955 + data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
7956 + sectors[READ]), 1000);
7957 host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
7958 - data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
7959 + data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
7960 + sectors[WRITE]), 1000);
7961
7962 put_unaligned_le64(host_reads, &slog->host_reads[0]);
7963 put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
7964 @@ -111,11 +113,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
7965 if (!ns->bdev)
7966 continue;
7967 host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
7968 - data_units_read +=
7969 - part_stat_read(ns->bdev->bd_part, sectors[READ]);
7970 + data_units_read += DIV_ROUND_UP(
7971 + part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
7972 host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
7973 - data_units_written +=
7974 - part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
7975 + data_units_written += DIV_ROUND_UP(
7976 + part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
7977
7978 }
7979 rcu_read_unlock();
7980 diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
7981 index 3c730103e637..14be463e25b0 100644
7982 --- a/drivers/parisc/dino.c
7983 +++ b/drivers/parisc/dino.c
7984 @@ -156,6 +156,15 @@ static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba)
7985 return container_of(hba, struct dino_device, hba);
7986 }
7987
7988 +/* Check if PCI device is behind a Card-mode Dino. */
7989 +static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
7990 +{
7991 + struct dino_device *dino_dev;
7992 +
7993 + dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
7994 + return is_card_dino(&dino_dev->hba.dev->id);
7995 +}
7996 +
7997 /*
7998 * Dino Configuration Space Accessor Functions
7999 */
8000 @@ -437,6 +446,21 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
8001 }
8002 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
8003
8004 +#ifdef CONFIG_TULIP
8005 +static void pci_fixup_tulip(struct pci_dev *dev)
8006 +{
8007 + if (!pci_dev_is_behind_card_dino(dev))
8008 + return;
8009 + if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM))
8010 + return;
8011 + pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n",
8012 + pci_name(dev));
8013 + /* Disable this card by zeroing the PCI resources */
8014 + memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
8015 + memset(&dev->resource[1], 0, sizeof(dev->resource[1]));
8016 +}
8017 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip);
8018 +#endif /* CONFIG_TULIP */
8019
8020 static void __init
8021 dino_bios_init(void)
8022 diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
8023 index 5d3fb2abad1d..bec19d4814ab 100644
8024 --- a/drivers/platform/chrome/cros_ec_rpmsg.c
8025 +++ b/drivers/platform/chrome/cros_ec_rpmsg.c
8026 @@ -41,6 +41,7 @@ struct cros_ec_rpmsg {
8027 struct rpmsg_device *rpdev;
8028 struct completion xfer_ack;
8029 struct work_struct host_event_work;
8030 + struct rpmsg_endpoint *ept;
8031 };
8032
8033 /**
8034 @@ -72,7 +73,6 @@ static int cros_ec_pkt_xfer_rpmsg(struct cros_ec_device *ec_dev,
8035 struct cros_ec_command *ec_msg)
8036 {
8037 struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
8038 - struct rpmsg_device *rpdev = ec_rpmsg->rpdev;
8039 struct ec_host_response *response;
8040 unsigned long timeout;
8041 int len;
8042 @@ -85,7 +85,7 @@ static int cros_ec_pkt_xfer_rpmsg(struct cros_ec_device *ec_dev,
8043 dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
8044
8045 reinit_completion(&ec_rpmsg->xfer_ack);
8046 - ret = rpmsg_send(rpdev->ept, ec_dev->dout, len);
8047 + ret = rpmsg_send(ec_rpmsg->ept, ec_dev->dout, len);
8048 if (ret) {
8049 dev_err(ec_dev->dev, "rpmsg send failed\n");
8050 return ret;
8051 @@ -196,11 +196,24 @@ static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
8052 return 0;
8053 }
8054
8055 +static struct rpmsg_endpoint *
8056 +cros_ec_rpmsg_create_ept(struct rpmsg_device *rpdev)
8057 +{
8058 + struct rpmsg_channel_info chinfo = {};
8059 +
8060 + strscpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
8061 + chinfo.src = rpdev->src;
8062 + chinfo.dst = RPMSG_ADDR_ANY;
8063 +
8064 + return rpmsg_create_ept(rpdev, cros_ec_rpmsg_callback, NULL, chinfo);
8065 +}
8066 +
8067 static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
8068 {
8069 struct device *dev = &rpdev->dev;
8070 struct cros_ec_rpmsg *ec_rpmsg;
8071 struct cros_ec_device *ec_dev;
8072 + int ret;
8073
8074 ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
8075 if (!ec_dev)
8076 @@ -225,7 +238,18 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
8077 INIT_WORK(&ec_rpmsg->host_event_work,
8078 cros_ec_rpmsg_host_event_function);
8079
8080 - return cros_ec_register(ec_dev);
8081 + ec_rpmsg->ept = cros_ec_rpmsg_create_ept(rpdev);
8082 + if (!ec_rpmsg->ept)
8083 + return -ENOMEM;
8084 +
8085 + ret = cros_ec_register(ec_dev);
8086 + if (ret < 0) {
8087 + rpmsg_destroy_ept(ec_rpmsg->ept);
8088 + cancel_work_sync(&ec_rpmsg->host_event_work);
8089 + return ret;
8090 + }
8091 +
8092 + return 0;
8093 }
8094
8095 static void cros_ec_rpmsg_remove(struct rpmsg_device *rpdev)
8096 @@ -233,6 +257,7 @@ static void cros_ec_rpmsg_remove(struct rpmsg_device *rpdev)
8097 struct cros_ec_device *ec_dev = dev_get_drvdata(&rpdev->dev);
8098 struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
8099
8100 + rpmsg_destroy_ept(ec_rpmsg->ept);
8101 cancel_work_sync(&ec_rpmsg->host_event_work);
8102 }
8103
8104 @@ -249,7 +274,6 @@ static struct rpmsg_driver cros_ec_driver_rpmsg = {
8105 },
8106 .probe = cros_ec_rpmsg_probe,
8107 .remove = cros_ec_rpmsg_remove,
8108 - .callback = cros_ec_rpmsg_callback,
8109 };
8110
8111 module_rpmsg_driver(cros_ec_driver_rpmsg);
8112 diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
8113 index d9542c661ddc..9ea1a2a19f86 100644
8114 --- a/drivers/platform/x86/intel_int0002_vgpio.c
8115 +++ b/drivers/platform/x86/intel_int0002_vgpio.c
8116 @@ -144,6 +144,7 @@ static struct irq_chip int0002_cht_irqchip = {
8117 * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI
8118 * and we don't want to mess with the ACPI SCI irq settings.
8119 */
8120 + .flags = IRQCHIP_SKIP_SET_WAKE,
8121 };
8122
8123 static const struct x86_cpu_id int0002_cpu_ids[] = {
8124 diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
8125 index c510d0d72475..3b6b8dcc4767 100644
8126 --- a/drivers/platform/x86/intel_pmc_core.c
8127 +++ b/drivers/platform/x86/intel_pmc_core.c
8128 @@ -878,10 +878,14 @@ static int pmc_core_probe(struct platform_device *pdev)
8129 if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
8130 pmcdev->map = &cnp_reg_map;
8131
8132 - if (lpit_read_residency_count_address(&slp_s0_addr))
8133 + if (lpit_read_residency_count_address(&slp_s0_addr)) {
8134 pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
8135 - else
8136 +
8137 + if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
8138 + return -ENODEV;
8139 + } else {
8140 pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
8141 + }
8142
8143 pmcdev->regbase = ioremap(pmcdev->base_addr,
8144 pmcdev->map->regmap_length);
8145 diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c
8146 index a8754a6db1b8..186540014c48 100644
8147 --- a/drivers/platform/x86/intel_pmc_core_pltdrv.c
8148 +++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c
8149 @@ -18,8 +18,16 @@
8150 #include <asm/cpu_device_id.h>
8151 #include <asm/intel-family.h>
8152
8153 +static void intel_pmc_core_release(struct device *dev)
8154 +{
8155 + /* Nothing to do. */
8156 +}
8157 +
8158 static struct platform_device pmc_core_device = {
8159 .name = "intel_pmc_core",
8160 + .dev = {
8161 + .release = intel_pmc_core_release,
8162 + },
8163 };
8164
8165 /*
8166 diff --git a/drivers/ras/Makefile b/drivers/ras/Makefile
8167 index ef6777e14d3d..6f0404f50107 100644
8168 --- a/drivers/ras/Makefile
8169 +++ b/drivers/ras/Makefile
8170 @@ -1,3 +1,4 @@
8171 # SPDX-License-Identifier: GPL-2.0-only
8172 -obj-$(CONFIG_RAS) += ras.o debugfs.o
8173 +obj-$(CONFIG_RAS) += ras.o
8174 +obj-$(CONFIG_DEBUG_FS) += debugfs.o
8175 obj-$(CONFIG_RAS_CEC) += cec.o
8176 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
8177 index e0c0cf462004..1b35b8311650 100644
8178 --- a/drivers/regulator/core.c
8179 +++ b/drivers/regulator/core.c
8180 @@ -5640,7 +5640,7 @@ static int __init regulator_init(void)
8181 /* init early to allow our consumers to complete system booting */
8182 core_initcall(regulator_init);
8183
8184 -static int __init regulator_late_cleanup(struct device *dev, void *data)
8185 +static int regulator_late_cleanup(struct device *dev, void *data)
8186 {
8187 struct regulator_dev *rdev = dev_to_rdev(dev);
8188 const struct regulator_ops *ops = rdev->desc->ops;
8189 @@ -5689,17 +5689,8 @@ unlock:
8190 return 0;
8191 }
8192
8193 -static int __init regulator_init_complete(void)
8194 +static void regulator_init_complete_work_function(struct work_struct *work)
8195 {
8196 - /*
8197 - * Since DT doesn't provide an idiomatic mechanism for
8198 - * enabling full constraints and since it's much more natural
8199 - * with DT to provide them just assume that a DT enabled
8200 - * system has full constraints.
8201 - */
8202 - if (of_have_populated_dt())
8203 - has_full_constraints = true;
8204 -
8205 /*
8206 * Regulators may had failed to resolve their input supplies
8207 * when were registered, either because the input supply was
8208 @@ -5717,6 +5708,35 @@ static int __init regulator_init_complete(void)
8209 */
8210 class_for_each_device(&regulator_class, NULL, NULL,
8211 regulator_late_cleanup);
8212 +}
8213 +
8214 +static DECLARE_DELAYED_WORK(regulator_init_complete_work,
8215 + regulator_init_complete_work_function);
8216 +
8217 +static int __init regulator_init_complete(void)
8218 +{
8219 + /*
8220 + * Since DT doesn't provide an idiomatic mechanism for
8221 + * enabling full constraints and since it's much more natural
8222 + * with DT to provide them just assume that a DT enabled
8223 + * system has full constraints.
8224 + */
8225 + if (of_have_populated_dt())
8226 + has_full_constraints = true;
8227 +
8228 + /*
8229 + * We punt completion for an arbitrary amount of time since
8230 + * systems like distros will load many drivers from userspace
8231 + * so consumers might not always be ready yet, this is
8232 + * particularly an issue with laptops where this might bounce
8233 + * the display off then on. Ideally we'd get a notification
8234 + * from userspace when this happens but we don't so just wait
8235 + * a bit and hope we waited long enough. It'd be better if
8236 + * we'd only do this on systems that need it, and a kernel
8237 + * command line option might be useful.
8238 + */
8239 + schedule_delayed_work(&regulator_init_complete_work,
8240 + msecs_to_jiffies(30000));
8241
8242 return 0;
8243 }
8244 diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
8245 index 5647e2f97ff8..4b9f618b07e9 100644
8246 --- a/drivers/regulator/lm363x-regulator.c
8247 +++ b/drivers/regulator/lm363x-regulator.c
8248 @@ -30,13 +30,13 @@
8249
8250 /* LM3632 */
8251 #define LM3632_BOOST_VSEL_MAX 0x26
8252 -#define LM3632_LDO_VSEL_MAX 0x29
8253 +#define LM3632_LDO_VSEL_MAX 0x28
8254 #define LM3632_VBOOST_MIN 4500000
8255 #define LM3632_VLDO_MIN 4000000
8256
8257 /* LM36274 */
8258 #define LM36274_BOOST_VSEL_MAX 0x3f
8259 -#define LM36274_LDO_VSEL_MAX 0x34
8260 +#define LM36274_LDO_VSEL_MAX 0x32
8261 #define LM36274_VOLTAGE_MIN 4000000
8262
8263 /* Common */
8264 @@ -226,7 +226,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = {
8265 .of_match = "vboost",
8266 .id = LM36274_BOOST,
8267 .ops = &lm363x_boost_voltage_table_ops,
8268 - .n_voltages = LM36274_BOOST_VSEL_MAX,
8269 + .n_voltages = LM36274_BOOST_VSEL_MAX + 1,
8270 .min_uV = LM36274_VOLTAGE_MIN,
8271 .uV_step = LM363X_STEP_50mV,
8272 .type = REGULATOR_VOLTAGE,
8273 @@ -239,7 +239,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = {
8274 .of_match = "vpos",
8275 .id = LM36274_LDO_POS,
8276 .ops = &lm363x_regulator_voltage_table_ops,
8277 - .n_voltages = LM36274_LDO_VSEL_MAX,
8278 + .n_voltages = LM36274_LDO_VSEL_MAX + 1,
8279 .min_uV = LM36274_VOLTAGE_MIN,
8280 .uV_step = LM363X_STEP_50mV,
8281 .type = REGULATOR_VOLTAGE,
8282 @@ -254,7 +254,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = {
8283 .of_match = "vneg",
8284 .id = LM36274_LDO_NEG,
8285 .ops = &lm363x_regulator_voltage_table_ops,
8286 - .n_voltages = LM36274_LDO_VSEL_MAX,
8287 + .n_voltages = LM36274_LDO_VSEL_MAX + 1,
8288 .min_uV = LM36274_VOLTAGE_MIN,
8289 .uV_step = LM363X_STEP_50mV,
8290 .type = REGULATOR_VOLTAGE,
8291 diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
8292 index 65f1fe343c64..5efc959493ec 100644
8293 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c
8294 +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
8295 @@ -546,6 +546,8 @@ static void send_mode_select(struct work_struct *work)
8296 spin_unlock(&ctlr->ms_lock);
8297
8298 retry:
8299 + memset(cdb, 0, sizeof(cdb));
8300 +
8301 data_size = rdac_failover_get(ctlr, &list, cdb);
8302
8303 RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
8304 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
8305 index da83034d4759..afcd9a885884 100644
8306 --- a/drivers/scsi/qla2xxx/qla_init.c
8307 +++ b/drivers/scsi/qla2xxx/qla_init.c
8308 @@ -289,8 +289,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
8309 struct srb_iocb *lio;
8310 int rval = QLA_FUNCTION_FAILED;
8311
8312 - if (!vha->flags.online)
8313 - goto done;
8314 + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
8315 + fcport->loop_id == FC_NO_LOOP_ID) {
8316 + ql_log(ql_log_warn, vha, 0xffff,
8317 + "%s: %8phC - not sending command.\n",
8318 + __func__, fcport->port_name);
8319 + return rval;
8320 + }
8321
8322 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
8323 if (!sp)
8324 @@ -1262,8 +1267,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
8325 struct port_database_24xx *pd;
8326 struct qla_hw_data *ha = vha->hw;
8327
8328 - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
8329 + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
8330 + fcport->loop_id == FC_NO_LOOP_ID) {
8331 + ql_log(ql_log_warn, vha, 0xffff,
8332 + "%s: %8phC - not sending command.\n",
8333 + __func__, fcport->port_name);
8334 return rval;
8335 + }
8336
8337 fcport->disc_state = DSC_GPDB;
8338
8339 @@ -1953,8 +1963,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
8340 return;
8341 }
8342
8343 - if (fcport->disc_state == DSC_DELETE_PEND)
8344 + if ((fcport->disc_state == DSC_DELETE_PEND) ||
8345 + (fcport->disc_state == DSC_DELETED)) {
8346 + set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
8347 return;
8348 + }
8349
8350 if (ea->sp->gen2 != fcport->login_gen) {
8351 /* target side must have changed it. */
8352 @@ -6698,8 +6711,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
8353 }
8354
8355 /* Clear all async request states across all VPs. */
8356 - list_for_each_entry(fcport, &vha->vp_fcports, list)
8357 + list_for_each_entry(fcport, &vha->vp_fcports, list) {
8358 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
8359 + fcport->scan_state = 0;
8360 + }
8361 spin_lock_irqsave(&ha->vport_slock, flags);
8362 list_for_each_entry(vp, &ha->vp_list, list) {
8363 atomic_inc(&vp->vref_count);
8364 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
8365 index 98e60a34afd9..4fda308c3ef5 100644
8366 --- a/drivers/scsi/qla2xxx/qla_os.c
8367 +++ b/drivers/scsi/qla2xxx/qla_os.c
8368 @@ -5086,6 +5086,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
8369 if (fcport) {
8370 fcport->id_changed = 1;
8371 fcport->scan_state = QLA_FCPORT_FOUND;
8372 + fcport->chip_reset = vha->hw->base_qpair->chip_reset;
8373 memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
8374
8375 if (pla) {
8376 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
8377 index 1c1f63be6eed..459c28aa3b94 100644
8378 --- a/drivers/scsi/qla2xxx/qla_target.c
8379 +++ b/drivers/scsi/qla2xxx/qla_target.c
8380 @@ -1209,7 +1209,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
8381 sess->logout_on_delete = 0;
8382 sess->logo_ack_needed = 0;
8383 sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
8384 - sess->scan_state = 0;
8385 }
8386 }
8387
8388 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
8389 index 11e64b50497f..4e88d7e9cf9a 100644
8390 --- a/drivers/scsi/scsi_lib.c
8391 +++ b/drivers/scsi/scsi_lib.c
8392 @@ -1089,6 +1089,18 @@ static void scsi_initialize_rq(struct request *rq)
8393 cmd->retries = 0;
8394 }
8395
8396 +/*
8397 + * Only called when the request isn't completed by SCSI, and not freed by
8398 + * SCSI
8399 + */
8400 +static void scsi_cleanup_rq(struct request *rq)
8401 +{
8402 + if (rq->rq_flags & RQF_DONTPREP) {
8403 + scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
8404 + rq->rq_flags &= ~RQF_DONTPREP;
8405 + }
8406 +}
8407 +
8408 /* Add a command to the list used by the aacraid and dpt_i2o drivers */
8409 void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
8410 {
8411 @@ -1821,6 +1833,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
8412 .init_request = scsi_mq_init_request,
8413 .exit_request = scsi_mq_exit_request,
8414 .initialize_rq_fn = scsi_initialize_rq,
8415 + .cleanup_rq = scsi_cleanup_rq,
8416 .busy = scsi_mq_lld_busy,
8417 .map_queues = scsi_map_queues,
8418 };
8419 diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
8420 index 19d4cbc93a17..c470e24f1dfa 100644
8421 --- a/drivers/soc/amlogic/meson-clk-measure.c
8422 +++ b/drivers/soc/amlogic/meson-clk-measure.c
8423 @@ -11,6 +11,8 @@
8424 #include <linux/debugfs.h>
8425 #include <linux/regmap.h>
8426
8427 +static DEFINE_MUTEX(measure_lock);
8428 +
8429 #define MSR_CLK_DUTY 0x0
8430 #define MSR_CLK_REG0 0x4
8431 #define MSR_CLK_REG1 0x8
8432 @@ -360,6 +362,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
8433 unsigned int val;
8434 int ret;
8435
8436 + ret = mutex_lock_interruptible(&measure_lock);
8437 + if (ret)
8438 + return ret;
8439 +
8440 regmap_write(priv->regmap, MSR_CLK_REG0, 0);
8441
8442 /* Set measurement duration */
8443 @@ -377,8 +383,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
8444
8445 ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0,
8446 val, !(val & MSR_BUSY), 10, 10000);
8447 - if (ret)
8448 + if (ret) {
8449 + mutex_unlock(&measure_lock);
8450 return ret;
8451 + }
8452
8453 /* Disable */
8454 regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0);
8455 @@ -386,6 +394,8 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
8456 /* Get the value in multiple of gate time counts */
8457 regmap_read(priv->regmap, MSR_CLK_REG2, &val);
8458
8459 + mutex_unlock(&measure_lock);
8460 +
8461 if (val >= MSR_VAL_MASK)
8462 return -EINVAL;
8463
8464 diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
8465 index 2bbf49e5d441..9583c542c47f 100644
8466 --- a/drivers/soc/renesas/Kconfig
8467 +++ b/drivers/soc/renesas/Kconfig
8468 @@ -55,6 +55,7 @@ config ARCH_EMEV2
8469
8470 config ARCH_R7S72100
8471 bool "RZ/A1H (R7S72100)"
8472 + select ARM_ERRATA_754322
8473 select PM
8474 select PM_GENERIC_DOMAINS
8475 select RENESAS_OSTM
8476 @@ -78,6 +79,7 @@ config ARCH_R8A73A4
8477 config ARCH_R8A7740
8478 bool "R-Mobile A1 (R8A77400)"
8479 select ARCH_RMOBILE
8480 + select ARM_ERRATA_754322
8481 select RENESAS_INTC_IRQPIN
8482
8483 config ARCH_R8A7743
8484 @@ -105,10 +107,12 @@ config ARCH_R8A77470
8485 config ARCH_R8A7778
8486 bool "R-Car M1A (R8A77781)"
8487 select ARCH_RCAR_GEN1
8488 + select ARM_ERRATA_754322
8489
8490 config ARCH_R8A7779
8491 bool "R-Car H1 (R8A77790)"
8492 select ARCH_RCAR_GEN1
8493 + select ARM_ERRATA_754322
8494 select HAVE_ARM_SCU if SMP
8495 select HAVE_ARM_TWD if SMP
8496 select SYSC_R8A7779
8497 @@ -152,6 +156,7 @@ config ARCH_R9A06G032
8498 config ARCH_SH73A0
8499 bool "SH-Mobile AG5 (R8A73A00)"
8500 select ARCH_RMOBILE
8501 + select ARM_ERRATA_754322
8502 select HAVE_ARM_SCU if SMP
8503 select HAVE_ARM_TWD if SMP
8504 select RENESAS_INTC_IRQPIN
8505 diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c
8506 index 421ae1c887d8..54b616ad4a62 100644
8507 --- a/drivers/soc/renesas/rmobile-sysc.c
8508 +++ b/drivers/soc/renesas/rmobile-sysc.c
8509 @@ -48,12 +48,8 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d)
8510 static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
8511 {
8512 struct rmobile_pm_domain *rmobile_pd = to_rmobile_pd(genpd);
8513 - unsigned int mask;
8514 + unsigned int mask = BIT(rmobile_pd->bit_shift);
8515
8516 - if (rmobile_pd->bit_shift == ~0)
8517 - return -EBUSY;
8518 -
8519 - mask = BIT(rmobile_pd->bit_shift);
8520 if (rmobile_pd->suspend) {
8521 int ret = rmobile_pd->suspend();
8522
8523 @@ -80,14 +76,10 @@ static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
8524
8525 static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd)
8526 {
8527 - unsigned int mask;
8528 + unsigned int mask = BIT(rmobile_pd->bit_shift);
8529 unsigned int retry_count;
8530 int ret = 0;
8531
8532 - if (rmobile_pd->bit_shift == ~0)
8533 - return 0;
8534 -
8535 - mask = BIT(rmobile_pd->bit_shift);
8536 if (__raw_readl(rmobile_pd->base + PSTR) & mask)
8537 return ret;
8538
8539 @@ -122,11 +114,15 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
8540 struct dev_power_governor *gov = rmobile_pd->gov;
8541
8542 genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
8543 - genpd->power_off = rmobile_pd_power_down;
8544 - genpd->power_on = rmobile_pd_power_up;
8545 - genpd->attach_dev = cpg_mstp_attach_dev;
8546 - genpd->detach_dev = cpg_mstp_detach_dev;
8547 - __rmobile_pd_power_up(rmobile_pd);
8548 + genpd->attach_dev = cpg_mstp_attach_dev;
8549 + genpd->detach_dev = cpg_mstp_detach_dev;
8550 +
8551 + if (!(genpd->flags & GENPD_FLAG_ALWAYS_ON)) {
8552 + genpd->power_off = rmobile_pd_power_down;
8553 + genpd->power_on = rmobile_pd_power_up;
8554 + __rmobile_pd_power_up(rmobile_pd);
8555 + }
8556 +
8557 pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
8558 }
8559
8560 @@ -270,6 +266,11 @@ static void __init rmobile_setup_pm_domain(struct device_node *np,
8561 break;
8562
8563 case PD_NORMAL:
8564 + if (pd->bit_shift == ~0) {
8565 + /* Top-level always-on domain */
8566 + pr_debug("PM domain %s is always-on domain\n", name);
8567 + pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
8568 + }
8569 break;
8570 }
8571
8572 diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
8573 index 840b1b8ff3dc..dfdcebb38830 100644
8574 --- a/drivers/spi/spi-bcm2835.c
8575 +++ b/drivers/spi/spi-bcm2835.c
8576 @@ -319,6 +319,13 @@ static void bcm2835_spi_reset_hw(struct spi_controller *ctlr)
8577 BCM2835_SPI_CS_INTD |
8578 BCM2835_SPI_CS_DMAEN |
8579 BCM2835_SPI_CS_TA);
8580 + /*
8581 + * Transmission sometimes breaks unless the DONE bit is written at the
8582 + * end of every transfer. The spec says it's a RO bit. Either the
8583 + * spec is wrong and the bit is actually of type RW1C, or it's a
8584 + * hardware erratum.
8585 + */
8586 + cs |= BCM2835_SPI_CS_DONE;
8587 /* and reset RX/TX FIFOS */
8588 cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
8589
8590 @@ -477,7 +484,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
8591 bcm2835_wr_fifo_count(bs, bs->rx_prologue);
8592 bcm2835_wait_tx_fifo_empty(bs);
8593 bcm2835_rd_fifo_count(bs, bs->rx_prologue);
8594 - bcm2835_spi_reset_hw(ctlr);
8595 + bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX
8596 + | BCM2835_SPI_CS_CLEAR_TX
8597 + | BCM2835_SPI_CS_DONE);
8598
8599 dma_sync_single_for_device(ctlr->dma_rx->device->dev,
8600 sg_dma_address(&tfr->rx_sg.sgl[0]),
8601 @@ -498,7 +507,8 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
8602 | BCM2835_SPI_CS_DMAEN);
8603 bcm2835_wr_fifo_count(bs, tx_remaining);
8604 bcm2835_wait_tx_fifo_empty(bs);
8605 - bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX);
8606 + bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX
8607 + | BCM2835_SPI_CS_DONE);
8608 }
8609
8610 if (likely(!bs->tx_spillover)) {
8611 diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
8612 index 18c06568805e..86789dbaf577 100644
8613 --- a/drivers/spi/spi-dw-mmio.c
8614 +++ b/drivers/spi/spi-dw-mmio.c
8615 @@ -172,8 +172,10 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
8616
8617 /* Optional clock needed to access the registers */
8618 dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
8619 - if (IS_ERR(dwsmmio->pclk))
8620 - return PTR_ERR(dwsmmio->pclk);
8621 + if (IS_ERR(dwsmmio->pclk)) {
8622 + ret = PTR_ERR(dwsmmio->pclk);
8623 + goto out_clk;
8624 + }
8625 ret = clk_prepare_enable(dwsmmio->pclk);
8626 if (ret)
8627 goto out_clk;
8628 diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
8629 index 53335ccc98f6..545fc8189fb0 100644
8630 --- a/drivers/spi/spi-fsl-dspi.c
8631 +++ b/drivers/spi/spi-fsl-dspi.c
8632 @@ -886,9 +886,11 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
8633 trans_mode);
8634 }
8635 }
8636 +
8637 + return IRQ_HANDLED;
8638 }
8639
8640 - return IRQ_HANDLED;
8641 + return IRQ_NONE;
8642 }
8643
8644 static const struct of_device_id fsl_dspi_dt_ids[] = {
8645 diff --git a/drivers/staging/erofs/zmap.c b/drivers/staging/erofs/zmap.c
8646 index 9c0bd65c46bf..c2359321ca13 100644
8647 --- a/drivers/staging/erofs/zmap.c
8648 +++ b/drivers/staging/erofs/zmap.c
8649 @@ -86,12 +86,11 @@ static int fill_inode_lazy(struct inode *inode)
8650
8651 vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
8652 ((h->h_clusterbits >> 5) & 7);
8653 + set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
8654 unmap_done:
8655 kunmap_atomic(kaddr);
8656 unlock_page(page);
8657 put_page(page);
8658 -
8659 - set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
8660 out_unlock:
8661 clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags);
8662 return err;
8663 diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
8664 index c3665f0e87a2..46dcb46bb927 100644
8665 --- a/drivers/staging/media/hantro/hantro_drv.c
8666 +++ b/drivers/staging/media/hantro/hantro_drv.c
8667 @@ -724,6 +724,7 @@ static int hantro_probe(struct platform_device *pdev)
8668 dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
8669 return ret;
8670 }
8671 + vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
8672
8673 for (i = 0; i < vpu->variant->num_irqs; i++) {
8674 const char *irq_name = vpu->variant->irqs[i].name;
8675 diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
8676 index f29e28df36ed..bfa4b254c4e4 100644
8677 --- a/drivers/staging/media/imx/imx6-mipi-csi2.c
8678 +++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
8679 @@ -243,7 +243,7 @@ static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
8680 }
8681
8682 /* Waits for low-power LP-11 state on data and clock lanes. */
8683 -static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
8684 +static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
8685 {
8686 u32 mask, reg;
8687 int ret;
8688 @@ -254,11 +254,9 @@ static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
8689 ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
8690 (reg & mask) == mask, 0, 500000);
8691 if (ret) {
8692 - v4l2_err(&csi2->sd, "LP-11 timeout, phy_state = 0x%08x\n", reg);
8693 - return ret;
8694 + v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n");
8695 + v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg);
8696 }
8697 -
8698 - return 0;
8699 }
8700
8701 /* Wait for active clock on the clock lane. */
8702 @@ -316,9 +314,7 @@ static int csi2_start(struct csi2_dev *csi2)
8703 csi2_enable(csi2, true);
8704
8705 /* Step 5 */
8706 - ret = csi2_dphy_wait_stopstate(csi2);
8707 - if (ret)
8708 - goto err_assert_reset;
8709 + csi2_dphy_wait_stopstate(csi2);
8710
8711 /* Step 6 */
8712 ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
8713 diff --git a/drivers/staging/media/tegra-vde/Kconfig b/drivers/staging/media/tegra-vde/Kconfig
8714 index 2e7f644ae591..ba49ea50b8c0 100644
8715 --- a/drivers/staging/media/tegra-vde/Kconfig
8716 +++ b/drivers/staging/media/tegra-vde/Kconfig
8717 @@ -3,7 +3,7 @@ config TEGRA_VDE
8718 tristate "NVIDIA Tegra Video Decoder Engine driver"
8719 depends on ARCH_TEGRA || COMPILE_TEST
8720 select DMA_SHARED_BUFFER
8721 - select IOMMU_IOVA if IOMMU_SUPPORT
8722 + select IOMMU_IOVA if (IOMMU_SUPPORT || COMPILE_TEST)
8723 select SRAM
8724 help
8725 Say Y here to enable support for the NVIDIA Tegra video decoder
8726 diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
8727 index 04a22663b4fb..51d97ec4f58f 100644
8728 --- a/drivers/video/fbdev/efifb.c
8729 +++ b/drivers/video/fbdev/efifb.c
8730 @@ -122,28 +122,13 @@ static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
8731 */
8732 static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
8733 {
8734 - static const int default_resolutions[][2] = {
8735 - { 800, 600 },
8736 - { 1024, 768 },
8737 - { 1280, 1024 },
8738 - };
8739 - u32 i, right_margin;
8740 -
8741 - for (i = 0; i < ARRAY_SIZE(default_resolutions); i++) {
8742 - if (default_resolutions[i][0] == si->lfb_width &&
8743 - default_resolutions[i][1] == si->lfb_height)
8744 - break;
8745 - }
8746 - /* If not a default resolution used for textmode, this should be fine */
8747 - if (i >= ARRAY_SIZE(default_resolutions))
8748 - return true;
8749 -
8750 - /* If the right margin is 5 times smaller then the left one, reject */
8751 - right_margin = si->lfb_width - (bgrt_tab.image_offset_x + bmp_width);
8752 - if (right_margin < (bgrt_tab.image_offset_x / 5))
8753 - return false;
8754 + /*
8755 + * All x86 firmwares horizontally center the image (the yoffset
8756 + * calculations differ between boards, but xoffset is predictable).
8757 + */
8758 + u32 expected_xoffset = (si->lfb_width - bmp_width) / 2;
8759
8760 - return true;
8761 + return bgrt_tab.image_offset_x == expected_xoffset;
8762 }
8763 #else
8764 static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
8765 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
8766 index d4e11b2e04f6..f131651502b8 100644
8767 --- a/fs/binfmt_elf.c
8768 +++ b/fs/binfmt_elf.c
8769 @@ -1141,7 +1141,8 @@ out_free_interp:
8770 * (since it grows up, and may collide early with the stack
8771 * growing down), and into the unused ELF_ET_DYN_BASE region.
8772 */
8773 - if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && !interpreter)
8774 + if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
8775 + loc->elf_ex.e_type == ET_DYN && !interpreter)
8776 current->mm->brk = current->mm->start_brk =
8777 ELF_ET_DYN_BASE;
8778
8779 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
8780 index 5df76c17775a..322ec4b839ed 100644
8781 --- a/fs/btrfs/ctree.c
8782 +++ b/fs/btrfs/ctree.c
8783 @@ -1343,6 +1343,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
8784 struct tree_mod_elem *tm;
8785 struct extent_buffer *eb = NULL;
8786 struct extent_buffer *eb_root;
8787 + u64 eb_root_owner = 0;
8788 struct extent_buffer *old;
8789 struct tree_mod_root *old_root = NULL;
8790 u64 old_generation = 0;
8791 @@ -1380,6 +1381,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
8792 free_extent_buffer(old);
8793 }
8794 } else if (old_root) {
8795 + eb_root_owner = btrfs_header_owner(eb_root);
8796 btrfs_tree_read_unlock(eb_root);
8797 free_extent_buffer(eb_root);
8798 eb = alloc_dummy_extent_buffer(fs_info, logical);
8799 @@ -1396,7 +1398,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
8800 if (old_root) {
8801 btrfs_set_header_bytenr(eb, eb->start);
8802 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
8803 - btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
8804 + btrfs_set_header_owner(eb, eb_root_owner);
8805 btrfs_set_header_level(eb, old_root->level);
8806 btrfs_set_header_generation(eb, old_generation);
8807 }
8808 @@ -5475,6 +5477,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
8809 advance_left = advance_right = 0;
8810
8811 while (1) {
8812 + cond_resched();
8813 if (advance_left && !left_end_reached) {
8814 ret = tree_advance(left_path, &left_level,
8815 left_root_level,
8816 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
8817 index 94660063a162..d9541d58ce3d 100644
8818 --- a/fs/btrfs/ctree.h
8819 +++ b/fs/btrfs/ctree.h
8820 @@ -43,6 +43,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep;
8821 extern struct kmem_cache *btrfs_bit_radix_cachep;
8822 extern struct kmem_cache *btrfs_path_cachep;
8823 extern struct kmem_cache *btrfs_free_space_cachep;
8824 +extern struct kmem_cache *btrfs_free_space_bitmap_cachep;
8825 struct btrfs_ordered_sum;
8826 struct btrfs_ref;
8827
8828 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
8829 index 43fdb2992956..6858a05606dd 100644
8830 --- a/fs/btrfs/delayed-inode.c
8831 +++ b/fs/btrfs/delayed-inode.c
8832 @@ -474,6 +474,9 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
8833 struct rb_root_cached *root;
8834 struct btrfs_delayed_root *delayed_root;
8835
8836 + /* Not associated with any delayed_node */
8837 + if (!delayed_item->delayed_node)
8838 + return;
8839 delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
8840
8841 BUG_ON(!delayed_root);
8842 @@ -1525,7 +1528,12 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
8843 * we have reserved enough space when we start a new transaction,
8844 * so reserving metadata failure is impossible.
8845 */
8846 - BUG_ON(ret);
8847 + if (ret < 0) {
8848 + btrfs_err(trans->fs_info,
8849 +"metadata reservation failed for delayed dir item deltiona, should have been reserved");
8850 + btrfs_release_delayed_item(item);
8851 + goto end;
8852 + }
8853
8854 mutex_lock(&node->mutex);
8855 ret = __btrfs_add_delayed_deletion_item(node, item);
8856 @@ -1534,7 +1542,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
8857 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
8858 index, node->root->root_key.objectid,
8859 node->inode_id, ret);
8860 - BUG();
8861 + btrfs_delayed_item_release_metadata(dir->root, item);
8862 + btrfs_release_delayed_item(item);
8863 }
8864 mutex_unlock(&node->mutex);
8865 end:
8866 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
8867 index 97beb351a10c..65af7eb3f7bd 100644
8868 --- a/fs/btrfs/disk-io.c
8869 +++ b/fs/btrfs/disk-io.c
8870 @@ -416,6 +416,16 @@ int btrfs_verify_level_key(struct extent_buffer *eb, int level,
8871 */
8872 if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
8873 return 0;
8874 +
8875 + /* We have @first_key, so this @eb must have at least one item */
8876 + if (btrfs_header_nritems(eb) == 0) {
8877 + btrfs_err(fs_info,
8878 + "invalid tree nritems, bytenr=%llu nritems=0 expect >0",
8879 + eb->start);
8880 + WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
8881 + return -EUCLEAN;
8882 + }
8883 +
8884 if (found_level)
8885 btrfs_node_key_to_cpu(eb, &found_key, 0);
8886 else
8887 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
8888 index 8b7eb22d508a..ef2f80825c82 100644
8889 --- a/fs/btrfs/extent-tree.c
8890 +++ b/fs/btrfs/extent-tree.c
8891 @@ -5751,6 +5751,14 @@ search:
8892 */
8893 if ((flags & extra) && !(block_group->flags & extra))
8894 goto loop;
8895 +
8896 + /*
8897 + * This block group has different flags than we want.
8898 + * It's possible that we have MIXED_GROUP flag but no
8899 + * block group is mixed. Just skip such block group.
8900 + */
8901 + btrfs_release_block_group(block_group, delalloc);
8902 + continue;
8903 }
8904
8905 have_block_group:
8906 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
8907 index eeb75281894e..3e0c8fcb658f 100644
8908 --- a/fs/btrfs/extent_io.c
8909 +++ b/fs/btrfs/extent_io.c
8910 @@ -3745,11 +3745,20 @@ err_unlock:
8911 static void set_btree_ioerr(struct page *page)
8912 {
8913 struct extent_buffer *eb = (struct extent_buffer *)page->private;
8914 + struct btrfs_fs_info *fs_info;
8915
8916 SetPageError(page);
8917 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
8918 return;
8919
8920 + /*
8921 + * If we error out, we should add back the dirty_metadata_bytes
8922 + * to make it consistent.
8923 + */
8924 + fs_info = eb->fs_info;
8925 + percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
8926 + eb->len, fs_info->dirty_metadata_batch);
8927 +
8928 /*
8929 * If writeback for a btree extent that doesn't belong to a log tree
8930 * failed, increment the counter transaction->eb_write_errors.
8931 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
8932 index 062be9dde4c6..52ad985cc7f9 100644
8933 --- a/fs/btrfs/free-space-cache.c
8934 +++ b/fs/btrfs/free-space-cache.c
8935 @@ -764,7 +764,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
8936 } else {
8937 ASSERT(num_bitmaps);
8938 num_bitmaps--;
8939 - e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
8940 + e->bitmap = kmem_cache_zalloc(
8941 + btrfs_free_space_bitmap_cachep, GFP_NOFS);
8942 if (!e->bitmap) {
8943 kmem_cache_free(
8944 btrfs_free_space_cachep, e);
8945 @@ -1881,7 +1882,7 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl,
8946 struct btrfs_free_space *bitmap_info)
8947 {
8948 unlink_free_space(ctl, bitmap_info);
8949 - kfree(bitmap_info->bitmap);
8950 + kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
8951 kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
8952 ctl->total_bitmaps--;
8953 ctl->op->recalc_thresholds(ctl);
8954 @@ -2135,7 +2136,8 @@ new_bitmap:
8955 }
8956
8957 /* allocate the bitmap */
8958 - info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
8959 + info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
8960 + GFP_NOFS);
8961 spin_lock(&ctl->tree_lock);
8962 if (!info->bitmap) {
8963 ret = -ENOMEM;
8964 @@ -2146,7 +2148,9 @@ new_bitmap:
8965
8966 out:
8967 if (info) {
8968 - kfree(info->bitmap);
8969 + if (info->bitmap)
8970 + kmem_cache_free(btrfs_free_space_bitmap_cachep,
8971 + info->bitmap);
8972 kmem_cache_free(btrfs_free_space_cachep, info);
8973 }
8974
8975 @@ -2802,7 +2806,8 @@ out:
8976 if (entry->bytes == 0) {
8977 ctl->free_extents--;
8978 if (entry->bitmap) {
8979 - kfree(entry->bitmap);
8980 + kmem_cache_free(btrfs_free_space_bitmap_cachep,
8981 + entry->bitmap);
8982 ctl->total_bitmaps--;
8983 ctl->op->recalc_thresholds(ctl);
8984 }
8985 @@ -3606,7 +3611,7 @@ again:
8986 }
8987
8988 if (!map) {
8989 - map = kzalloc(PAGE_SIZE, GFP_NOFS);
8990 + map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
8991 if (!map) {
8992 kmem_cache_free(btrfs_free_space_cachep, info);
8993 return -ENOMEM;
8994 @@ -3635,7 +3640,8 @@ again:
8995
8996 if (info)
8997 kmem_cache_free(btrfs_free_space_cachep, info);
8998 - kfree(map);
8999 + if (map)
9000 + kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
9001 return 0;
9002 }
9003
9004 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
9005 index ee582a36653d..d51d9466feb0 100644
9006 --- a/fs/btrfs/inode.c
9007 +++ b/fs/btrfs/inode.c
9008 @@ -74,6 +74,7 @@ static struct kmem_cache *btrfs_inode_cachep;
9009 struct kmem_cache *btrfs_trans_handle_cachep;
9010 struct kmem_cache *btrfs_path_cachep;
9011 struct kmem_cache *btrfs_free_space_cachep;
9012 +struct kmem_cache *btrfs_free_space_bitmap_cachep;
9013
9014 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
9015 static int btrfs_truncate(struct inode *inode, bool skip_writeback);
9016 @@ -9380,6 +9381,7 @@ void __cold btrfs_destroy_cachep(void)
9017 kmem_cache_destroy(btrfs_trans_handle_cachep);
9018 kmem_cache_destroy(btrfs_path_cachep);
9019 kmem_cache_destroy(btrfs_free_space_cachep);
9020 + kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
9021 }
9022
9023 int __init btrfs_init_cachep(void)
9024 @@ -9409,6 +9411,12 @@ int __init btrfs_init_cachep(void)
9025 if (!btrfs_free_space_cachep)
9026 goto fail;
9027
9028 + btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
9029 + PAGE_SIZE, PAGE_SIZE,
9030 + SLAB_RED_ZONE, NULL);
9031 + if (!btrfs_free_space_bitmap_cachep)
9032 + goto fail;
9033 +
9034 return 0;
9035 fail:
9036 btrfs_destroy_cachep();
9037 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
9038 index f8a3c1b0a15a..001efc9ba1e7 100644
9039 --- a/fs/btrfs/qgroup.c
9040 +++ b/fs/btrfs/qgroup.c
9041 @@ -3154,9 +3154,6 @@ out:
9042 btrfs_free_path(path);
9043
9044 mutex_lock(&fs_info->qgroup_rescan_lock);
9045 - if (!btrfs_fs_closing(fs_info))
9046 - fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
9047 -
9048 if (err > 0 &&
9049 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
9050 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
9051 @@ -3172,16 +3169,30 @@ out:
9052 trans = btrfs_start_transaction(fs_info->quota_root, 1);
9053 if (IS_ERR(trans)) {
9054 err = PTR_ERR(trans);
9055 + trans = NULL;
9056 btrfs_err(fs_info,
9057 "fail to start transaction for status update: %d",
9058 err);
9059 - goto done;
9060 }
9061 - ret = update_qgroup_status_item(trans);
9062 - if (ret < 0) {
9063 - err = ret;
9064 - btrfs_err(fs_info, "fail to update qgroup status: %d", err);
9065 +
9066 + mutex_lock(&fs_info->qgroup_rescan_lock);
9067 + if (!btrfs_fs_closing(fs_info))
9068 + fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
9069 + if (trans) {
9070 + ret = update_qgroup_status_item(trans);
9071 + if (ret < 0) {
9072 + err = ret;
9073 + btrfs_err(fs_info, "fail to update qgroup status: %d",
9074 + err);
9075 + }
9076 }
9077 + fs_info->qgroup_rescan_running = false;
9078 + complete_all(&fs_info->qgroup_rescan_completion);
9079 + mutex_unlock(&fs_info->qgroup_rescan_lock);
9080 +
9081 + if (!trans)
9082 + return;
9083 +
9084 btrfs_end_transaction(trans);
9085
9086 if (btrfs_fs_closing(fs_info)) {
9087 @@ -3192,12 +3203,6 @@ out:
9088 } else {
9089 btrfs_err(fs_info, "qgroup scan failed with %d", err);
9090 }
9091 -
9092 -done:
9093 - mutex_lock(&fs_info->qgroup_rescan_lock);
9094 - fs_info->qgroup_rescan_running = false;
9095 - mutex_unlock(&fs_info->qgroup_rescan_lock);
9096 - complete_all(&fs_info->qgroup_rescan_completion);
9097 }
9098
9099 /*
9100 @@ -3425,6 +3430,9 @@ cleanup:
9101 while ((unode = ulist_next(&reserved->range_changed, &uiter)))
9102 clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
9103 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
9104 + /* Also free data bytes of already reserved one */
9105 + btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
9106 + orig_reserved, BTRFS_QGROUP_RSV_DATA);
9107 extent_changeset_release(reserved);
9108 return ret;
9109 }
9110 @@ -3469,7 +3477,7 @@ static int qgroup_free_reserved_data(struct inode *inode,
9111 * EXTENT_QGROUP_RESERVED, we won't double free.
9112 * So not need to rush.
9113 */
9114 - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree,
9115 + ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
9116 free_start, free_start + free_len - 1,
9117 EXTENT_QGROUP_RESERVED, &changeset);
9118 if (ret < 0)
9119 diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
9120 index ccd5706199d7..9634cae1e1b1 100644
9121 --- a/fs/btrfs/tree-checker.c
9122 +++ b/fs/btrfs/tree-checker.c
9123 @@ -821,6 +821,95 @@ static int check_inode_item(struct extent_buffer *leaf,
9124 return 0;
9125 }
9126
9127 +static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
9128 + int slot)
9129 +{
9130 + struct btrfs_fs_info *fs_info = leaf->fs_info;
9131 + struct btrfs_root_item ri;
9132 + const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY |
9133 + BTRFS_ROOT_SUBVOL_DEAD;
9134 +
9135 + /* No such tree id */
9136 + if (key->objectid == 0) {
9137 + generic_err(leaf, slot, "invalid root id 0");
9138 + return -EUCLEAN;
9139 + }
9140 +
9141 + /*
9142 + * Some older kernel may create ROOT_ITEM with non-zero offset, so here
9143 + * we only check offset for reloc tree whose key->offset must be a
9144 + * valid tree.
9145 + */
9146 + if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) {
9147 + generic_err(leaf, slot, "invalid root id 0 for reloc tree");
9148 + return -EUCLEAN;
9149 + }
9150 +
9151 + if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) {
9152 + generic_err(leaf, slot,
9153 + "invalid root item size, have %u expect %zu",
9154 + btrfs_item_size_nr(leaf, slot), sizeof(ri));
9155 + }
9156 +
9157 + read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
9158 + sizeof(ri));
9159 +
9160 + /* Generation related */
9161 + if (btrfs_root_generation(&ri) >
9162 + btrfs_super_generation(fs_info->super_copy) + 1) {
9163 + generic_err(leaf, slot,
9164 + "invalid root generation, have %llu expect (0, %llu]",
9165 + btrfs_root_generation(&ri),
9166 + btrfs_super_generation(fs_info->super_copy) + 1);
9167 + return -EUCLEAN;
9168 + }
9169 + if (btrfs_root_generation_v2(&ri) >
9170 + btrfs_super_generation(fs_info->super_copy) + 1) {
9171 + generic_err(leaf, slot,
9172 + "invalid root v2 generation, have %llu expect (0, %llu]",
9173 + btrfs_root_generation_v2(&ri),
9174 + btrfs_super_generation(fs_info->super_copy) + 1);
9175 + return -EUCLEAN;
9176 + }
9177 + if (btrfs_root_last_snapshot(&ri) >
9178 + btrfs_super_generation(fs_info->super_copy) + 1) {
9179 + generic_err(leaf, slot,
9180 + "invalid root last_snapshot, have %llu expect (0, %llu]",
9181 + btrfs_root_last_snapshot(&ri),
9182 + btrfs_super_generation(fs_info->super_copy) + 1);
9183 + return -EUCLEAN;
9184 + }
9185 +
9186 + /* Alignment and level check */
9187 + if (!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize)) {
9188 + generic_err(leaf, slot,
9189 + "invalid root bytenr, have %llu expect to be aligned to %u",
9190 + btrfs_root_bytenr(&ri), fs_info->sectorsize);
9191 + return -EUCLEAN;
9192 + }
9193 + if (btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL) {
9194 + generic_err(leaf, slot,
9195 + "invalid root level, have %u expect [0, %u]",
9196 + btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1);
9197 + return -EUCLEAN;
9198 + }
9199 + if (ri.drop_level >= BTRFS_MAX_LEVEL) {
9200 + generic_err(leaf, slot,
9201 + "invalid root level, have %u expect [0, %u]",
9202 + ri.drop_level, BTRFS_MAX_LEVEL - 1);
9203 + return -EUCLEAN;
9204 + }
9205 +
9206 + /* Flags check */
9207 + if (btrfs_root_flags(&ri) & ~valid_root_flags) {
9208 + generic_err(leaf, slot,
9209 + "invalid root flags, have 0x%llx expect mask 0x%llx",
9210 + btrfs_root_flags(&ri), valid_root_flags);
9211 + return -EUCLEAN;
9212 + }
9213 + return 0;
9214 +}
9215 +
9216 /*
9217 * Common point to switch the item-specific validation.
9218 */
9219 @@ -856,6 +945,9 @@ static int check_leaf_item(struct extent_buffer *leaf,
9220 case BTRFS_INODE_ITEM_KEY:
9221 ret = check_inode_item(leaf, key, slot);
9222 break;
9223 + case BTRFS_ROOT_ITEM_KEY:
9224 + ret = check_root_item(leaf, key, slot);
9225 + break;
9226 }
9227 return ret;
9228 }
9229 @@ -899,6 +991,12 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
9230 owner);
9231 return -EUCLEAN;
9232 }
9233 + /* Unknown tree */
9234 + if (owner == 0) {
9235 + generic_err(leaf, 0,
9236 + "invalid owner, root 0 is not defined");
9237 + return -EUCLEAN;
9238 + }
9239 return 0;
9240 }
9241
9242 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
9243 index a447d3ec48d5..e821a0e97cd8 100644
9244 --- a/fs/btrfs/volumes.c
9245 +++ b/fs/btrfs/volumes.c
9246 @@ -4072,7 +4072,13 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
9247 }
9248
9249 num_devices = btrfs_num_devices(fs_info);
9250 - allowed = 0;
9251 +
9252 + /*
9253 + * SINGLE profile on-disk has no profile bit, but in-memory we have a
9254 + * special bit for it, to make it easier to distinguish. Thus we need
9255 + * to set it manually, or balance would refuse the profile.
9256 + */
9257 + allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
9258 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
9259 if (num_devices >= btrfs_raid_array[i].devs_min)
9260 allowed |= btrfs_raid_array[i].bg_flag;
9261 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
9262 index 3289b566463f..64e33e7bff1e 100644
9263 --- a/fs/cifs/cifsfs.c
9264 +++ b/fs/cifs/cifsfs.c
9265 @@ -433,6 +433,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
9266 cifs_show_security(s, tcon->ses);
9267 cifs_show_cache_flavor(s, cifs_sb);
9268
9269 + if (tcon->no_lease)
9270 + seq_puts(s, ",nolease");
9271 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
9272 seq_puts(s, ",multiuser");
9273 else if (tcon->ses->user_name)
9274 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
9275 index fe610e7e3670..5ef5a16c01d2 100644
9276 --- a/fs/cifs/cifsglob.h
9277 +++ b/fs/cifs/cifsglob.h
9278 @@ -576,6 +576,7 @@ struct smb_vol {
9279 bool noblocksnd:1;
9280 bool noautotune:1;
9281 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
9282 + bool no_lease:1; /* disable requesting leases */
9283 bool fsc:1; /* enable fscache */
9284 bool mfsymlinks:1; /* use Minshall+French Symlinks */
9285 bool multiuser:1;
9286 @@ -1082,6 +1083,7 @@ struct cifs_tcon {
9287 bool need_reopen_files:1; /* need to reopen tcon file handles */
9288 bool use_resilient:1; /* use resilient instead of durable handles */
9289 bool use_persistent:1; /* use persistent instead of durable handles */
9290 + bool no_lease:1; /* Do not request leases on files or directories */
9291 __le32 capabilities;
9292 __u32 share_flags;
9293 __u32 maximal_access;
9294 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
9295 index 5299effa6f7d..8ee57d1f507f 100644
9296 --- a/fs/cifs/connect.c
9297 +++ b/fs/cifs/connect.c
9298 @@ -74,7 +74,7 @@ enum {
9299 Opt_user_xattr, Opt_nouser_xattr,
9300 Opt_forceuid, Opt_noforceuid,
9301 Opt_forcegid, Opt_noforcegid,
9302 - Opt_noblocksend, Opt_noautotune,
9303 + Opt_noblocksend, Opt_noautotune, Opt_nolease,
9304 Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
9305 Opt_mapposix, Opt_nomapposix,
9306 Opt_mapchars, Opt_nomapchars, Opt_sfu,
9307 @@ -134,6 +134,7 @@ static const match_table_t cifs_mount_option_tokens = {
9308 { Opt_noforcegid, "noforcegid" },
9309 { Opt_noblocksend, "noblocksend" },
9310 { Opt_noautotune, "noautotune" },
9311 + { Opt_nolease, "nolease" },
9312 { Opt_hard, "hard" },
9313 { Opt_soft, "soft" },
9314 { Opt_perm, "perm" },
9315 @@ -1713,6 +1714,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
9316 case Opt_noautotune:
9317 vol->noautotune = 1;
9318 break;
9319 + case Opt_nolease:
9320 + vol->no_lease = 1;
9321 + break;
9322 case Opt_hard:
9323 vol->retry = 1;
9324 break;
9325 @@ -3250,6 +3254,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
9326 return 0;
9327 if (tcon->handle_timeout != volume_info->handle_timeout)
9328 return 0;
9329 + if (tcon->no_lease != volume_info->no_lease)
9330 + return 0;
9331 return 1;
9332 }
9333
9334 @@ -3464,6 +3470,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
9335 tcon->nocase = volume_info->nocase;
9336 tcon->nohandlecache = volume_info->nohandlecache;
9337 tcon->local_lease = volume_info->local_lease;
9338 + tcon->no_lease = volume_info->no_lease;
9339 INIT_LIST_HEAD(&tcon->pending_opens);
9340
9341 spin_lock(&cifs_tcp_ses_lock);
9342 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
9343 index 64a5864127be..7e8e8826c26f 100644
9344 --- a/fs/cifs/smb2ops.c
9345 +++ b/fs/cifs/smb2ops.c
9346 @@ -656,6 +656,15 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
9347 return 0;
9348 }
9349
9350 + /*
9351 + * We do not hold the lock for the open because in case
9352 + * SMB2_open needs to reconnect, it will end up calling
9353 + * cifs_mark_open_files_invalid() which takes the lock again
9354 + * thus causing a deadlock
9355 + */
9356 +
9357 + mutex_unlock(&tcon->crfid.fid_mutex);
9358 +
9359 if (smb3_encryption_required(tcon))
9360 flags |= CIFS_TRANSFORM_REQ;
9361
9362 @@ -677,7 +686,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
9363
9364 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
9365 if (rc)
9366 - goto oshr_exit;
9367 + goto oshr_free;
9368 smb2_set_next_command(tcon, &rqst[0]);
9369
9370 memset(&qi_iov, 0, sizeof(qi_iov));
9371 @@ -690,18 +699,10 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
9372 sizeof(struct smb2_file_all_info) +
9373 PATH_MAX * 2, 0, NULL);
9374 if (rc)
9375 - goto oshr_exit;
9376 + goto oshr_free;
9377
9378 smb2_set_related(&rqst[1]);
9379
9380 - /*
9381 - * We do not hold the lock for the open because in case
9382 - * SMB2_open needs to reconnect, it will end up calling
9383 - * cifs_mark_open_files_invalid() which takes the lock again
9384 - * thus causing a deadlock
9385 - */
9386 -
9387 - mutex_unlock(&tcon->crfid.fid_mutex);
9388 rc = compound_send_recv(xid, ses, flags, 2, rqst,
9389 resp_buftype, rsp_iov);
9390 mutex_lock(&tcon->crfid.fid_mutex);
9391 @@ -742,6 +743,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
9392 if (rc)
9393 goto oshr_exit;
9394
9395 + atomic_inc(&tcon->num_remote_opens);
9396 +
9397 o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
9398 oparms.fid->persistent_fid = o_rsp->PersistentFileId;
9399 oparms.fid->volatile_fid = o_rsp->VolatileFileId;
9400 @@ -1167,6 +1170,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
9401
9402 rc = compound_send_recv(xid, ses, flags, 3, rqst,
9403 resp_buftype, rsp_iov);
9404 + /* no need to bump num_remote_opens because handle immediately closed */
9405
9406 sea_exit:
9407 kfree(ea);
9408 @@ -1488,6 +1492,8 @@ smb2_ioctl_query_info(const unsigned int xid,
9409 resp_buftype, rsp_iov);
9410 if (rc)
9411 goto iqinf_exit;
9412 +
9413 + /* No need to bump num_remote_opens since handle immediately closed */
9414 if (qi.flags & PASSTHRU_FSCTL) {
9415 pqi = (struct smb_query_info __user *)arg;
9416 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
9417 @@ -3295,6 +3301,11 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
9418 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
9419 return;
9420
9421 + /* Check if the server granted an oplock rather than a lease */
9422 + if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
9423 + return smb2_set_oplock_level(cinode, oplock, epoch,
9424 + purge_cache);
9425 +
9426 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
9427 new_oplock |= CIFS_CACHE_READ_FLG;
9428 strcat(message, "R");
9429 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
9430 index 31e4a1b0b170..0aa40129dfb5 100644
9431 --- a/fs/cifs/smb2pdu.c
9432 +++ b/fs/cifs/smb2pdu.c
9433 @@ -2351,6 +2351,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
9434 rqst.rq_iov = iov;
9435 rqst.rq_nvec = n_iov;
9436
9437 + /* no need to inc num_remote_opens because we close it just below */
9438 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
9439 FILE_WRITE_ATTRIBUTES);
9440 /* resource #4: response buffer */
9441 @@ -2458,7 +2459,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
9442 iov[1].iov_len = uni_path_len;
9443 iov[1].iov_base = path;
9444
9445 - if (!server->oplocks)
9446 + if ((!server->oplocks) || (tcon->no_lease))
9447 *oplock = SMB2_OPLOCK_LEVEL_NONE;
9448
9449 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
9450 diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
9451 index 9076150758d8..db4ba8f6077e 100644
9452 --- a/fs/cifs/xattr.c
9453 +++ b/fs/cifs/xattr.c
9454 @@ -31,7 +31,7 @@
9455 #include "cifs_fs_sb.h"
9456 #include "cifs_unicode.h"
9457
9458 -#define MAX_EA_VALUE_SIZE 65535
9459 +#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
9460 #define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
9461 #define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
9462 #define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
9463 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
9464 index 92266a2da7d6..f203bf989a4c 100644
9465 --- a/fs/ext4/extents.c
9466 +++ b/fs/ext4/extents.c
9467 @@ -3813,8 +3813,8 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
9468 * illegal.
9469 */
9470 if (ee_block != map->m_lblk || ee_len > map->m_len) {
9471 -#ifdef EXT4_DEBUG
9472 - ext4_warning("Inode (%ld) finished: extent logical block %llu,"
9473 +#ifdef CONFIG_EXT4_DEBUG
9474 + ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
9475 " len %u; IO logical block %llu, len %u",
9476 inode->i_ino, (unsigned long long)ee_block, ee_len,
9477 (unsigned long long)map->m_lblk, map->m_len);
9478 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
9479 index 006b7a2070bf..723b0d1a3881 100644
9480 --- a/fs/ext4/inode.c
9481 +++ b/fs/ext4/inode.c
9482 @@ -4297,6 +4297,15 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
9483
9484 trace_ext4_punch_hole(inode, offset, length, 0);
9485
9486 + ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
9487 + if (ext4_has_inline_data(inode)) {
9488 + down_write(&EXT4_I(inode)->i_mmap_sem);
9489 + ret = ext4_convert_inline_data(inode);
9490 + up_write(&EXT4_I(inode)->i_mmap_sem);
9491 + if (ret)
9492 + return ret;
9493 + }
9494 +
9495 /*
9496 * Write out all dirty pages to avoid race conditions
9497 * Then release them.
9498 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
9499 index ea8237513dfa..186468fba82e 100644
9500 --- a/fs/fuse/dev.c
9501 +++ b/fs/fuse/dev.c
9502 @@ -377,7 +377,7 @@ static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
9503 req->in.h.len = sizeof(struct fuse_in_header) +
9504 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
9505 list_add_tail(&req->list, &fiq->pending);
9506 - wake_up_locked(&fiq->waitq);
9507 + wake_up(&fiq->waitq);
9508 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
9509 }
9510
9511 @@ -389,16 +389,16 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
9512 forget->forget_one.nodeid = nodeid;
9513 forget->forget_one.nlookup = nlookup;
9514
9515 - spin_lock(&fiq->waitq.lock);
9516 + spin_lock(&fiq->lock);
9517 if (fiq->connected) {
9518 fiq->forget_list_tail->next = forget;
9519 fiq->forget_list_tail = forget;
9520 - wake_up_locked(&fiq->waitq);
9521 + wake_up(&fiq->waitq);
9522 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
9523 } else {
9524 kfree(forget);
9525 }
9526 - spin_unlock(&fiq->waitq.lock);
9527 + spin_unlock(&fiq->lock);
9528 }
9529
9530 static void flush_bg_queue(struct fuse_conn *fc)
9531 @@ -412,10 +412,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
9532 req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
9533 list_del(&req->list);
9534 fc->active_background++;
9535 - spin_lock(&fiq->waitq.lock);
9536 + spin_lock(&fiq->lock);
9537 req->in.h.unique = fuse_get_unique(fiq);
9538 queue_request(fiq, req);
9539 - spin_unlock(&fiq->waitq.lock);
9540 + spin_unlock(&fiq->lock);
9541 }
9542 }
9543
9544 @@ -439,9 +439,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
9545 * smp_mb() from queue_interrupt().
9546 */
9547 if (!list_empty(&req->intr_entry)) {
9548 - spin_lock(&fiq->waitq.lock);
9549 + spin_lock(&fiq->lock);
9550 list_del_init(&req->intr_entry);
9551 - spin_unlock(&fiq->waitq.lock);
9552 + spin_unlock(&fiq->lock);
9553 }
9554 WARN_ON(test_bit(FR_PENDING, &req->flags));
9555 WARN_ON(test_bit(FR_SENT, &req->flags));
9556 @@ -483,10 +483,10 @@ put_request:
9557
9558 static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
9559 {
9560 - spin_lock(&fiq->waitq.lock);
9561 + spin_lock(&fiq->lock);
9562 /* Check for we've sent request to interrupt this req */
9563 if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
9564 - spin_unlock(&fiq->waitq.lock);
9565 + spin_unlock(&fiq->lock);
9566 return -EINVAL;
9567 }
9568
9569 @@ -499,13 +499,13 @@ static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
9570 smp_mb();
9571 if (test_bit(FR_FINISHED, &req->flags)) {
9572 list_del_init(&req->intr_entry);
9573 - spin_unlock(&fiq->waitq.lock);
9574 + spin_unlock(&fiq->lock);
9575 return 0;
9576 }
9577 - wake_up_locked(&fiq->waitq);
9578 + wake_up(&fiq->waitq);
9579 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
9580 }
9581 - spin_unlock(&fiq->waitq.lock);
9582 + spin_unlock(&fiq->lock);
9583 return 0;
9584 }
9585
9586 @@ -535,16 +535,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
9587 if (!err)
9588 return;
9589
9590 - spin_lock(&fiq->waitq.lock);
9591 + spin_lock(&fiq->lock);
9592 /* Request is not yet in userspace, bail out */
9593 if (test_bit(FR_PENDING, &req->flags)) {
9594 list_del(&req->list);
9595 - spin_unlock(&fiq->waitq.lock);
9596 + spin_unlock(&fiq->lock);
9597 __fuse_put_request(req);
9598 req->out.h.error = -EINTR;
9599 return;
9600 }
9601 - spin_unlock(&fiq->waitq.lock);
9602 + spin_unlock(&fiq->lock);
9603 }
9604
9605 /*
9606 @@ -559,9 +559,9 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
9607 struct fuse_iqueue *fiq = &fc->iq;
9608
9609 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
9610 - spin_lock(&fiq->waitq.lock);
9611 + spin_lock(&fiq->lock);
9612 if (!fiq->connected) {
9613 - spin_unlock(&fiq->waitq.lock);
9614 + spin_unlock(&fiq->lock);
9615 req->out.h.error = -ENOTCONN;
9616 } else {
9617 req->in.h.unique = fuse_get_unique(fiq);
9618 @@ -569,7 +569,7 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
9619 /* acquire extra reference, since request is still needed
9620 after request_end() */
9621 __fuse_get_request(req);
9622 - spin_unlock(&fiq->waitq.lock);
9623 + spin_unlock(&fiq->lock);
9624
9625 request_wait_answer(fc, req);
9626 /* Pairs with smp_wmb() in request_end() */
9627 @@ -700,12 +700,12 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc,
9628
9629 __clear_bit(FR_ISREPLY, &req->flags);
9630 req->in.h.unique = unique;
9631 - spin_lock(&fiq->waitq.lock);
9632 + spin_lock(&fiq->lock);
9633 if (fiq->connected) {
9634 queue_request(fiq, req);
9635 err = 0;
9636 }
9637 - spin_unlock(&fiq->waitq.lock);
9638 + spin_unlock(&fiq->lock);
9639
9640 return err;
9641 }
9642 @@ -1149,12 +1149,12 @@ static int request_pending(struct fuse_iqueue *fiq)
9643 * Unlike other requests this is assembled on demand, without a need
9644 * to allocate a separate fuse_req structure.
9645 *
9646 - * Called with fiq->waitq.lock held, releases it
9647 + * Called with fiq->lock held, releases it
9648 */
9649 static int fuse_read_interrupt(struct fuse_iqueue *fiq,
9650 struct fuse_copy_state *cs,
9651 size_t nbytes, struct fuse_req *req)
9652 -__releases(fiq->waitq.lock)
9653 +__releases(fiq->lock)
9654 {
9655 struct fuse_in_header ih;
9656 struct fuse_interrupt_in arg;
9657 @@ -1169,7 +1169,7 @@ __releases(fiq->waitq.lock)
9658 ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
9659 arg.unique = req->in.h.unique;
9660
9661 - spin_unlock(&fiq->waitq.lock);
9662 + spin_unlock(&fiq->lock);
9663 if (nbytes < reqsize)
9664 return -EINVAL;
9665
9666 @@ -1206,7 +1206,7 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
9667 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
9668 struct fuse_copy_state *cs,
9669 size_t nbytes)
9670 -__releases(fiq->waitq.lock)
9671 +__releases(fiq->lock)
9672 {
9673 int err;
9674 struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
9675 @@ -1220,7 +1220,7 @@ __releases(fiq->waitq.lock)
9676 .len = sizeof(ih) + sizeof(arg),
9677 };
9678
9679 - spin_unlock(&fiq->waitq.lock);
9680 + spin_unlock(&fiq->lock);
9681 kfree(forget);
9682 if (nbytes < ih.len)
9683 return -EINVAL;
9684 @@ -1238,7 +1238,7 @@ __releases(fiq->waitq.lock)
9685
9686 static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
9687 struct fuse_copy_state *cs, size_t nbytes)
9688 -__releases(fiq->waitq.lock)
9689 +__releases(fiq->lock)
9690 {
9691 int err;
9692 unsigned max_forgets;
9693 @@ -1252,13 +1252,13 @@ __releases(fiq->waitq.lock)
9694 };
9695
9696 if (nbytes < ih.len) {
9697 - spin_unlock(&fiq->waitq.lock);
9698 + spin_unlock(&fiq->lock);
9699 return -EINVAL;
9700 }
9701
9702 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
9703 head = dequeue_forget(fiq, max_forgets, &count);
9704 - spin_unlock(&fiq->waitq.lock);
9705 + spin_unlock(&fiq->lock);
9706
9707 arg.count = count;
9708 ih.len += count * sizeof(struct fuse_forget_one);
9709 @@ -1288,7 +1288,7 @@ __releases(fiq->waitq.lock)
9710 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
9711 struct fuse_copy_state *cs,
9712 size_t nbytes)
9713 -__releases(fiq->waitq.lock)
9714 +__releases(fiq->lock)
9715 {
9716 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
9717 return fuse_read_single_forget(fiq, cs, nbytes);
9718 @@ -1318,16 +1318,19 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
9719 unsigned int hash;
9720
9721 restart:
9722 - spin_lock(&fiq->waitq.lock);
9723 - err = -EAGAIN;
9724 - if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
9725 - !request_pending(fiq))
9726 - goto err_unlock;
9727 + for (;;) {
9728 + spin_lock(&fiq->lock);
9729 + if (!fiq->connected || request_pending(fiq))
9730 + break;
9731 + spin_unlock(&fiq->lock);
9732
9733 - err = wait_event_interruptible_exclusive_locked(fiq->waitq,
9734 + if (file->f_flags & O_NONBLOCK)
9735 + return -EAGAIN;
9736 + err = wait_event_interruptible_exclusive(fiq->waitq,
9737 !fiq->connected || request_pending(fiq));
9738 - if (err)
9739 - goto err_unlock;
9740 + if (err)
9741 + return err;
9742 + }
9743
9744 if (!fiq->connected) {
9745 err = fc->aborted ? -ECONNABORTED : -ENODEV;
9746 @@ -1351,7 +1354,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
9747 req = list_entry(fiq->pending.next, struct fuse_req, list);
9748 clear_bit(FR_PENDING, &req->flags);
9749 list_del_init(&req->list);
9750 - spin_unlock(&fiq->waitq.lock);
9751 + spin_unlock(&fiq->lock);
9752
9753 in = &req->in;
9754 reqsize = in->h.len;
9755 @@ -1409,7 +1412,7 @@ out_end:
9756 return err;
9757
9758 err_unlock:
9759 - spin_unlock(&fiq->waitq.lock);
9760 + spin_unlock(&fiq->lock);
9761 return err;
9762 }
9763
9764 @@ -2121,12 +2124,12 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
9765 fiq = &fud->fc->iq;
9766 poll_wait(file, &fiq->waitq, wait);
9767
9768 - spin_lock(&fiq->waitq.lock);
9769 + spin_lock(&fiq->lock);
9770 if (!fiq->connected)
9771 mask = EPOLLERR;
9772 else if (request_pending(fiq))
9773 mask |= EPOLLIN | EPOLLRDNORM;
9774 - spin_unlock(&fiq->waitq.lock);
9775 + spin_unlock(&fiq->lock);
9776
9777 return mask;
9778 }
9779 @@ -2221,15 +2224,15 @@ void fuse_abort_conn(struct fuse_conn *fc)
9780 flush_bg_queue(fc);
9781 spin_unlock(&fc->bg_lock);
9782
9783 - spin_lock(&fiq->waitq.lock);
9784 + spin_lock(&fiq->lock);
9785 fiq->connected = 0;
9786 list_for_each_entry(req, &fiq->pending, list)
9787 clear_bit(FR_PENDING, &req->flags);
9788 list_splice_tail_init(&fiq->pending, &to_end);
9789 while (forget_pending(fiq))
9790 kfree(dequeue_forget(fiq, 1, NULL));
9791 - wake_up_all_locked(&fiq->waitq);
9792 - spin_unlock(&fiq->waitq.lock);
9793 + wake_up_all(&fiq->waitq);
9794 + spin_unlock(&fiq->lock);
9795 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
9796 end_polls(fc);
9797 wake_up_all(&fc->blocked_waitq);
9798 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
9799 index 5ae2828beb00..91c99724dee0 100644
9800 --- a/fs/fuse/file.c
9801 +++ b/fs/fuse/file.c
9802 @@ -1767,6 +1767,7 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
9803 WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
9804
9805 redirty_page_for_writepage(wbc, page);
9806 + unlock_page(page);
9807 return 0;
9808 }
9809
9810 diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
9811 index 24dbca777775..89bdc41e0d86 100644
9812 --- a/fs/fuse/fuse_i.h
9813 +++ b/fs/fuse/fuse_i.h
9814 @@ -450,6 +450,9 @@ struct fuse_iqueue {
9815 /** Connection established */
9816 unsigned connected;
9817
9818 + /** Lock protecting accesses to members of this structure */
9819 + spinlock_t lock;
9820 +
9821 /** Readers of the connection are waiting on this */
9822 wait_queue_head_t waitq;
9823
9824 diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
9825 index 4bb885b0f032..987877860c01 100644
9826 --- a/fs/fuse/inode.c
9827 +++ b/fs/fuse/inode.c
9828 @@ -582,6 +582,7 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
9829 static void fuse_iqueue_init(struct fuse_iqueue *fiq)
9830 {
9831 memset(fiq, 0, sizeof(struct fuse_iqueue));
9832 + spin_lock_init(&fiq->lock);
9833 init_waitqueue_head(&fiq->waitq);
9834 INIT_LIST_HEAD(&fiq->pending);
9835 INIT_LIST_HEAD(&fiq->interrupts);
9836 diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
9837 index 574d03f8a573..b2da3de6a78e 100644
9838 --- a/fs/fuse/readdir.c
9839 +++ b/fs/fuse/readdir.c
9840 @@ -372,11 +372,13 @@ static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff,
9841 for (;;) {
9842 struct fuse_dirent *dirent = addr + offset;
9843 unsigned int nbytes = size - offset;
9844 - size_t reclen = FUSE_DIRENT_SIZE(dirent);
9845 + size_t reclen;
9846
9847 if (nbytes < FUSE_NAME_OFFSET || !dirent->namelen)
9848 break;
9849
9850 + reclen = FUSE_DIRENT_SIZE(dirent); /* derefs ->namelen */
9851 +
9852 if (WARN_ON(dirent->namelen > FUSE_NAME_MAX))
9853 return FOUND_ERR;
9854 if (WARN_ON(reclen > nbytes))
9855 diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
9856 index 4f8b5fd6c81f..b7ba5e194965 100644
9857 --- a/fs/gfs2/bmap.c
9858 +++ b/fs/gfs2/bmap.c
9859 @@ -1680,6 +1680,7 @@ out_unlock:
9860 brelse(dibh);
9861 up_write(&ip->i_rw_mutex);
9862 gfs2_trans_end(sdp);
9863 + buf_in_tr = false;
9864 }
9865 gfs2_glock_dq_uninit(rd_gh);
9866 cond_resched();
9867 diff --git a/fs/io_uring.c b/fs/io_uring.c
9868 index cfb48bd088e1..06d048341fa4 100644
9869 --- a/fs/io_uring.c
9870 +++ b/fs/io_uring.c
9871 @@ -288,6 +288,7 @@ struct io_ring_ctx {
9872 struct sqe_submit {
9873 const struct io_uring_sqe *sqe;
9874 unsigned short index;
9875 + u32 sequence;
9876 bool has_user;
9877 bool needs_lock;
9878 bool needs_fixed_file;
9879 @@ -2040,7 +2041,7 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
9880
9881 if (flags & IOSQE_IO_DRAIN) {
9882 req->flags |= REQ_F_IO_DRAIN;
9883 - req->sequence = ctx->cached_sq_head - 1;
9884 + req->sequence = s->sequence;
9885 }
9886
9887 if (!io_op_needs_file(s->sqe))
9888 @@ -2247,6 +2248,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
9889 if (head < ctx->sq_entries) {
9890 s->index = head;
9891 s->sqe = &ctx->sq_sqes[head];
9892 + s->sequence = ctx->cached_sq_head;
9893 ctx->cached_sq_head++;
9894 return true;
9895 }
9896 diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
9897 index cb8ec1f65c03..73c9775215b3 100644
9898 --- a/fs/overlayfs/export.c
9899 +++ b/fs/overlayfs/export.c
9900 @@ -227,9 +227,8 @@ static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
9901 /* Encode an upper or lower file handle */
9902 fh = ovl_encode_real_fh(enc_lower ? ovl_dentry_lower(dentry) :
9903 ovl_dentry_upper(dentry), !enc_lower);
9904 - err = PTR_ERR(fh);
9905 if (IS_ERR(fh))
9906 - goto fail;
9907 + return PTR_ERR(fh);
9908
9909 err = -EOVERFLOW;
9910 if (fh->len > buflen)
9911 diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
9912 index 7663aeb85fa3..bc14781886bf 100644
9913 --- a/fs/overlayfs/inode.c
9914 +++ b/fs/overlayfs/inode.c
9915 @@ -383,7 +383,8 @@ static bool ovl_can_list(const char *s)
9916 return true;
9917
9918 /* Never list trusted.overlay, list other trusted for superuser only */
9919 - return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
9920 + return !ovl_is_private_xattr(s) &&
9921 + ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
9922 }
9923
9924 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
9925 diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
9926 index 28101bbc0b78..d952d5962e93 100644
9927 --- a/fs/xfs/xfs_file.c
9928 +++ b/fs/xfs/xfs_file.c
9929 @@ -28,6 +28,7 @@
9930 #include <linux/falloc.h>
9931 #include <linux/backing-dev.h>
9932 #include <linux/mman.h>
9933 +#include <linux/fadvise.h>
9934
9935 static const struct vm_operations_struct xfs_file_vm_ops;
9936
9937 @@ -933,6 +934,30 @@ out_unlock:
9938 return error;
9939 }
9940
9941 +STATIC int
9942 +xfs_file_fadvise(
9943 + struct file *file,
9944 + loff_t start,
9945 + loff_t end,
9946 + int advice)
9947 +{
9948 + struct xfs_inode *ip = XFS_I(file_inode(file));
9949 + int ret;
9950 + int lockflags = 0;
9951 +
9952 + /*
9953 + * Operations creating pages in page cache need protection from hole
9954 + * punching and similar ops
9955 + */
9956 + if (advice == POSIX_FADV_WILLNEED) {
9957 + lockflags = XFS_IOLOCK_SHARED;
9958 + xfs_ilock(ip, lockflags);
9959 + }
9960 + ret = generic_fadvise(file, start, end, advice);
9961 + if (lockflags)
9962 + xfs_iunlock(ip, lockflags);
9963 + return ret;
9964 +}
9965
9966 STATIC loff_t
9967 xfs_file_remap_range(
9968 @@ -1232,6 +1257,7 @@ const struct file_operations xfs_file_operations = {
9969 .fsync = xfs_file_fsync,
9970 .get_unmapped_area = thp_get_unmapped_area,
9971 .fallocate = xfs_file_fallocate,
9972 + .fadvise = xfs_file_fadvise,
9973 .remap_file_range = xfs_file_remap_range,
9974 };
9975
9976 diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
9977 index 3fa1fa59f9b2..ab25e69a15d1 100644
9978 --- a/include/linux/blk-mq.h
9979 +++ b/include/linux/blk-mq.h
9980 @@ -140,6 +140,7 @@ typedef int (poll_fn)(struct blk_mq_hw_ctx *);
9981 typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
9982 typedef bool (busy_fn)(struct request_queue *);
9983 typedef void (complete_fn)(struct request *);
9984 +typedef void (cleanup_rq_fn)(struct request *);
9985
9986
9987 struct blk_mq_ops {
9988 @@ -200,6 +201,12 @@ struct blk_mq_ops {
9989 /* Called from inside blk_get_request() */
9990 void (*initialize_rq_fn)(struct request *rq);
9991
9992 + /*
9993 + * Called before freeing one request which isn't completed yet,
9994 + * and usually for freeing the driver private data
9995 + */
9996 + cleanup_rq_fn *cleanup_rq;
9997 +
9998 /*
9999 * If set, returns whether or not this queue currently is busy
10000 */
10001 @@ -366,4 +373,10 @@ static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
10002 BLK_QC_T_INTERNAL;
10003 }
10004
10005 +static inline void blk_mq_cleanup_rq(struct request *rq)
10006 +{
10007 + if (rq->q->mq_ops->cleanup_rq)
10008 + rq->q->mq_ops->cleanup_rq(rq);
10009 +}
10010 +
10011 #endif
10012 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
10013 index 1ef375dafb1c..ae51050c5094 100644
10014 --- a/include/linux/blkdev.h
10015 +++ b/include/linux/blkdev.h
10016 @@ -202,9 +202,12 @@ struct request {
10017 #ifdef CONFIG_BLK_WBT
10018 unsigned short wbt_flags;
10019 #endif
10020 -#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
10021 - unsigned short throtl_size;
10022 -#endif
10023 + /*
10024 + * rq sectors used for blk stats. It has the same value
10025 + * with blk_rq_sectors(rq), except that it never be zeroed
10026 + * by completion.
10027 + */
10028 + unsigned short stats_sectors;
10029
10030 /*
10031 * Number of scatter-gather DMA addr+len pairs after
10032 @@ -903,6 +906,7 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
10033 * blk_rq_err_bytes() : bytes left till the next error boundary
10034 * blk_rq_sectors() : sectors left in the entire request
10035 * blk_rq_cur_sectors() : sectors left in the current segment
10036 + * blk_rq_stats_sectors() : sectors of the entire request used for stats
10037 */
10038 static inline sector_t blk_rq_pos(const struct request *rq)
10039 {
10040 @@ -931,6 +935,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
10041 return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
10042 }
10043
10044 +static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
10045 +{
10046 + return rq->stats_sectors;
10047 +}
10048 +
10049 #ifdef CONFIG_BLK_DEV_ZONED
10050 static inline unsigned int blk_rq_zone_no(struct request *rq)
10051 {
10052 diff --git a/include/linux/bug.h b/include/linux/bug.h
10053 index fe5916550da8..f639bd0122f3 100644
10054 --- a/include/linux/bug.h
10055 +++ b/include/linux/bug.h
10056 @@ -47,6 +47,11 @@ void generic_bug_clear_once(void);
10057
10058 #else /* !CONFIG_GENERIC_BUG */
10059
10060 +static inline void *find_bug(unsigned long bugaddr)
10061 +{
10062 + return NULL;
10063 +}
10064 +
10065 static inline enum bug_trap_type report_bug(unsigned long bug_addr,
10066 struct pt_regs *regs)
10067 {
10068 diff --git a/include/linux/fs.h b/include/linux/fs.h
10069 index 997a530ff4e9..bc1b40fb0db7 100644
10070 --- a/include/linux/fs.h
10071 +++ b/include/linux/fs.h
10072 @@ -3531,6 +3531,8 @@ extern void inode_nohighmem(struct inode *inode);
10073 /* mm/fadvise.c */
10074 extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
10075 int advice);
10076 +extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
10077 + int advice);
10078
10079 #if defined(CONFIG_IO_URING)
10080 extern struct sock *io_uring_get_socket(struct file *file);
10081 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
10082 index 4a351cb7f20f..cf87c673cbb8 100644
10083 --- a/include/linux/mmc/host.h
10084 +++ b/include/linux/mmc/host.h
10085 @@ -493,6 +493,15 @@ void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
10086
10087 void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);
10088
10089 +/*
10090 + * May be called from host driver's system/runtime suspend/resume callbacks,
10091 + * to know if SDIO IRQs has been claimed.
10092 + */
10093 +static inline bool sdio_irq_claimed(struct mmc_host *host)
10094 +{
10095 + return host->sdio_irqs > 0;
10096 +}
10097 +
10098 static inline void mmc_signal_sdio_irq(struct mmc_host *host)
10099 {
10100 host->ops->enable_sdio_irq(host, 0);
10101 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
10102 index c842735a4f45..4b97f427cc92 100644
10103 --- a/include/linux/pci_ids.h
10104 +++ b/include/linux/pci_ids.h
10105 @@ -548,6 +548,7 @@
10106 #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
10107 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
10108 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
10109 +#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
10110 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
10111 #define PCI_DEVICE_ID_AMD_LANCE 0x2000
10112 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
10113 diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
10114 index dc905a4ff8d7..185d94829701 100644
10115 --- a/include/linux/quotaops.h
10116 +++ b/include/linux/quotaops.h
10117 @@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
10118 /* i_mutex must being held */
10119 static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
10120 {
10121 - return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
10122 + return (ia->ia_valid & ATTR_SIZE) ||
10123 (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
10124 (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
10125 }
10126 diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
10127 index 13e108bcc9eb..d783e15ba898 100644
10128 --- a/include/linux/sunrpc/xprt.h
10129 +++ b/include/linux/sunrpc/xprt.h
10130 @@ -352,6 +352,7 @@ bool xprt_prepare_transmit(struct rpc_task *task);
10131 void xprt_request_enqueue_transmit(struct rpc_task *task);
10132 void xprt_request_enqueue_receive(struct rpc_task *task);
10133 void xprt_request_wait_receive(struct rpc_task *task);
10134 +void xprt_request_dequeue_xprt(struct rpc_task *task);
10135 bool xprt_request_need_retransmit(struct rpc_task *task);
10136 void xprt_transmit(struct rpc_task *task);
10137 void xprt_end_transmit(struct rpc_task *task);
10138 diff --git a/include/net/route.h b/include/net/route.h
10139 index dfce19c9fa96..6c516840380d 100644
10140 --- a/include/net/route.h
10141 +++ b/include/net/route.h
10142 @@ -53,10 +53,11 @@ struct rtable {
10143 unsigned int rt_flags;
10144 __u16 rt_type;
10145 __u8 rt_is_input;
10146 - u8 rt_gw_family;
10147 + __u8 rt_uses_gateway;
10148
10149 int rt_iif;
10150
10151 + u8 rt_gw_family;
10152 /* Info on neighbour */
10153 union {
10154 __be32 rt_gw4;
10155 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
10156 index df3008419a1d..cdb3ffab128b 100644
10157 --- a/kernel/jump_label.c
10158 +++ b/kernel/jump_label.c
10159 @@ -407,7 +407,9 @@ static bool jump_label_can_update(struct jump_entry *entry, bool init)
10160 return false;
10161
10162 if (!kernel_text_address(jump_entry_code(entry))) {
10163 - WARN_ONCE(1, "can't patch jump_label at %pS", (void *)jump_entry_code(entry));
10164 + WARN_ONCE(!jump_entry_is_init(entry),
10165 + "can't patch jump_label at %pS",
10166 + (void *)jump_entry_code(entry));
10167 return false;
10168 }
10169
10170 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
10171 index d9770a5393c8..ebe8315a756a 100644
10172 --- a/kernel/kprobes.c
10173 +++ b/kernel/kprobes.c
10174 @@ -1514,7 +1514,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
10175 /* Ensure it is not in reserved area nor out of text */
10176 if (!kernel_text_address((unsigned long) p->addr) ||
10177 within_kprobe_blacklist((unsigned long) p->addr) ||
10178 - jump_label_text_reserved(p->addr, p->addr)) {
10179 + jump_label_text_reserved(p->addr, p->addr) ||
10180 + find_bug((unsigned long)p->addr)) {
10181 ret = -EINVAL;
10182 goto out;
10183 }
10184 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
10185 index 1888f6a3b694..424abf802f02 100644
10186 --- a/kernel/printk/printk.c
10187 +++ b/kernel/printk/printk.c
10188 @@ -3274,7 +3274,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
10189 /* move first record forward until length fits into the buffer */
10190 seq = dumper->cur_seq;
10191 idx = dumper->cur_idx;
10192 - while (l > size && seq < dumper->next_seq) {
10193 + while (l >= size && seq < dumper->next_seq) {
10194 struct printk_log *msg = log_from_idx(idx);
10195
10196 l -= msg_print_text(msg, true, time, NULL, 0);
10197 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
10198 index a14e5fbbea46..5efdce756fdf 100644
10199 --- a/kernel/rcu/tree.c
10200 +++ b/kernel/rcu/tree.c
10201 @@ -3234,13 +3234,13 @@ static int __init rcu_spawn_gp_kthread(void)
10202 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
10203 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
10204 return 0;
10205 - rnp = rcu_get_root();
10206 - raw_spin_lock_irqsave_rcu_node(rnp, flags);
10207 - rcu_state.gp_kthread = t;
10208 if (kthread_prio) {
10209 sp.sched_priority = kthread_prio;
10210 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
10211 }
10212 + rnp = rcu_get_root();
10213 + raw_spin_lock_irqsave_rcu_node(rnp, flags);
10214 + rcu_state.gp_kthread = t;
10215 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
10216 wake_up_process(t);
10217 rcu_spawn_nocb_kthreads();
10218 diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
10219 index af7e7b9c86af..513b403b683b 100644
10220 --- a/kernel/rcu/tree_exp.h
10221 +++ b/kernel/rcu/tree_exp.h
10222 @@ -792,6 +792,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
10223 */
10224 void synchronize_rcu_expedited(void)
10225 {
10226 + bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
10227 struct rcu_exp_work rew;
10228 struct rcu_node *rnp;
10229 unsigned long s;
10230 @@ -817,7 +818,7 @@ void synchronize_rcu_expedited(void)
10231 return; /* Someone else did our work for us. */
10232
10233 /* Ensure that load happens before action based on it. */
10234 - if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
10235 + if (unlikely(boottime)) {
10236 /* Direct call during scheduler init and early_initcalls(). */
10237 rcu_exp_sel_wait_wake(s);
10238 } else {
10239 @@ -835,5 +836,8 @@ void synchronize_rcu_expedited(void)
10240
10241 /* Let the next expedited grace period start. */
10242 mutex_unlock(&rcu_state.exp_mutex);
10243 +
10244 + if (likely(!boottime))
10245 + destroy_work_on_stack(&rew.rew_work);
10246 }
10247 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
10248 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
10249 index df9f1fe5689b..d38f007afea7 100644
10250 --- a/kernel/sched/core.c
10251 +++ b/kernel/sched/core.c
10252 @@ -3486,8 +3486,36 @@ void scheduler_tick(void)
10253
10254 struct tick_work {
10255 int cpu;
10256 + atomic_t state;
10257 struct delayed_work work;
10258 };
10259 +/* Values for ->state, see diagram below. */
10260 +#define TICK_SCHED_REMOTE_OFFLINE 0
10261 +#define TICK_SCHED_REMOTE_OFFLINING 1
10262 +#define TICK_SCHED_REMOTE_RUNNING 2
10263 +
10264 +/*
10265 + * State diagram for ->state:
10266 + *
10267 + *
10268 + * TICK_SCHED_REMOTE_OFFLINE
10269 + * | ^
10270 + * | |
10271 + * | | sched_tick_remote()
10272 + * | |
10273 + * | |
10274 + * +--TICK_SCHED_REMOTE_OFFLINING
10275 + * | ^
10276 + * | |
10277 + * sched_tick_start() | | sched_tick_stop()
10278 + * | |
10279 + * V |
10280 + * TICK_SCHED_REMOTE_RUNNING
10281 + *
10282 + *
10283 + * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
10284 + * and sched_tick_start() are happy to leave the state in RUNNING.
10285 + */
10286
10287 static struct tick_work __percpu *tick_work_cpu;
10288
10289 @@ -3500,6 +3528,7 @@ static void sched_tick_remote(struct work_struct *work)
10290 struct task_struct *curr;
10291 struct rq_flags rf;
10292 u64 delta;
10293 + int os;
10294
10295 /*
10296 * Handle the tick only if it appears the remote CPU is running in full
10297 @@ -3513,7 +3542,7 @@ static void sched_tick_remote(struct work_struct *work)
10298
10299 rq_lock_irq(rq, &rf);
10300 curr = rq->curr;
10301 - if (is_idle_task(curr))
10302 + if (is_idle_task(curr) || cpu_is_offline(cpu))
10303 goto out_unlock;
10304
10305 update_rq_clock(rq);
10306 @@ -3533,13 +3562,18 @@ out_requeue:
10307 /*
10308 * Run the remote tick once per second (1Hz). This arbitrary
10309 * frequency is large enough to avoid overload but short enough
10310 - * to keep scheduler internal stats reasonably up to date.
10311 + * to keep scheduler internal stats reasonably up to date. But
10312 + * first update state to reflect hotplug activity if required.
10313 */
10314 - queue_delayed_work(system_unbound_wq, dwork, HZ);
10315 + os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
10316 + WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
10317 + if (os == TICK_SCHED_REMOTE_RUNNING)
10318 + queue_delayed_work(system_unbound_wq, dwork, HZ);
10319 }
10320
10321 static void sched_tick_start(int cpu)
10322 {
10323 + int os;
10324 struct tick_work *twork;
10325
10326 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
10327 @@ -3548,15 +3582,20 @@ static void sched_tick_start(int cpu)
10328 WARN_ON_ONCE(!tick_work_cpu);
10329
10330 twork = per_cpu_ptr(tick_work_cpu, cpu);
10331 - twork->cpu = cpu;
10332 - INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
10333 - queue_delayed_work(system_unbound_wq, &twork->work, HZ);
10334 + os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
10335 + WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
10336 + if (os == TICK_SCHED_REMOTE_OFFLINE) {
10337 + twork->cpu = cpu;
10338 + INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
10339 + queue_delayed_work(system_unbound_wq, &twork->work, HZ);
10340 + }
10341 }
10342
10343 #ifdef CONFIG_HOTPLUG_CPU
10344 static void sched_tick_stop(int cpu)
10345 {
10346 struct tick_work *twork;
10347 + int os;
10348
10349 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
10350 return;
10351 @@ -3564,7 +3603,10 @@ static void sched_tick_stop(int cpu)
10352 WARN_ON_ONCE(!tick_work_cpu);
10353
10354 twork = per_cpu_ptr(tick_work_cpu, cpu);
10355 - cancel_delayed_work_sync(&twork->work);
10356 + /* There cannot be competing actions, but don't rely on stop-machine. */
10357 + os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
10358 + WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
10359 + /* Don't cancel, as this would mess up the state machine. */
10360 }
10361 #endif /* CONFIG_HOTPLUG_CPU */
10362
10363 @@ -3572,7 +3614,6 @@ int __init sched_tick_offload_init(void)
10364 {
10365 tick_work_cpu = alloc_percpu(struct tick_work);
10366 BUG_ON(!tick_work_cpu);
10367 -
10368 return 0;
10369 }
10370
10371 @@ -6939,10 +6980,6 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
10372 #ifdef CONFIG_RT_GROUP_SCHED
10373 if (!sched_rt_can_attach(css_tg(css), task))
10374 return -EINVAL;
10375 -#else
10376 - /* We don't support RT-tasks being in separate groups */
10377 - if (task->sched_class != &fair_sched_class)
10378 - return -EINVAL;
10379 #endif
10380 /*
10381 * Serialize against wake_up_new_task() such that if its
10382 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
10383 index 867b4bb6d4be..b03ca2f73713 100644
10384 --- a/kernel/sched/cpufreq_schedutil.c
10385 +++ b/kernel/sched/cpufreq_schedutil.c
10386 @@ -117,6 +117,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
10387 unsigned int next_freq)
10388 {
10389 struct cpufreq_policy *policy = sg_policy->policy;
10390 + int cpu;
10391
10392 if (!sugov_update_next_freq(sg_policy, time, next_freq))
10393 return;
10394 @@ -126,7 +127,11 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
10395 return;
10396
10397 policy->cur = next_freq;
10398 - trace_cpu_frequency(next_freq, smp_processor_id());
10399 +
10400 + if (trace_cpu_frequency_enabled()) {
10401 + for_each_cpu(cpu, policy->cpus)
10402 + trace_cpu_frequency(next_freq, cpu);
10403 + }
10404 }
10405
10406 static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
10407 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
10408 index 46122edd8552..20951112b6cd 100644
10409 --- a/kernel/sched/deadline.c
10410 +++ b/kernel/sched/deadline.c
10411 @@ -529,6 +529,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
10412 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
10413 {
10414 struct rq *later_rq = NULL;
10415 + struct dl_bw *dl_b;
10416
10417 later_rq = find_lock_later_rq(p, rq);
10418 if (!later_rq) {
10419 @@ -557,6 +558,38 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
10420 double_lock_balance(rq, later_rq);
10421 }
10422
10423 + if (p->dl.dl_non_contending || p->dl.dl_throttled) {
10424 + /*
10425 + * Inactive timer is armed (or callback is running, but
10426 + * waiting for us to release rq locks). In any case, when it
10427 + * will fire (or continue), it will see running_bw of this
10428 + * task migrated to later_rq (and correctly handle it).
10429 + */
10430 + sub_running_bw(&p->dl, &rq->dl);
10431 + sub_rq_bw(&p->dl, &rq->dl);
10432 +
10433 + add_rq_bw(&p->dl, &later_rq->dl);
10434 + add_running_bw(&p->dl, &later_rq->dl);
10435 + } else {
10436 + sub_rq_bw(&p->dl, &rq->dl);
10437 + add_rq_bw(&p->dl, &later_rq->dl);
10438 + }
10439 +
10440 + /*
10441 + * And we finally need to fixup root_domain(s) bandwidth accounting,
10442 + * since p is still hanging out in the old (now moved to default) root
10443 + * domain.
10444 + */
10445 + dl_b = &rq->rd->dl_bw;
10446 + raw_spin_lock(&dl_b->lock);
10447 + __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
10448 + raw_spin_unlock(&dl_b->lock);
10449 +
10450 + dl_b = &later_rq->rd->dl_bw;
10451 + raw_spin_lock(&dl_b->lock);
10452 + __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
10453 + raw_spin_unlock(&dl_b->lock);
10454 +
10455 set_task_cpu(p, later_rq->cpu);
10456 double_unlock_balance(later_rq, rq);
10457
10458 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
10459 index 500f5db0de0b..86cfc5d5129c 100644
10460 --- a/kernel/sched/fair.c
10461 +++ b/kernel/sched/fair.c
10462 @@ -9052,9 +9052,10 @@ more_balance:
10463 out_balanced:
10464 /*
10465 * We reach balance although we may have faced some affinity
10466 - * constraints. Clear the imbalance flag if it was set.
10467 + * constraints. Clear the imbalance flag only if other tasks got
10468 + * a chance to move and fix the imbalance.
10469 */
10470 - if (sd_parent) {
10471 + if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
10472 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
10473
10474 if (*group_imbalance)
10475 @@ -10300,18 +10301,18 @@ err:
10476 void online_fair_sched_group(struct task_group *tg)
10477 {
10478 struct sched_entity *se;
10479 + struct rq_flags rf;
10480 struct rq *rq;
10481 int i;
10482
10483 for_each_possible_cpu(i) {
10484 rq = cpu_rq(i);
10485 se = tg->se[i];
10486 -
10487 - raw_spin_lock_irq(&rq->lock);
10488 + rq_lock_irq(rq, &rf);
10489 update_rq_clock(rq);
10490 attach_entity_cfs_rq(se);
10491 sync_throttle(tg, i);
10492 - raw_spin_unlock_irq(&rq->lock);
10493 + rq_unlock_irq(rq, &rf);
10494 }
10495 }
10496
10497 diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
10498 index 80940939b733..e4bc4aa739b8 100644
10499 --- a/kernel/sched/idle.c
10500 +++ b/kernel/sched/idle.c
10501 @@ -241,13 +241,14 @@ static void do_idle(void)
10502 check_pgt_cache();
10503 rmb();
10504
10505 + local_irq_disable();
10506 +
10507 if (cpu_is_offline(cpu)) {
10508 - tick_nohz_idle_stop_tick_protected();
10509 + tick_nohz_idle_stop_tick();
10510 cpuhp_report_idle_dead();
10511 arch_cpu_idle_dead();
10512 }
10513
10514 - local_irq_disable();
10515 arch_cpu_idle_enter();
10516
10517 /*
10518 diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
10519 index 6e52b67b420e..517e3719027e 100644
10520 --- a/kernel/sched/psi.c
10521 +++ b/kernel/sched/psi.c
10522 @@ -1198,7 +1198,7 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
10523 if (static_branch_likely(&psi_disabled))
10524 return -EOPNOTSUPP;
10525
10526 - buf_size = min(nbytes, (sizeof(buf) - 1));
10527 + buf_size = min(nbytes, sizeof(buf));
10528 if (copy_from_user(buf, user_buf, buf_size))
10529 return -EFAULT;
10530
10531 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
10532 index 57518efc3810..b7d75a9e8ccf 100644
10533 --- a/kernel/time/alarmtimer.c
10534 +++ b/kernel/time/alarmtimer.c
10535 @@ -672,7 +672,7 @@ static int alarm_timer_create(struct k_itimer *new_timer)
10536 enum alarmtimer_type type;
10537
10538 if (!alarmtimer_get_rtcdev())
10539 - return -ENOTSUPP;
10540 + return -EOPNOTSUPP;
10541
10542 if (!capable(CAP_WAKE_ALARM))
10543 return -EPERM;
10544 @@ -790,7 +790,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
10545 int ret = 0;
10546
10547 if (!alarmtimer_get_rtcdev())
10548 - return -ENOTSUPP;
10549 + return -EOPNOTSUPP;
10550
10551 if (flags & ~TIMER_ABSTIME)
10552 return -EINVAL;
10553 diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
10554 index 0a426f4e3125..5bbad147a90c 100644
10555 --- a/kernel/time/posix-cpu-timers.c
10556 +++ b/kernel/time/posix-cpu-timers.c
10557 @@ -375,7 +375,8 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
10558 struct sighand_struct *sighand;
10559 struct task_struct *p = timer->it.cpu.task;
10560
10561 - WARN_ON_ONCE(p == NULL);
10562 + if (WARN_ON_ONCE(!p))
10563 + return -EINVAL;
10564
10565 /*
10566 * Protect against sighand release/switch in exit/exec and process/
10567 @@ -580,7 +581,8 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
10568 u64 old_expires, new_expires, old_incr, val;
10569 int ret;
10570
10571 - WARN_ON_ONCE(p == NULL);
10572 + if (WARN_ON_ONCE(!p))
10573 + return -EINVAL;
10574
10575 /*
10576 * Use the to_ktime conversion because that clamps the maximum
10577 @@ -715,10 +717,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
10578
10579 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
10580 {
10581 - u64 now;
10582 struct task_struct *p = timer->it.cpu.task;
10583 + u64 now;
10584
10585 - WARN_ON_ONCE(p == NULL);
10586 + if (WARN_ON_ONCE(!p))
10587 + return;
10588
10589 /*
10590 * Easy part: convert the reload time.
10591 @@ -1000,12 +1003,13 @@ static void check_process_timers(struct task_struct *tsk,
10592 */
10593 static void posix_cpu_timer_rearm(struct k_itimer *timer)
10594 {
10595 + struct task_struct *p = timer->it.cpu.task;
10596 struct sighand_struct *sighand;
10597 unsigned long flags;
10598 - struct task_struct *p = timer->it.cpu.task;
10599 u64 now;
10600
10601 - WARN_ON_ONCE(p == NULL);
10602 + if (WARN_ON_ONCE(!p))
10603 + return;
10604
10605 /*
10606 * Fetch the current sample and update the timer's expiry time.
10607 @@ -1202,7 +1206,9 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
10608 u64 now;
10609 int ret;
10610
10611 - WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
10612 + if (WARN_ON_ONCE(clock_idx >= CPUCLOCK_SCHED))
10613 + return;
10614 +
10615 ret = cpu_timer_sample_group(clock_idx, tsk, &now);
10616
10617 if (oldval && ret != -EINVAL) {
10618 diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
10619 index ba16c08e8cb9..717c940112f9 100644
10620 --- a/lib/lzo/lzo1x_compress.c
10621 +++ b/lib/lzo/lzo1x_compress.c
10622 @@ -83,17 +83,19 @@ next:
10623 ALIGN((uintptr_t)ir, 4)) &&
10624 (ir < limit) && (*ir == 0))
10625 ir++;
10626 - for (; (ir + 4) <= limit; ir += 4) {
10627 - dv = *((u32 *)ir);
10628 - if (dv) {
10629 + if (IS_ALIGNED((uintptr_t)ir, 4)) {
10630 + for (; (ir + 4) <= limit; ir += 4) {
10631 + dv = *((u32 *)ir);
10632 + if (dv) {
10633 # if defined(__LITTLE_ENDIAN)
10634 - ir += __builtin_ctz(dv) >> 3;
10635 + ir += __builtin_ctz(dv) >> 3;
10636 # elif defined(__BIG_ENDIAN)
10637 - ir += __builtin_clz(dv) >> 3;
10638 + ir += __builtin_clz(dv) >> 3;
10639 # else
10640 # error "missing endian definition"
10641 # endif
10642 - break;
10643 + break;
10644 + }
10645 }
10646 }
10647 #endif
10648 diff --git a/mm/compaction.c b/mm/compaction.c
10649 index 952dc2fb24e5..1e994920e6ff 100644
10650 --- a/mm/compaction.c
10651 +++ b/mm/compaction.c
10652 @@ -2078,6 +2078,17 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
10653 const bool sync = cc->mode != MIGRATE_ASYNC;
10654 bool update_cached;
10655
10656 + /*
10657 + * These counters track activities during zone compaction. Initialize
10658 + * them before compacting a new zone.
10659 + */
10660 + cc->total_migrate_scanned = 0;
10661 + cc->total_free_scanned = 0;
10662 + cc->nr_migratepages = 0;
10663 + cc->nr_freepages = 0;
10664 + INIT_LIST_HEAD(&cc->freepages);
10665 + INIT_LIST_HEAD(&cc->migratepages);
10666 +
10667 cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
10668 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
10669 cc->classzone_idx);
10670 @@ -2281,10 +2292,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
10671 {
10672 enum compact_result ret;
10673 struct compact_control cc = {
10674 - .nr_freepages = 0,
10675 - .nr_migratepages = 0,
10676 - .total_migrate_scanned = 0,
10677 - .total_free_scanned = 0,
10678 .order = order,
10679 .search_order = order,
10680 .gfp_mask = gfp_mask,
10681 @@ -2305,8 +2312,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
10682
10683 if (capture)
10684 current->capture_control = &capc;
10685 - INIT_LIST_HEAD(&cc.freepages);
10686 - INIT_LIST_HEAD(&cc.migratepages);
10687
10688 ret = compact_zone(&cc, &capc);
10689
10690 @@ -2408,8 +2413,6 @@ static void compact_node(int nid)
10691 struct zone *zone;
10692 struct compact_control cc = {
10693 .order = -1,
10694 - .total_migrate_scanned = 0,
10695 - .total_free_scanned = 0,
10696 .mode = MIGRATE_SYNC,
10697 .ignore_skip_hint = true,
10698 .whole_zone = true,
10699 @@ -2423,11 +2426,7 @@ static void compact_node(int nid)
10700 if (!populated_zone(zone))
10701 continue;
10702
10703 - cc.nr_freepages = 0;
10704 - cc.nr_migratepages = 0;
10705 cc.zone = zone;
10706 - INIT_LIST_HEAD(&cc.freepages);
10707 - INIT_LIST_HEAD(&cc.migratepages);
10708
10709 compact_zone(&cc, NULL);
10710
10711 @@ -2529,8 +2528,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
10712 struct compact_control cc = {
10713 .order = pgdat->kcompactd_max_order,
10714 .search_order = pgdat->kcompactd_max_order,
10715 - .total_migrate_scanned = 0,
10716 - .total_free_scanned = 0,
10717 .classzone_idx = pgdat->kcompactd_classzone_idx,
10718 .mode = MIGRATE_SYNC_LIGHT,
10719 .ignore_skip_hint = false,
10720 @@ -2554,16 +2551,10 @@ static void kcompactd_do_work(pg_data_t *pgdat)
10721 COMPACT_CONTINUE)
10722 continue;
10723
10724 - cc.nr_freepages = 0;
10725 - cc.nr_migratepages = 0;
10726 - cc.total_migrate_scanned = 0;
10727 - cc.total_free_scanned = 0;
10728 - cc.zone = zone;
10729 - INIT_LIST_HEAD(&cc.freepages);
10730 - INIT_LIST_HEAD(&cc.migratepages);
10731 -
10732 if (kthread_should_stop())
10733 return;
10734 +
10735 + cc.zone = zone;
10736 status = compact_zone(&cc, NULL);
10737
10738 if (status == COMPACT_SUCCESS) {
10739 diff --git a/mm/fadvise.c b/mm/fadvise.c
10740 index 467bcd032037..4f17c83db575 100644
10741 --- a/mm/fadvise.c
10742 +++ b/mm/fadvise.c
10743 @@ -27,8 +27,7 @@
10744 * deactivate the pages and clear PG_Referenced.
10745 */
10746
10747 -static int generic_fadvise(struct file *file, loff_t offset, loff_t len,
10748 - int advice)
10749 +int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
10750 {
10751 struct inode *inode;
10752 struct address_space *mapping;
10753 @@ -178,6 +177,7 @@ static int generic_fadvise(struct file *file, loff_t offset, loff_t len,
10754 }
10755 return 0;
10756 }
10757 +EXPORT_SYMBOL(generic_fadvise);
10758
10759 int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
10760 {
10761 diff --git a/mm/madvise.c b/mm/madvise.c
10762 index 968df3aa069f..bac973b9f2cc 100644
10763 --- a/mm/madvise.c
10764 +++ b/mm/madvise.c
10765 @@ -14,6 +14,7 @@
10766 #include <linux/userfaultfd_k.h>
10767 #include <linux/hugetlb.h>
10768 #include <linux/falloc.h>
10769 +#include <linux/fadvise.h>
10770 #include <linux/sched.h>
10771 #include <linux/ksm.h>
10772 #include <linux/fs.h>
10773 @@ -275,6 +276,7 @@ static long madvise_willneed(struct vm_area_struct *vma,
10774 unsigned long start, unsigned long end)
10775 {
10776 struct file *file = vma->vm_file;
10777 + loff_t offset;
10778
10779 *prev = vma;
10780 #ifdef CONFIG_SWAP
10781 @@ -298,12 +300,20 @@ static long madvise_willneed(struct vm_area_struct *vma,
10782 return 0;
10783 }
10784
10785 - start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
10786 - if (end > vma->vm_end)
10787 - end = vma->vm_end;
10788 - end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
10789 -
10790 - force_page_cache_readahead(file->f_mapping, file, start, end - start);
10791 + /*
10792 + * Filesystem's fadvise may need to take various locks. We need to
10793 + * explicitly grab a reference because the vma (and hence the
10794 + * vma's reference to the file) can go away as soon as we drop
10795 + * mmap_sem.
10796 + */
10797 + *prev = NULL; /* tell sys_madvise we drop mmap_sem */
10798 + get_file(file);
10799 + up_read(&current->mm->mmap_sem);
10800 + offset = (loff_t)(start - vma->vm_start)
10801 + + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
10802 + vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
10803 + fput(file);
10804 + down_read(&current->mm->mmap_sem);
10805 return 0;
10806 }
10807
10808 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
10809 index 9ec5e12486a7..e18108b2b786 100644
10810 --- a/mm/memcontrol.c
10811 +++ b/mm/memcontrol.c
10812 @@ -2821,6 +2821,16 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
10813
10814 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
10815 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
10816 +
10817 + /*
10818 + * Enforce __GFP_NOFAIL allocation because callers are not
10819 + * prepared to see failures and likely do not have any failure
10820 + * handling code.
10821 + */
10822 + if (gfp & __GFP_NOFAIL) {
10823 + page_counter_charge(&memcg->kmem, nr_pages);
10824 + return 0;
10825 + }
10826 cancel_charge(memcg, nr_pages);
10827 return -ENOMEM;
10828 }
10829 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
10830 index eda2e2a0bdc6..26804abe99d6 100644
10831 --- a/mm/oom_kill.c
10832 +++ b/mm/oom_kill.c
10833 @@ -1068,9 +1068,10 @@ bool out_of_memory(struct oom_control *oc)
10834 * The OOM killer does not compensate for IO-less reclaim.
10835 * pagefault_out_of_memory lost its gfp context so we have to
10836 * make sure exclude 0 mask - all other users should have at least
10837 - * ___GFP_DIRECT_RECLAIM to get here.
10838 + * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
10839 + * invoke the OOM killer even if it is a GFP_NOFS allocation.
10840 */
10841 - if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
10842 + if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
10843 return true;
10844
10845 /*
10846 diff --git a/mm/z3fold.c b/mm/z3fold.c
10847 index ed19d98c9dcd..05bdf90646e7 100644
10848 --- a/mm/z3fold.c
10849 +++ b/mm/z3fold.c
10850 @@ -295,14 +295,11 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
10851 }
10852
10853 /* Initializes the z3fold header of a newly allocated z3fold page */
10854 -static struct z3fold_header *init_z3fold_page(struct page *page,
10855 +static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
10856 struct z3fold_pool *pool, gfp_t gfp)
10857 {
10858 struct z3fold_header *zhdr = page_address(page);
10859 - struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);
10860 -
10861 - if (!slots)
10862 - return NULL;
10863 + struct z3fold_buddy_slots *slots;
10864
10865 INIT_LIST_HEAD(&page->lru);
10866 clear_bit(PAGE_HEADLESS, &page->private);
10867 @@ -310,6 +307,12 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
10868 clear_bit(NEEDS_COMPACTING, &page->private);
10869 clear_bit(PAGE_STALE, &page->private);
10870 clear_bit(PAGE_CLAIMED, &page->private);
10871 + if (headless)
10872 + return zhdr;
10873 +
10874 + slots = alloc_slots(pool, gfp);
10875 + if (!slots)
10876 + return NULL;
10877
10878 spin_lock_init(&zhdr->page_lock);
10879 kref_init(&zhdr->refcount);
10880 @@ -366,9 +369,10 @@ static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
10881 * Encodes the handle of a particular buddy within a z3fold page
10882 * Pool lock should be held as this function accesses first_num
10883 */
10884 -static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
10885 +static unsigned long __encode_handle(struct z3fold_header *zhdr,
10886 + struct z3fold_buddy_slots *slots,
10887 + enum buddy bud)
10888 {
10889 - struct z3fold_buddy_slots *slots;
10890 unsigned long h = (unsigned long)zhdr;
10891 int idx = 0;
10892
10893 @@ -385,11 +389,15 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
10894 if (bud == LAST)
10895 h |= (zhdr->last_chunks << BUDDY_SHIFT);
10896
10897 - slots = zhdr->slots;
10898 slots->slot[idx] = h;
10899 return (unsigned long)&slots->slot[idx];
10900 }
10901
10902 +static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
10903 +{
10904 + return __encode_handle(zhdr, zhdr->slots, bud);
10905 +}
10906 +
10907 /* Returns the z3fold page where a given handle is stored */
10908 static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
10909 {
10910 @@ -624,6 +632,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
10911 }
10912
10913 if (unlikely(PageIsolated(page) ||
10914 + test_bit(PAGE_CLAIMED, &page->private) ||
10915 test_bit(PAGE_STALE, &page->private))) {
10916 z3fold_page_unlock(zhdr);
10917 return;
10918 @@ -924,7 +933,7 @@ retry:
10919 if (!page)
10920 return -ENOMEM;
10921
10922 - zhdr = init_z3fold_page(page, pool, gfp);
10923 + zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
10924 if (!zhdr) {
10925 __free_page(page);
10926 return -ENOMEM;
10927 @@ -1100,6 +1109,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
10928 struct z3fold_header *zhdr = NULL;
10929 struct page *page = NULL;
10930 struct list_head *pos;
10931 + struct z3fold_buddy_slots slots;
10932 unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
10933
10934 spin_lock(&pool->lock);
10935 @@ -1118,16 +1128,22 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
10936 /* this bit could have been set by free, in which case
10937 * we pass over to the next page in the pool.
10938 */
10939 - if (test_and_set_bit(PAGE_CLAIMED, &page->private))
10940 + if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
10941 + page = NULL;
10942 continue;
10943 + }
10944
10945 - if (unlikely(PageIsolated(page)))
10946 + if (unlikely(PageIsolated(page))) {
10947 + clear_bit(PAGE_CLAIMED, &page->private);
10948 + page = NULL;
10949 continue;
10950 + }
10951 + zhdr = page_address(page);
10952 if (test_bit(PAGE_HEADLESS, &page->private))
10953 break;
10954
10955 - zhdr = page_address(page);
10956 if (!z3fold_page_trylock(zhdr)) {
10957 + clear_bit(PAGE_CLAIMED, &page->private);
10958 zhdr = NULL;
10959 continue; /* can't evict at this point */
10960 }
10961 @@ -1145,26 +1161,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
10962
10963 if (!test_bit(PAGE_HEADLESS, &page->private)) {
10964 /*
10965 - * We need encode the handles before unlocking, since
10966 - * we can race with free that will set
10967 - * (first|last)_chunks to 0
10968 + * We need encode the handles before unlocking, and
10969 + * use our local slots structure because z3fold_free
10970 + * can zero out zhdr->slots and we can't do much
10971 + * about that
10972 */
10973 first_handle = 0;
10974 last_handle = 0;
10975 middle_handle = 0;
10976 if (zhdr->first_chunks)
10977 - first_handle = encode_handle(zhdr, FIRST);
10978 + first_handle = __encode_handle(zhdr, &slots,
10979 + FIRST);
10980 if (zhdr->middle_chunks)
10981 - middle_handle = encode_handle(zhdr, MIDDLE);
10982 + middle_handle = __encode_handle(zhdr, &slots,
10983 + MIDDLE);
10984 if (zhdr->last_chunks)
10985 - last_handle = encode_handle(zhdr, LAST);
10986 + last_handle = __encode_handle(zhdr, &slots,
10987 + LAST);
10988 /*
10989 * it's safe to unlock here because we hold a
10990 * reference to this page
10991 */
10992 z3fold_page_unlock(zhdr);
10993 } else {
10994 - first_handle = encode_handle(zhdr, HEADLESS);
10995 + first_handle = __encode_handle(zhdr, &slots, HEADLESS);
10996 last_handle = middle_handle = 0;
10997 }
10998
10999 @@ -1194,9 +1214,9 @@ next:
11000 spin_lock(&pool->lock);
11001 list_add(&page->lru, &pool->lru);
11002 spin_unlock(&pool->lock);
11003 + clear_bit(PAGE_CLAIMED, &page->private);
11004 } else {
11005 z3fold_page_lock(zhdr);
11006 - clear_bit(PAGE_CLAIMED, &page->private);
11007 if (kref_put(&zhdr->refcount,
11008 release_z3fold_page_locked)) {
11009 atomic64_dec(&pool->pages_nr);
11010 @@ -1211,6 +1231,7 @@ next:
11011 list_add(&page->lru, &pool->lru);
11012 spin_unlock(&pool->lock);
11013 z3fold_page_unlock(zhdr);
11014 + clear_bit(PAGE_CLAIMED, &page->private);
11015 }
11016
11017 /* We started off locked to we need to lock the pool back */
11018 @@ -1315,7 +1336,8 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
11019 VM_BUG_ON_PAGE(!PageMovable(page), page);
11020 VM_BUG_ON_PAGE(PageIsolated(page), page);
11021
11022 - if (test_bit(PAGE_HEADLESS, &page->private))
11023 + if (test_bit(PAGE_HEADLESS, &page->private) ||
11024 + test_bit(PAGE_CLAIMED, &page->private))
11025 return false;
11026
11027 zhdr = page_address(page);
11028 diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
11029 index a8cb6b2e20c1..5a203acdcae5 100644
11030 --- a/net/appletalk/ddp.c
11031 +++ b/net/appletalk/ddp.c
11032 @@ -1023,6 +1023,11 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol,
11033 */
11034 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
11035 goto out;
11036 +
11037 + rc = -EPERM;
11038 + if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
11039 + goto out;
11040 +
11041 rc = -ENOMEM;
11042 sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern);
11043 if (!sk)
11044 diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
11045 index ca5207767dc2..bb222b882b67 100644
11046 --- a/net/ax25/af_ax25.c
11047 +++ b/net/ax25/af_ax25.c
11048 @@ -855,6 +855,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
11049 break;
11050
11051 case SOCK_RAW:
11052 + if (!capable(CAP_NET_RAW))
11053 + return -EPERM;
11054 break;
11055 default:
11056 return -ESOCKTNOSUPPORT;
11057 diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
11058 index badc5cfe4dc6..d93d4531aa9b 100644
11059 --- a/net/ieee802154/socket.c
11060 +++ b/net/ieee802154/socket.c
11061 @@ -1008,6 +1008,9 @@ static int ieee802154_create(struct net *net, struct socket *sock,
11062
11063 switch (sock->type) {
11064 case SOCK_RAW:
11065 + rc = -EPERM;
11066 + if (!capable(CAP_NET_RAW))
11067 + goto out;
11068 proto = &ieee802154_raw_prot;
11069 ops = &ieee802154_raw_ops;
11070 break;
11071 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
11072 index f5c163d4771b..a9183543ca30 100644
11073 --- a/net/ipv4/inet_connection_sock.c
11074 +++ b/net/ipv4/inet_connection_sock.c
11075 @@ -560,7 +560,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
11076 rt = ip_route_output_flow(net, fl4, sk);
11077 if (IS_ERR(rt))
11078 goto no_route;
11079 - if (opt && opt->opt.is_strictroute && rt->rt_gw_family)
11080 + if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
11081 goto route_err;
11082 rcu_read_unlock();
11083 return &rt->dst;
11084 @@ -598,7 +598,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
11085 rt = ip_route_output_flow(net, fl4, sk);
11086 if (IS_ERR(rt))
11087 goto no_route;
11088 - if (opt && opt->opt.is_strictroute && rt->rt_gw_family)
11089 + if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
11090 goto route_err;
11091 return &rt->dst;
11092
11093 diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
11094 index 06f6f280b9ff..00ec819f949b 100644
11095 --- a/net/ipv4/ip_forward.c
11096 +++ b/net/ipv4/ip_forward.c
11097 @@ -123,7 +123,7 @@ int ip_forward(struct sk_buff *skb)
11098
11099 rt = skb_rtable(skb);
11100
11101 - if (opt->is_strictroute && rt->rt_gw_family)
11102 + if (opt->is_strictroute && rt->rt_uses_gateway)
11103 goto sr_failed;
11104
11105 IPCB(skb)->flags |= IPSKB_FORWARDED;
11106 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
11107 index cc7ef0d05bbd..da521790cd63 100644
11108 --- a/net/ipv4/ip_output.c
11109 +++ b/net/ipv4/ip_output.c
11110 @@ -499,7 +499,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
11111 skb_dst_set_noref(skb, &rt->dst);
11112
11113 packet_routed:
11114 - if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gw_family)
11115 + if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
11116 goto no_route;
11117
11118 /* OK, we know where to send it, allocate and build IP header. */
11119 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
11120 index b6a6f18c3dd1..7dcce724c78b 100644
11121 --- a/net/ipv4/route.c
11122 +++ b/net/ipv4/route.c
11123 @@ -635,6 +635,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
11124
11125 if (fnhe->fnhe_gw) {
11126 rt->rt_flags |= RTCF_REDIRECTED;
11127 + rt->rt_uses_gateway = 1;
11128 rt->rt_gw_family = AF_INET;
11129 rt->rt_gw4 = fnhe->fnhe_gw;
11130 }
11131 @@ -1313,7 +1314,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
11132 mtu = READ_ONCE(dst->dev->mtu);
11133
11134 if (unlikely(ip_mtu_locked(dst))) {
11135 - if (rt->rt_gw_family && mtu > 576)
11136 + if (rt->rt_uses_gateway && mtu > 576)
11137 mtu = 576;
11138 }
11139
11140 @@ -1569,6 +1570,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
11141 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
11142
11143 if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
11144 + rt->rt_uses_gateway = 1;
11145 rt->rt_gw_family = nhc->nhc_gw_family;
11146 /* only INET and INET6 are supported */
11147 if (likely(nhc->nhc_gw_family == AF_INET))
11148 @@ -1634,6 +1636,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
11149 rt->rt_iif = 0;
11150 rt->rt_pmtu = 0;
11151 rt->rt_mtu_locked = 0;
11152 + rt->rt_uses_gateway = 0;
11153 rt->rt_gw_family = 0;
11154 rt->rt_gw4 = 0;
11155 INIT_LIST_HEAD(&rt->rt_uncached);
11156 @@ -2694,6 +2697,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
11157 rt->rt_genid = rt_genid_ipv4(net);
11158 rt->rt_flags = ort->rt_flags;
11159 rt->rt_type = ort->rt_type;
11160 + rt->rt_uses_gateway = ort->rt_uses_gateway;
11161 rt->rt_gw_family = ort->rt_gw_family;
11162 if (rt->rt_gw_family == AF_INET)
11163 rt->rt_gw4 = ort->rt_gw4;
11164 @@ -2778,21 +2782,23 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
11165 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
11166 goto nla_put_failure;
11167 }
11168 - if (rt->rt_gw_family == AF_INET &&
11169 - nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
11170 - goto nla_put_failure;
11171 - } else if (rt->rt_gw_family == AF_INET6) {
11172 - int alen = sizeof(struct in6_addr);
11173 - struct nlattr *nla;
11174 - struct rtvia *via;
11175 -
11176 - nla = nla_reserve(skb, RTA_VIA, alen + 2);
11177 - if (!nla)
11178 + if (rt->rt_uses_gateway) {
11179 + if (rt->rt_gw_family == AF_INET &&
11180 + nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
11181 goto nla_put_failure;
11182 -
11183 - via = nla_data(nla);
11184 - via->rtvia_family = AF_INET6;
11185 - memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
11186 + } else if (rt->rt_gw_family == AF_INET6) {
11187 + int alen = sizeof(struct in6_addr);
11188 + struct nlattr *nla;
11189 + struct rtvia *via;
11190 +
11191 + nla = nla_reserve(skb, RTA_VIA, alen + 2);
11192 + if (!nla)
11193 + goto nla_put_failure;
11194 +
11195 + via = nla_data(nla);
11196 + via->rtvia_family = AF_INET6;
11197 + memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
11198 + }
11199 }
11200
11201 expires = rt->dst.expires;
11202 diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
11203 index 56be7d27f208..00ade9c185ea 100644
11204 --- a/net/ipv4/tcp_bbr.c
11205 +++ b/net/ipv4/tcp_bbr.c
11206 @@ -386,7 +386,7 @@ static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
11207 * which allows 2 outstanding 2-packet sequences, to try to keep pipe
11208 * full even with ACK-every-other-packet delayed ACKs.
11209 */
11210 -static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain)
11211 +static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
11212 {
11213 struct bbr *bbr = inet_csk_ca(sk);
11214
11215 @@ -397,7 +397,7 @@ static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain)
11216 cwnd = (cwnd + 1) & ~1U;
11217
11218 /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
11219 - if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
11220 + if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == 0)
11221 cwnd += 2;
11222
11223 return cwnd;
11224 @@ -409,7 +409,7 @@ static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
11225 u32 inflight;
11226
11227 inflight = bbr_bdp(sk, bw, gain);
11228 - inflight = bbr_quantization_budget(sk, inflight, gain);
11229 + inflight = bbr_quantization_budget(sk, inflight);
11230
11231 return inflight;
11232 }
11233 @@ -529,7 +529,7 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
11234 * due to aggregation (of data and/or ACKs) visible in the ACK stream.
11235 */
11236 target_cwnd += bbr_ack_aggregation_cwnd(sk);
11237 - target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
11238 + target_cwnd = bbr_quantization_budget(sk, target_cwnd);
11239
11240 /* If we're below target cwnd, slow start cwnd toward target cwnd. */
11241 if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
11242 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
11243 index c801cd37cc2a..3e8b38c73d8c 100644
11244 --- a/net/ipv4/tcp_timer.c
11245 +++ b/net/ipv4/tcp_timer.c
11246 @@ -210,7 +210,7 @@ static int tcp_write_timeout(struct sock *sk)
11247 struct inet_connection_sock *icsk = inet_csk(sk);
11248 struct tcp_sock *tp = tcp_sk(sk);
11249 struct net *net = sock_net(sk);
11250 - bool expired, do_reset;
11251 + bool expired = false, do_reset;
11252 int retry_until;
11253
11254 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
11255 @@ -242,9 +242,10 @@ static int tcp_write_timeout(struct sock *sk)
11256 if (tcp_out_of_resources(sk, do_reset))
11257 return 1;
11258 }
11259 + }
11260 + if (!expired)
11261 expired = retransmits_timed_out(sk, retry_until,
11262 icsk->icsk_user_timeout);
11263 - }
11264 tcp_fastopen_active_detect_blackhole(sk, expired);
11265
11266 if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
11267 diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
11268 index cdef8f9a3b01..35b84b52b702 100644
11269 --- a/net/ipv4/xfrm4_policy.c
11270 +++ b/net/ipv4/xfrm4_policy.c
11271 @@ -85,6 +85,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
11272 xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
11273 RTCF_LOCAL);
11274 xdst->u.rt.rt_type = rt->rt_type;
11275 + xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
11276 xdst->u.rt.rt_gw_family = rt->rt_gw_family;
11277 if (rt->rt_gw_family == AF_INET)
11278 xdst->u.rt.rt_gw4 = rt->rt_gw4;
11279 diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
11280 index d22b6c140f23..f9e8fe3ff0c5 100644
11281 --- a/net/ipv6/fib6_rules.c
11282 +++ b/net/ipv6/fib6_rules.c
11283 @@ -287,7 +287,8 @@ static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg
11284 return false;
11285
11286 suppress_route:
11287 - ip6_rt_put(rt);
11288 + if (!(arg->flags & FIB_LOOKUP_NOREF))
11289 + ip6_rt_put(rt);
11290 return true;
11291 }
11292
11293 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
11294 index 87f47bc55c5e..6e2af411cd9c 100644
11295 --- a/net/ipv6/ip6_fib.c
11296 +++ b/net/ipv6/ip6_fib.c
11297 @@ -318,7 +318,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
11298 if (rt->dst.error == -EAGAIN) {
11299 ip6_rt_put_flags(rt, flags);
11300 rt = net->ipv6.ip6_null_entry;
11301 - if (!(flags | RT6_LOOKUP_F_DST_NOREF))
11302 + if (!(flags & RT6_LOOKUP_F_DST_NOREF))
11303 dst_hold(&rt->dst);
11304 }
11305
11306 diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
11307 index 9b8742947aff..8dfea26536c9 100644
11308 --- a/net/nfc/llcp_sock.c
11309 +++ b/net/nfc/llcp_sock.c
11310 @@ -1004,10 +1004,13 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
11311 sock->type != SOCK_RAW)
11312 return -ESOCKTNOSUPPORT;
11313
11314 - if (sock->type == SOCK_RAW)
11315 + if (sock->type == SOCK_RAW) {
11316 + if (!capable(CAP_NET_RAW))
11317 + return -EPERM;
11318 sock->ops = &llcp_rawsock_ops;
11319 - else
11320 + } else {
11321 sock->ops = &llcp_sock_ops;
11322 + }
11323
11324 sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern);
11325 if (sk == NULL)
11326 diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
11327 index d01410e52097..f1e7041a5a60 100644
11328 --- a/net/openvswitch/datapath.c
11329 +++ b/net/openvswitch/datapath.c
11330 @@ -2263,7 +2263,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
11331 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
11332 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
11333 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
11334 - [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
11335 + [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
11336 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
11337 [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
11338 [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
11339 diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
11340 index 6c8b0f6d28f9..88f98f27ad88 100644
11341 --- a/net/qrtr/qrtr.c
11342 +++ b/net/qrtr/qrtr.c
11343 @@ -150,6 +150,7 @@ static void __qrtr_node_release(struct kref *kref)
11344 list_del(&node->item);
11345 mutex_unlock(&qrtr_node_lock);
11346
11347 + cancel_work_sync(&node->work);
11348 skb_queue_purge(&node->rx_queue);
11349 kfree(node);
11350 }
11351 diff --git a/net/rds/bind.c b/net/rds/bind.c
11352 index 05464fd7c17a..93e336535d3b 100644
11353 --- a/net/rds/bind.c
11354 +++ b/net/rds/bind.c
11355 @@ -244,7 +244,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
11356 */
11357 if (rs->rs_transport) {
11358 trans = rs->rs_transport;
11359 - if (trans->laddr_check(sock_net(sock->sk),
11360 + if (!trans->laddr_check ||
11361 + trans->laddr_check(sock_net(sock->sk),
11362 binding_addr, scope_id) != 0) {
11363 ret = -ENOPROTOOPT;
11364 goto out;
11365 @@ -263,6 +264,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
11366
11367 sock_set_flag(sk, SOCK_RCU_FREE);
11368 ret = rds_add_bound(rs, binding_addr, &port, scope_id);
11369 + if (ret)
11370 + rs->rs_transport = NULL;
11371
11372 out:
11373 release_sock(sk);
11374 diff --git a/net/sched/act_api.c b/net/sched/act_api.c
11375 index 339712296164..2558f00f6b3e 100644
11376 --- a/net/sched/act_api.c
11377 +++ b/net/sched/act_api.c
11378 @@ -831,6 +831,15 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
11379 return c;
11380 }
11381
11382 +static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
11383 + [TCA_ACT_KIND] = { .type = NLA_NUL_STRING,
11384 + .len = IFNAMSIZ - 1 },
11385 + [TCA_ACT_INDEX] = { .type = NLA_U32 },
11386 + [TCA_ACT_COOKIE] = { .type = NLA_BINARY,
11387 + .len = TC_COOKIE_MAX_SIZE },
11388 + [TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
11389 +};
11390 +
11391 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
11392 struct nlattr *nla, struct nlattr *est,
11393 char *name, int ovr, int bind,
11394 @@ -846,8 +855,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
11395 int err;
11396
11397 if (name == NULL) {
11398 - err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL,
11399 - extack);
11400 + err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
11401 + tcf_action_policy, extack);
11402 if (err < 0)
11403 goto err_out;
11404 err = -EINVAL;
11405 @@ -856,18 +865,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
11406 NL_SET_ERR_MSG(extack, "TC action kind must be specified");
11407 goto err_out;
11408 }
11409 - if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
11410 - NL_SET_ERR_MSG(extack, "TC action name too long");
11411 - goto err_out;
11412 - }
11413 - if (tb[TCA_ACT_COOKIE]) {
11414 - int cklen = nla_len(tb[TCA_ACT_COOKIE]);
11415 -
11416 - if (cklen > TC_COOKIE_MAX_SIZE) {
11417 - NL_SET_ERR_MSG(extack, "TC cookie size above the maximum");
11418 - goto err_out;
11419 - }
11420 + nla_strlcpy(act_name, kind, IFNAMSIZ);
11421
11422 + if (tb[TCA_ACT_COOKIE]) {
11423 cookie = nla_memdup_cookie(tb);
11424 if (!cookie) {
11425 NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
11426 @@ -1098,7 +1098,8 @@ static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
11427 int index;
11428 int err;
11429
11430 - err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL, extack);
11431 + err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
11432 + tcf_action_policy, extack);
11433 if (err < 0)
11434 goto err_out;
11435
11436 @@ -1152,7 +1153,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
11437
11438 b = skb_tail_pointer(skb);
11439
11440 - err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL, extack);
11441 + err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
11442 + tcf_action_policy, extack);
11443 if (err < 0)
11444 goto err_out;
11445
11446 @@ -1440,7 +1442,7 @@ static struct nlattr *find_dump_kind(struct nlattr **nla)
11447
11448 if (tb[1] == NULL)
11449 return NULL;
11450 - if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0)
11451 + if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
11452 return NULL;
11453 kind = tb2[TCA_ACT_KIND];
11454
11455 diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
11456 index 10229124a992..86344fd2ff1f 100644
11457 --- a/net/sched/act_sample.c
11458 +++ b/net/sched/act_sample.c
11459 @@ -146,6 +146,7 @@ static bool tcf_sample_dev_ok_push(struct net_device *dev)
11460 case ARPHRD_TUNNEL6:
11461 case ARPHRD_SIT:
11462 case ARPHRD_IPGRE:
11463 + case ARPHRD_IP6GRE:
11464 case ARPHRD_VOID:
11465 case ARPHRD_NONE:
11466 return false;
11467 diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
11468 index efd3cfb80a2a..9aef93300f1c 100644
11469 --- a/net/sched/cls_api.c
11470 +++ b/net/sched/cls_api.c
11471 @@ -3027,8 +3027,10 @@ out:
11472 void tcf_exts_destroy(struct tcf_exts *exts)
11473 {
11474 #ifdef CONFIG_NET_CLS_ACT
11475 - tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
11476 - kfree(exts->actions);
11477 + if (exts->actions) {
11478 + tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
11479 + kfree(exts->actions);
11480 + }
11481 exts->nr_actions = 0;
11482 #endif
11483 }
11484 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
11485 index 1047825d9f48..81d58b280612 100644
11486 --- a/net/sched/sch_api.c
11487 +++ b/net/sched/sch_api.c
11488 @@ -1390,7 +1390,8 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
11489 }
11490
11491 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
11492 - [TCA_KIND] = { .type = NLA_STRING },
11493 + [TCA_KIND] = { .type = NLA_NUL_STRING,
11494 + .len = IFNAMSIZ - 1 },
11495 [TCA_RATE] = { .type = NLA_BINARY,
11496 .len = sizeof(struct tc_estimator) },
11497 [TCA_STAB] = { .type = NLA_NESTED },
11498 diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
11499 index 810645b5c086..4a403d35438f 100644
11500 --- a/net/sched/sch_cbs.c
11501 +++ b/net/sched/sch_cbs.c
11502 @@ -392,7 +392,6 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
11503 {
11504 struct cbs_sched_data *q = qdisc_priv(sch);
11505 struct net_device *dev = qdisc_dev(sch);
11506 - int err;
11507
11508 if (!opt) {
11509 NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory");
11510 @@ -404,6 +403,10 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
11511 if (!q->qdisc)
11512 return -ENOMEM;
11513
11514 + spin_lock(&cbs_list_lock);
11515 + list_add(&q->cbs_list, &cbs_list);
11516 + spin_unlock(&cbs_list_lock);
11517 +
11518 qdisc_hash_add(q->qdisc, false);
11519
11520 q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
11521 @@ -413,17 +416,7 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
11522
11523 qdisc_watchdog_init(&q->watchdog, sch);
11524
11525 - err = cbs_change(sch, opt, extack);
11526 - if (err)
11527 - return err;
11528 -
11529 - if (!q->offload) {
11530 - spin_lock(&cbs_list_lock);
11531 - list_add(&q->cbs_list, &cbs_list);
11532 - spin_unlock(&cbs_list_lock);
11533 - }
11534 -
11535 - return 0;
11536 + return cbs_change(sch, opt, extack);
11537 }
11538
11539 static void cbs_destroy(struct Qdisc *sch)
11540 @@ -431,15 +424,18 @@ static void cbs_destroy(struct Qdisc *sch)
11541 struct cbs_sched_data *q = qdisc_priv(sch);
11542 struct net_device *dev = qdisc_dev(sch);
11543
11544 - spin_lock(&cbs_list_lock);
11545 - list_del(&q->cbs_list);
11546 - spin_unlock(&cbs_list_lock);
11547 + /* Nothing to do if we couldn't create the underlying qdisc */
11548 + if (!q->qdisc)
11549 + return;
11550
11551 qdisc_watchdog_cancel(&q->watchdog);
11552 cbs_disable_offload(dev, q);
11553
11554 - if (q->qdisc)
11555 - qdisc_put(q->qdisc);
11556 + spin_lock(&cbs_list_lock);
11557 + list_del(&q->cbs_list);
11558 + spin_unlock(&cbs_list_lock);
11559 +
11560 + qdisc_put(q->qdisc);
11561 }
11562
11563 static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
11564 diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
11565 index b17f2ed970e2..f5cb35e550f8 100644
11566 --- a/net/sched/sch_netem.c
11567 +++ b/net/sched/sch_netem.c
11568 @@ -777,7 +777,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
11569 struct disttable *d;
11570 int i;
11571
11572 - if (n > NETEM_DIST_MAX)
11573 + if (!n || n > NETEM_DIST_MAX)
11574 return -EINVAL;
11575
11576 d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
11577 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
11578 index a07b516e503a..7a75f34ad393 100644
11579 --- a/net/sunrpc/clnt.c
11580 +++ b/net/sunrpc/clnt.c
11581 @@ -1862,6 +1862,7 @@ rpc_xdr_encode(struct rpc_task *task)
11582 req->rq_rbuffer,
11583 req->rq_rcvsize);
11584
11585 + req->rq_reply_bytes_recvd = 0;
11586 req->rq_snd_buf.head[0].iov_len = 0;
11587 xdr_init_encode(&xdr, &req->rq_snd_buf,
11588 req->rq_snd_buf.head[0].iov_base, req);
11589 @@ -1881,6 +1882,8 @@ call_encode(struct rpc_task *task)
11590 if (!rpc_task_need_encode(task))
11591 goto out;
11592 dprint_status(task);
11593 + /* Dequeue task from the receive queue while we're encoding */
11594 + xprt_request_dequeue_xprt(task);
11595 /* Encode here so that rpcsec_gss can use correct sequence number. */
11596 rpc_xdr_encode(task);
11597 /* Did the encode result in an error condition? */
11598 @@ -2518,9 +2521,6 @@ call_decode(struct rpc_task *task)
11599 return;
11600 case -EAGAIN:
11601 task->tk_status = 0;
11602 - xdr_free_bvec(&req->rq_rcv_buf);
11603 - req->rq_reply_bytes_recvd = 0;
11604 - req->rq_rcv_buf.len = 0;
11605 if (task->tk_client->cl_discrtry)
11606 xprt_conditional_disconnect(req->rq_xprt,
11607 req->rq_connect_cookie);
11608 diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
11609 index 48c93b9e525e..b256806d69cd 100644
11610 --- a/net/sunrpc/xdr.c
11611 +++ b/net/sunrpc/xdr.c
11612 @@ -1237,16 +1237,29 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
11613 EXPORT_SYMBOL_GPL(xdr_encode_word);
11614
11615 /* If the netobj starting offset bytes from the start of xdr_buf is contained
11616 - * entirely in the head or the tail, set object to point to it; otherwise
11617 - * try to find space for it at the end of the tail, copy it there, and
11618 - * set obj to point to it. */
11619 + * entirely in the head, pages, or tail, set object to point to it; otherwise
11620 + * shift the buffer until it is contained entirely within the pages or tail.
11621 + */
11622 int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
11623 {
11624 struct xdr_buf subbuf;
11625 + unsigned int boundary;
11626
11627 if (xdr_decode_word(buf, offset, &obj->len))
11628 return -EFAULT;
11629 - if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
11630 + offset += 4;
11631 +
11632 + /* Is the obj partially in the head? */
11633 + boundary = buf->head[0].iov_len;
11634 + if (offset < boundary && (offset + obj->len) > boundary)
11635 + xdr_shift_buf(buf, boundary - offset);
11636 +
11637 + /* Is the obj partially in the pages? */
11638 + boundary += buf->page_len;
11639 + if (offset < boundary && (offset + obj->len) > boundary)
11640 + xdr_shrink_pagelen(buf, boundary - offset);
11641 +
11642 + if (xdr_buf_subsegment(buf, &subbuf, offset, obj->len))
11643 return -EFAULT;
11644
11645 /* Is the obj contained entirely in the head? */
11646 @@ -1258,11 +1271,7 @@ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned in
11647 if (subbuf.tail[0].iov_len == obj->len)
11648 return 0;
11649
11650 - /* use end of tail as storage for obj:
11651 - * (We don't copy to the beginning because then we'd have
11652 - * to worry about doing a potentially overlapping copy.
11653 - * This assumes the object is at most half the length of the
11654 - * tail.) */
11655 + /* Find a contiguous area in @buf to hold all of @obj */
11656 if (obj->len > buf->buflen - buf->len)
11657 return -ENOMEM;
11658 if (buf->tail[0].iov_len != 0)
11659 diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
11660 index 2e71f5455c6c..20631d64312c 100644
11661 --- a/net/sunrpc/xprt.c
11662 +++ b/net/sunrpc/xprt.c
11663 @@ -1323,6 +1323,36 @@ xprt_request_dequeue_transmit(struct rpc_task *task)
11664 spin_unlock(&xprt->queue_lock);
11665 }
11666
11667 +/**
11668 + * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
11669 + * @task: pointer to rpc_task
11670 + *
11671 + * Remove a task from the transmit and receive queues, and ensure that
11672 + * it is not pinned by the receive work item.
11673 + */
11674 +void
11675 +xprt_request_dequeue_xprt(struct rpc_task *task)
11676 +{
11677 + struct rpc_rqst *req = task->tk_rqstp;
11678 + struct rpc_xprt *xprt = req->rq_xprt;
11679 +
11680 + if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
11681 + test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
11682 + xprt_is_pinned_rqst(req)) {
11683 + spin_lock(&xprt->queue_lock);
11684 + xprt_request_dequeue_transmit_locked(task);
11685 + xprt_request_dequeue_receive_locked(task);
11686 + while (xprt_is_pinned_rqst(req)) {
11687 + set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
11688 + spin_unlock(&xprt->queue_lock);
11689 + xprt_wait_on_pinned_rqst(req);
11690 + spin_lock(&xprt->queue_lock);
11691 + clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
11692 + }
11693 + spin_unlock(&xprt->queue_lock);
11694 + }
11695 +}
11696 +
11697 /**
11698 * xprt_request_prepare - prepare an encoded request for transport
11699 * @req: pointer to rpc_rqst
11700 @@ -1747,28 +1777,6 @@ void xprt_retry_reserve(struct rpc_task *task)
11701 xprt_do_reserve(xprt, task);
11702 }
11703
11704 -static void
11705 -xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
11706 -{
11707 - struct rpc_xprt *xprt = req->rq_xprt;
11708 -
11709 - if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
11710 - test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
11711 - xprt_is_pinned_rqst(req)) {
11712 - spin_lock(&xprt->queue_lock);
11713 - xprt_request_dequeue_transmit_locked(task);
11714 - xprt_request_dequeue_receive_locked(task);
11715 - while (xprt_is_pinned_rqst(req)) {
11716 - set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
11717 - spin_unlock(&xprt->queue_lock);
11718 - xprt_wait_on_pinned_rqst(req);
11719 - spin_lock(&xprt->queue_lock);
11720 - clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
11721 - }
11722 - spin_unlock(&xprt->queue_lock);
11723 - }
11724 -}
11725 -
11726 /**
11727 * xprt_release - release an RPC request slot
11728 * @task: task which is finished with the slot
11729 @@ -1788,7 +1796,7 @@ void xprt_release(struct rpc_task *task)
11730 }
11731
11732 xprt = req->rq_xprt;
11733 - xprt_request_dequeue_all(task, req);
11734 + xprt_request_dequeue_xprt(task);
11735 spin_lock(&xprt->transport_lock);
11736 xprt->ops->release_xprt(xprt, task);
11737 if (xprt->ops->release_request)
11738 diff --git a/net/wireless/util.c b/net/wireless/util.c
11739 index e74837824cea..f68818dbac1a 100644
11740 --- a/net/wireless/util.c
11741 +++ b/net/wireless/util.c
11742 @@ -960,6 +960,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
11743 }
11744
11745 cfg80211_process_rdev_events(rdev);
11746 + cfg80211_mlme_purge_registrations(dev->ieee80211_ptr);
11747 }
11748
11749 err = rdev_change_virtual_intf(rdev, dev, ntype, params);
11750 diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
11751 index 6410bd22fe38..03757cc60e06 100644
11752 --- a/scripts/Makefile.kasan
11753 +++ b/scripts/Makefile.kasan
11754 @@ -1,4 +1,9 @@
11755 # SPDX-License-Identifier: GPL-2.0
11756 +ifdef CONFIG_KASAN
11757 +CFLAGS_KASAN_NOSANITIZE := -fno-builtin
11758 +KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
11759 +endif
11760 +
11761 ifdef CONFIG_KASAN_GENERIC
11762
11763 ifdef CONFIG_KASAN_INLINE
11764 @@ -7,8 +12,6 @@ else
11765 call_threshold := 0
11766 endif
11767
11768 -KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
11769 -
11770 CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
11771
11772 cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
11773 @@ -45,7 +48,3 @@ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
11774 $(instrumentation_flags)
11775
11776 endif # CONFIG_KASAN_SW_TAGS
11777 -
11778 -ifdef CONFIG_KASAN
11779 -CFLAGS_KASAN_NOSANITIZE := -fno-builtin
11780 -endif
11781 diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
11782 index 6d5bbd31db7f..bd29e4e7a524 100644
11783 --- a/scripts/gcc-plugins/randomize_layout_plugin.c
11784 +++ b/scripts/gcc-plugins/randomize_layout_plugin.c
11785 @@ -443,13 +443,13 @@ static int is_pure_ops_struct(const_tree node)
11786 if (node == fieldtype)
11787 continue;
11788
11789 - if (!is_fptr(fieldtype))
11790 - return 0;
11791 -
11792 - if (code != RECORD_TYPE && code != UNION_TYPE)
11793 + if (code == RECORD_TYPE || code == UNION_TYPE) {
11794 + if (!is_pure_ops_struct(fieldtype))
11795 + return 0;
11796 continue;
11797 + }
11798
11799 - if (!is_pure_ops_struct(fieldtype))
11800 + if (!is_fptr(fieldtype))
11801 return 0;
11802 }
11803
11804 diff --git a/security/keys/trusted.c b/security/keys/trusted.c
11805 index ade699131065..1fbd77816610 100644
11806 --- a/security/keys/trusted.c
11807 +++ b/security/keys/trusted.c
11808 @@ -1228,11 +1228,16 @@ hashalg_fail:
11809
11810 static int __init init_digests(void)
11811 {
11812 + int i;
11813 +
11814 digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
11815 GFP_KERNEL);
11816 if (!digests)
11817 return -ENOMEM;
11818
11819 + for (i = 0; i < chip->nr_allocated_banks; i++)
11820 + digests[i].alg_id = chip->allocated_banks[i].alg_id;
11821 +
11822 return 0;
11823 }
11824
11825 diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
11826 index 03cda2166ea3..72908b4de77c 100644
11827 --- a/sound/firewire/motu/motu.c
11828 +++ b/sound/firewire/motu/motu.c
11829 @@ -247,6 +247,17 @@ static const struct snd_motu_spec motu_audio_express = {
11830 .analog_out_ports = 4,
11831 };
11832
11833 +static const struct snd_motu_spec motu_4pre = {
11834 + .name = "4pre",
11835 + .protocol = &snd_motu_protocol_v3,
11836 + .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
11837 + SND_MOTU_SPEC_TX_MICINST_CHUNK |
11838 + SND_MOTU_SPEC_TX_RETURN_CHUNK |
11839 + SND_MOTU_SPEC_RX_SEPARETED_MAIN,
11840 + .analog_in_ports = 2,
11841 + .analog_out_ports = 2,
11842 +};
11843 +
11844 #define SND_MOTU_DEV_ENTRY(model, data) \
11845 { \
11846 .match_flags = IEEE1394_MATCH_VENDOR_ID | \
11847 @@ -265,6 +276,7 @@ static const struct ieee1394_device_id motu_id_table[] = {
11848 SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */
11849 SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. */
11850 SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
11851 + SND_MOTU_DEV_ENTRY(0x000045, &motu_4pre),
11852 { }
11853 };
11854 MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
11855 diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
11856 index b5ced5415e40..2377732caa52 100644
11857 --- a/sound/firewire/tascam/tascam-pcm.c
11858 +++ b/sound/firewire/tascam/tascam-pcm.c
11859 @@ -56,6 +56,9 @@ static int pcm_open(struct snd_pcm_substream *substream)
11860 goto err_locked;
11861
11862 err = snd_tscm_stream_get_clock(tscm, &clock);
11863 + if (err < 0)
11864 + goto err_locked;
11865 +
11866 if (clock != SND_TSCM_CLOCK_INTERNAL ||
11867 amdtp_stream_pcm_running(&tscm->rx_stream) ||
11868 amdtp_stream_pcm_running(&tscm->tx_stream)) {
11869 diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
11870 index e852e46ebe6f..ccfa92fbc145 100644
11871 --- a/sound/firewire/tascam/tascam-stream.c
11872 +++ b/sound/firewire/tascam/tascam-stream.c
11873 @@ -8,20 +8,37 @@
11874 #include <linux/delay.h>
11875 #include "tascam.h"
11876
11877 +#define CLOCK_STATUS_MASK 0xffff0000
11878 +#define CLOCK_CONFIG_MASK 0x0000ffff
11879 +
11880 #define CALLBACK_TIMEOUT 500
11881
11882 static int get_clock(struct snd_tscm *tscm, u32 *data)
11883 {
11884 + int trial = 0;
11885 __be32 reg;
11886 int err;
11887
11888 - err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
11889 - TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
11890 - &reg, sizeof(reg), 0);
11891 - if (err >= 0)
11892 + while (trial++ < 5) {
11893 + err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
11894 + TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
11895 + &reg, sizeof(reg), 0);
11896 + if (err < 0)
11897 + return err;
11898 +
11899 *data = be32_to_cpu(reg);
11900 + if (*data & CLOCK_STATUS_MASK)
11901 + break;
11902
11903 - return err;
11904 + // In intermediate state after changing clock status.
11905 + msleep(50);
11906 + }
11907 +
11908 + // Still in the intermediate state.
11909 + if (trial >= 5)
11910 + return -EAGAIN;
11911 +
11912 + return 0;
11913 }
11914
11915 static int set_clock(struct snd_tscm *tscm, unsigned int rate,
11916 @@ -34,7 +51,7 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
11917 err = get_clock(tscm, &data);
11918 if (err < 0)
11919 return err;
11920 - data &= 0x0000ffff;
11921 + data &= CLOCK_CONFIG_MASK;
11922
11923 if (rate > 0) {
11924 data &= 0x000000ff;
11925 @@ -79,17 +96,14 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
11926
11927 int snd_tscm_stream_get_rate(struct snd_tscm *tscm, unsigned int *rate)
11928 {
11929 - u32 data = 0x0;
11930 - unsigned int trials = 0;
11931 + u32 data;
11932 int err;
11933
11934 - while (data == 0x0 || trials++ < 5) {
11935 - err = get_clock(tscm, &data);
11936 - if (err < 0)
11937 - return err;
11938 + err = get_clock(tscm, &data);
11939 + if (err < 0)
11940 + return err;
11941
11942 - data = (data & 0xff000000) >> 24;
11943 - }
11944 + data = (data & 0xff000000) >> 24;
11945
11946 /* Check base rate. */
11947 if ((data & 0x0f) == 0x01)
11948 diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
11949 index 3b0110545070..196bbc85699e 100644
11950 --- a/sound/hda/hdac_controller.c
11951 +++ b/sound/hda/hdac_controller.c
11952 @@ -447,6 +447,8 @@ static void azx_int_disable(struct hdac_bus *bus)
11953 list_for_each_entry(azx_dev, &bus->stream_list, list)
11954 snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
11955
11956 + synchronize_irq(bus->irq);
11957 +
11958 /* disable SIE for all streams */
11959 snd_hdac_chip_writeb(bus, INTCTL, 0);
11960
11961 diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
11962 index 5f59316f982a..7d15093844b9 100644
11963 --- a/sound/i2c/other/ak4xxx-adda.c
11964 +++ b/sound/i2c/other/ak4xxx-adda.c
11965 @@ -775,11 +775,12 @@ static int build_adc_controls(struct snd_akm4xxx *ak)
11966 return err;
11967
11968 memset(&knew, 0, sizeof(knew));
11969 - knew.name = ak->adc_info[mixer_ch].selector_name;
11970 - if (!knew.name) {
11971 + if (!ak->adc_info ||
11972 + !ak->adc_info[mixer_ch].selector_name) {
11973 knew.name = "Capture Channel";
11974 knew.index = mixer_ch + ak->idx_offset * 2;
11975 - }
11976 + } else
11977 + knew.name = ak->adc_info[mixer_ch].selector_name;
11978
11979 knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
11980 knew.info = ak4xxx_capture_source_info;
11981 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
11982 index 51f10ed9bc43..a2fb19129219 100644
11983 --- a/sound/pci/hda/hda_codec.c
11984 +++ b/sound/pci/hda/hda_codec.c
11985 @@ -846,7 +846,13 @@ static void snd_hda_codec_dev_release(struct device *dev)
11986 snd_hda_sysfs_clear(codec);
11987 kfree(codec->modelname);
11988 kfree(codec->wcaps);
11989 - kfree(codec);
11990 +
11991 + /*
11992 + * In the case of ASoC HD-audio, hda_codec is device managed.
11993 + * It will be freed when the ASoC device is removed.
11994 + */
11995 + if (codec->core.type == HDA_DEV_LEGACY)
11996 + kfree(codec);
11997 }
11998
11999 #define DEV_NAME_LEN 31
12000 diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
12001 index 48d863736b3c..a5a2e9fe7785 100644
12002 --- a/sound/pci/hda/hda_controller.c
12003 +++ b/sound/pci/hda/hda_controller.c
12004 @@ -869,10 +869,13 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
12005 */
12006 if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
12007 hbus->response_reset = 1;
12008 + dev_err(chip->card->dev,
12009 + "No response from codec, resetting bus: last cmd=0x%08x\n",
12010 + bus->last_cmd[addr]);
12011 return -EAGAIN; /* give a chance to retry */
12012 }
12013
12014 - dev_err(chip->card->dev,
12015 + dev_WARN(chip->card->dev,
12016 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
12017 bus->last_cmd[addr]);
12018 chip->single_cmd = 1;
12019 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
12020 index b0de3e3b33e5..783f9a9c40ec 100644
12021 --- a/sound/pci/hda/hda_intel.c
12022 +++ b/sound/pci/hda/hda_intel.c
12023 @@ -1349,9 +1349,9 @@ static int azx_free(struct azx *chip)
12024 }
12025
12026 if (bus->chip_init) {
12027 + azx_stop_chip(chip);
12028 azx_clear_irq_pending(chip);
12029 azx_stop_all_streams(chip);
12030 - azx_stop_chip(chip);
12031 }
12032
12033 if (bus->irq >= 0)
12034 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
12035 index bea7b0961080..36240def9bf5 100644
12036 --- a/sound/pci/hda/patch_hdmi.c
12037 +++ b/sound/pci/hda/patch_hdmi.c
12038 @@ -1421,7 +1421,7 @@ static void hdmi_pcm_reset_pin(struct hdmi_spec *spec,
12039 /* update per_pin ELD from the given new ELD;
12040 * setup info frame and notification accordingly
12041 */
12042 -static void update_eld(struct hda_codec *codec,
12043 +static bool update_eld(struct hda_codec *codec,
12044 struct hdmi_spec_per_pin *per_pin,
12045 struct hdmi_eld *eld)
12046 {
12047 @@ -1452,18 +1452,22 @@ static void update_eld(struct hda_codec *codec,
12048 snd_hdmi_show_eld(codec, &eld->info);
12049
12050 eld_changed = (pin_eld->eld_valid != eld->eld_valid);
12051 - if (eld->eld_valid && pin_eld->eld_valid)
12052 + eld_changed |= (pin_eld->monitor_present != eld->monitor_present);
12053 + if (!eld_changed && eld->eld_valid && pin_eld->eld_valid)
12054 if (pin_eld->eld_size != eld->eld_size ||
12055 memcmp(pin_eld->eld_buffer, eld->eld_buffer,
12056 eld->eld_size) != 0)
12057 eld_changed = true;
12058
12059 - pin_eld->monitor_present = eld->monitor_present;
12060 - pin_eld->eld_valid = eld->eld_valid;
12061 - pin_eld->eld_size = eld->eld_size;
12062 - if (eld->eld_valid)
12063 - memcpy(pin_eld->eld_buffer, eld->eld_buffer, eld->eld_size);
12064 - pin_eld->info = eld->info;
12065 + if (eld_changed) {
12066 + pin_eld->monitor_present = eld->monitor_present;
12067 + pin_eld->eld_valid = eld->eld_valid;
12068 + pin_eld->eld_size = eld->eld_size;
12069 + if (eld->eld_valid)
12070 + memcpy(pin_eld->eld_buffer, eld->eld_buffer,
12071 + eld->eld_size);
12072 + pin_eld->info = eld->info;
12073 + }
12074
12075 /*
12076 * Re-setup pin and infoframe. This is needed e.g. when
12077 @@ -1481,6 +1485,7 @@ static void update_eld(struct hda_codec *codec,
12078 SNDRV_CTL_EVENT_MASK_VALUE |
12079 SNDRV_CTL_EVENT_MASK_INFO,
12080 &get_hdmi_pcm(spec, pcm_idx)->eld_ctl->id);
12081 + return eld_changed;
12082 }
12083
12084 /* update ELD and jack state via HD-audio verbs */
12085 @@ -1582,6 +1587,7 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
12086 struct hdmi_spec *spec = codec->spec;
12087 struct hdmi_eld *eld = &spec->temp_eld;
12088 struct snd_jack *jack = NULL;
12089 + bool changed;
12090 int size;
12091
12092 mutex_lock(&per_pin->lock);
12093 @@ -1608,15 +1614,13 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
12094 * disconnected event. Jack must be fetched before update_eld()
12095 */
12096 jack = pin_idx_to_jack(codec, per_pin);
12097 - update_eld(codec, per_pin, eld);
12098 + changed = update_eld(codec, per_pin, eld);
12099 if (jack == NULL)
12100 jack = pin_idx_to_jack(codec, per_pin);
12101 - if (jack == NULL)
12102 - goto unlock;
12103 - snd_jack_report(jack,
12104 - (eld->monitor_present && eld->eld_valid) ?
12105 + if (changed && jack)
12106 + snd_jack_report(jack,
12107 + (eld->monitor_present && eld->eld_valid) ?
12108 SND_JACK_AVOUT : 0);
12109 - unlock:
12110 mutex_unlock(&per_pin->lock);
12111 }
12112
12113 @@ -2612,6 +2616,8 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
12114 /* precondition and allocation for Intel codecs */
12115 static int alloc_intel_hdmi(struct hda_codec *codec)
12116 {
12117 + int err;
12118 +
12119 /* requires i915 binding */
12120 if (!codec->bus->core.audio_component) {
12121 codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n");
12122 @@ -2620,7 +2626,12 @@ static int alloc_intel_hdmi(struct hda_codec *codec)
12123 return -ENODEV;
12124 }
12125
12126 - return alloc_generic_hdmi(codec);
12127 + err = alloc_generic_hdmi(codec);
12128 + if (err < 0)
12129 + return err;
12130 + /* no need to handle unsol events */
12131 + codec->patch_ops.unsol_event = NULL;
12132 + return 0;
12133 }
12134
12135 /* parse and post-process for Intel codecs */
12136 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
12137 index c1ddfd2fac52..36aee8ad2054 100644
12138 --- a/sound/pci/hda/patch_realtek.c
12139 +++ b/sound/pci/hda/patch_realtek.c
12140 @@ -1058,6 +1058,9 @@ static const struct snd_pci_quirk beep_white_list[] = {
12141 SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1),
12142 SND_PCI_QUIRK(0x1458, 0xa002, "GA-MA790X", 1),
12143 SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
12144 + /* blacklist -- no beep available */
12145 + SND_PCI_QUIRK(0x17aa, 0x309e, "Lenovo ThinkCentre M73", 0),
12146 + SND_PCI_QUIRK(0x17aa, 0x30a3, "Lenovo ThinkCentre M93", 0),
12147 {}
12148 };
12149
12150 @@ -3755,6 +3758,72 @@ static void alc269_x101_hp_automute_hook(struct hda_codec *codec,
12151 vref);
12152 }
12153
12154 +/*
12155 + * Magic sequence to make Huawei Matebook X right speaker working (bko#197801)
12156 + */
12157 +struct hda_alc298_mbxinit {
12158 + unsigned char value_0x23;
12159 + unsigned char value_0x25;
12160 +};
12161 +
12162 +static void alc298_huawei_mbx_stereo_seq(struct hda_codec *codec,
12163 + const struct hda_alc298_mbxinit *initval,
12164 + bool first)
12165 +{
12166 + snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x0);
12167 + alc_write_coef_idx(codec, 0x26, 0xb000);
12168 +
12169 + if (first)
12170 + snd_hda_codec_write(codec, 0x21, 0, AC_VERB_GET_PIN_SENSE, 0x0);
12171 +
12172 + snd_hda_codec_write(codec, 0x6, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x80);
12173 + alc_write_coef_idx(codec, 0x26, 0xf000);
12174 + alc_write_coef_idx(codec, 0x23, initval->value_0x23);
12175 +
12176 + if (initval->value_0x23 != 0x1e)
12177 + alc_write_coef_idx(codec, 0x25, initval->value_0x25);
12178 +
12179 + snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x26);
12180 + snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, 0xb010);
12181 +}
12182 +
12183 +static void alc298_fixup_huawei_mbx_stereo(struct hda_codec *codec,
12184 + const struct hda_fixup *fix,
12185 + int action)
12186 +{
12187 + /* Initialization magic */
12188 + static const struct hda_alc298_mbxinit dac_init[] = {
12189 + {0x0c, 0x00}, {0x0d, 0x00}, {0x0e, 0x00}, {0x0f, 0x00},
12190 + {0x10, 0x00}, {0x1a, 0x40}, {0x1b, 0x82}, {0x1c, 0x00},
12191 + {0x1d, 0x00}, {0x1e, 0x00}, {0x1f, 0x00},
12192 + {0x20, 0xc2}, {0x21, 0xc8}, {0x22, 0x26}, {0x23, 0x24},
12193 + {0x27, 0xff}, {0x28, 0xff}, {0x29, 0xff}, {0x2a, 0x8f},
12194 + {0x2b, 0x02}, {0x2c, 0x48}, {0x2d, 0x34}, {0x2e, 0x00},
12195 + {0x2f, 0x00},
12196 + {0x30, 0x00}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0x00},
12197 + {0x34, 0x00}, {0x35, 0x01}, {0x36, 0x93}, {0x37, 0x0c},
12198 + {0x38, 0x00}, {0x39, 0x00}, {0x3a, 0xf8}, {0x38, 0x80},
12199 + {}
12200 + };
12201 + const struct hda_alc298_mbxinit *seq;
12202 +
12203 + if (action != HDA_FIXUP_ACT_INIT)
12204 + return;
12205 +
12206 + /* Start */
12207 + snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x00);
12208 + snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x80);
12209 + alc_write_coef_idx(codec, 0x26, 0xf000);
12210 + alc_write_coef_idx(codec, 0x22, 0x31);
12211 + alc_write_coef_idx(codec, 0x23, 0x0b);
12212 + alc_write_coef_idx(codec, 0x25, 0x00);
12213 + snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x26);
12214 + snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, 0xb010);
12215 +
12216 + for (seq = dac_init; seq->value_0x23; seq++)
12217 + alc298_huawei_mbx_stereo_seq(codec, seq, seq == dac_init);
12218 +}
12219 +
12220 static void alc269_fixup_x101_headset_mic(struct hda_codec *codec,
12221 const struct hda_fixup *fix, int action)
12222 {
12223 @@ -5780,6 +5849,7 @@ enum {
12224 ALC255_FIXUP_DUMMY_LINEOUT_VERB,
12225 ALC255_FIXUP_DELL_HEADSET_MIC,
12226 ALC256_FIXUP_HUAWEI_MACH_WX9_PINS,
12227 + ALC298_FIXUP_HUAWEI_MBX_STEREO,
12228 ALC295_FIXUP_HP_X360,
12229 ALC221_FIXUP_HP_HEADSET_MIC,
12230 ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
12231 @@ -5800,6 +5870,7 @@ enum {
12232 ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
12233 ALC299_FIXUP_PREDATOR_SPK,
12234 ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
12235 + ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
12236 };
12237
12238 static const struct hda_fixup alc269_fixups[] = {
12239 @@ -6089,6 +6160,12 @@ static const struct hda_fixup alc269_fixups[] = {
12240 .chained = true,
12241 .chain_id = ALC255_FIXUP_MIC_MUTE_LED
12242 },
12243 + [ALC298_FIXUP_HUAWEI_MBX_STEREO] = {
12244 + .type = HDA_FIXUP_FUNC,
12245 + .v.func = alc298_fixup_huawei_mbx_stereo,
12246 + .chained = true,
12247 + .chain_id = ALC255_FIXUP_MIC_MUTE_LED
12248 + },
12249 [ALC269_FIXUP_ASUS_X101_FUNC] = {
12250 .type = HDA_FIXUP_FUNC,
12251 .v.func = alc269_fixup_x101_headset_mic,
12252 @@ -6850,6 +6927,16 @@ static const struct hda_fixup alc269_fixups[] = {
12253 .chained = true,
12254 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
12255 },
12256 + [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
12257 + .type = HDA_FIXUP_PINS,
12258 + .v.pins = (const struct hda_pintbl[]) {
12259 + { 0x19, 0x04a11040 },
12260 + { 0x21, 0x04211020 },
12261 + { }
12262 + },
12263 + .chained = true,
12264 + .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
12265 + },
12266 };
12267
12268 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
12269 @@ -7113,6 +7200,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
12270 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
12271 SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
12272 SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
12273 + SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
12274
12275 #if 0
12276 /* Below is a quirk table taken from the old code.
12277 @@ -7280,6 +7368,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
12278 {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"},
12279 {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"},
12280 {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
12281 + {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
12282 + {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
12283 {}
12284 };
12285 #define ALC225_STANDARD_PINS \
12286 diff --git a/sound/soc/atmel/mchp-i2s-mcc.c b/sound/soc/atmel/mchp-i2s-mcc.c
12287 index 86495883ca3f..ab7d5f98e759 100644
12288 --- a/sound/soc/atmel/mchp-i2s-mcc.c
12289 +++ b/sound/soc/atmel/mchp-i2s-mcc.c
12290 @@ -670,8 +670,13 @@ static int mchp_i2s_mcc_hw_params(struct snd_pcm_substream *substream,
12291 }
12292
12293 ret = regmap_write(dev->regmap, MCHP_I2SMCC_MRA, mra);
12294 - if (ret < 0)
12295 + if (ret < 0) {
12296 + if (dev->gclk_use) {
12297 + clk_unprepare(dev->gclk);
12298 + dev->gclk_use = 0;
12299 + }
12300 return ret;
12301 + }
12302 return regmap_write(dev->regmap, MCHP_I2SMCC_MRB, mrb);
12303 }
12304
12305 @@ -686,31 +691,37 @@ static int mchp_i2s_mcc_hw_free(struct snd_pcm_substream *substream,
12306 err = wait_event_interruptible_timeout(dev->wq_txrdy,
12307 dev->tx_rdy,
12308 msecs_to_jiffies(500));
12309 + if (err == 0) {
12310 + dev_warn_once(dev->dev,
12311 + "Timeout waiting for Tx ready\n");
12312 + regmap_write(dev->regmap, MCHP_I2SMCC_IDRA,
12313 + MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels));
12314 + dev->tx_rdy = 1;
12315 + }
12316 } else {
12317 err = wait_event_interruptible_timeout(dev->wq_rxrdy,
12318 dev->rx_rdy,
12319 msecs_to_jiffies(500));
12320 - }
12321 -
12322 - if (err == 0) {
12323 - u32 idra;
12324 -
12325 - dev_warn_once(dev->dev, "Timeout waiting for %s\n",
12326 - is_playback ? "Tx ready" : "Rx ready");
12327 - if (is_playback)
12328 - idra = MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels);
12329 - else
12330 - idra = MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels);
12331 - regmap_write(dev->regmap, MCHP_I2SMCC_IDRA, idra);
12332 + if (err == 0) {
12333 + dev_warn_once(dev->dev,
12334 + "Timeout waiting for Rx ready\n");
12335 + regmap_write(dev->regmap, MCHP_I2SMCC_IDRA,
12336 + MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels));
12337 + dev->rx_rdy = 1;
12338 + }
12339 }
12340
12341 if (!mchp_i2s_mcc_is_running(dev)) {
12342 regmap_write(dev->regmap, MCHP_I2SMCC_CR, MCHP_I2SMCC_CR_CKDIS);
12343
12344 if (dev->gclk_running) {
12345 - clk_disable_unprepare(dev->gclk);
12346 + clk_disable(dev->gclk);
12347 dev->gclk_running = 0;
12348 }
12349 + if (dev->gclk_use) {
12350 + clk_unprepare(dev->gclk);
12351 + dev->gclk_use = 0;
12352 + }
12353 }
12354
12355 return 0;
12356 @@ -809,6 +820,8 @@ static int mchp_i2s_mcc_dai_probe(struct snd_soc_dai *dai)
12357
12358 init_waitqueue_head(&dev->wq_txrdy);
12359 init_waitqueue_head(&dev->wq_rxrdy);
12360 + dev->tx_rdy = 1;
12361 + dev->rx_rdy = 1;
12362
12363 snd_soc_dai_init_dma_data(dai, &dev->playback, &dev->capture);
12364
12365 diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
12366 index 6db002cc2058..96d04896193f 100644
12367 --- a/sound/soc/codecs/es8316.c
12368 +++ b/sound/soc/codecs/es8316.c
12369 @@ -51,7 +51,10 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9600, 50, 1);
12370 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_max_gain_tlv, -650, 150, 0);
12371 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_min_gain_tlv, -1200, 150, 0);
12372 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_target_tlv, -1650, 150, 0);
12373 -static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0);
12374 +static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpmixer_gain_tlv,
12375 + 0, 4, TLV_DB_SCALE_ITEM(-1200, 150, 0),
12376 + 8, 11, TLV_DB_SCALE_ITEM(-450, 150, 0),
12377 +);
12378
12379 static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
12380 0, 0, TLV_DB_SCALE_ITEM(-350, 0, 0),
12381 @@ -89,7 +92,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = {
12382 SOC_DOUBLE_TLV("Headphone Playback Volume", ES8316_CPHP_ICAL_VOL,
12383 4, 0, 3, 1, hpout_vol_tlv),
12384 SOC_DOUBLE_TLV("Headphone Mixer Volume", ES8316_HPMIX_VOL,
12385 - 0, 4, 7, 0, hpmixer_gain_tlv),
12386 + 0, 4, 11, 0, hpmixer_gain_tlv),
12387
12388 SOC_ENUM("Playback Polarity", dacpol),
12389 SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8316_DAC_VOLL,
12390 diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
12391 index 7d4940256914..91242b6f8ea7 100644
12392 --- a/sound/soc/codecs/hdac_hda.c
12393 +++ b/sound/soc/codecs/hdac_hda.c
12394 @@ -495,6 +495,10 @@ static int hdac_hda_dev_probe(struct hdac_device *hdev)
12395
12396 static int hdac_hda_dev_remove(struct hdac_device *hdev)
12397 {
12398 + struct hdac_hda_priv *hda_pvt;
12399 +
12400 + hda_pvt = dev_get_drvdata(&hdev->dev);
12401 + cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work);
12402 return 0;
12403 }
12404
12405 diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
12406 index a6a4748c97f9..7cbaedffa1ef 100644
12407 --- a/sound/soc/codecs/sgtl5000.c
12408 +++ b/sound/soc/codecs/sgtl5000.c
12409 @@ -1173,12 +1173,17 @@ static int sgtl5000_set_power_regs(struct snd_soc_component *component)
12410 SGTL5000_INT_OSC_EN);
12411 /* Enable VDDC charge pump */
12412 ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
12413 - } else if (vddio >= 3100 && vdda >= 3100) {
12414 + } else {
12415 ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
12416 - /* VDDC use VDDIO rail */
12417 - lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
12418 - lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
12419 - SGTL5000_VDDC_MAN_ASSN_SHIFT;
12420 + /*
12421 + * if vddio == vdda the source of charge pump should be
12422 + * assigned manually to VDDIO
12423 + */
12424 + if (vddio == vdda) {
12425 + lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
12426 + lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
12427 + SGTL5000_VDDC_MAN_ASSN_SHIFT;
12428 + }
12429 }
12430
12431 snd_soc_component_write(component, SGTL5000_CHIP_LINREG_CTRL, lreg_ctrl);
12432 @@ -1288,6 +1293,7 @@ static int sgtl5000_probe(struct snd_soc_component *component)
12433 int ret;
12434 u16 reg;
12435 struct sgtl5000_priv *sgtl5000 = snd_soc_component_get_drvdata(component);
12436 + unsigned int zcd_mask = SGTL5000_HP_ZCD_EN | SGTL5000_ADC_ZCD_EN;
12437
12438 /* power up sgtl5000 */
12439 ret = sgtl5000_set_power_regs(component);
12440 @@ -1315,9 +1321,8 @@ static int sgtl5000_probe(struct snd_soc_component *component)
12441 0x1f);
12442 snd_soc_component_write(component, SGTL5000_CHIP_PAD_STRENGTH, reg);
12443
12444 - snd_soc_component_write(component, SGTL5000_CHIP_ANA_CTRL,
12445 - SGTL5000_HP_ZCD_EN |
12446 - SGTL5000_ADC_ZCD_EN);
12447 + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
12448 + zcd_mask, zcd_mask);
12449
12450 snd_soc_component_update_bits(component, SGTL5000_CHIP_MIC_CTRL,
12451 SGTL5000_BIAS_R_MASK,
12452 diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
12453 index 9b37e98da0db..26a4f6cd3288 100644
12454 --- a/sound/soc/codecs/tlv320aic31xx.c
12455 +++ b/sound/soc/codecs/tlv320aic31xx.c
12456 @@ -1553,7 +1553,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
12457 aic31xx->gpio_reset = devm_gpiod_get_optional(aic31xx->dev, "reset",
12458 GPIOD_OUT_LOW);
12459 if (IS_ERR(aic31xx->gpio_reset)) {
12460 - dev_err(aic31xx->dev, "not able to acquire gpio\n");
12461 + if (PTR_ERR(aic31xx->gpio_reset) != -EPROBE_DEFER)
12462 + dev_err(aic31xx->dev, "not able to acquire gpio\n");
12463 return PTR_ERR(aic31xx->gpio_reset);
12464 }
12465
12466 @@ -1564,7 +1565,9 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
12467 ARRAY_SIZE(aic31xx->supplies),
12468 aic31xx->supplies);
12469 if (ret) {
12470 - dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
12471 + if (ret != -EPROBE_DEFER)
12472 + dev_err(aic31xx->dev,
12473 + "Failed to request supplies: %d\n", ret);
12474 return ret;
12475 }
12476
12477 diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
12478 index fa862af25c1a..085855f9b08d 100644
12479 --- a/sound/soc/fsl/fsl_ssi.c
12480 +++ b/sound/soc/fsl/fsl_ssi.c
12481 @@ -799,15 +799,6 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
12482 u32 wl = SSI_SxCCR_WL(sample_size);
12483 int ret;
12484
12485 - /*
12486 - * SSI is properly configured if it is enabled and running in
12487 - * the synchronous mode; Note that AC97 mode is an exception
12488 - * that should set separate configurations for STCCR and SRCCR
12489 - * despite running in the synchronous mode.
12490 - */
12491 - if (ssi->streams && ssi->synchronous)
12492 - return 0;
12493 -
12494 if (fsl_ssi_is_i2s_master(ssi)) {
12495 ret = fsl_ssi_set_bclk(substream, dai, hw_params);
12496 if (ret)
12497 @@ -823,6 +814,15 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
12498 }
12499 }
12500
12501 + /*
12502 + * SSI is properly configured if it is enabled and running in
12503 + * the synchronous mode; Note that AC97 mode is an exception
12504 + * that should set separate configurations for STCCR and SRCCR
12505 + * despite running in the synchronous mode.
12506 + */
12507 + if (ssi->streams && ssi->synchronous)
12508 + return 0;
12509 +
12510 if (!fsl_ssi_is_ac97(ssi)) {
12511 /*
12512 * Keep the ssi->i2s_net intact while having a local variable
12513 diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c
12514 index 0e8e0a7a11df..5854868650b9 100644
12515 --- a/sound/soc/intel/common/sst-acpi.c
12516 +++ b/sound/soc/intel/common/sst-acpi.c
12517 @@ -141,11 +141,12 @@ static int sst_acpi_probe(struct platform_device *pdev)
12518 }
12519
12520 platform_set_drvdata(pdev, sst_acpi);
12521 + mach->pdata = sst_pdata;
12522
12523 /* register machine driver */
12524 sst_acpi->pdev_mach =
12525 platform_device_register_data(dev, mach->drv_name, -1,
12526 - sst_pdata, sizeof(*sst_pdata));
12527 + mach, sizeof(*mach));
12528 if (IS_ERR(sst_acpi->pdev_mach))
12529 return PTR_ERR(sst_acpi->pdev_mach);
12530
12531 diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c
12532 index ef5b66af1cd2..3a66121ee9bb 100644
12533 --- a/sound/soc/intel/common/sst-ipc.c
12534 +++ b/sound/soc/intel/common/sst-ipc.c
12535 @@ -222,6 +222,8 @@ struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
12536
12537 if (ipc->ops.reply_msg_match != NULL)
12538 header = ipc->ops.reply_msg_match(header, &mask);
12539 + else
12540 + mask = (u64)-1;
12541
12542 if (list_empty(&ipc->rx_list)) {
12543 dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
12544 diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
12545 index b9b4a72a4334..b28a9c2b0380 100644
12546 --- a/sound/soc/intel/skylake/skl-debug.c
12547 +++ b/sound/soc/intel/skylake/skl-debug.c
12548 @@ -188,7 +188,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
12549 memset(d->fw_read_buff, 0, FW_REG_BUF);
12550
12551 if (w0_stat_sz > 0)
12552 - __iowrite32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
12553 + __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
12554
12555 for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
12556 ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
12557 diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
12558 index 1132109cb992..e01815cec6fd 100644
12559 --- a/sound/soc/intel/skylake/skl-nhlt.c
12560 +++ b/sound/soc/intel/skylake/skl-nhlt.c
12561 @@ -225,7 +225,7 @@ int skl_nhlt_update_topology_bin(struct skl *skl)
12562 struct hdac_bus *bus = skl_to_bus(skl);
12563 struct device *dev = bus->dev;
12564
12565 - dev_dbg(dev, "oem_id %.6s, oem_table_id %8s oem_revision %d\n",
12566 + dev_dbg(dev, "oem_id %.6s, oem_table_id %.8s oem_revision %d\n",
12567 nhlt->header.oem_id, nhlt->header.oem_table_id,
12568 nhlt->header.oem_revision);
12569
12570 diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
12571 index fce4e050a9b7..b9aacf3d3b29 100644
12572 --- a/sound/soc/sh/rcar/adg.c
12573 +++ b/sound/soc/sh/rcar/adg.c
12574 @@ -30,6 +30,7 @@ struct rsnd_adg {
12575 struct clk *clkout[CLKOUTMAX];
12576 struct clk_onecell_data onecell;
12577 struct rsnd_mod mod;
12578 + int clk_rate[CLKMAX];
12579 u32 flags;
12580 u32 ckr;
12581 u32 rbga;
12582 @@ -114,9 +115,9 @@ static void __rsnd_adg_get_timesel_ratio(struct rsnd_priv *priv,
12583 unsigned int val, en;
12584 unsigned int min, diff;
12585 unsigned int sel_rate[] = {
12586 - clk_get_rate(adg->clk[CLKA]), /* 0000: CLKA */
12587 - clk_get_rate(adg->clk[CLKB]), /* 0001: CLKB */
12588 - clk_get_rate(adg->clk[CLKC]), /* 0010: CLKC */
12589 + adg->clk_rate[CLKA], /* 0000: CLKA */
12590 + adg->clk_rate[CLKB], /* 0001: CLKB */
12591 + adg->clk_rate[CLKC], /* 0010: CLKC */
12592 adg->rbga_rate_for_441khz, /* 0011: RBGA */
12593 adg->rbgb_rate_for_48khz, /* 0100: RBGB */
12594 };
12595 @@ -302,7 +303,7 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
12596 * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
12597 */
12598 for_each_rsnd_clk(clk, adg, i) {
12599 - if (rate == clk_get_rate(clk))
12600 + if (rate == adg->clk_rate[i])
12601 return sel_table[i];
12602 }
12603
12604 @@ -369,10 +370,18 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
12605
12606 for_each_rsnd_clk(clk, adg, i) {
12607 ret = 0;
12608 - if (enable)
12609 + if (enable) {
12610 ret = clk_prepare_enable(clk);
12611 - else
12612 +
12613 + /*
12614 + * We shouldn't use clk_get_rate() under
12615 + * atomic context. Let's keep it when
12616 + * rsnd_adg_clk_enable() was called
12617 + */
12618 + adg->clk_rate[i] = clk_get_rate(adg->clk[i]);
12619 + } else {
12620 clk_disable_unprepare(clk);
12621 + }
12622
12623 if (ret < 0)
12624 dev_warn(dev, "can't use clk %d\n", i);
12625 diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
12626 index 748f5f641002..d93db2c2b527 100644
12627 --- a/sound/soc/soc-generic-dmaengine-pcm.c
12628 +++ b/sound/soc/soc-generic-dmaengine-pcm.c
12629 @@ -306,6 +306,12 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
12630
12631 if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
12632 pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
12633 +
12634 + if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
12635 + strncpy(rtd->pcm->streams[i].pcm->name,
12636 + rtd->pcm->streams[i].pcm->id,
12637 + sizeof(rtd->pcm->streams[i].pcm->name));
12638 + }
12639 }
12640
12641 return 0;
12642 diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
12643 index b8b37f082309..0d8437b080bf 100644
12644 --- a/sound/soc/sof/intel/hda-codec.c
12645 +++ b/sound/soc/sof/intel/hda-codec.c
12646 @@ -62,8 +62,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
12647 address, resp);
12648
12649 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
12650 - /* snd_hdac_ext_bus_device_exit will use kfree to free hdev */
12651 - hda_priv = kzalloc(sizeof(*hda_priv), GFP_KERNEL);
12652 + hda_priv = devm_kzalloc(sdev->dev, sizeof(*hda_priv), GFP_KERNEL);
12653 if (!hda_priv)
12654 return -ENOMEM;
12655
12656 @@ -82,8 +81,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
12657
12658 return 0;
12659 #else
12660 - /* snd_hdac_ext_bus_device_exit will use kfree to free hdev */
12661 - hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
12662 + hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
12663 if (!hdev)
12664 return -ENOMEM;
12665
12666 diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
12667 index 334e9d59b1ba..3b8955e755b2 100644
12668 --- a/sound/soc/sof/pcm.c
12669 +++ b/sound/soc/sof/pcm.c
12670 @@ -208,12 +208,11 @@ static int sof_pcm_hw_params(struct snd_pcm_substream *substream,
12671 if (ret < 0)
12672 return ret;
12673
12674 + spcm->prepared[substream->stream] = true;
12675 +
12676 /* save pcm hw_params */
12677 memcpy(&spcm->params[substream->stream], params, sizeof(*params));
12678
12679 - /* clear hw_params_upon_resume flag */
12680 - spcm->hw_params_upon_resume[substream->stream] = 0;
12681 -
12682 return ret;
12683 }
12684
12685 @@ -236,6 +235,9 @@ static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
12686 if (!spcm)
12687 return -EINVAL;
12688
12689 + if (!spcm->prepared[substream->stream])
12690 + return 0;
12691 +
12692 dev_dbg(sdev->dev, "pcm: free stream %d dir %d\n", spcm->pcm.pcm_id,
12693 substream->stream);
12694
12695 @@ -258,6 +260,8 @@ static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
12696 if (ret < 0)
12697 dev_err(sdev->dev, "error: platform hw free failed\n");
12698
12699 + spcm->prepared[substream->stream] = false;
12700 +
12701 return ret;
12702 }
12703
12704 @@ -278,11 +282,7 @@ static int sof_pcm_prepare(struct snd_pcm_substream *substream)
12705 if (!spcm)
12706 return -EINVAL;
12707
12708 - /*
12709 - * check if hw_params needs to be set-up again.
12710 - * This is only needed when resuming from system sleep.
12711 - */
12712 - if (!spcm->hw_params_upon_resume[substream->stream])
12713 + if (spcm->prepared[substream->stream])
12714 return 0;
12715
12716 dev_dbg(sdev->dev, "pcm: prepare stream %d dir %d\n", spcm->pcm.pcm_id,
12717 @@ -311,6 +311,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
12718 struct snd_sof_pcm *spcm;
12719 struct sof_ipc_stream stream;
12720 struct sof_ipc_reply reply;
12721 + bool reset_hw_params = false;
12722 int ret;
12723
12724 /* nothing to do for BE */
12725 @@ -351,6 +352,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
12726 case SNDRV_PCM_TRIGGER_SUSPEND:
12727 case SNDRV_PCM_TRIGGER_STOP:
12728 stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
12729 + reset_hw_params = true;
12730 break;
12731 default:
12732 dev_err(sdev->dev, "error: unhandled trigger cmd %d\n", cmd);
12733 @@ -363,17 +365,17 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
12734 ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
12735 sizeof(stream), &reply, sizeof(reply));
12736
12737 - if (ret < 0 || cmd != SNDRV_PCM_TRIGGER_SUSPEND)
12738 + if (ret < 0 || !reset_hw_params)
12739 return ret;
12740
12741 /*
12742 - * The hw_free op is usually called when the pcm stream is closed.
12743 - * Since the stream is not closed during suspend, the DSP needs to be
12744 - * notified explicitly to free pcm to prevent errors upon resume.
12745 + * In case of stream is stopped, DSP must be reprogrammed upon
12746 + * restart, so free PCM here.
12747 */
12748 stream.hdr.size = sizeof(stream);
12749 stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_FREE;
12750 stream.comp_id = spcm->stream[substream->stream].comp_id;
12751 + spcm->prepared[substream->stream] = false;
12752
12753 /* send IPC to the DSP */
12754 return sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
12755 @@ -481,6 +483,7 @@ static int sof_pcm_open(struct snd_pcm_substream *substream)
12756 spcm->stream[substream->stream].posn.host_posn = 0;
12757 spcm->stream[substream->stream].posn.dai_posn = 0;
12758 spcm->stream[substream->stream].substream = substream;
12759 + spcm->prepared[substream->stream] = false;
12760
12761 ret = snd_sof_pcm_platform_open(sdev, substream);
12762 if (ret < 0)
12763 diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
12764 index 278abfd10490..48c6d78d72e2 100644
12765 --- a/sound/soc/sof/pm.c
12766 +++ b/sound/soc/sof/pm.c
12767 @@ -233,7 +233,7 @@ static int sof_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
12768
12769 state = substream->runtime->status->state;
12770 if (state == SNDRV_PCM_STATE_SUSPENDED)
12771 - spcm->hw_params_upon_resume[dir] = 1;
12772 + spcm->prepared[dir] = false;
12773 }
12774 }
12775
12776 diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
12777 index 65d1bac4c6b8..6fd3df7c57a3 100644
12778 --- a/sound/soc/sof/sof-pci-dev.c
12779 +++ b/sound/soc/sof/sof-pci-dev.c
12780 @@ -223,6 +223,9 @@ static void sof_pci_probe_complete(struct device *dev)
12781 */
12782 pm_runtime_allow(dev);
12783
12784 + /* mark last_busy for pm_runtime to make sure not suspend immediately */
12785 + pm_runtime_mark_last_busy(dev);
12786 +
12787 /* follow recommendation in pci-driver.c to decrement usage counter */
12788 pm_runtime_put_noidle(dev);
12789 }
12790 diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
12791 index b8c0b2a22684..fa5cb7d2a660 100644
12792 --- a/sound/soc/sof/sof-priv.h
12793 +++ b/sound/soc/sof/sof-priv.h
12794 @@ -297,7 +297,7 @@ struct snd_sof_pcm {
12795 struct snd_sof_pcm_stream stream[2];
12796 struct list_head list; /* list in sdev pcm list */
12797 struct snd_pcm_hw_params params[2];
12798 - int hw_params_upon_resume[2]; /* set up hw_params upon resume */
12799 + bool prepared[2]; /* PCM_PARAMS set successfully */
12800 };
12801
12802 /* ALSA SOF Kcontrol device */
12803 diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
12804 index 7fa5c61169db..ab8cb83c8b1a 100644
12805 --- a/sound/soc/sunxi/sun4i-i2s.c
12806 +++ b/sound/soc/sunxi/sun4i-i2s.c
12807 @@ -222,10 +222,11 @@ static const struct sun4i_i2s_clk_div sun4i_i2s_mclk_div[] = {
12808 };
12809
12810 static int sun4i_i2s_get_bclk_div(struct sun4i_i2s *i2s,
12811 - unsigned int oversample_rate,
12812 + unsigned long parent_rate,
12813 + unsigned int sampling_rate,
12814 unsigned int word_size)
12815 {
12816 - int div = oversample_rate / word_size / 2;
12817 + int div = parent_rate / sampling_rate / word_size / 2;
12818 int i;
12819
12820 for (i = 0; i < ARRAY_SIZE(sun4i_i2s_bclk_div); i++) {
12821 @@ -315,8 +316,8 @@ static int sun4i_i2s_set_clk_rate(struct snd_soc_dai *dai,
12822 return -EINVAL;
12823 }
12824
12825 - bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
12826 - word_size);
12827 + bclk_div = sun4i_i2s_get_bclk_div(i2s, i2s->mclk_freq,
12828 + rate, word_size);
12829 if (bclk_div < 0) {
12830 dev_err(dai->dev, "Unsupported BCLK divider: %d\n", bclk_div);
12831 return -EINVAL;
12832 diff --git a/sound/soc/uniphier/aio-cpu.c b/sound/soc/uniphier/aio-cpu.c
12833 index ee90e6c3937c..2ae582a99b63 100644
12834 --- a/sound/soc/uniphier/aio-cpu.c
12835 +++ b/sound/soc/uniphier/aio-cpu.c
12836 @@ -424,8 +424,11 @@ int uniphier_aio_dai_suspend(struct snd_soc_dai *dai)
12837 {
12838 struct uniphier_aio *aio = uniphier_priv(dai);
12839
12840 - reset_control_assert(aio->chip->rst);
12841 - clk_disable_unprepare(aio->chip->clk);
12842 + aio->chip->num_wup_aios--;
12843 + if (!aio->chip->num_wup_aios) {
12844 + reset_control_assert(aio->chip->rst);
12845 + clk_disable_unprepare(aio->chip->clk);
12846 + }
12847
12848 return 0;
12849 }
12850 @@ -439,13 +442,15 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
12851 if (!aio->chip->active)
12852 return 0;
12853
12854 - ret = clk_prepare_enable(aio->chip->clk);
12855 - if (ret)
12856 - return ret;
12857 + if (!aio->chip->num_wup_aios) {
12858 + ret = clk_prepare_enable(aio->chip->clk);
12859 + if (ret)
12860 + return ret;
12861
12862 - ret = reset_control_deassert(aio->chip->rst);
12863 - if (ret)
12864 - goto err_out_clock;
12865 + ret = reset_control_deassert(aio->chip->rst);
12866 + if (ret)
12867 + goto err_out_clock;
12868 + }
12869
12870 aio_iecout_set_enable(aio->chip, true);
12871 aio_chip_init(aio->chip);
12872 @@ -458,7 +463,7 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
12873
12874 ret = aio_init(sub);
12875 if (ret)
12876 - goto err_out_clock;
12877 + goto err_out_reset;
12878
12879 if (!sub->setting)
12880 continue;
12881 @@ -466,11 +471,16 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
12882 aio_port_reset(sub);
12883 aio_src_reset(sub);
12884 }
12885 + aio->chip->num_wup_aios++;
12886
12887 return 0;
12888
12889 +err_out_reset:
12890 + if (!aio->chip->num_wup_aios)
12891 + reset_control_assert(aio->chip->rst);
12892 err_out_clock:
12893 - clk_disable_unprepare(aio->chip->clk);
12894 + if (!aio->chip->num_wup_aios)
12895 + clk_disable_unprepare(aio->chip->clk);
12896
12897 return ret;
12898 }
12899 @@ -619,6 +629,7 @@ int uniphier_aio_probe(struct platform_device *pdev)
12900 return PTR_ERR(chip->rst);
12901
12902 chip->num_aios = chip->chip_spec->num_dais;
12903 + chip->num_wup_aios = chip->num_aios;
12904 chip->aios = devm_kcalloc(dev,
12905 chip->num_aios, sizeof(struct uniphier_aio),
12906 GFP_KERNEL);
12907 diff --git a/sound/soc/uniphier/aio.h b/sound/soc/uniphier/aio.h
12908 index ca6ccbae0ee8..a7ff7e556429 100644
12909 --- a/sound/soc/uniphier/aio.h
12910 +++ b/sound/soc/uniphier/aio.h
12911 @@ -285,6 +285,7 @@ struct uniphier_aio_chip {
12912
12913 struct uniphier_aio *aios;
12914 int num_aios;
12915 + int num_wup_aios;
12916 struct uniphier_aio_pll *plls;
12917 int num_plls;
12918
12919 diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
12920 index e4bbf79de956..33cd26763c0e 100644
12921 --- a/sound/usb/pcm.c
12922 +++ b/sound/usb/pcm.c
12923 @@ -457,6 +457,7 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
12924 }
12925 ep = get_endpoint(alts, 1)->bEndpointAddress;
12926 if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
12927 + get_endpoint(alts, 0)->bSynchAddress != 0 &&
12928 ((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
12929 (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
12930 dev_err(&dev->dev,
12931 diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
12932 index 57aaeaf8e192..edba4d93e9e6 100644
12933 --- a/tools/include/uapi/asm/bitsperlong.h
12934 +++ b/tools/include/uapi/asm/bitsperlong.h
12935 @@ -1,22 +1,22 @@
12936 /* SPDX-License-Identifier: GPL-2.0 */
12937 #if defined(__i386__) || defined(__x86_64__)
12938 -#include "../../arch/x86/include/uapi/asm/bitsperlong.h"
12939 +#include "../../../arch/x86/include/uapi/asm/bitsperlong.h"
12940 #elif defined(__aarch64__)
12941 -#include "../../arch/arm64/include/uapi/asm/bitsperlong.h"
12942 +#include "../../../arch/arm64/include/uapi/asm/bitsperlong.h"
12943 #elif defined(__powerpc__)
12944 -#include "../../arch/powerpc/include/uapi/asm/bitsperlong.h"
12945 +#include "../../../arch/powerpc/include/uapi/asm/bitsperlong.h"
12946 #elif defined(__s390__)
12947 -#include "../../arch/s390/include/uapi/asm/bitsperlong.h"
12948 +#include "../../../arch/s390/include/uapi/asm/bitsperlong.h"
12949 #elif defined(__sparc__)
12950 -#include "../../arch/sparc/include/uapi/asm/bitsperlong.h"
12951 +#include "../../../arch/sparc/include/uapi/asm/bitsperlong.h"
12952 #elif defined(__mips__)
12953 -#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
12954 +#include "../../../arch/mips/include/uapi/asm/bitsperlong.h"
12955 #elif defined(__ia64__)
12956 -#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
12957 +#include "../../../arch/ia64/include/uapi/asm/bitsperlong.h"
12958 #elif defined(__riscv)
12959 -#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
12960 +#include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
12961 #elif defined(__alpha__)
12962 -#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
12963 +#include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
12964 #else
12965 #include <asm-generic/bitsperlong.h>
12966 #endif
12967 diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
12968 index 3292c290654f..86ce17a1f7fb 100644
12969 --- a/tools/lib/traceevent/Makefile
12970 +++ b/tools/lib/traceevent/Makefile
12971 @@ -62,15 +62,15 @@ set_plugin_dir := 1
12972
12973 # Set plugin_dir to preffered global plugin location
12974 # If we install under $HOME directory we go under
12975 -# $(HOME)/.traceevent/plugins
12976 +# $(HOME)/.local/lib/traceevent/plugins
12977 #
12978 # We dont set PLUGIN_DIR in case we install under $HOME
12979 # directory, because by default the code looks under:
12980 -# $(HOME)/.traceevent/plugins by default.
12981 +# $(HOME)/.local/lib/traceevent/plugins by default.
12982 #
12983 ifeq ($(plugin_dir),)
12984 ifeq ($(prefix),$(HOME))
12985 -override plugin_dir = $(HOME)/.traceevent/plugins
12986 +override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
12987 set_plugin_dir := 0
12988 else
12989 override plugin_dir = $(libdir)/traceevent/plugins
12990 diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
12991 index 8ca28de9337a..e1f7ddd5a6cf 100644
12992 --- a/tools/lib/traceevent/event-plugin.c
12993 +++ b/tools/lib/traceevent/event-plugin.c
12994 @@ -18,7 +18,7 @@
12995 #include "event-utils.h"
12996 #include "trace-seq.h"
12997
12998 -#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
12999 +#define LOCAL_PLUGIN_DIR ".local/lib/traceevent/plugins/"
13000
13001 static struct registered_plugin_options {
13002 struct registered_plugin_options *next;
13003 diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
13004 index 865a9762f22e..3f84403c0983 100644
13005 --- a/tools/perf/arch/x86/util/kvm-stat.c
13006 +++ b/tools/perf/arch/x86/util/kvm-stat.c
13007 @@ -1,7 +1,7 @@
13008 // SPDX-License-Identifier: GPL-2.0
13009 #include <errno.h>
13010 -#include "../../util/kvm-stat.h"
13011 -#include "../../util/evsel.h"
13012 +#include "../../../util/kvm-stat.h"
13013 +#include "../../../util/evsel.h"
13014 #include <asm/svm.h>
13015 #include <asm/vmx.h>
13016 #include <asm/kvm.h>
13017 diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
13018 index 950539f9a4f7..b1eb963b4a6e 100644
13019 --- a/tools/perf/arch/x86/util/tsc.c
13020 +++ b/tools/perf/arch/x86/util/tsc.c
13021 @@ -5,10 +5,10 @@
13022 #include <linux/stddef.h>
13023 #include <linux/perf_event.h>
13024
13025 -#include "../../perf.h"
13026 +#include "../../../perf.h"
13027 #include <linux/types.h>
13028 -#include "../../util/debug.h"
13029 -#include "../../util/tsc.h"
13030 +#include "../../../util/debug.h"
13031 +#include "../../../util/tsc.h"
13032
13033 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
13034 struct perf_tsc_conversion *tc)
13035 diff --git a/tools/perf/perf.c b/tools/perf/perf.c
13036 index 97e2628ea5dd..d4e4d53e8b44 100644
13037 --- a/tools/perf/perf.c
13038 +++ b/tools/perf/perf.c
13039 @@ -441,6 +441,9 @@ int main(int argc, const char **argv)
13040
13041 srandom(time(NULL));
13042
13043 + /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
13044 + config_exclusive_filename = getenv("PERF_CONFIG");
13045 +
13046 err = perf_config(perf_default_config, NULL);
13047 if (err)
13048 return err;
13049 diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
13050 index 45d269b0157e..11cc2af13f2b 100755
13051 --- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
13052 +++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
13053 @@ -32,6 +32,10 @@ if [ $err -ne 0 ] ; then
13054 exit $err
13055 fi
13056
13057 +# Do not use whatever ~/.perfconfig file, it may change the output
13058 +# via trace.{show_timestamp,show_prefix,etc}
13059 +export PERF_CONFIG=/dev/null
13060 +
13061 trace_open_vfs_getname
13062 err=$?
13063 rm -f ${file}
13064 diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
13065 index 52242fa4072b..e19eb6ea361d 100644
13066 --- a/tools/perf/trace/beauty/ioctl.c
13067 +++ b/tools/perf/trace/beauty/ioctl.c
13068 @@ -21,7 +21,7 @@
13069 static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
13070 {
13071 static const char *ioctl_tty_cmd[] = {
13072 - "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
13073 + [_IOC_NR(TCGETS)] = "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
13074 "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL", "TIOCSCTTY",
13075 "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI", "TIOCGWINSZ", "TIOCSWINSZ",
13076 "TIOCMGET", "TIOCMBIS", "TIOCMBIC", "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR",
13077 diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
13078 index 4d565cc14076..0355d4aaf2ee 100644
13079 --- a/tools/perf/ui/browsers/scripts.c
13080 +++ b/tools/perf/ui/browsers/scripts.c
13081 @@ -131,8 +131,10 @@ static int list_scripts(char *script_name, bool *custom,
13082 int key = ui_browser__input_window("perf script command",
13083 "Enter perf script command line (without perf script prefix)",
13084 script_args, "", 0);
13085 - if (key != K_ENTER)
13086 - return -1;
13087 + if (key != K_ENTER) {
13088 + ret = -1;
13089 + goto out;
13090 + }
13091 sprintf(script_name, "%s script %s", perf, script_args);
13092 } else if (choice < num + max_std) {
13093 strcpy(script_name, paths[choice]);
13094 diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c
13095 index b3c421429ed4..54bcd08df87e 100644
13096 --- a/tools/perf/ui/helpline.c
13097 +++ b/tools/perf/ui/helpline.c
13098 @@ -3,10 +3,10 @@
13099 #include <stdlib.h>
13100 #include <string.h>
13101
13102 -#include "../debug.h"
13103 +#include "../util/debug.h"
13104 #include "helpline.h"
13105 #include "ui.h"
13106 -#include "../util.h"
13107 +#include "../util/util.h"
13108
13109 char ui_helpline__current[512];
13110
13111 diff --git a/tools/perf/ui/util.c b/tools/perf/ui/util.c
13112 index 63bf06e80ab9..9ed76e88a3e4 100644
13113 --- a/tools/perf/ui/util.c
13114 +++ b/tools/perf/ui/util.c
13115 @@ -1,6 +1,6 @@
13116 // SPDX-License-Identifier: GPL-2.0
13117 #include "util.h"
13118 -#include "../debug.h"
13119 +#include "../util/debug.h"
13120
13121
13122 /*
13123 diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
13124 index b0364d923f76..070c3bd57882 100644
13125 --- a/tools/perf/util/evlist.c
13126 +++ b/tools/perf/util/evlist.c
13127 @@ -20,6 +20,7 @@
13128 #include "bpf-event.h"
13129 #include <signal.h>
13130 #include <unistd.h>
13131 +#include <sched.h>
13132
13133 #include "parse-events.h"
13134 #include <subcmd/parse-options.h>
13135 @@ -1870,6 +1871,14 @@ static void *perf_evlist__poll_thread(void *arg)
13136 struct perf_evlist *evlist = arg;
13137 bool draining = false;
13138 int i, done = 0;
13139 + /*
13140 + * In order to read symbols from other namespaces perf to needs to call
13141 + * setns(2). This isn't permitted if the struct_fs has multiple users.
13142 + * unshare(2) the fs so that we may continue to setns into namespaces
13143 + * that we're observing when, for instance, reading the build-ids at
13144 + * the end of a 'perf record' session.
13145 + */
13146 + unshare(CLONE_FS);
13147
13148 while (!done) {
13149 bool got_data = false;
13150 diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
13151 index 1903d7ec9797..bf7cf1249553 100644
13152 --- a/tools/perf/util/header.c
13153 +++ b/tools/perf/util/header.c
13154 @@ -2251,8 +2251,10 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
13155 /* On s390 the socket_id number is not related to the numbers of cpus.
13156 * The socket_id number might be higher than the numbers of cpus.
13157 * This depends on the configuration.
13158 + * AArch64 is the same.
13159 */
13160 - if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
13161 + if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
13162 + || !strncmp(ph->env.arch, "aarch64", 7)))
13163 do_core_id_test = false;
13164
13165 for (i = 0; i < (u32)cpu_nr; i++) {
13166 diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
13167 index f24fd1954f6c..6bd270a1e93e 100644
13168 --- a/tools/perf/util/hist.c
13169 +++ b/tools/perf/util/hist.c
13170 @@ -193,7 +193,10 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
13171 hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
13172 hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
13173 hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
13174 - hists__new_col_len(hists, HISTC_TIME, 12);
13175 + if (symbol_conf.nanosecs)
13176 + hists__new_col_len(hists, HISTC_TIME, 16);
13177 + else
13178 + hists__new_col_len(hists, HISTC_TIME, 12);
13179
13180 if (h->srcline) {
13181 len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
13182 diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
13183 index 668410b1d426..7666206d06fa 100644
13184 --- a/tools/perf/util/map.c
13185 +++ b/tools/perf/util/map.c
13186 @@ -647,6 +647,7 @@ struct map_groups *map_groups__new(struct machine *machine)
13187 void map_groups__delete(struct map_groups *mg)
13188 {
13189 map_groups__exit(mg);
13190 + unwind__finish_access(mg);
13191 free(mg);
13192 }
13193
13194 @@ -887,7 +888,7 @@ int map_groups__clone(struct thread *thread, struct map_groups *parent)
13195 if (new == NULL)
13196 goto out_unlock;
13197
13198 - err = unwind__prepare_access(thread, new, NULL);
13199 + err = unwind__prepare_access(mg, new, NULL);
13200 if (err)
13201 goto out_unlock;
13202
13203 diff --git a/tools/perf/util/map_groups.h b/tools/perf/util/map_groups.h
13204 index 5f25efa6d6bc..77252e14008f 100644
13205 --- a/tools/perf/util/map_groups.h
13206 +++ b/tools/perf/util/map_groups.h
13207 @@ -31,6 +31,10 @@ struct map_groups {
13208 struct maps maps;
13209 struct machine *machine;
13210 refcount_t refcnt;
13211 +#ifdef HAVE_LIBUNWIND_SUPPORT
13212 + void *addr_space;
13213 + struct unwind_libunwind_ops *unwind_libunwind_ops;
13214 +#endif
13215 };
13216
13217 #define KMAP_NAME_LEN 256
13218 diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
13219 index 590793cc5142..bbf7816cba31 100644
13220 --- a/tools/perf/util/thread.c
13221 +++ b/tools/perf/util/thread.c
13222 @@ -105,7 +105,6 @@ void thread__delete(struct thread *thread)
13223 }
13224 up_write(&thread->comm_lock);
13225
13226 - unwind__finish_access(thread);
13227 nsinfo__zput(thread->nsinfo);
13228 srccode_state_free(&thread->srccode_state);
13229
13230 @@ -252,7 +251,7 @@ static int ____thread__set_comm(struct thread *thread, const char *str,
13231 list_add(&new->list, &thread->comm_list);
13232
13233 if (exec)
13234 - unwind__flush_access(thread);
13235 + unwind__flush_access(thread->mg);
13236 }
13237
13238 thread->comm_set = true;
13239 @@ -332,7 +331,7 @@ int thread__insert_map(struct thread *thread, struct map *map)
13240 {
13241 int ret;
13242
13243 - ret = unwind__prepare_access(thread, map, NULL);
13244 + ret = unwind__prepare_access(thread->mg, map, NULL);
13245 if (ret)
13246 return ret;
13247
13248 @@ -352,7 +351,7 @@ static int __thread__prepare_access(struct thread *thread)
13249 down_read(&maps->lock);
13250
13251 for (map = maps__first(maps); map; map = map__next(map)) {
13252 - err = unwind__prepare_access(thread, map, &initialized);
13253 + err = unwind__prepare_access(thread->mg, map, &initialized);
13254 if (err || initialized)
13255 break;
13256 }
13257 diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
13258 index e97ef6977eb9..bf06113be4f3 100644
13259 --- a/tools/perf/util/thread.h
13260 +++ b/tools/perf/util/thread.h
13261 @@ -44,10 +44,6 @@ struct thread {
13262 struct thread_stack *ts;
13263 struct nsinfo *nsinfo;
13264 struct srccode_state srccode_state;
13265 -#ifdef HAVE_LIBUNWIND_SUPPORT
13266 - void *addr_space;
13267 - struct unwind_libunwind_ops *unwind_libunwind_ops;
13268 -#endif
13269 bool filter;
13270 int filter_entry_depth;
13271 };
13272 diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
13273 index 71a788921b62..ebdbb056510c 100644
13274 --- a/tools/perf/util/unwind-libunwind-local.c
13275 +++ b/tools/perf/util/unwind-libunwind-local.c
13276 @@ -616,26 +616,26 @@ static unw_accessors_t accessors = {
13277 .get_proc_name = get_proc_name,
13278 };
13279
13280 -static int _unwind__prepare_access(struct thread *thread)
13281 +static int _unwind__prepare_access(struct map_groups *mg)
13282 {
13283 - thread->addr_space = unw_create_addr_space(&accessors, 0);
13284 - if (!thread->addr_space) {
13285 + mg->addr_space = unw_create_addr_space(&accessors, 0);
13286 + if (!mg->addr_space) {
13287 pr_err("unwind: Can't create unwind address space.\n");
13288 return -ENOMEM;
13289 }
13290
13291 - unw_set_caching_policy(thread->addr_space, UNW_CACHE_GLOBAL);
13292 + unw_set_caching_policy(mg->addr_space, UNW_CACHE_GLOBAL);
13293 return 0;
13294 }
13295
13296 -static void _unwind__flush_access(struct thread *thread)
13297 +static void _unwind__flush_access(struct map_groups *mg)
13298 {
13299 - unw_flush_cache(thread->addr_space, 0, 0);
13300 + unw_flush_cache(mg->addr_space, 0, 0);
13301 }
13302
13303 -static void _unwind__finish_access(struct thread *thread)
13304 +static void _unwind__finish_access(struct map_groups *mg)
13305 {
13306 - unw_destroy_addr_space(thread->addr_space);
13307 + unw_destroy_addr_space(mg->addr_space);
13308 }
13309
13310 static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
13311 @@ -660,7 +660,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
13312 */
13313 if (max_stack - 1 > 0) {
13314 WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
13315 - addr_space = ui->thread->addr_space;
13316 + addr_space = ui->thread->mg->addr_space;
13317
13318 if (addr_space == NULL)
13319 return -1;
13320 diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
13321 index c0811977d7d5..b843f9d0a9ea 100644
13322 --- a/tools/perf/util/unwind-libunwind.c
13323 +++ b/tools/perf/util/unwind-libunwind.c
13324 @@ -11,13 +11,13 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
13325 struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
13326 struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
13327
13328 -static void unwind__register_ops(struct thread *thread,
13329 +static void unwind__register_ops(struct map_groups *mg,
13330 struct unwind_libunwind_ops *ops)
13331 {
13332 - thread->unwind_libunwind_ops = ops;
13333 + mg->unwind_libunwind_ops = ops;
13334 }
13335
13336 -int unwind__prepare_access(struct thread *thread, struct map *map,
13337 +int unwind__prepare_access(struct map_groups *mg, struct map *map,
13338 bool *initialized)
13339 {
13340 const char *arch;
13341 @@ -28,7 +28,7 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
13342 if (!dwarf_callchain_users)
13343 return 0;
13344
13345 - if (thread->addr_space) {
13346 + if (mg->addr_space) {
13347 pr_debug("unwind: thread map already set, dso=%s\n",
13348 map->dso->name);
13349 if (initialized)
13350 @@ -37,14 +37,14 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
13351 }
13352
13353 /* env->arch is NULL for live-mode (i.e. perf top) */
13354 - if (!thread->mg->machine->env || !thread->mg->machine->env->arch)
13355 + if (!mg->machine->env || !mg->machine->env->arch)
13356 goto out_register;
13357
13358 - dso_type = dso__type(map->dso, thread->mg->machine);
13359 + dso_type = dso__type(map->dso, mg->machine);
13360 if (dso_type == DSO__TYPE_UNKNOWN)
13361 return 0;
13362
13363 - arch = perf_env__arch(thread->mg->machine->env);
13364 + arch = perf_env__arch(mg->machine->env);
13365
13366 if (!strcmp(arch, "x86")) {
13367 if (dso_type != DSO__TYPE_64BIT)
13368 @@ -59,37 +59,37 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
13369 return 0;
13370 }
13371 out_register:
13372 - unwind__register_ops(thread, ops);
13373 + unwind__register_ops(mg, ops);
13374
13375 - err = thread->unwind_libunwind_ops->prepare_access(thread);
13376 + err = mg->unwind_libunwind_ops->prepare_access(mg);
13377 if (initialized)
13378 *initialized = err ? false : true;
13379 return err;
13380 }
13381
13382 -void unwind__flush_access(struct thread *thread)
13383 +void unwind__flush_access(struct map_groups *mg)
13384 {
13385 if (!dwarf_callchain_users)
13386 return;
13387
13388 - if (thread->unwind_libunwind_ops)
13389 - thread->unwind_libunwind_ops->flush_access(thread);
13390 + if (mg->unwind_libunwind_ops)
13391 + mg->unwind_libunwind_ops->flush_access(mg);
13392 }
13393
13394 -void unwind__finish_access(struct thread *thread)
13395 +void unwind__finish_access(struct map_groups *mg)
13396 {
13397 if (!dwarf_callchain_users)
13398 return;
13399
13400 - if (thread->unwind_libunwind_ops)
13401 - thread->unwind_libunwind_ops->finish_access(thread);
13402 + if (mg->unwind_libunwind_ops)
13403 + mg->unwind_libunwind_ops->finish_access(mg);
13404 }
13405
13406 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
13407 struct thread *thread,
13408 struct perf_sample *data, int max_stack)
13409 {
13410 - if (thread->unwind_libunwind_ops)
13411 - return thread->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
13412 + if (thread->mg->unwind_libunwind_ops)
13413 + return thread->mg->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
13414 return 0;
13415 }
13416 diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
13417 index 8a44a1569a21..3a7d00c20d86 100644
13418 --- a/tools/perf/util/unwind.h
13419 +++ b/tools/perf/util/unwind.h
13420 @@ -6,6 +6,7 @@
13421 #include <linux/types.h>
13422
13423 struct map;
13424 +struct map_groups;
13425 struct perf_sample;
13426 struct symbol;
13427 struct thread;
13428 @@ -19,9 +20,9 @@ struct unwind_entry {
13429 typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
13430
13431 struct unwind_libunwind_ops {
13432 - int (*prepare_access)(struct thread *thread);
13433 - void (*flush_access)(struct thread *thread);
13434 - void (*finish_access)(struct thread *thread);
13435 + int (*prepare_access)(struct map_groups *mg);
13436 + void (*flush_access)(struct map_groups *mg);
13437 + void (*finish_access)(struct map_groups *mg);
13438 int (*get_entries)(unwind_entry_cb_t cb, void *arg,
13439 struct thread *thread,
13440 struct perf_sample *data, int max_stack);
13441 @@ -46,20 +47,20 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
13442 #endif
13443
13444 int LIBUNWIND__ARCH_REG_ID(int regnum);
13445 -int unwind__prepare_access(struct thread *thread, struct map *map,
13446 +int unwind__prepare_access(struct map_groups *mg, struct map *map,
13447 bool *initialized);
13448 -void unwind__flush_access(struct thread *thread);
13449 -void unwind__finish_access(struct thread *thread);
13450 +void unwind__flush_access(struct map_groups *mg);
13451 +void unwind__finish_access(struct map_groups *mg);
13452 #else
13453 -static inline int unwind__prepare_access(struct thread *thread __maybe_unused,
13454 +static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
13455 struct map *map __maybe_unused,
13456 bool *initialized __maybe_unused)
13457 {
13458 return 0;
13459 }
13460
13461 -static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
13462 -static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
13463 +static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
13464 +static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
13465 #endif
13466 #else
13467 static inline int
13468 @@ -72,14 +73,14 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
13469 return 0;
13470 }
13471
13472 -static inline int unwind__prepare_access(struct thread *thread __maybe_unused,
13473 +static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
13474 struct map *map __maybe_unused,
13475 bool *initialized __maybe_unused)
13476 {
13477 return 0;
13478 }
13479
13480 -static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
13481 -static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
13482 +static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
13483 +static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
13484 #endif /* HAVE_DWARF_UNWIND_SUPPORT */
13485 #endif /* __UNWIND_H */
13486 diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
13487 index 7ffe562e7ae7..2627b038b6f2 100644
13488 --- a/tools/perf/util/xyarray.h
13489 +++ b/tools/perf/util/xyarray.h
13490 @@ -2,6 +2,7 @@
13491 #ifndef _PERF_XYARRAY_H_
13492 #define _PERF_XYARRAY_H_ 1
13493
13494 +#include <linux/compiler.h>
13495 #include <sys/types.h>
13496
13497 struct xyarray {
13498 @@ -10,7 +11,7 @@ struct xyarray {
13499 size_t entries;
13500 size_t max_x;
13501 size_t max_y;
13502 - char contents[];
13503 + char contents[] __aligned(8);
13504 };
13505
13506 struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
13507 diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
13508 index 91c5ad1685a1..6a10dea01eef 100644
13509 --- a/tools/power/x86/intel-speed-select/isst-config.c
13510 +++ b/tools/power/x86/intel-speed-select/isst-config.c
13511 @@ -603,6 +603,10 @@ static int isst_fill_platform_info(void)
13512
13513 close(fd);
13514
13515 + if (isst_platform_info.api_version > supported_api_ver) {
13516 + printf("Incompatible API versions; Upgrade of tool is required\n");
13517 + return -1;
13518 + }
13519 return 0;
13520 }
13521
13522 @@ -1529,6 +1533,7 @@ static void cmdline(int argc, char **argv)
13523 {
13524 int opt;
13525 int option_index = 0;
13526 + int ret;
13527
13528 static struct option long_options[] = {
13529 { "cpu", required_argument, 0, 'c' },
13530 @@ -1590,13 +1595,14 @@ static void cmdline(int argc, char **argv)
13531 set_max_cpu_num();
13532 set_cpu_present_cpu_mask();
13533 set_cpu_target_cpu_mask();
13534 - isst_fill_platform_info();
13535 - if (isst_platform_info.api_version > supported_api_ver) {
13536 - printf("Incompatible API versions; Upgrade of tool is required\n");
13537 - exit(0);
13538 - }
13539 + ret = isst_fill_platform_info();
13540 + if (ret)
13541 + goto out;
13542
13543 process_command(argc, argv);
13544 +out:
13545 + free_cpu_set(present_cpumask);
13546 + free_cpu_set(target_cpumask);
13547 }
13548
13549 int main(int argc, char **argv)
13550 diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
13551 index e6828732843e..9dc35a16e415 100755
13552 --- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
13553 +++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
13554 @@ -15,6 +15,8 @@
13555 PAUSE_ON_FAIL=no
13556 VERBOSE=0
13557
13558 +which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
13559 +
13560 ################################################################################
13561 # helpers
13562
13563 @@ -200,7 +202,7 @@ validate_v6_exception()
13564 local rc
13565
13566 if [ ${ping_sz} != "0" ]; then
13567 - run_cmd ip netns exec h0 ping6 -s ${ping_sz} -c5 -w5 ${dst}
13568 + run_cmd ip netns exec h0 ${ping6} -s ${ping_sz} -c5 -w5 ${dst}
13569 fi
13570
13571 if [ "$VERBOSE" = "1" ]; then
13572 @@ -243,7 +245,7 @@ do
13573 run_cmd taskset -c ${c} ip netns exec h0 ping -c1 -w1 172.16.10${i}.1
13574 [ $? -ne 0 ] && printf "\nERROR: ping to h${i} failed\n" && ret=1
13575
13576 - run_cmd taskset -c ${c} ip netns exec h0 ping6 -c1 -w1 2001:db8:10${i}::1
13577 + run_cmd taskset -c ${c} ip netns exec h0 ${ping6} -c1 -w1 2001:db8:10${i}::1
13578 [ $? -ne 0 ] && printf "\nERROR: ping6 to h${i} failed\n" && ret=1
13579
13580 [ $ret -ne 0 ] && break
13581 diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
13582 index 4465fc2dae14..c4ba0ff4a53f 100755
13583 --- a/tools/testing/selftests/net/fib_tests.sh
13584 +++ b/tools/testing/selftests/net/fib_tests.sh
13585 @@ -9,7 +9,7 @@ ret=0
13586 ksft_skip=4
13587
13588 # all tests in this script. Can be overridden with -t option
13589 -TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter"
13590 +TESTS="unregister down carrier nexthop suppress ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter"
13591
13592 VERBOSE=0
13593 PAUSE_ON_FAIL=no
13594 @@ -17,6 +17,8 @@ PAUSE=no
13595 IP="ip -netns ns1"
13596 NS_EXEC="ip netns exec ns1"
13597
13598 +which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
13599 +
13600 log_test()
13601 {
13602 local rc=$1
13603 @@ -614,6 +616,20 @@ fib_nexthop_test()
13604 cleanup
13605 }
13606
13607 +fib_suppress_test()
13608 +{
13609 + $IP link add dummy1 type dummy
13610 + $IP link set dummy1 up
13611 + $IP -6 route add default dev dummy1
13612 + $IP -6 rule add table main suppress_prefixlength 0
13613 + ping -f -c 1000 -W 1 1234::1 || true
13614 + $IP -6 rule del table main suppress_prefixlength 0
13615 + $IP link del dummy1
13616 +
13617 + # If we got here without crashing, we're good.
13618 + return 0
13619 +}
13620 +
13621 ################################################################################
13622 # Tests on route add and replace
13623
13624 @@ -1086,7 +1102,7 @@ ipv6_route_metrics_test()
13625 log_test $rc 0 "Multipath route with mtu metric"
13626
13627 $IP -6 ro add 2001:db8:104::/64 via 2001:db8:101::2 mtu 1300
13628 - run_cmd "ip netns exec ns1 ping6 -w1 -c1 -s 1500 2001:db8:104::1"
13629 + run_cmd "ip netns exec ns1 ${ping6} -w1 -c1 -s 1500 2001:db8:104::1"
13630 log_test $? 0 "Using route with mtu metric"
13631
13632 run_cmd "$IP -6 ro add 2001:db8:114::/64 via 2001:db8:101::2 congctl lock foo"
13633 @@ -1591,6 +1607,7 @@ do
13634 fib_carrier_test|carrier) fib_carrier_test;;
13635 fib_rp_filter_test|rp_filter) fib_rp_filter_test;;
13636 fib_nexthop_test|nexthop) fib_nexthop_test;;
13637 + fib_suppress_test|suppress) fib_suppress_test;;
13638 ipv6_route_test|ipv6_rt) ipv6_route_test;;
13639 ipv4_route_test|ipv4_rt) ipv4_route_test;;
13640 ipv6_addr_metric) ipv6_addr_metric_test;;