Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0297-5.4.198-all-fixes.patch



Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 402141 bytes
-add missing
1 diff --git a/Documentation/ABI/testing/sysfs-ata b/Documentation/ABI/testing/sysfs-ata
2 index 9ab0ef1dd1c72..299e0d1dc1619 100644
3 --- a/Documentation/ABI/testing/sysfs-ata
4 +++ b/Documentation/ABI/testing/sysfs-ata
5 @@ -107,13 +107,14 @@ Description:
6 described in ATA8 7.16 and 7.17. Only valid if
7 the device is not a PM.
8
9 - pio_mode: (RO) Transfer modes supported by the device when
10 - in PIO mode. Mostly used by PATA device.
11 + pio_mode: (RO) PIO transfer mode used by the device.
12 + Mostly used by PATA devices.
13
14 - xfer_mode: (RO) Current transfer mode
15 + xfer_mode: (RO) Current transfer mode. Mostly used by
16 + PATA devices.
17
18 - dma_mode: (RO) Transfer modes supported by the device when
19 - in DMA mode. Mostly used by PATA device.
20 + dma_mode: (RO) DMA transfer mode used by the device.
21 + Mostly used by PATA devices.
22
23 class: (RO) Device class. Can be "ata" for disk,
24 "atapi" for packet device, "pmp" for PM, or
25 diff --git a/Documentation/conf.py b/Documentation/conf.py
26 index a8fe845832bce..38c1f7618b5e8 100644
27 --- a/Documentation/conf.py
28 +++ b/Documentation/conf.py
29 @@ -98,7 +98,7 @@ finally:
30 #
31 # This is also used if you do content translation via gettext catalogs.
32 # Usually you set "language" from the command line for these cases.
33 -language = None
34 +language = 'en'
35
36 # There are two options for replacing |today|: either, you set today to some
37 # non-false value, then it is used:
38 diff --git a/Documentation/devicetree/bindings/gpio/gpio-altera.txt b/Documentation/devicetree/bindings/gpio/gpio-altera.txt
39 index 146e554b3c676..2a80e272cd666 100644
40 --- a/Documentation/devicetree/bindings/gpio/gpio-altera.txt
41 +++ b/Documentation/devicetree/bindings/gpio/gpio-altera.txt
42 @@ -9,8 +9,9 @@ Required properties:
43 - The second cell is reserved and is currently unused.
44 - gpio-controller : Marks the device node as a GPIO controller.
45 - interrupt-controller: Mark the device node as an interrupt controller
46 -- #interrupt-cells : Should be 1. The interrupt type is fixed in the hardware.
47 +- #interrupt-cells : Should be 2. The interrupt type is fixed in the hardware.
48 - The first cell is the GPIO offset number within the GPIO controller.
49 + - The second cell is the interrupt trigger type and level flags.
50 - interrupts: Specify the interrupt.
51 - altr,interrupt-type: Specifies the interrupt trigger type the GPIO
52 hardware is synthesized. This field is required if the Altera GPIO controller
53 @@ -38,6 +39,6 @@ gpio_altr: gpio@ff200000 {
54 altr,interrupt-type = <IRQ_TYPE_EDGE_RISING>;
55 #gpio-cells = <2>;
56 gpio-controller;
57 - #interrupt-cells = <1>;
58 + #interrupt-cells = <2>;
59 interrupt-controller;
60 };
61 diff --git a/Documentation/hwmon/hwmon-kernel-api.rst b/Documentation/hwmon/hwmon-kernel-api.rst
62 index c41eb61081036..23f27fe78e379 100644
63 --- a/Documentation/hwmon/hwmon-kernel-api.rst
64 +++ b/Documentation/hwmon/hwmon-kernel-api.rst
65 @@ -72,7 +72,7 @@ hwmon_device_register_with_info is the most comprehensive and preferred means
66 to register a hardware monitoring device. It creates the standard sysfs
67 attributes in the hardware monitoring core, letting the driver focus on reading
68 from and writing to the chip instead of having to bother with sysfs attributes.
69 -The parent device parameter cannot be NULL with non-NULL chip info. Its
70 +The parent device parameter as well as the chip parameter must not be NULL. Its
71 parameters are described in more detail below.
72
73 devm_hwmon_device_register_with_info is similar to
74 diff --git a/Makefile b/Makefile
75 index 57e27af9fc0c0..1c99e688da213 100644
76 --- a/Makefile
77 +++ b/Makefile
78 @@ -1,7 +1,7 @@
79 # SPDX-License-Identifier: GPL-2.0
80 VERSION = 5
81 PATCHLEVEL = 4
82 -SUBLEVEL = 197
83 +SUBLEVEL = 198
84 EXTRAVERSION =
85 NAME = Kleptomaniac Octopus
86
87 diff --git a/arch/arm/boot/dts/bcm2835-rpi-b.dts b/arch/arm/boot/dts/bcm2835-rpi-b.dts
88 index 2b69957e0113e..1838e0fa0ff59 100644
89 --- a/arch/arm/boot/dts/bcm2835-rpi-b.dts
90 +++ b/arch/arm/boot/dts/bcm2835-rpi-b.dts
91 @@ -53,18 +53,17 @@
92 "GPIO18",
93 "NC", /* GPIO19 */
94 "NC", /* GPIO20 */
95 - "GPIO21",
96 + "CAM_GPIO0",
97 "GPIO22",
98 "GPIO23",
99 "GPIO24",
100 "GPIO25",
101 "NC", /* GPIO26 */
102 - "CAM_GPIO0",
103 - /* Binary number representing build/revision */
104 - "CONFIG0",
105 - "CONFIG1",
106 - "CONFIG2",
107 - "CONFIG3",
108 + "GPIO27",
109 + "GPIO28",
110 + "GPIO29",
111 + "GPIO30",
112 + "GPIO31",
113 "NC", /* GPIO32 */
114 "NC", /* GPIO33 */
115 "NC", /* GPIO34 */
116 diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
117 index f65448c01e317..34a85ad9f03c2 100644
118 --- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
119 +++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
120 @@ -74,16 +74,18 @@
121 "GPIO27",
122 "SDA0",
123 "SCL0",
124 - "NC", /* GPIO30 */
125 - "NC", /* GPIO31 */
126 - "NC", /* GPIO32 */
127 - "NC", /* GPIO33 */
128 - "NC", /* GPIO34 */
129 - "NC", /* GPIO35 */
130 - "NC", /* GPIO36 */
131 - "NC", /* GPIO37 */
132 - "NC", /* GPIO38 */
133 - "NC", /* GPIO39 */
134 + /* Used by BT module */
135 + "CTS0",
136 + "RTS0",
137 + "TXD0",
138 + "RXD0",
139 + /* Used by Wifi */
140 + "SD1_CLK",
141 + "SD1_CMD",
142 + "SD1_DATA0",
143 + "SD1_DATA1",
144 + "SD1_DATA2",
145 + "SD1_DATA3",
146 "CAM_GPIO1", /* GPIO40 */
147 "WL_ON", /* GPIO41 */
148 "NC", /* GPIO42 */
149 diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
150 index 74ed6d0478070..d9f63fc59f165 100644
151 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
152 +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
153 @@ -43,7 +43,7 @@
154 #gpio-cells = <2>;
155 gpio-line-names = "BT_ON",
156 "WL_ON",
157 - "STATUS_LED_R",
158 + "PWR_LED_R",
159 "LAN_RUN",
160 "",
161 "CAM_GPIO0",
162 diff --git a/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts b/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts
163 index 588d9411ceb61..3dfce4312dfc4 100644
164 --- a/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts
165 +++ b/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts
166 @@ -63,8 +63,8 @@
167 "GPIO43",
168 "GPIO44",
169 "GPIO45",
170 - "GPIO46",
171 - "GPIO47",
172 + "SMPS_SCL",
173 + "SMPS_SDA",
174 /* Used by eMMC */
175 "SD_CLK_R",
176 "SD_CMD_R",
177 diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
178 index fa5dd992e3273..c7e350ea03fe3 100644
179 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
180 +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
181 @@ -128,7 +128,7 @@
182 samsung,i2c-max-bus-freq = <20000>;
183
184 eeprom@50 {
185 - compatible = "samsung,s524ad0xd1";
186 + compatible = "samsung,s524ad0xd1", "atmel,24c128";
187 reg = <0x50>;
188 };
189
190 @@ -287,7 +287,7 @@
191 samsung,i2c-max-bus-freq = <20000>;
192
193 eeprom@51 {
194 - compatible = "samsung,s524ad0xd1";
195 + compatible = "samsung,s524ad0xd1", "atmel,24c128";
196 reg = <0x51>;
197 };
198
199 diff --git a/arch/arm/boot/dts/ox820.dtsi b/arch/arm/boot/dts/ox820.dtsi
200 index 90846a7655b49..dde4364892bf0 100644
201 --- a/arch/arm/boot/dts/ox820.dtsi
202 +++ b/arch/arm/boot/dts/ox820.dtsi
203 @@ -287,7 +287,7 @@
204 clocks = <&armclk>;
205 };
206
207 - gic: gic@1000 {
208 + gic: interrupt-controller@1000 {
209 compatible = "arm,arm11mp-gic";
210 interrupt-controller;
211 #interrupt-cells = <3>;
212 diff --git a/arch/arm/boot/dts/suniv-f1c100s.dtsi b/arch/arm/boot/dts/suniv-f1c100s.dtsi
213 index 6100d3b75f613..def8301014487 100644
214 --- a/arch/arm/boot/dts/suniv-f1c100s.dtsi
215 +++ b/arch/arm/boot/dts/suniv-f1c100s.dtsi
216 @@ -104,8 +104,10 @@
217
218 wdt: watchdog@1c20ca0 {
219 compatible = "allwinner,suniv-f1c100s-wdt",
220 - "allwinner,sun4i-a10-wdt";
221 + "allwinner,sun6i-a31-wdt";
222 reg = <0x01c20ca0 0x20>;
223 + interrupts = <16>;
224 + clocks = <&osc32k>;
225 };
226
227 uart0: serial@1c25000 {
228 diff --git a/arch/arm/mach-hisi/platsmp.c b/arch/arm/mach-hisi/platsmp.c
229 index da7a09c1dae56..1cd1d9b0aabf9 100644
230 --- a/arch/arm/mach-hisi/platsmp.c
231 +++ b/arch/arm/mach-hisi/platsmp.c
232 @@ -67,14 +67,17 @@ static void __init hi3xxx_smp_prepare_cpus(unsigned int max_cpus)
233 }
234 ctrl_base = of_iomap(np, 0);
235 if (!ctrl_base) {
236 + of_node_put(np);
237 pr_err("failed to map address\n");
238 return;
239 }
240 if (of_property_read_u32(np, "smp-offset", &offset) < 0) {
241 + of_node_put(np);
242 pr_err("failed to find smp-offset property\n");
243 return;
244 }
245 ctrl_base += offset;
246 + of_node_put(np);
247 }
248 }
249
250 @@ -160,6 +163,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
251 if (WARN_ON(!node))
252 return -1;
253 ctrl_base = of_iomap(node, 0);
254 + of_node_put(node);
255
256 /* set the secondary core boot from DDR */
257 remap_reg_value = readl_relaxed(ctrl_base + REG_SC_CTRL);
258 diff --git a/arch/arm/mach-mediatek/Kconfig b/arch/arm/mach-mediatek/Kconfig
259 index 9e0f592d87d8e..35a3430c7942d 100644
260 --- a/arch/arm/mach-mediatek/Kconfig
261 +++ b/arch/arm/mach-mediatek/Kconfig
262 @@ -30,6 +30,7 @@ config MACH_MT7623
263 config MACH_MT7629
264 bool "MediaTek MT7629 SoCs support"
265 default ARCH_MEDIATEK
266 + select HAVE_ARM_ARCH_TIMER
267
268 config MACH_MT8127
269 bool "MediaTek MT8127 SoCs support"
270 diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
271 index bd5be82101f32..d89bda12bf3cd 100644
272 --- a/arch/arm/mach-omap1/clock.c
273 +++ b/arch/arm/mach-omap1/clock.c
274 @@ -41,7 +41,7 @@ static DEFINE_SPINLOCK(clockfw_lock);
275 unsigned long omap1_uart_recalc(struct clk *clk)
276 {
277 unsigned int val = __raw_readl(clk->enable_reg);
278 - return val & clk->enable_bit ? 48000000 : 12000000;
279 + return val & 1 << clk->enable_bit ? 48000000 : 12000000;
280 }
281
282 unsigned long omap1_sossi_recalc(struct clk *clk)
283 diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
284 index 425855f456f2b..719e6395797cb 100644
285 --- a/arch/arm/mach-pxa/cm-x300.c
286 +++ b/arch/arm/mach-pxa/cm-x300.c
287 @@ -355,13 +355,13 @@ static struct platform_device cm_x300_spi_gpio = {
288 static struct gpiod_lookup_table cm_x300_spi_gpiod_table = {
289 .dev_id = "spi_gpio",
290 .table = {
291 - GPIO_LOOKUP("gpio-pxa", GPIO_LCD_SCL,
292 + GPIO_LOOKUP("pca9555.1", GPIO_LCD_SCL - GPIO_LCD_BASE,
293 "sck", GPIO_ACTIVE_HIGH),
294 - GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DIN,
295 + GPIO_LOOKUP("pca9555.1", GPIO_LCD_DIN - GPIO_LCD_BASE,
296 "mosi", GPIO_ACTIVE_HIGH),
297 - GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DOUT,
298 + GPIO_LOOKUP("pca9555.1", GPIO_LCD_DOUT - GPIO_LCD_BASE,
299 "miso", GPIO_ACTIVE_HIGH),
300 - GPIO_LOOKUP("gpio-pxa", GPIO_LCD_CS,
301 + GPIO_LOOKUP("pca9555.1", GPIO_LCD_CS - GPIO_LCD_BASE,
302 "cs", GPIO_ACTIVE_HIGH),
303 { },
304 },
305 diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
306 index e1a394ac3eea7..8f2d4faa26120 100644
307 --- a/arch/arm/mach-pxa/magician.c
308 +++ b/arch/arm/mach-pxa/magician.c
309 @@ -675,7 +675,7 @@ static struct platform_device bq24022 = {
310 static struct gpiod_lookup_table bq24022_gpiod_table = {
311 .dev_id = "gpio-regulator",
312 .table = {
313 - GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2,
314 + GPIO_LOOKUP("htc-egpio-0", EGPIO_MAGICIAN_BQ24022_ISET2 - MAGICIAN_EGPIO_BASE,
315 NULL, GPIO_ACTIVE_HIGH),
316 GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN,
317 "enable", GPIO_ACTIVE_LOW),
318 diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
319 index f537ff1c3ba7e..3fbcaa3b4e182 100644
320 --- a/arch/arm/mach-pxa/tosa.c
321 +++ b/arch/arm/mach-pxa/tosa.c
322 @@ -295,9 +295,9 @@ static struct gpiod_lookup_table tosa_mci_gpio_table = {
323 .table = {
324 GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_nSD_DETECT,
325 "cd", GPIO_ACTIVE_LOW),
326 - GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_SD_WP,
327 + GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_SD_WP - TOSA_SCOOP_GPIO_BASE,
328 "wp", GPIO_ACTIVE_LOW),
329 - GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_PWR_ON,
330 + GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_PWR_ON - TOSA_SCOOP_GPIO_BASE,
331 "power", GPIO_ACTIVE_HIGH),
332 { },
333 },
334 diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
335 index 46a903c88c6a0..f553cde614f92 100644
336 --- a/arch/arm/mach-vexpress/dcscb.c
337 +++ b/arch/arm/mach-vexpress/dcscb.c
338 @@ -143,6 +143,7 @@ static int __init dcscb_init(void)
339 if (!node)
340 return -ENODEV;
341 dcscb_base = of_iomap(node, 0);
342 + of_node_put(node);
343 if (!dcscb_base)
344 return -EADDRNOTAVAIL;
345 cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
346 diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
347 index 9dccf4db319b1..90202e5608d1e 100644
348 --- a/arch/arm64/Kconfig.platforms
349 +++ b/arch/arm64/Kconfig.platforms
350 @@ -225,6 +225,7 @@ config ARCH_STRATIX10
351
352 config ARCH_SYNQUACER
353 bool "Socionext SynQuacer SoC Family"
354 + select IRQ_FASTEOI_HIERARCHY_HANDLERS
355
356 config ARCH_TEGRA
357 bool "NVIDIA Tegra SoC Family"
358 diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
359 index 67ee5f5601046..7822592664ffb 100644
360 --- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
361 +++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
362 @@ -482,7 +482,7 @@
363 clocks {
364 sleep_clk: sleep_clk {
365 compatible = "fixed-clock";
366 - clock-frequency = <32000>;
367 + clock-frequency = <32768>;
368 #clock-cells = <0>;
369 };
370
371 diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
372 index 95942d917de53..4496f7e1c68f8 100644
373 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
374 +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
375 @@ -1447,6 +1447,7 @@
376 reg = <0xf780 0x24>;
377 clocks = <&sdhci>;
378 clock-names = "emmcclk";
379 + drive-impedance-ohm = <50>;
380 #phy-cells = <0>;
381 status = "disabled";
382 };
383 @@ -1457,7 +1458,6 @@
384 clock-names = "refclk";
385 #phy-cells = <1>;
386 resets = <&cru SRST_PCIEPHY>;
387 - drive-impedance-ohm = <50>;
388 reset-names = "phy";
389 status = "disabled";
390 };
391 diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
392 index 3c18c2454089b..51274bab25653 100644
393 --- a/arch/arm64/kernel/sys_compat.c
394 +++ b/arch/arm64/kernel/sys_compat.c
395 @@ -115,6 +115,6 @@ long compat_arm_syscall(struct pt_regs *regs, int scno)
396 (compat_thumb_mode(regs) ? 2 : 4);
397
398 arm64_notify_die("Oops - bad compat syscall(2)", regs,
399 - SIGILL, ILL_ILLTRP, addr, scno);
400 + SIGILL, ILL_ILLTRP, addr, 0);
401 return 0;
402 }
403 diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
404 index 17a8d1484f9b9..9f71ca4414825 100644
405 --- a/arch/arm64/net/bpf_jit_comp.c
406 +++ b/arch/arm64/net/bpf_jit_comp.c
407 @@ -973,6 +973,7 @@ skip_init_ctx:
408 bpf_jit_binary_free(header);
409 prog->bpf_func = NULL;
410 prog->jited = 0;
411 + prog->jited_len = 0;
412 goto out_off;
413 }
414 bpf_jit_binary_lock_ro(header);
415 diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
416 index 60ac1cd8b96fb..6bc7fc14163f8 100644
417 --- a/arch/m68k/Kconfig.cpu
418 +++ b/arch/m68k/Kconfig.cpu
419 @@ -309,7 +309,7 @@ comment "Processor Specific Options"
420
421 config M68KFPU_EMU
422 bool "Math emulation support"
423 - depends on MMU
424 + depends on M68KCLASSIC && FPU
425 help
426 At some point in the future, this will cause floating-point math
427 instructions to be emulated by the kernel on machines that lack a
428 diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
429 index b88a980f56f8a..f0527b155c057 100644
430 --- a/arch/m68k/Kconfig.machine
431 +++ b/arch/m68k/Kconfig.machine
432 @@ -320,6 +320,7 @@ comment "Machine Options"
433
434 config UBOOT
435 bool "Support for U-Boot command line parameters"
436 + depends on COLDFIRE
437 help
438 If you say Y here kernel will try to collect command
439 line parameters from the initial u-boot stack.
440 diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
441 index c18165b0d9043..6b02484665691 100644
442 --- a/arch/m68k/include/asm/pgtable_no.h
443 +++ b/arch/m68k/include/asm/pgtable_no.h
444 @@ -42,7 +42,8 @@ extern void paging_init(void);
445 * ZERO_PAGE is a global shared page that is always zero: used
446 * for zero-mapped memory areas etc..
447 */
448 -#define ZERO_PAGE(vaddr) (virt_to_page(0))
449 +extern void *empty_zero_page;
450 +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
451
452 /*
453 * All 32bit addresses are effectively valid for vmalloc...
454 diff --git a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
455 index 136d6d464e320..93c69fc7bbd8c 100644
456 --- a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
457 +++ b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
458 @@ -28,7 +28,6 @@
459 #define cpu_has_6k_cache 0
460 #define cpu_has_8k_cache 0
461 #define cpu_has_tx39_cache 0
462 -#define cpu_has_fpu 1
463 #define cpu_has_nofpuex 0
464 #define cpu_has_32fpr 1
465 #define cpu_has_counter 1
466 diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
467 index 69e3e0b556bf7..1b0d4bb617a9c 100644
468 --- a/arch/mips/kernel/mips-cpc.c
469 +++ b/arch/mips/kernel/mips-cpc.c
470 @@ -27,6 +27,7 @@ phys_addr_t __weak mips_cpc_default_phys_base(void)
471 cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
472 if (cpc_node) {
473 err = of_address_to_resource(cpc_node, 0, &res);
474 + of_node_put(cpc_node);
475 if (!err)
476 return res.start;
477 }
478 diff --git a/arch/openrisc/include/asm/timex.h b/arch/openrisc/include/asm/timex.h
479 index d52b4e536e3f9..5487fa93dd9be 100644
480 --- a/arch/openrisc/include/asm/timex.h
481 +++ b/arch/openrisc/include/asm/timex.h
482 @@ -23,6 +23,7 @@ static inline cycles_t get_cycles(void)
483 {
484 return mfspr(SPR_TTCR);
485 }
486 +#define get_cycles get_cycles
487
488 /* This isn't really used any more */
489 #define CLOCK_TICK_RATE 1000
490 diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
491 index b0dc974f9a743..ffbbf639b7f95 100644
492 --- a/arch/openrisc/kernel/head.S
493 +++ b/arch/openrisc/kernel/head.S
494 @@ -521,6 +521,15 @@ _start:
495 l.ori r3,r0,0x1
496 l.mtspr r0,r3,SPR_SR
497
498 + /*
499 + * Start the TTCR as early as possible, so that the RNG can make use of
500 + * measurements of boot time from the earliest opportunity. Especially
501 + * important is that the TTCR does not return zero by the time we reach
502 + * rand_initialize().
503 + */
504 + l.movhi r3,hi(SPR_TTMR_CR)
505 + l.mtspr r0,r3,SPR_TTMR
506 +
507 CLEAR_GPR(r1)
508 CLEAR_GPR(r2)
509 CLEAR_GPR(r3)
510 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
511 index 0d8f9246ce153..d92353a96f811 100644
512 --- a/arch/powerpc/include/asm/page.h
513 +++ b/arch/powerpc/include/asm/page.h
514 @@ -216,6 +216,9 @@ static inline bool pfn_valid(unsigned long pfn)
515 #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
516 #else
517 #ifdef CONFIG_PPC64
518 +
519 +#define VIRTUAL_WARN_ON(x) WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))
520 +
521 /*
522 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
523 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
524 @@ -223,13 +226,13 @@ static inline bool pfn_valid(unsigned long pfn)
525 */
526 #define __va(x) \
527 ({ \
528 - VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \
529 + VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET); \
530 (void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \
531 })
532
533 #define __pa(x) \
534 ({ \
535 - VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \
536 + VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET); \
537 (unsigned long)(x) & 0x0fffffffffffffffUL; \
538 })
539
540 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
541 index 0455dc1b27977..69d64f406204f 100644
542 --- a/arch/powerpc/kernel/fadump.c
543 +++ b/arch/powerpc/kernel/fadump.c
544 @@ -835,7 +835,6 @@ static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
545 sizeof(struct fadump_memory_range));
546 return 0;
547 }
548 -
549 static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
550 u64 base, u64 end)
551 {
552 @@ -854,7 +853,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
553 start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
554 size = mem_ranges[mrange_info->mem_range_cnt - 1].size;
555
556 - if ((start + size) == base)
557 + /*
558 + * Boot memory area needs separate PT_LOAD segment(s) as it
559 + * is moved to a different location at the time of crash.
560 + * So, fold only if the region is not boot memory area.
561 + */
562 + if ((start + size) == base && start >= fw_dump.boot_mem_top)
563 is_adjacent = true;
564 }
565 if (!is_adjacent) {
566 diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
567 index a36fd053c3dba..0615ba86baef3 100644
568 --- a/arch/powerpc/kernel/idle.c
569 +++ b/arch/powerpc/kernel/idle.c
570 @@ -37,7 +37,7 @@ static int __init powersave_off(char *arg)
571 {
572 ppc_md.power_save = NULL;
573 cpuidle_disable = IDLE_POWERSAVE_OFF;
574 - return 0;
575 + return 1;
576 }
577 __setup("powersave=off", powersave_off);
578
579 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
580 index 8c92febf5f443..63bfc5250b67e 100644
581 --- a/arch/powerpc/kernel/ptrace.c
582 +++ b/arch/powerpc/kernel/ptrace.c
583 @@ -3014,8 +3014,13 @@ long arch_ptrace(struct task_struct *child, long request,
584
585 flush_fp_to_thread(child);
586 if (fpidx < (PT_FPSCR - PT_FPR0))
587 - memcpy(&tmp, &child->thread.TS_FPR(fpidx),
588 - sizeof(long));
589 + if (IS_ENABLED(CONFIG_PPC32)) {
590 + // On 32-bit the index we are passed refers to 32-bit words
591 + tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx];
592 + } else {
593 + memcpy(&tmp, &child->thread.TS_FPR(fpidx),
594 + sizeof(long));
595 + }
596 else
597 tmp = child->thread.fp_state.fpscr;
598 }
599 @@ -3047,8 +3052,13 @@ long arch_ptrace(struct task_struct *child, long request,
600
601 flush_fp_to_thread(child);
602 if (fpidx < (PT_FPSCR - PT_FPR0))
603 - memcpy(&child->thread.TS_FPR(fpidx), &data,
604 - sizeof(long));
605 + if (IS_ENABLED(CONFIG_PPC32)) {
606 + // On 32-bit the index we are passed refers to 32-bit words
607 + ((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
608 + } else {
609 + memcpy(&child->thread.TS_FPR(fpidx), &data,
610 + sizeof(long));
611 + }
612 else
613 child->thread.fp_state.fpscr = data;
614 ret = 0;
615 @@ -3398,4 +3408,7 @@ void __init pt_regs_check(void)
616 offsetof(struct user_pt_regs, result));
617
618 BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
619 +
620 + // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible
621 + BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX));
622 }
623 diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
624 index 944180f55a3c6..25eda98f3b1bd 100644
625 --- a/arch/powerpc/perf/isa207-common.c
626 +++ b/arch/powerpc/perf/isa207-common.c
627 @@ -326,7 +326,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
628 if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
629 mask |= CNST_THRESH_MASK;
630 value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
631 - }
632 + } else if (event_is_threshold(event))
633 + return -1;
634 } else {
635 /*
636 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
637 diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/4xx/cpm.c
638 index ae8b812c92029..2481e78c04234 100644
639 --- a/arch/powerpc/platforms/4xx/cpm.c
640 +++ b/arch/powerpc/platforms/4xx/cpm.c
641 @@ -327,6 +327,6 @@ late_initcall(cpm_init);
642 static int __init cpm_powersave_off(char *arg)
643 {
644 cpm.powersave_off = 1;
645 - return 0;
646 + return 1;
647 }
648 __setup("powersave=off", cpm_powersave_off);
649 diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
650 index 0f65c51271db9..ec6dc2d7a9db3 100644
651 --- a/arch/powerpc/platforms/8xx/cpm1.c
652 +++ b/arch/powerpc/platforms/8xx/cpm1.c
653 @@ -292,6 +292,7 @@ cpm_setbrg(uint brg, uint rate)
654 out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
655 CPM_BRG_EN | CPM_BRG_DIV16);
656 }
657 +EXPORT_SYMBOL(cpm_setbrg);
658
659 struct cpm_ioport16 {
660 __be16 dir, par, odr_sor, dat, intr;
661 diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c
662 index d361d37d975f3..f5cea068f0bdc 100644
663 --- a/arch/powerpc/platforms/powernv/opal-fadump.c
664 +++ b/arch/powerpc/platforms/powernv/opal-fadump.c
665 @@ -60,7 +60,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
666 addr = be64_to_cpu(addr);
667 pr_debug("Kernel metadata addr: %llx\n", addr);
668 opal_fdm_active = (void *)addr;
669 - if (opal_fdm_active->registered_regions == 0)
670 + if (be16_to_cpu(opal_fdm_active->registered_regions) == 0)
671 return;
672
673 ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr);
674 @@ -95,17 +95,17 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf);
675 static void opal_fadump_update_config(struct fw_dump *fadump_conf,
676 const struct opal_fadump_mem_struct *fdm)
677 {
678 - pr_debug("Boot memory regions count: %d\n", fdm->region_cnt);
679 + pr_debug("Boot memory regions count: %d\n", be16_to_cpu(fdm->region_cnt));
680
681 /*
682 * The destination address of the first boot memory region is the
683 * destination address of boot memory regions.
684 */
685 - fadump_conf->boot_mem_dest_addr = fdm->rgn[0].dest;
686 + fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest);
687 pr_debug("Destination address of boot memory regions: %#016llx\n",
688 fadump_conf->boot_mem_dest_addr);
689
690 - fadump_conf->fadumphdr_addr = fdm->fadumphdr_addr;
691 + fadump_conf->fadumphdr_addr = be64_to_cpu(fdm->fadumphdr_addr);
692 }
693
694 /*
695 @@ -126,9 +126,9 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
696 fadump_conf->boot_memory_size = 0;
697
698 pr_debug("Boot memory regions:\n");
699 - for (i = 0; i < fdm->region_cnt; i++) {
700 - base = fdm->rgn[i].src;
701 - size = fdm->rgn[i].size;
702 + for (i = 0; i < be16_to_cpu(fdm->region_cnt); i++) {
703 + base = be64_to_cpu(fdm->rgn[i].src);
704 + size = be64_to_cpu(fdm->rgn[i].size);
705 pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size);
706
707 fadump_conf->boot_mem_addr[i] = base;
708 @@ -143,7 +143,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
709 * Start address of reserve dump area (permanent reservation) for
710 * re-registering FADump after dump capture.
711 */
712 - fadump_conf->reserve_dump_area_start = fdm->rgn[0].dest;
713 + fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest);
714
715 /*
716 * Rarely, but it can so happen that system crashes before all
717 @@ -155,13 +155,14 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
718 * Hope the memory that could not be preserved only has pages
719 * that are usually filtered out while saving the vmcore.
720 */
721 - if (fdm->region_cnt > fdm->registered_regions) {
722 + if (be16_to_cpu(fdm->region_cnt) > be16_to_cpu(fdm->registered_regions)) {
723 pr_warn("Not all memory regions were saved!!!\n");
724 pr_warn(" Unsaved memory regions:\n");
725 - i = fdm->registered_regions;
726 - while (i < fdm->region_cnt) {
727 + i = be16_to_cpu(fdm->registered_regions);
728 + while (i < be16_to_cpu(fdm->region_cnt)) {
729 pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n",
730 - i, fdm->rgn[i].src, fdm->rgn[i].size);
731 + i, be64_to_cpu(fdm->rgn[i].src),
732 + be64_to_cpu(fdm->rgn[i].size));
733 i++;
734 }
735
736 @@ -170,7 +171,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
737 }
738
739 fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size);
740 - fadump_conf->boot_mem_regs_cnt = fdm->region_cnt;
741 + fadump_conf->boot_mem_regs_cnt = be16_to_cpu(fdm->region_cnt);
742 opal_fadump_update_config(fadump_conf, fdm);
743 }
744
745 @@ -178,35 +179,38 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
746 static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm)
747 {
748 fdm->version = OPAL_FADUMP_VERSION;
749 - fdm->region_cnt = 0;
750 - fdm->registered_regions = 0;
751 - fdm->fadumphdr_addr = 0;
752 + fdm->region_cnt = cpu_to_be16(0);
753 + fdm->registered_regions = cpu_to_be16(0);
754 + fdm->fadumphdr_addr = cpu_to_be64(0);
755 }
756
757 static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf)
758 {
759 u64 addr = fadump_conf->reserve_dump_area_start;
760 + u16 reg_cnt;
761 int i;
762
763 opal_fdm = __va(fadump_conf->kernel_metadata);
764 opal_fadump_init_metadata(opal_fdm);
765
766 /* Boot memory regions */
767 + reg_cnt = be16_to_cpu(opal_fdm->region_cnt);
768 for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) {
769 - opal_fdm->rgn[i].src = fadump_conf->boot_mem_addr[i];
770 - opal_fdm->rgn[i].dest = addr;
771 - opal_fdm->rgn[i].size = fadump_conf->boot_mem_sz[i];
772 + opal_fdm->rgn[i].src = cpu_to_be64(fadump_conf->boot_mem_addr[i]);
773 + opal_fdm->rgn[i].dest = cpu_to_be64(addr);
774 + opal_fdm->rgn[i].size = cpu_to_be64(fadump_conf->boot_mem_sz[i]);
775
776 - opal_fdm->region_cnt++;
777 + reg_cnt++;
778 addr += fadump_conf->boot_mem_sz[i];
779 }
780 + opal_fdm->region_cnt = cpu_to_be16(reg_cnt);
781
782 /*
783 * Kernel metadata is passed to f/w and retrieved in capture kerenl.
784 * So, use it to save fadump header address instead of calculating it.
785 */
786 - opal_fdm->fadumphdr_addr = (opal_fdm->rgn[0].dest +
787 - fadump_conf->boot_memory_size);
788 + opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) +
789 + fadump_conf->boot_memory_size);
790
791 opal_fadump_update_config(fadump_conf, opal_fdm);
792
793 @@ -269,18 +273,21 @@ static u64 opal_fadump_get_bootmem_min(void)
794 static int opal_fadump_register(struct fw_dump *fadump_conf)
795 {
796 s64 rc = OPAL_PARAMETER;
797 + u16 registered_regs;
798 int i, err = -EIO;
799
800 - for (i = 0; i < opal_fdm->region_cnt; i++) {
801 + registered_regs = be16_to_cpu(opal_fdm->registered_regions);
802 + for (i = 0; i < be16_to_cpu(opal_fdm->region_cnt); i++) {
803 rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE,
804 - opal_fdm->rgn[i].src,
805 - opal_fdm->rgn[i].dest,
806 - opal_fdm->rgn[i].size);
807 + be64_to_cpu(opal_fdm->rgn[i].src),
808 + be64_to_cpu(opal_fdm->rgn[i].dest),
809 + be64_to_cpu(opal_fdm->rgn[i].size));
810 if (rc != OPAL_SUCCESS)
811 break;
812
813 - opal_fdm->registered_regions++;
814 + registered_regs++;
815 }
816 + opal_fdm->registered_regions = cpu_to_be16(registered_regs);
817
818 switch (rc) {
819 case OPAL_SUCCESS:
820 @@ -291,7 +298,8 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
821 case OPAL_RESOURCE:
822 /* If MAX regions limit in f/w is hit, warn and proceed. */
823 pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n",
824 - (opal_fdm->region_cnt - opal_fdm->registered_regions));
825 + (be16_to_cpu(opal_fdm->region_cnt) -
826 + be16_to_cpu(opal_fdm->registered_regions)));
827 fadump_conf->dump_registered = 1;
828 err = 0;
829 break;
830 @@ -312,7 +320,7 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
831 * If some regions were registered before OPAL_MPIPL_ADD_RANGE
832 * OPAL call failed, unregister all regions.
833 */
834 - if ((err < 0) && (opal_fdm->registered_regions > 0))
835 + if ((err < 0) && (be16_to_cpu(opal_fdm->registered_regions) > 0))
836 opal_fadump_unregister(fadump_conf);
837
838 return err;
839 @@ -328,7 +336,7 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf)
840 return -EIO;
841 }
842
843 - opal_fdm->registered_regions = 0;
844 + opal_fdm->registered_regions = cpu_to_be16(0);
845 fadump_conf->dump_registered = 0;
846 return 0;
847 }
848 @@ -563,19 +571,20 @@ static void opal_fadump_region_show(struct fw_dump *fadump_conf,
849 else
850 fdm_ptr = opal_fdm;
851
852 - for (i = 0; i < fdm_ptr->region_cnt; i++) {
853 + for (i = 0; i < be16_to_cpu(fdm_ptr->region_cnt); i++) {
854 /*
855 * Only regions that are registered for MPIPL
856 * would have dump data.
857 */
858 if ((fadump_conf->dump_active) &&
859 - (i < fdm_ptr->registered_regions))
860 - dumped_bytes = fdm_ptr->rgn[i].size;
861 + (i < be16_to_cpu(fdm_ptr->registered_regions)))
862 + dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size);
863
864 seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
865 - fdm_ptr->rgn[i].src, fdm_ptr->rgn[i].dest);
866 + be64_to_cpu(fdm_ptr->rgn[i].src),
867 + be64_to_cpu(fdm_ptr->rgn[i].dest));
868 seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
869 - fdm_ptr->rgn[i].size, dumped_bytes);
870 + be64_to_cpu(fdm_ptr->rgn[i].size), dumped_bytes);
871 }
872
873 /* Dump is active. Show reserved area start address. */
874 @@ -624,6 +633,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
875 {
876 const __be32 *prop;
877 unsigned long dn;
878 + __be64 be_addr;
879 u64 addr = 0;
880 int i, len;
881 s64 ret;
882 @@ -680,13 +690,13 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
883 if (!prop)
884 return;
885
886 - ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr);
887 - if ((ret != OPAL_SUCCESS) || !addr) {
888 + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &be_addr);
889 + if ((ret != OPAL_SUCCESS) || !be_addr) {
890 pr_err("Failed to get Kernel metadata (%lld)\n", ret);
891 return;
892 }
893
894 - addr = be64_to_cpu(addr);
895 + addr = be64_to_cpu(be_addr);
896 pr_debug("Kernel metadata addr: %llx\n", addr);
897
898 opal_fdm_active = __va(addr);
899 @@ -697,14 +707,14 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
900 }
901
902 /* Kernel regions not registered with f/w for MPIPL */
903 - if (opal_fdm_active->registered_regions == 0) {
904 + if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) {
905 opal_fdm_active = NULL;
906 return;
907 }
908
909 - ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr);
910 - if (addr) {
911 - addr = be64_to_cpu(addr);
912 + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &be_addr);
913 + if (be_addr) {
914 + addr = be64_to_cpu(be_addr);
915 pr_debug("CPU metadata addr: %llx\n", addr);
916 opal_cpu_metadata = __va(addr);
917 }
918 diff --git a/arch/powerpc/platforms/powernv/opal-fadump.h b/arch/powerpc/platforms/powernv/opal-fadump.h
919 index f1e9ecf548c5d..3f715efb0aa6e 100644
920 --- a/arch/powerpc/platforms/powernv/opal-fadump.h
921 +++ b/arch/powerpc/platforms/powernv/opal-fadump.h
922 @@ -31,14 +31,14 @@
923 * OPAL FADump kernel metadata
924 *
925 * The address of this structure will be registered with f/w for retrieving
926 - * and processing during crash dump.
927 + * in the capture kernel to process the crash dump.
928 */
929 struct opal_fadump_mem_struct {
930 u8 version;
931 u8 reserved[3];
932 - u16 region_cnt; /* number of regions */
933 - u16 registered_regions; /* Regions registered for MPIPL */
934 - u64 fadumphdr_addr;
935 + __be16 region_cnt; /* number of regions */
936 + __be16 registered_regions; /* Regions registered for MPIPL */
937 + __be64 fadumphdr_addr;
938 struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS];
939 } __packed;
940
941 @@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt,
942 for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) {
943 reg_entry = (struct hdat_fadump_reg_entry *)bufp;
944 val = (cpu_endian ? be64_to_cpu(reg_entry->reg_val) :
945 - reg_entry->reg_val);
946 + (u64)(reg_entry->reg_val));
947 opal_fadump_set_regval_regnum(regs,
948 be32_to_cpu(reg_entry->reg_type),
949 be32_to_cpu(reg_entry->reg_num),
950 diff --git a/arch/powerpc/platforms/powernv/ultravisor.c b/arch/powerpc/platforms/powernv/ultravisor.c
951 index e4a00ad06f9d3..67c8c4b2d8b17 100644
952 --- a/arch/powerpc/platforms/powernv/ultravisor.c
953 +++ b/arch/powerpc/platforms/powernv/ultravisor.c
954 @@ -55,6 +55,7 @@ static int __init uv_init(void)
955 return -ENODEV;
956
957 uv_memcons = memcons_init(node, "memcons");
958 + of_node_put(node);
959 if (!uv_memcons)
960 return -ENOENT;
961
962 diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
963 index 6b4a34b36d987..8ff9bcfe4b8d4 100644
964 --- a/arch/powerpc/sysdev/dart_iommu.c
965 +++ b/arch/powerpc/sysdev/dart_iommu.c
966 @@ -403,9 +403,10 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
967 }
968
969 /* Initialize the DART HW */
970 - if (dart_init(dn) != 0)
971 + if (dart_init(dn) != 0) {
972 + of_node_put(dn);
973 return;
974 -
975 + }
976 /*
977 * U4 supports a DART bypass, we use it for 64-bit capable devices to
978 * improve performance. However, that only works for devices connected
979 @@ -418,6 +419,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
980
981 /* Setup pci_dma ops */
982 set_pci_dma_ops(&dma_iommu_ops);
983 + of_node_put(dn);
984 }
985
986 #ifdef CONFIG_PM
987 diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
988 index 07c164f7f8cfe..3f9f78621cf3c 100644
989 --- a/arch/powerpc/sysdev/fsl_rio.c
990 +++ b/arch/powerpc/sysdev/fsl_rio.c
991 @@ -505,8 +505,10 @@ int fsl_rio_setup(struct platform_device *dev)
992 if (rc) {
993 dev_err(&dev->dev, "Can't get %pOF property 'reg'\n",
994 rmu_node);
995 + of_node_put(rmu_node);
996 goto err_rmu;
997 }
998 + of_node_put(rmu_node);
999 rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs));
1000 if (!rmu_regs_win) {
1001 dev_err(&dev->dev, "Unable to map rmu register window\n");
1002 diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
1003 index 68fd2540b0931..7fa520efcefa0 100644
1004 --- a/arch/powerpc/sysdev/xics/icp-opal.c
1005 +++ b/arch/powerpc/sysdev/xics/icp-opal.c
1006 @@ -195,6 +195,7 @@ int icp_opal_init(void)
1007
1008 printk("XICS: Using OPAL ICP fallbacks\n");
1009
1010 + of_node_put(np);
1011 return 0;
1012 }
1013
1014 diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
1015 index 9803e96d29247..558cfe570ccf8 100644
1016 --- a/arch/s390/crypto/aes_s390.c
1017 +++ b/arch/s390/crypto/aes_s390.c
1018 @@ -861,7 +861,7 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
1019 unsigned int nbytes)
1020 {
1021 gw->walk_bytes_remain -= nbytes;
1022 - scatterwalk_unmap(&gw->walk);
1023 + scatterwalk_unmap(gw->walk_ptr);
1024 scatterwalk_advance(&gw->walk, nbytes);
1025 scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
1026 gw->walk_ptr = NULL;
1027 @@ -936,7 +936,7 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
1028 goto out;
1029 }
1030
1031 - scatterwalk_unmap(&gw->walk);
1032 + scatterwalk_unmap(gw->walk_ptr);
1033 gw->walk_ptr = NULL;
1034
1035 gw->ptr = gw->buf;
1036 diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
1037 index b5ea9e14c017a..3dcd8ab3db73b 100644
1038 --- a/arch/s390/include/asm/preempt.h
1039 +++ b/arch/s390/include/asm/preempt.h
1040 @@ -52,10 +52,17 @@ static inline bool test_preempt_need_resched(void)
1041
1042 static inline void __preempt_count_add(int val)
1043 {
1044 - if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
1045 - __atomic_add_const(val, &S390_lowcore.preempt_count);
1046 - else
1047 - __atomic_add(val, &S390_lowcore.preempt_count);
1048 + /*
1049 + * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
1050 + * enabled, gcc 12 fails to handle __builtin_constant_p().
1051 + */
1052 + if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
1053 + if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
1054 + __atomic_add_const(val, &S390_lowcore.preempt_count);
1055 + return;
1056 + }
1057 + }
1058 + __atomic_add(val, &S390_lowcore.preempt_count);
1059 }
1060
1061 static inline void __preempt_count_sub(int val)
1062 diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
1063 index 5e5a4e1f0e6cf..19ee8355b2a7f 100644
1064 --- a/arch/s390/mm/gmap.c
1065 +++ b/arch/s390/mm/gmap.c
1066 @@ -2579,6 +2579,18 @@ static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
1067 return 0;
1068 }
1069
1070 +/*
1071 + * Give a chance to schedule after setting a key to 256 pages.
1072 + * We only hold the mm lock, which is a rwsem and the kvm srcu.
1073 + * Both can sleep.
1074 + */
1075 +static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
1076 + unsigned long next, struct mm_walk *walk)
1077 +{
1078 + cond_resched();
1079 + return 0;
1080 +}
1081 +
1082 static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
1083 unsigned long hmask, unsigned long next,
1084 struct mm_walk *walk)
1085 @@ -2601,12 +2613,14 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
1086 end = start + HPAGE_SIZE - 1;
1087 __storage_key_init_range(start, end);
1088 set_bit(PG_arch_1, &page->flags);
1089 + cond_resched();
1090 return 0;
1091 }
1092
1093 static const struct mm_walk_ops enable_skey_walk_ops = {
1094 .hugetlb_entry = __s390_enable_skey_hugetlb,
1095 .pte_entry = __s390_enable_skey_pte,
1096 + .pmd_entry = __s390_enable_skey_pmd,
1097 };
1098
1099 int s390_enable_skey(void)
1100 diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
1101 index 6040817c036f3..25727ed648b72 100644
1102 --- a/arch/um/drivers/chan_user.c
1103 +++ b/arch/um/drivers/chan_user.c
1104 @@ -220,7 +220,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
1105 unsigned long *stack_out)
1106 {
1107 struct winch_data data;
1108 - int fds[2], n, err;
1109 + int fds[2], n, err, pid;
1110 char c;
1111
1112 err = os_pipe(fds, 1, 1);
1113 @@ -238,8 +238,9 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
1114 * problem with /dev/net/tun, which if held open by this
1115 * thread, prevents the TUN/TAP device from being reused.
1116 */
1117 - err = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
1118 - if (err < 0) {
1119 + pid = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
1120 + if (pid < 0) {
1121 + err = pid;
1122 printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n",
1123 -err);
1124 goto out_close;
1125 @@ -263,7 +264,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
1126 goto out_close;
1127 }
1128
1129 - return err;
1130 + return pid;
1131
1132 out_close:
1133 close(fds[1]);
1134 diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
1135 index 4c19ce4c49f18..66ab6a07330b2 100644
1136 --- a/arch/um/include/asm/thread_info.h
1137 +++ b/arch/um/include/asm/thread_info.h
1138 @@ -63,6 +63,7 @@ static inline struct thread_info *current_thread_info(void)
1139 #define TIF_RESTORE_SIGMASK 7
1140 #define TIF_NOTIFY_RESUME 8
1141 #define TIF_SECCOMP 9 /* secure computing */
1142 +#define TIF_SINGLESTEP 10 /* single stepping userspace */
1143
1144 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
1145 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
1146 @@ -70,5 +71,6 @@ static inline struct thread_info *current_thread_info(void)
1147 #define _TIF_MEMDIE (1 << TIF_MEMDIE)
1148 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
1149 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1150 +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
1151
1152 #endif
1153 diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
1154 index e8fd5d540b05d..7f7a74c82abb6 100644
1155 --- a/arch/um/kernel/exec.c
1156 +++ b/arch/um/kernel/exec.c
1157 @@ -44,7 +44,7 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
1158 {
1159 PT_REGS_IP(regs) = eip;
1160 PT_REGS_SP(regs) = esp;
1161 - current->ptrace &= ~PT_DTRACE;
1162 + clear_thread_flag(TIF_SINGLESTEP);
1163 #ifdef SUBARCH_EXECVE1
1164 SUBARCH_EXECVE1(regs->regs);
1165 #endif
1166 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
1167 index 17045e7211bfd..d71dd7725bef1 100644
1168 --- a/arch/um/kernel/process.c
1169 +++ b/arch/um/kernel/process.c
1170 @@ -380,7 +380,7 @@ int singlestepping(void * t)
1171 {
1172 struct task_struct *task = t ? t : current;
1173
1174 - if (!(task->ptrace & PT_DTRACE))
1175 + if (!test_thread_flag(TIF_SINGLESTEP))
1176 return 0;
1177
1178 if (task->thread.singlestep_syscall)
1179 diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
1180 index b425f47bddbb3..d37802ced5636 100644
1181 --- a/arch/um/kernel/ptrace.c
1182 +++ b/arch/um/kernel/ptrace.c
1183 @@ -12,7 +12,7 @@
1184
1185 void user_enable_single_step(struct task_struct *child)
1186 {
1187 - child->ptrace |= PT_DTRACE;
1188 + set_tsk_thread_flag(child, TIF_SINGLESTEP);
1189 child->thread.singlestep_syscall = 0;
1190
1191 #ifdef SUBARCH_SET_SINGLESTEPPING
1192 @@ -22,7 +22,7 @@ void user_enable_single_step(struct task_struct *child)
1193
1194 void user_disable_single_step(struct task_struct *child)
1195 {
1196 - child->ptrace &= ~PT_DTRACE;
1197 + clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1198 child->thread.singlestep_syscall = 0;
1199
1200 #ifdef SUBARCH_SET_SINGLESTEPPING
1201 @@ -121,7 +121,7 @@ static void send_sigtrap(struct uml_pt_regs *regs, int error_code)
1202 }
1203
1204 /*
1205 - * XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and
1206 + * XXX Check TIF_SINGLESTEP for singlestepping check and
1207 * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check
1208 */
1209 int syscall_trace_enter(struct pt_regs *regs)
1210 @@ -145,7 +145,7 @@ void syscall_trace_leave(struct pt_regs *regs)
1211 audit_syscall_exit(regs);
1212
1213 /* Fake a debug trap */
1214 - if (ptraced & PT_DTRACE)
1215 + if (test_thread_flag(TIF_SINGLESTEP))
1216 send_sigtrap(&regs->regs, 0);
1217
1218 if (!test_thread_flag(TIF_SYSCALL_TRACE))
1219 diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
1220 index 3d57c71c532e4..01628195ae520 100644
1221 --- a/arch/um/kernel/signal.c
1222 +++ b/arch/um/kernel/signal.c
1223 @@ -53,7 +53,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
1224 unsigned long sp;
1225 int err;
1226
1227 - if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
1228 + if (test_thread_flag(TIF_SINGLESTEP) && (current->ptrace & PT_PTRACED))
1229 singlestep = 1;
1230
1231 /* Did we come from a system call? */
1232 @@ -128,7 +128,7 @@ void do_signal(struct pt_regs *regs)
1233 * on the host. The tracing thread will check this flag and
1234 * PTRACE_SYSCALL if necessary.
1235 */
1236 - if (current->ptrace & PT_DTRACE)
1237 + if (test_thread_flag(TIF_SINGLESTEP))
1238 current->thread.singlestep_syscall =
1239 is_syscall(PT_REGS_IP(&current->thread.regs));
1240
1241 diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
1242 index f5937742b2901..3613cfb83c6dc 100644
1243 --- a/arch/x86/entry/vdso/vma.c
1244 +++ b/arch/x86/entry/vdso/vma.c
1245 @@ -323,7 +323,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
1246 static __init int vdso_setup(char *s)
1247 {
1248 vdso64_enabled = simple_strtoul(s, NULL, 0);
1249 - return 0;
1250 + return 1;
1251 }
1252 __setup("vdso=", vdso_setup);
1253
1254 diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
1255 index b7baaa9733173..2e930d8c04d95 100644
1256 --- a/arch/x86/events/amd/ibs.c
1257 +++ b/arch/x86/events/amd/ibs.c
1258 @@ -312,6 +312,16 @@ static int perf_ibs_init(struct perf_event *event)
1259 hwc->config_base = perf_ibs->msr;
1260 hwc->config = config;
1261
1262 + /*
1263 + * rip recorded by IbsOpRip will not be consistent with rsp and rbp
1264 + * recorded as part of interrupt regs. Thus we need to use rip from
1265 + * interrupt regs while unwinding call stack. Setting _EARLY flag
1266 + * makes sure we unwind call-stack before perf sample rip is set to
1267 + * IbsOpRip.
1268 + */
1269 + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
1270 + event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
1271 +
1272 return 0;
1273 }
1274
1275 @@ -683,6 +693,14 @@ fail:
1276 data.raw = &raw;
1277 }
1278
1279 + /*
1280 + * rip recorded by IbsOpRip will not be consistent with rsp and rbp
1281 + * recorded as part of interrupt regs. Thus we need to use rip from
1282 + * interrupt regs while unwinding call stack.
1283 + */
1284 + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
1285 + data.callchain = perf_callchain(event, iregs);
1286 +
1287 throttle = perf_event_overflow(event, &data, &regs);
1288 out:
1289 if (throttle) {
1290 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
1291 index b33540e1efa88..f2976204e8b5d 100644
1292 --- a/arch/x86/events/intel/core.c
1293 +++ b/arch/x86/events/intel/core.c
1294 @@ -250,7 +250,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
1295 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
1296 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
1297 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
1298 - INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
1299 + INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
1300 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
1301 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
1302 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
1303 diff --git a/arch/x86/include/asm/acenv.h b/arch/x86/include/asm/acenv.h
1304 index 9aff97f0de7fd..d937c55e717e6 100644
1305 --- a/arch/x86/include/asm/acenv.h
1306 +++ b/arch/x86/include/asm/acenv.h
1307 @@ -13,7 +13,19 @@
1308
1309 /* Asm macros */
1310
1311 -#define ACPI_FLUSH_CPU_CACHE() wbinvd()
1312 +/*
1313 + * ACPI_FLUSH_CPU_CACHE() flushes caches on entering sleep states.
1314 + * It is required to prevent data loss.
1315 + *
1316 + * While running inside virtual machine, the kernel can bypass cache flushing.
1317 + * Changing sleep state in a virtual machine doesn't affect the host system
1318 + * sleep state and cannot lead to data loss.
1319 + */
1320 +#define ACPI_FLUSH_CPU_CACHE() \
1321 +do { \
1322 + if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) \
1323 + wbinvd(); \
1324 +} while (0)
1325
1326 int __acpi_acquire_global_lock(unsigned int *lock);
1327 int __acpi_release_global_lock(unsigned int *lock);
1328 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
1329 index 59bf91c57aa85..619c1f80a2abe 100644
1330 --- a/arch/x86/include/asm/cpufeature.h
1331 +++ b/arch/x86/include/asm/cpufeature.h
1332 @@ -49,7 +49,7 @@ extern const char * const x86_power_flags[32];
1333 extern const char * const x86_bug_flags[NBUGINTS*32];
1334
1335 #define test_cpu_cap(c, bit) \
1336 - test_bit(bit, (unsigned long *)((c)->x86_capability))
1337 + arch_test_bit(bit, (unsigned long *)((c)->x86_capability))
1338
1339 /*
1340 * There are 32 bits/features in each mask word. The high bits
1341 diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
1342 index fdbd9d7b7bca1..3b97aa9215430 100644
1343 --- a/arch/x86/include/asm/suspend_32.h
1344 +++ b/arch/x86/include/asm/suspend_32.h
1345 @@ -21,7 +21,6 @@ struct saved_context {
1346 #endif
1347 unsigned long cr0, cr2, cr3, cr4;
1348 u64 misc_enable;
1349 - bool misc_enable_saved;
1350 struct saved_msrs saved_msrs;
1351 struct desc_ptr gdt_desc;
1352 struct desc_ptr idt;
1353 @@ -30,6 +29,7 @@ struct saved_context {
1354 unsigned long tr;
1355 unsigned long safety;
1356 unsigned long return_address;
1357 + bool misc_enable_saved;
1358 } __attribute__((packed));
1359
1360 /* routines for saving/restoring kernel state */
1361 diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
1362 index 35bb35d28733e..54df06687d834 100644
1363 --- a/arch/x86/include/asm/suspend_64.h
1364 +++ b/arch/x86/include/asm/suspend_64.h
1365 @@ -14,9 +14,13 @@
1366 * Image of the saved processor state, used by the low level ACPI suspend to
1367 * RAM code and by the low level hibernation code.
1368 *
1369 - * If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that
1370 - * __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c,
1371 - * still work as required.
1372 + * If you modify it, check how it is used in arch/x86/kernel/acpi/wakeup_64.S
1373 + * and make sure that __save/__restore_processor_state(), defined in
1374 + * arch/x86/power/cpu.c, still work as required.
1375 + *
1376 + * Because the structure is packed, make sure to avoid unaligned members. For
1377 + * optimisation purposes but also because tools like kmemleak only search for
1378 + * pointers that are aligned.
1379 */
1380 struct saved_context {
1381 struct pt_regs regs;
1382 @@ -36,7 +40,6 @@ struct saved_context {
1383
1384 unsigned long cr0, cr2, cr3, cr4;
1385 u64 misc_enable;
1386 - bool misc_enable_saved;
1387 struct saved_msrs saved_msrs;
1388 unsigned long efer;
1389 u16 gdt_pad; /* Unused */
1390 @@ -48,6 +51,7 @@ struct saved_context {
1391 unsigned long tr;
1392 unsigned long safety;
1393 unsigned long return_address;
1394 + bool misc_enable_saved;
1395 } __attribute__((packed));
1396
1397 #define loaddebug(thread,register) \
1398 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
1399 index 4e4476b832be2..68c7340325233 100644
1400 --- a/arch/x86/kernel/apic/apic.c
1401 +++ b/arch/x86/kernel/apic/apic.c
1402 @@ -168,7 +168,7 @@ static __init int setup_apicpmtimer(char *s)
1403 {
1404 apic_calibrate_pmtmr = 1;
1405 notsc_setup(NULL);
1406 - return 0;
1407 + return 1;
1408 }
1409 __setup("apicpmtimer", setup_apicpmtimer);
1410 #endif
1411 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
1412 index 11d5c5950e2d3..44688917d51fd 100644
1413 --- a/arch/x86/kernel/cpu/intel.c
1414 +++ b/arch/x86/kernel/cpu/intel.c
1415 @@ -97,7 +97,7 @@ static bool ring3mwait_disabled __read_mostly;
1416 static int __init ring3mwait_disable(char *__unused)
1417 {
1418 ring3mwait_disabled = true;
1419 - return 0;
1420 + return 1;
1421 }
1422 __setup("ring3mwait=disable", ring3mwait_disable);
1423
1424 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
1425 index 60d2c3798ba28..2f97d1a1032f3 100644
1426 --- a/arch/x86/kernel/step.c
1427 +++ b/arch/x86/kernel/step.c
1428 @@ -175,8 +175,7 @@ void set_task_blockstep(struct task_struct *task, bool on)
1429 *
1430 * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
1431 * task is current or it can't be running, otherwise we can race
1432 - * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
1433 - * PTRACE_KILL is not safe.
1434 + * with __switch_to_xtra(). We rely on ptrace_freeze_traced().
1435 */
1436 local_irq_disable();
1437 debugctl = get_debugctlmsr();
1438 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
1439 index f7476ce23b6e0..42e31358a9d32 100644
1440 --- a/arch/x86/kernel/sys_x86_64.c
1441 +++ b/arch/x86/kernel/sys_x86_64.c
1442 @@ -70,9 +70,6 @@ static int __init control_va_addr_alignment(char *str)
1443 if (*str == 0)
1444 return 1;
1445
1446 - if (*str == '=')
1447 - str++;
1448 -
1449 if (!strcmp(str, "32"))
1450 va_align.flags = ALIGN_VA_32;
1451 else if (!strcmp(str, "64"))
1452 @@ -82,11 +79,11 @@ static int __init control_va_addr_alignment(char *str)
1453 else if (!strcmp(str, "on"))
1454 va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
1455 else
1456 - return 0;
1457 + pr_warn("invalid option value: 'align_va_addr=%s'\n", str);
1458
1459 return 1;
1460 }
1461 -__setup("align_va_addr", control_va_addr_alignment);
1462 +__setup("align_va_addr=", control_va_addr_alignment);
1463
1464 SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
1465 unsigned long, prot, unsigned long, flags,
1466 diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1467 index 3041015b05f71..9f61ae64b7277 100644
1468 --- a/arch/x86/kvm/vmx/nested.c
1469 +++ b/arch/x86/kvm/vmx/nested.c
1470 @@ -3746,12 +3746,12 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
1471 /* update exit information fields: */
1472 vmcs12->vm_exit_reason = exit_reason;
1473 vmcs12->exit_qualification = exit_qualification;
1474 - vmcs12->vm_exit_intr_info = exit_intr_info;
1475 -
1476 - vmcs12->idt_vectoring_info_field = 0;
1477 - vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1478 - vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
1479
1480 + /*
1481 + * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched
1482 + * and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other
1483 + * exit info fields are unmodified.
1484 + */
1485 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
1486 vmcs12->launch_state = 1;
1487
1488 @@ -3763,8 +3763,13 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
1489 * Transfer the event that L0 or L1 may wanted to inject into
1490 * L2 to IDT_VECTORING_INFO_FIELD.
1491 */
1492 + vmcs12->idt_vectoring_info_field = 0;
1493 vmcs12_save_pending_event(vcpu, vmcs12);
1494
1495 + vmcs12->vm_exit_intr_info = exit_intr_info;
1496 + vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1497 + vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
1498 +
1499 /*
1500 * According to spec, there's no need to store the guest's
1501 * MSRs if the exit is due to a VM-entry failure that occurs
1502 diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
1503 index c126571e5e2ee..3d1cfad36ba21 100644
1504 --- a/arch/x86/lib/delay.c
1505 +++ b/arch/x86/lib/delay.c
1506 @@ -43,8 +43,8 @@ static void delay_loop(unsigned long loops)
1507 " jnz 2b \n"
1508 "3: dec %0 \n"
1509
1510 - : /* we don't need output */
1511 - :"a" (loops)
1512 + : "+a" (loops)
1513 + :
1514 );
1515 }
1516
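The delay_loop() change above turns the loop counter into a read-write operand ("+a") instead of a plain input, so the compiler knows the asm body rewrites the register rather than merely reading it. A reduced sketch of the constraint, assuming x86 and GNU inline-asm syntax; it is not the patch's code.

static void demo_spin(unsigned long loops)	/* illustrative only */
{
	if (!loops)
		return;
	asm volatile(
		"1:	dec %0		\n"
		"	jnz 1b		\n"
		: "+a" (loops)	/* read-write: the asm both reads and rewrites the register */
		:		/* no separate inputs */
		: "cc");	/* dec updates the flags */
}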
1517 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
1518 index 35b2e35c22035..c7c4e2f8c6a5c 100644
1519 --- a/arch/x86/mm/pat.c
1520 +++ b/arch/x86/mm/pat.c
1521 @@ -75,7 +75,7 @@ int pat_debug_enable;
1522 static int __init pat_debug_setup(char *str)
1523 {
1524 pat_debug_enable = 1;
1525 - return 0;
1526 + return 1;
1527 }
1528 __setup("debugpat", pat_debug_setup);
1529
1530 diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
1531 index 3ee234b6234dd..255a44dd415a9 100644
1532 --- a/arch/x86/um/ldt.c
1533 +++ b/arch/x86/um/ldt.c
1534 @@ -23,9 +23,11 @@ static long write_ldt_entry(struct mm_id *mm_idp, int func,
1535 {
1536 long res;
1537 void *stub_addr;
1538 +
1539 + BUILD_BUG_ON(sizeof(*desc) % sizeof(long));
1540 +
1541 res = syscall_stub_data(mm_idp, (unsigned long *)desc,
1542 - (sizeof(*desc) + sizeof(long) - 1) &
1543 - ~(sizeof(long) - 1),
1544 + sizeof(*desc) / sizeof(long),
1545 addr, &stub_addr);
1546 if (!res) {
1547 unsigned long args[] = { func,
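The ldt.c hunk replaces a manual round-up with an exact division and guards it with BUILD_BUG_ON(), which fails the build whenever the asserted expression is non-zero, i.e. whenever the descriptor size stops being a multiple of sizeof(long). A sketch of the idiom with an illustrative struct, not the patch's code:

#include <linux/build_bug.h>
#include <linux/types.h>

struct demo_desc {		/* illustrative stand-in for the LDT descriptor */
	unsigned long lo;
	unsigned long hi;
};

static inline unsigned int demo_desc_in_longs(void)
{
	/* compile-time check: breaks the build if the size assumption is violated */
	BUILD_BUG_ON(sizeof(struct demo_desc) % sizeof(long));
	return sizeof(struct demo_desc) / sizeof(long);
}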
1548 diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
1549 index 145742d70a9f2..998b4249065a6 100644
1550 --- a/arch/xtensa/kernel/ptrace.c
1551 +++ b/arch/xtensa/kernel/ptrace.c
1552 @@ -225,12 +225,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1553
1554 void user_enable_single_step(struct task_struct *child)
1555 {
1556 - child->ptrace |= PT_SINGLESTEP;
1557 + set_tsk_thread_flag(child, TIF_SINGLESTEP);
1558 }
1559
1560 void user_disable_single_step(struct task_struct *child)
1561 {
1562 - child->ptrace &= ~PT_SINGLESTEP;
1563 + clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1564 }
1565
1566 /*
1567 diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
1568 index dae83cddd6ca2..cf2bd960b30d4 100644
1569 --- a/arch/xtensa/kernel/signal.c
1570 +++ b/arch/xtensa/kernel/signal.c
1571 @@ -465,7 +465,7 @@ static void do_signal(struct pt_regs *regs)
1572 /* Set up the stack frame */
1573 ret = setup_frame(&ksig, sigmask_to_save(), regs);
1574 signal_setup_done(ret, &ksig, 0);
1575 - if (current->ptrace & PT_SINGLESTEP)
1576 + if (test_thread_flag(TIF_SINGLESTEP))
1577 task_pt_regs(current)->icountlevel = 1;
1578
1579 return;
1580 @@ -491,7 +491,7 @@ static void do_signal(struct pt_regs *regs)
1581 /* If there's no signal to deliver, we just restore the saved mask. */
1582 restore_saved_sigmask();
1583
1584 - if (current->ptrace & PT_SINGLESTEP)
1585 + if (test_thread_flag(TIF_SINGLESTEP))
1586 task_pt_regs(current)->icountlevel = 1;
1587 return;
1588 }
1589 diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
1590 index c17eb794f0aef..09d721b1f6acf 100644
1591 --- a/block/bfq-cgroup.c
1592 +++ b/block/bfq-cgroup.c
1593 @@ -536,6 +536,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
1594 */
1595 bfqg->bfqd = bfqd;
1596 bfqg->active_entities = 0;
1597 + bfqg->online = true;
1598 bfqg->rq_pos_tree = RB_ROOT;
1599 }
1600
1601 @@ -564,28 +565,11 @@ static void bfq_group_set_parent(struct bfq_group *bfqg,
1602 entity->sched_data = &parent->sched_data;
1603 }
1604
1605 -static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
1606 - struct blkcg *blkcg)
1607 +static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
1608 {
1609 - struct blkcg_gq *blkg;
1610 -
1611 - blkg = blkg_lookup(blkcg, bfqd->queue);
1612 - if (likely(blkg))
1613 - return blkg_to_bfqg(blkg);
1614 - return NULL;
1615 -}
1616 -
1617 -struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
1618 - struct blkcg *blkcg)
1619 -{
1620 - struct bfq_group *bfqg, *parent;
1621 + struct bfq_group *parent;
1622 struct bfq_entity *entity;
1623
1624 - bfqg = bfq_lookup_bfqg(bfqd, blkcg);
1625 -
1626 - if (unlikely(!bfqg))
1627 - return NULL;
1628 -
1629 /*
1630 * Update chain of bfq_groups as we might be handling a leaf group
1631 * which, along with some of its relatives, has not been hooked yet
1632 @@ -602,8 +586,24 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
1633 bfq_group_set_parent(curr_bfqg, parent);
1634 }
1635 }
1636 +}
1637
1638 - return bfqg;
1639 +struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
1640 +{
1641 + struct blkcg_gq *blkg = bio->bi_blkg;
1642 + struct bfq_group *bfqg;
1643 +
1644 + while (blkg) {
1645 + bfqg = blkg_to_bfqg(blkg);
1646 + if (bfqg->online) {
1647 + bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
1648 + return bfqg;
1649 + }
1650 + blkg = blkg->parent;
1651 + }
1652 + bio_associate_blkg_from_css(bio,
1653 + &bfqg_to_blkg(bfqd->root_group)->blkcg->css);
1654 + return bfqd->root_group;
1655 }
1656
1657 /**
1658 @@ -679,25 +679,15 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1659 * Move bic to blkcg, assuming that bfqd->lock is held; which makes
1660 * sure that the reference to cgroup is valid across the call (see
1661 * comments in bfq_bic_update_cgroup on this issue)
1662 - *
1663 - * NOTE: an alternative approach might have been to store the current
1664 - * cgroup in bfqq and getting a reference to it, reducing the lookup
1665 - * time here, at the price of slightly more complex code.
1666 */
1667 -static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
1668 - struct bfq_io_cq *bic,
1669 - struct blkcg *blkcg)
1670 +static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
1671 + struct bfq_io_cq *bic,
1672 + struct bfq_group *bfqg)
1673 {
1674 struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
1675 struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
1676 - struct bfq_group *bfqg;
1677 struct bfq_entity *entity;
1678
1679 - bfqg = bfq_find_set_group(bfqd, blkcg);
1680 -
1681 - if (unlikely(!bfqg))
1682 - bfqg = bfqd->root_group;
1683 -
1684 if (async_bfqq) {
1685 entity = &async_bfqq->entity;
1686
1687 @@ -708,9 +698,39 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
1688 }
1689
1690 if (sync_bfqq) {
1691 - entity = &sync_bfqq->entity;
1692 - if (entity->sched_data != &bfqg->sched_data)
1693 - bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
1694 + if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
1695 + /* We are the only user of this bfqq, just move it */
1696 + if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
1697 + bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
1698 + } else {
1699 + struct bfq_queue *bfqq;
1700 +
1701 + /*
1702 + * The queue was merged to a different queue. Check
1703 + * that the merge chain still belongs to the same
1704 + * cgroup.
1705 + */
1706 + for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
1707 + if (bfqq->entity.sched_data !=
1708 + &bfqg->sched_data)
1709 + break;
1710 + if (bfqq) {
1711 + /*
1712 + * Some queue changed cgroup so the merge is
1713 + * not valid anymore. We cannot easily just
1714 + * cancel the merge (by clearing new_bfqq) as
1715 + * there may be other processes using this
1716 + * queue and holding refs to all queues below
1717 + * sync_bfqq->new_bfqq. Similarly if the merge
1718 + * already happened, we need to detach from
1719 + * bfqq now so that we cannot merge bio to a
1720 + * request from the old cgroup.
1721 + */
1722 + bfq_put_cooperator(sync_bfqq);
1723 + bfq_release_process_ref(bfqd, sync_bfqq);
1724 + bic_set_bfqq(bic, NULL, 1);
1725 + }
1726 + }
1727 }
1728
1729 return bfqg;
1730 @@ -719,20 +739,24 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
1731 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
1732 {
1733 struct bfq_data *bfqd = bic_to_bfqd(bic);
1734 - struct bfq_group *bfqg = NULL;
1735 + struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
1736 uint64_t serial_nr;
1737
1738 - rcu_read_lock();
1739 - serial_nr = __bio_blkcg(bio)->css.serial_nr;
1740 + serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;
1741
1742 /*
1743 * Check whether blkcg has changed. The condition may trigger
1744 * spuriously on a newly created cic but there's no harm.
1745 */
1746 if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
1747 - goto out;
1748 + return;
1749
1750 - bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
1751 + /*
1752 + * New cgroup for this process. Make sure it is linked to bfq internal
1753 + * cgroup hierarchy.
1754 + */
1755 + bfq_link_bfqg(bfqd, bfqg);
1756 + __bfq_bic_change_cgroup(bfqd, bic, bfqg);
1757 /*
1758 * Update blkg_path for bfq_log_* functions. We cache this
1759 * path, and update it here, for the following
1760 @@ -785,8 +809,6 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
1761 */
1762 blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
1763 bic->blkcg_serial_nr = serial_nr;
1764 -out:
1765 - rcu_read_unlock();
1766 }
1767
1768 /**
1769 @@ -914,6 +936,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
1770
1771 put_async_queues:
1772 bfq_put_async_queues(bfqd, bfqg);
1773 + bfqg->online = false;
1774
1775 spin_unlock_irqrestore(&bfqd->lock, flags);
1776 /*
1777 @@ -1402,7 +1425,7 @@ void bfq_end_wr_async(struct bfq_data *bfqd)
1778 bfq_end_wr_async_queues(bfqd, bfqd->root_group);
1779 }
1780
1781 -struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
1782 +struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
1783 {
1784 return bfqd->root_group;
1785 }
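The new bfq_bio_bfqg() above resolves a bio to the nearest ancestor blkcg group that is still online, falling back to the root group when the whole chain has gone offline. The walk itself is a plain parent-chain loop; a sketch of the pattern with illustrative names (not the patch's code):

#include <linux/types.h>

struct demo_blkg {
	struct demo_blkg *parent;
	bool online;
};

static struct demo_blkg *demo_nearest_online(struct demo_blkg *blkg,
					     struct demo_blkg *root)
{
	while (blkg) {
		if (blkg->online)	/* first still-online ancestor wins */
			return blkg;
		blkg = blkg->parent;
	}
	return root;			/* everything offlined: fall back to the root */
}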
1786 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
1787 index d46806182b051..962701d3f46bd 100644
1788 --- a/block/bfq-iosched.c
1789 +++ b/block/bfq-iosched.c
1790 @@ -2227,10 +2227,17 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
1791
1792 spin_lock_irq(&bfqd->lock);
1793
1794 - if (bic)
1795 + if (bic) {
1796 + /*
1797 + * Make sure cgroup info is uptodate for current process before
1798 + * considering the merge.
1799 + */
1800 + bfq_bic_update_cgroup(bic, bio);
1801 +
1802 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
1803 - else
1804 + } else {
1805 bfqd->bio_bfqq = NULL;
1806 + }
1807 bfqd->bio_bic = bic;
1808
1809 ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
1810 @@ -2260,8 +2267,6 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
1811 return ELEVATOR_NO_MERGE;
1812 }
1813
1814 -static struct bfq_queue *bfq_init_rq(struct request *rq);
1815 -
1816 static void bfq_request_merged(struct request_queue *q, struct request *req,
1817 enum elv_merge type)
1818 {
1819 @@ -2270,7 +2275,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
1820 blk_rq_pos(req) <
1821 blk_rq_pos(container_of(rb_prev(&req->rb_node),
1822 struct request, rb_node))) {
1823 - struct bfq_queue *bfqq = bfq_init_rq(req);
1824 + struct bfq_queue *bfqq = RQ_BFQQ(req);
1825 struct bfq_data *bfqd;
1826 struct request *prev, *next_rq;
1827
1828 @@ -2322,8 +2327,8 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
1829 static void bfq_requests_merged(struct request_queue *q, struct request *rq,
1830 struct request *next)
1831 {
1832 - struct bfq_queue *bfqq = bfq_init_rq(rq),
1833 - *next_bfqq = bfq_init_rq(next);
1834 + struct bfq_queue *bfqq = RQ_BFQQ(rq),
1835 + *next_bfqq = RQ_BFQQ(next);
1836
1837 if (!bfqq)
1838 return;
1839 @@ -2502,6 +2507,14 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
1840 if (process_refs == 0 || new_process_refs == 0)
1841 return NULL;
1842
1843 + /*
1844 + * Make sure merged queues belong to the same parent. Parents could
1845 + * have changed since the time we decided the two queues are suitable
1846 + * for merging.
1847 + */
1848 + if (new_bfqq->entity.parent != bfqq->entity.parent)
1849 + return NULL;
1850 +
1851 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
1852 new_bfqq->pid);
1853
1854 @@ -4914,7 +4927,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
1855 bfqg_and_blkg_put(bfqg);
1856 }
1857
1858 -static void bfq_put_cooperator(struct bfq_queue *bfqq)
1859 +void bfq_put_cooperator(struct bfq_queue *bfqq)
1860 {
1861 struct bfq_queue *__bfqq, *next;
1862
1863 @@ -5145,14 +5158,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
1864 struct bfq_queue *bfqq;
1865 struct bfq_group *bfqg;
1866
1867 - rcu_read_lock();
1868 -
1869 - bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
1870 - if (!bfqg) {
1871 - bfqq = &bfqd->oom_bfqq;
1872 - goto out;
1873 - }
1874 -
1875 + bfqg = bfq_bio_bfqg(bfqd, bio);
1876 if (!is_sync) {
1877 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
1878 ioprio);
1879 @@ -5196,7 +5202,6 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
1880 out:
1881 bfqq->ref++; /* get a process reference to this queue */
1882 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
1883 - rcu_read_unlock();
1884 return bfqq;
1885 }
1886
1887 @@ -5499,6 +5504,8 @@ static inline void bfq_update_insert_stats(struct request_queue *q,
1888 unsigned int cmd_flags) {}
1889 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1890
1891 +static struct bfq_queue *bfq_init_rq(struct request *rq);
1892 +
1893 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1894 bool at_head)
1895 {
1896 @@ -5509,17 +5516,14 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1897 unsigned int cmd_flags;
1898
1899 spin_lock_irq(&bfqd->lock);
1900 + bfqq = bfq_init_rq(rq);
1901 if (blk_mq_sched_try_insert_merge(q, rq)) {
1902 spin_unlock_irq(&bfqd->lock);
1903 return;
1904 }
1905
1906 - spin_unlock_irq(&bfqd->lock);
1907 -
1908 blk_mq_sched_request_inserted(rq);
1909
1910 - spin_lock_irq(&bfqd->lock);
1911 - bfqq = bfq_init_rq(rq);
1912 if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
1913 if (at_head)
1914 list_add(&rq->queuelist, &bfqd->dispatch);
1915 diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
1916 index de98fdfe9ea17..f6cc2b4180086 100644
1917 --- a/block/bfq-iosched.h
1918 +++ b/block/bfq-iosched.h
1919 @@ -896,6 +896,8 @@ struct bfq_group {
1920
1921 /* reference counter (see comments in bfq_bic_update_cgroup) */
1922 int ref;
1923 + /* Is bfq_group still online? */
1924 + bool online;
1925
1926 struct bfq_entity entity;
1927 struct bfq_sched_data sched_data;
1928 @@ -949,6 +951,7 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
1929 void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1930 bool compensate, enum bfqq_expiration reason);
1931 void bfq_put_queue(struct bfq_queue *bfqq);
1932 +void bfq_put_cooperator(struct bfq_queue *bfqq);
1933 void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
1934 void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq);
1935 void bfq_schedule_dispatch(struct bfq_data *bfqd);
1936 @@ -975,8 +978,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1937 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
1938 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
1939 void bfq_end_wr_async(struct bfq_data *bfqd);
1940 -struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
1941 - struct blkcg *blkcg);
1942 +struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio);
1943 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
1944 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
1945 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
1946 diff --git a/block/bio.c b/block/bio.c
1947 index 40004a3631a80..08dbdc32ceaa8 100644
1948 --- a/block/bio.c
1949 +++ b/block/bio.c
1950 @@ -2179,7 +2179,7 @@ void bio_clone_blkg_association(struct bio *dst, struct bio *src)
1951 rcu_read_lock();
1952
1953 if (src->bi_blkg)
1954 - __bio_associate_blkg(dst, src->bi_blkg);
1955 + bio_associate_blkg_from_css(dst, &bio_blkcg(src)->css);
1956
1957 rcu_read_unlock();
1958 }
1959 diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
1960 index 71a82528d4bfe..a4156b3b33c31 100644
1961 --- a/block/blk-iolatency.c
1962 +++ b/block/blk-iolatency.c
1963 @@ -86,7 +86,17 @@ struct iolatency_grp;
1964 struct blk_iolatency {
1965 struct rq_qos rqos;
1966 struct timer_list timer;
1967 - atomic_t enabled;
1968 +
1969 + /*
1970 + * ->enabled is the master enable switch gating the throttling logic and
1971 + * inflight tracking. The number of cgroups which have iolat enabled is
1972 + * tracked in ->enable_cnt, and ->enable is flipped on/off accordingly
1973 + * from ->enable_work with the request_queue frozen. For details, See
1974 + * blkiolatency_enable_work_fn().
1975 + */
1976 + bool enabled;
1977 + atomic_t enable_cnt;
1978 + struct work_struct enable_work;
1979 };
1980
1981 static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
1982 @@ -94,11 +104,6 @@ static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
1983 return container_of(rqos, struct blk_iolatency, rqos);
1984 }
1985
1986 -static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
1987 -{
1988 - return atomic_read(&blkiolat->enabled) > 0;
1989 -}
1990 -
1991 struct child_latency_info {
1992 spinlock_t lock;
1993
1994 @@ -463,7 +468,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
1995 struct blkcg_gq *blkg = bio->bi_blkg;
1996 bool issue_as_root = bio_issue_as_root_blkg(bio);
1997
1998 - if (!blk_iolatency_enabled(blkiolat))
1999 + if (!blkiolat->enabled)
2000 return;
2001
2002 while (blkg && blkg->parent) {
2003 @@ -593,7 +598,6 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
2004 u64 window_start;
2005 u64 now = ktime_to_ns(ktime_get());
2006 bool issue_as_root = bio_issue_as_root_blkg(bio);
2007 - bool enabled = false;
2008 int inflight = 0;
2009
2010 blkg = bio->bi_blkg;
2011 @@ -604,8 +608,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
2012 if (!iolat)
2013 return;
2014
2015 - enabled = blk_iolatency_enabled(iolat->blkiolat);
2016 - if (!enabled)
2017 + if (!iolat->blkiolat->enabled)
2018 return;
2019
2020 while (blkg && blkg->parent) {
2021 @@ -643,6 +646,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
2022 struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
2023
2024 del_timer_sync(&blkiolat->timer);
2025 + flush_work(&blkiolat->enable_work);
2026 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
2027 kfree(blkiolat);
2028 }
2029 @@ -714,6 +718,44 @@ next:
2030 rcu_read_unlock();
2031 }
2032
2033 +/**
2034 + * blkiolatency_enable_work_fn - Enable or disable iolatency on the device
2035 + * @work: enable_work of the blk_iolatency of interest
2036 + *
2037 + * iolatency needs to keep track of the number of in-flight IOs per cgroup. This
2038 + * is relatively expensive as it involves walking up the hierarchy twice for
2039 + * every IO. Thus, if iolatency is not enabled in any cgroup for the device, we
2040 + * want to disable the in-flight tracking.
2041 + *
2042 + * We have to make sure that the counting is balanced - we don't want to leak
2043 + * the in-flight counts by disabling accounting in the completion path while IOs
2044 + * are in flight. This is achieved by ensuring that no IO is in flight by
2045 + * freezing the queue while flipping ->enabled. As this requires a sleepable
2046 + * context, ->enabled flipping is punted to this work function.
2047 + */
2048 +static void blkiolatency_enable_work_fn(struct work_struct *work)
2049 +{
2050 + struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
2051 + enable_work);
2052 + bool enabled;
2053 +
2054 + /*
2055 + * There can only be one instance of this function running for @blkiolat
2056 + * and it's guaranteed to be executed at least once after the latest
2057 + * ->enabled_cnt modification. Acting on the latest ->enable_cnt is
2058 + * sufficient.
2059 + *
2060 + * Also, we know @blkiolat is safe to access as ->enable_work is flushed
2061 + * in blkcg_iolatency_exit().
2062 + */
2063 + enabled = atomic_read(&blkiolat->enable_cnt);
2064 + if (enabled != blkiolat->enabled) {
2065 + blk_mq_freeze_queue(blkiolat->rqos.q);
2066 + blkiolat->enabled = enabled;
2067 + blk_mq_unfreeze_queue(blkiolat->rqos.q);
2068 + }
2069 +}
2070 +
2071 int blk_iolatency_init(struct request_queue *q)
2072 {
2073 struct blk_iolatency *blkiolat;
2074 @@ -739,17 +781,15 @@ int blk_iolatency_init(struct request_queue *q)
2075 }
2076
2077 timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
2078 + INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);
2079
2080 return 0;
2081 }
2082
2083 -/*
2084 - * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
2085 - * return 0.
2086 - */
2087 -static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
2088 +static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
2089 {
2090 struct iolatency_grp *iolat = blkg_to_lat(blkg);
2091 + struct blk_iolatency *blkiolat = iolat->blkiolat;
2092 u64 oldval = iolat->min_lat_nsec;
2093
2094 iolat->min_lat_nsec = val;
2095 @@ -757,13 +797,15 @@ static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
2096 iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
2097 BLKIOLATENCY_MAX_WIN_SIZE);
2098
2099 - if (!oldval && val)
2100 - return 1;
2101 + if (!oldval && val) {
2102 + if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
2103 + schedule_work(&blkiolat->enable_work);
2104 + }
2105 if (oldval && !val) {
2106 blkcg_clear_delay(blkg);
2107 - return -1;
2108 + if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
2109 + schedule_work(&blkiolat->enable_work);
2110 }
2111 - return 0;
2112 }
2113
2114 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
2115 @@ -795,7 +837,6 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
2116 u64 lat_val = 0;
2117 u64 oldval;
2118 int ret;
2119 - int enable = 0;
2120
2121 ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
2122 if (ret)
2123 @@ -830,41 +871,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
2124 blkg = ctx.blkg;
2125 oldval = iolat->min_lat_nsec;
2126
2127 - enable = iolatency_set_min_lat_nsec(blkg, lat_val);
2128 - if (enable) {
2129 - if (!blk_get_queue(blkg->q)) {
2130 - ret = -ENODEV;
2131 - goto out;
2132 - }
2133 -
2134 - blkg_get(blkg);
2135 - }
2136 -
2137 - if (oldval != iolat->min_lat_nsec) {
2138 + iolatency_set_min_lat_nsec(blkg, lat_val);
2139 + if (oldval != iolat->min_lat_nsec)
2140 iolatency_clear_scaling(blkg);
2141 - }
2142 -
2143 ret = 0;
2144 out:
2145 blkg_conf_finish(&ctx);
2146 - if (ret == 0 && enable) {
2147 - struct iolatency_grp *tmp = blkg_to_lat(blkg);
2148 - struct blk_iolatency *blkiolat = tmp->blkiolat;
2149 -
2150 - blk_mq_freeze_queue(blkg->q);
2151 -
2152 - if (enable == 1)
2153 - atomic_inc(&blkiolat->enabled);
2154 - else if (enable == -1)
2155 - atomic_dec(&blkiolat->enabled);
2156 - else
2157 - WARN_ON_ONCE(1);
2158 -
2159 - blk_mq_unfreeze_queue(blkg->q);
2160 -
2161 - blkg_put(blkg);
2162 - blk_put_queue(blkg->q);
2163 - }
2164 return ret ?: nbytes;
2165 }
2166
2167 @@ -1005,14 +1017,8 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
2168 {
2169 struct iolatency_grp *iolat = pd_to_lat(pd);
2170 struct blkcg_gq *blkg = lat_to_blkg(iolat);
2171 - struct blk_iolatency *blkiolat = iolat->blkiolat;
2172 - int ret;
2173
2174 - ret = iolatency_set_min_lat_nsec(blkg, 0);
2175 - if (ret == 1)
2176 - atomic_inc(&blkiolat->enabled);
2177 - if (ret == -1)
2178 - atomic_dec(&blkiolat->enabled);
2179 + iolatency_set_min_lat_nsec(blkg, 0);
2180 iolatency_clear_scaling(blkg);
2181 }
2182
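The rework above makes enabling edge-triggered: only the 0->1 and 1->0 transitions of enable_cnt schedule enable_work, and the work function then flips the cached ->enabled flag while the queue is frozen, so no IO is in flight during the switch. A reduced sketch of the counter side, with illustrative names rather than the patch's code:

#include <linux/atomic.h>
#include <linux/workqueue.h>

struct demo_ctl {
	atomic_t enable_cnt;
	struct work_struct enable_work;	/* flips the cached flag under queue freeze */
};

static void demo_enable(struct demo_ctl *ctl)
{
	if (atomic_inc_return(&ctl->enable_cnt) == 1)	/* act on the 0 -> 1 edge only */
		schedule_work(&ctl->enable_work);
}

static void demo_disable(struct demo_ctl *ctl)
{
	if (atomic_dec_return(&ctl->enable_cnt) == 0)	/* act on the 1 -> 0 edge only */
		schedule_work(&ctl->enable_work);
}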
2183 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
2184 index 927760b316a4d..43a1a855886bd 100644
2185 --- a/crypto/cryptd.c
2186 +++ b/crypto/cryptd.c
2187 @@ -39,6 +39,10 @@ struct cryptd_cpu_queue {
2188 };
2189
2190 struct cryptd_queue {
2191 + /*
2192 + * Protected by disabling BH to allow enqueueing from softinterrupt and
2193 + * dequeuing from kworker (cryptd_queue_worker()).
2194 + */
2195 struct cryptd_cpu_queue __percpu *cpu_queue;
2196 };
2197
2198 @@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue)
2199 static int cryptd_enqueue_request(struct cryptd_queue *queue,
2200 struct crypto_async_request *request)
2201 {
2202 - int cpu, err;
2203 + int err;
2204 struct cryptd_cpu_queue *cpu_queue;
2205 refcount_t *refcnt;
2206
2207 - cpu = get_cpu();
2208 + local_bh_disable();
2209 cpu_queue = this_cpu_ptr(queue->cpu_queue);
2210 err = crypto_enqueue_request(&cpu_queue->queue, request);
2211
2212 refcnt = crypto_tfm_ctx(request->tfm);
2213
2214 if (err == -ENOSPC)
2215 - goto out_put_cpu;
2216 + goto out;
2217
2218 - queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
2219 + queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
2220
2221 if (!refcount_read(refcnt))
2222 - goto out_put_cpu;
2223 + goto out;
2224
2225 refcount_inc(refcnt);
2226
2227 -out_put_cpu:
2228 - put_cpu();
2229 +out:
2230 + local_bh_enable();
2231
2232 return err;
2233 }
2234 @@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct work_struct *work)
2235 cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
2236 /*
2237 * Only handle one request at a time to avoid hogging crypto workqueue.
2238 - * preempt_disable/enable is used to prevent being preempted by
2239 - * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
2240 - * cryptd_enqueue_request() being accessed from software interrupts.
2241 */
2242 local_bh_disable();
2243 - preempt_disable();
2244 backlog = crypto_get_backlog(&cpu_queue->queue);
2245 req = crypto_dequeue_request(&cpu_queue->queue);
2246 - preempt_enable();
2247 local_bh_enable();
2248
2249 if (!req)
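The cryptd change drops get_cpu()/put_cpu() in favour of local_bh_disable()/local_bh_enable(): with bottom halves off, the softirq-context enqueue path cannot run on this CPU, and this_cpu_ptr() stays valid because BH-off also keeps the task on the current CPU. A sketch of that access pattern, using illustrative names rather than the patch's code:

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/bottom_half.h>

struct demo_cpu_queue {
	struct list_head items;
};
static DEFINE_PER_CPU(struct demo_cpu_queue, demo_queues);	/* illustrative */

static void demo_enqueue(struct list_head *item)
{
	struct demo_cpu_queue *q;

	local_bh_disable();		/* excludes the softirq user of this CPU's queue */
	q = this_cpu_ptr(&demo_queues);	/* safe: BH-off keeps us on this CPU */
	list_add_tail(item, &q->items);
	local_bh_enable();
}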
2250 diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
2251 index a4b7cdd0c8446..1b0aeb8320448 100644
2252 --- a/drivers/acpi/property.c
2253 +++ b/drivers/acpi/property.c
2254 @@ -430,6 +430,16 @@ void acpi_init_properties(struct acpi_device *adev)
2255 acpi_extract_apple_properties(adev);
2256 }
2257
2258 +static void acpi_free_device_properties(struct list_head *list)
2259 +{
2260 + struct acpi_device_properties *props, *tmp;
2261 +
2262 + list_for_each_entry_safe(props, tmp, list, list) {
2263 + list_del(&props->list);
2264 + kfree(props);
2265 + }
2266 +}
2267 +
2268 static void acpi_destroy_nondev_subnodes(struct list_head *list)
2269 {
2270 struct acpi_data_node *dn, *next;
2271 @@ -442,22 +452,18 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list)
2272 wait_for_completion(&dn->kobj_done);
2273 list_del(&dn->sibling);
2274 ACPI_FREE((void *)dn->data.pointer);
2275 + acpi_free_device_properties(&dn->data.properties);
2276 kfree(dn);
2277 }
2278 }
2279
2280 void acpi_free_properties(struct acpi_device *adev)
2281 {
2282 - struct acpi_device_properties *props, *tmp;
2283 -
2284 acpi_destroy_nondev_subnodes(&adev->data.subnodes);
2285 ACPI_FREE((void *)adev->data.pointer);
2286 adev->data.of_compatible = NULL;
2287 adev->data.pointer = NULL;
2288 - list_for_each_entry_safe(props, tmp, &adev->data.properties, list) {
2289 - list_del(&props->list);
2290 - kfree(props);
2291 - }
2292 + acpi_free_device_properties(&adev->data.properties);
2293 }
2294
2295 /**
2296 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
2297 index b0e23e3fe0d56..34966128293b1 100644
2298 --- a/drivers/acpi/sleep.c
2299 +++ b/drivers/acpi/sleep.c
2300 @@ -374,6 +374,18 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
2301 DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
2302 },
2303 },
2304 + /*
2305 + * ASUS B1400CEAE hangs on resume from suspend (see
2306 + * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
2307 + */
2308 + {
2309 + .callback = init_default_s3,
2310 + .ident = "ASUS B1400CEAE",
2311 + .matches = {
2312 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
2313 + DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
2314 + },
2315 + },
2316 {},
2317 };
2318
2319 diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
2320 index 12a505bb9c5b1..c4f36312b8a42 100644
2321 --- a/drivers/ata/libata-transport.c
2322 +++ b/drivers/ata/libata-transport.c
2323 @@ -196,7 +196,7 @@ static struct {
2324 { XFER_PIO_0, "XFER_PIO_0" },
2325 { XFER_PIO_SLOW, "XFER_PIO_SLOW" }
2326 };
2327 -ata_bitfield_name_match(xfer,ata_xfer_names)
2328 +ata_bitfield_name_search(xfer, ata_xfer_names)
2329
2330 /*
2331 * ATA Port attributes
2332 diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
2333 index ac3b1fda820ff..c240d8cbfd417 100644
2334 --- a/drivers/ata/pata_octeon_cf.c
2335 +++ b/drivers/ata/pata_octeon_cf.c
2336 @@ -888,12 +888,14 @@ static int octeon_cf_probe(struct platform_device *pdev)
2337 int i;
2338 res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
2339 if (!res_dma) {
2340 + put_device(&dma_dev->dev);
2341 of_node_put(dma_node);
2342 return -EINVAL;
2343 }
2344 cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
2345 resource_size(res_dma));
2346 if (!cf_port->dma_base) {
2347 + put_device(&dma_dev->dev);
2348 of_node_put(dma_node);
2349 return -EINVAL;
2350 }
2351 @@ -903,6 +905,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
2352 irq = i;
2353 irq_handler = octeon_cf_interrupt;
2354 }
2355 + put_device(&dma_dev->dev);
2356 }
2357 of_node_put(dma_node);
2358 }
2359 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
2360 index a1d1e82563244..7d7d28f498edd 100644
2361 --- a/drivers/base/bus.c
2362 +++ b/drivers/base/bus.c
2363 @@ -620,7 +620,7 @@ int bus_add_driver(struct device_driver *drv)
2364 if (drv->bus->p->drivers_autoprobe) {
2365 error = driver_attach(drv);
2366 if (error)
2367 - goto out_unregister;
2368 + goto out_del_list;
2369 }
2370 module_add_driver(drv->owner, drv);
2371
2372 @@ -647,6 +647,8 @@ int bus_add_driver(struct device_driver *drv)
2373
2374 return 0;
2375
2376 +out_del_list:
2377 + klist_del(&priv->knode_bus);
2378 out_unregister:
2379 kobject_put(&priv->kobj);
2380 /* drv->p is freed in driver_release() */
2381 diff --git a/drivers/base/dd.c b/drivers/base/dd.c
2382 index 26cd4ce3ac75f..6f85280fef8d3 100644
2383 --- a/drivers/base/dd.c
2384 +++ b/drivers/base/dd.c
2385 @@ -873,6 +873,7 @@ out_unlock:
2386 static int __device_attach(struct device *dev, bool allow_async)
2387 {
2388 int ret = 0;
2389 + bool async = false;
2390
2391 device_lock(dev);
2392 if (dev->p->dead) {
2393 @@ -911,7 +912,7 @@ static int __device_attach(struct device *dev, bool allow_async)
2394 */
2395 dev_dbg(dev, "scheduling asynchronous probe\n");
2396 get_device(dev);
2397 - async_schedule_dev(__device_attach_async_helper, dev);
2398 + async = true;
2399 } else {
2400 pm_request_idle(dev);
2401 }
2402 @@ -921,6 +922,8 @@ static int __device_attach(struct device *dev, bool allow_async)
2403 }
2404 out_unlock:
2405 device_unlock(dev);
2406 + if (async)
2407 + async_schedule_dev(__device_attach_async_helper, dev);
2408 return ret;
2409 }
2410
2411 diff --git a/drivers/base/node.c b/drivers/base/node.c
2412 index 62a052990bb9b..666eb55c0774e 100644
2413 --- a/drivers/base/node.c
2414 +++ b/drivers/base/node.c
2415 @@ -641,6 +641,7 @@ static int register_node(struct node *node, int num)
2416 */
2417 void unregister_node(struct node *node)
2418 {
2419 + compaction_unregister_node(node);
2420 hugetlb_unregister_node(node); /* no-op, if memoryless node */
2421 node_remove_accesses(node);
2422 node_remove_caches(node);
2423 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
2424 index ba10fa24fa1f1..5ece2fd70d9cf 100644
2425 --- a/drivers/block/drbd/drbd_main.c
2426 +++ b/drivers/block/drbd/drbd_main.c
2427 @@ -3709,9 +3709,8 @@ const char *cmdname(enum drbd_packet cmd)
2428 * when we want to support more than
2429 * one PRO_VERSION */
2430 static const char *cmdnames[] = {
2431 +
2432 [P_DATA] = "Data",
2433 - [P_WSAME] = "WriteSame",
2434 - [P_TRIM] = "Trim",
2435 [P_DATA_REPLY] = "DataReply",
2436 [P_RS_DATA_REPLY] = "RSDataReply",
2437 [P_BARRIER] = "Barrier",
2438 @@ -3722,7 +3721,6 @@ const char *cmdname(enum drbd_packet cmd)
2439 [P_DATA_REQUEST] = "DataRequest",
2440 [P_RS_DATA_REQUEST] = "RSDataRequest",
2441 [P_SYNC_PARAM] = "SyncParam",
2442 - [P_SYNC_PARAM89] = "SyncParam89",
2443 [P_PROTOCOL] = "ReportProtocol",
2444 [P_UUIDS] = "ReportUUIDs",
2445 [P_SIZES] = "ReportSizes",
2446 @@ -3730,6 +3728,7 @@ const char *cmdname(enum drbd_packet cmd)
2447 [P_SYNC_UUID] = "ReportSyncUUID",
2448 [P_AUTH_CHALLENGE] = "AuthChallenge",
2449 [P_AUTH_RESPONSE] = "AuthResponse",
2450 + [P_STATE_CHG_REQ] = "StateChgRequest",
2451 [P_PING] = "Ping",
2452 [P_PING_ACK] = "PingAck",
2453 [P_RECV_ACK] = "RecvAck",
2454 @@ -3740,24 +3739,26 @@ const char *cmdname(enum drbd_packet cmd)
2455 [P_NEG_DREPLY] = "NegDReply",
2456 [P_NEG_RS_DREPLY] = "NegRSDReply",
2457 [P_BARRIER_ACK] = "BarrierAck",
2458 - [P_STATE_CHG_REQ] = "StateChgRequest",
2459 [P_STATE_CHG_REPLY] = "StateChgReply",
2460 [P_OV_REQUEST] = "OVRequest",
2461 [P_OV_REPLY] = "OVReply",
2462 [P_OV_RESULT] = "OVResult",
2463 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
2464 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
2465 + [P_SYNC_PARAM89] = "SyncParam89",
2466 [P_COMPRESSED_BITMAP] = "CBitmap",
2467 [P_DELAY_PROBE] = "DelayProbe",
2468 [P_OUT_OF_SYNC] = "OutOfSync",
2469 - [P_RETRY_WRITE] = "RetryWrite",
2470 [P_RS_CANCEL] = "RSCancel",
2471 [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
2472 [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
2473 [P_RETRY_WRITE] = "retry_write",
2474 [P_PROTOCOL_UPDATE] = "protocol_update",
2475 + [P_TRIM] = "Trim",
2476 [P_RS_THIN_REQ] = "rs_thin_req",
2477 [P_RS_DEALLOCATED] = "rs_deallocated",
2478 + [P_WSAME] = "WriteSame",
2479 + [P_ZEROES] = "Zeroes",
2480
2481 /* enum drbd_packet, but not commands - obsoleted flags:
2482 * P_MAY_IGNORE
2483 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
2484 index 25e81b1a59a54..09323b0510f0b 100644
2485 --- a/drivers/block/nbd.c
2486 +++ b/drivers/block/nbd.c
2487 @@ -865,11 +865,15 @@ static int wait_for_reconnect(struct nbd_device *nbd)
2488 struct nbd_config *config = nbd->config;
2489 if (!config->dead_conn_timeout)
2490 return 0;
2491 - if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
2492 +
2493 + if (!wait_event_timeout(config->conn_wait,
2494 + test_bit(NBD_RT_DISCONNECTED,
2495 + &config->runtime_flags) ||
2496 + atomic_read(&config->live_connections) > 0,
2497 + config->dead_conn_timeout))
2498 return 0;
2499 - return wait_event_timeout(config->conn_wait,
2500 - atomic_read(&config->live_connections) > 0,
2501 - config->dead_conn_timeout) > 0;
2502 +
2503 + return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
2504 }
2505
2506 static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
2507 @@ -1340,7 +1344,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
2508 static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
2509 struct block_device *bdev)
2510 {
2511 - sock_shutdown(nbd);
2512 + nbd_clear_sock(nbd);
2513 __invalidate_device(bdev, true);
2514 nbd_bdev_reset(bdev);
2515 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
2516 @@ -1453,15 +1457,20 @@ static struct nbd_config *nbd_alloc_config(void)
2517 {
2518 struct nbd_config *config;
2519
2520 + if (!try_module_get(THIS_MODULE))
2521 + return ERR_PTR(-ENODEV);
2522 +
2523 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
2524 - if (!config)
2525 - return NULL;
2526 + if (!config) {
2527 + module_put(THIS_MODULE);
2528 + return ERR_PTR(-ENOMEM);
2529 + }
2530 +
2531 atomic_set(&config->recv_threads, 0);
2532 init_waitqueue_head(&config->recv_wq);
2533 init_waitqueue_head(&config->conn_wait);
2534 config->blksize = NBD_DEF_BLKSIZE;
2535 atomic_set(&config->live_connections, 0);
2536 - try_module_get(THIS_MODULE);
2537 return config;
2538 }
2539
2540 @@ -1488,12 +1497,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
2541 mutex_unlock(&nbd->config_lock);
2542 goto out;
2543 }
2544 - config = nbd->config = nbd_alloc_config();
2545 - if (!config) {
2546 - ret = -ENOMEM;
2547 + config = nbd_alloc_config();
2548 + if (IS_ERR(config)) {
2549 + ret = PTR_ERR(config);
2550 mutex_unlock(&nbd->config_lock);
2551 goto out;
2552 }
2553 + nbd->config = config;
2554 refcount_set(&nbd->config_refs, 1);
2555 refcount_inc(&nbd->refs);
2556 mutex_unlock(&nbd->config_lock);
2557 @@ -1915,13 +1925,14 @@ again:
2558 nbd_put(nbd);
2559 return -EINVAL;
2560 }
2561 - config = nbd->config = nbd_alloc_config();
2562 - if (!nbd->config) {
2563 + config = nbd_alloc_config();
2564 + if (IS_ERR(config)) {
2565 mutex_unlock(&nbd->config_lock);
2566 nbd_put(nbd);
2567 printk(KERN_ERR "nbd: couldn't allocate config\n");
2568 - return -ENOMEM;
2569 + return PTR_ERR(config);
2570 }
2571 + nbd->config = config;
2572 refcount_set(&nbd->config_refs, 1);
2573 set_bit(NBD_RT_BOUND, &config->runtime_flags);
2574
2575 @@ -2014,6 +2025,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
2576 mutex_lock(&nbd->config_lock);
2577 nbd_disconnect(nbd);
2578 sock_shutdown(nbd);
2579 + wake_up(&nbd->config->conn_wait);
2580 /*
2581 * Make sure recv thread has finished, so it does not drop the last
2582 * config ref and try to destroy the workqueue from inside the work
2583 @@ -2441,6 +2453,12 @@ static void __exit nbd_cleanup(void)
2584 struct nbd_device *nbd;
2585 LIST_HEAD(del_list);
2586
2587 + /*
2588 + * Unregister netlink interface prior to waiting
2589 + * for the completion of netlink commands.
2590 + */
2591 + genl_unregister_family(&nbd_genl_family);
2592 +
2593 nbd_dbg_close();
2594
2595 mutex_lock(&nbd_index_mutex);
2596 @@ -2450,13 +2468,15 @@ static void __exit nbd_cleanup(void)
2597 while (!list_empty(&del_list)) {
2598 nbd = list_first_entry(&del_list, struct nbd_device, list);
2599 list_del_init(&nbd->list);
2600 + if (refcount_read(&nbd->config_refs))
2601 + printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n",
2602 + refcount_read(&nbd->config_refs));
2603 if (refcount_read(&nbd->refs) != 1)
2604 printk(KERN_ERR "nbd: possibly leaking a device\n");
2605 nbd_put(nbd);
2606 }
2607
2608 idr_destroy(&nbd_index_idr);
2609 - genl_unregister_family(&nbd_genl_family);
2610 unregister_blkdev(NBD_MAJOR, "nbd");
2611 }
2612
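The reworked wait_for_reconnect() earlier in the nbd hunk leans on the return convention of wait_event_timeout(): it returns 0 only when the timeout expired with the condition still false, and non-zero once the condition became true. Waking on either "disconnected" or "a connection is live" and then re-testing the disconnect flag tells the two wake-ups apart. A reduced sketch with illustrative names, not the patch's code:

#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/atomic.h>

static int demo_wait_for_link(wait_queue_head_t *wq, unsigned long *flags,
			      atomic_t *live, unsigned long timeout)
{
	if (!wait_event_timeout(*wq,
				test_bit(0, flags) ||		/* illustrative "disconnected" bit */
				atomic_read(live) > 0,
				timeout))
		return 0;	/* timed out, still no connection */

	return !test_bit(0, flags);	/* woken: only usable if not disconnected */
}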
2613 diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
2614 index 2a5cd502feae7..9b3ea86c20e5e 100644
2615 --- a/drivers/block/virtio_blk.c
2616 +++ b/drivers/block/virtio_blk.c
2617 @@ -976,11 +976,12 @@ static int virtblk_probe(struct virtio_device *vdev)
2618 blk_queue_io_opt(q, blk_size * opt_io_size);
2619
2620 if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
2621 - q->limits.discard_granularity = blk_size;
2622 -
2623 virtio_cread(vdev, struct virtio_blk_config,
2624 discard_sector_alignment, &v);
2625 - q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;
2626 + if (v)
2627 + q->limits.discard_granularity = v << SECTOR_SHIFT;
2628 + else
2629 + q->limits.discard_granularity = blk_size;
2630
2631 virtio_cread(vdev, struct virtio_blk_config,
2632 max_discard_sectors, &v);
2633 diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
2634 index 469ca73de4ce7..44aeceaccfa48 100644
2635 --- a/drivers/bus/ti-sysc.c
2636 +++ b/drivers/bus/ti-sysc.c
2637 @@ -2724,7 +2724,9 @@ static int sysc_remove(struct platform_device *pdev)
2638 struct sysc *ddata = platform_get_drvdata(pdev);
2639 int error;
2640
2641 - cancel_delayed_work_sync(&ddata->idle_work);
2642 + /* Device can still be enabled, see deferred idle quirk in probe */
2643 + if (cancel_delayed_work_sync(&ddata->idle_work))
2644 + ti_sysc_idle(&ddata->idle_work.work);
2645
2646 error = pm_runtime_get_sync(ddata->dev);
2647 if (error < 0) {
2648 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
2649 index ad2e6d55d4a59..736970312bbc9 100644
2650 --- a/drivers/char/ipmi/ipmi_msghandler.c
2651 +++ b/drivers/char/ipmi/ipmi_msghandler.c
2652 @@ -11,8 +11,8 @@
2653 * Copyright 2002 MontaVista Software Inc.
2654 */
2655
2656 -#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
2657 -#define dev_fmt pr_fmt
2658 +#define pr_fmt(fmt) "IPMI message handler: " fmt
2659 +#define dev_fmt(fmt) pr_fmt(fmt)
2660
2661 #include <linux/module.h>
2662 #include <linux/errno.h>
2663 diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
2664 index bb42a1c92cae5..60fb6c62f224b 100644
2665 --- a/drivers/char/ipmi/ipmi_ssif.c
2666 +++ b/drivers/char/ipmi/ipmi_ssif.c
2667 @@ -845,6 +845,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
2668 break;
2669
2670 case SSIF_GETTING_EVENTS:
2671 + if (!msg) {
2672 + /* Should never happen, but just in case. */
2673 + dev_warn(&ssif_info->client->dev,
2674 + "No message set while getting events\n");
2675 + ipmi_ssif_unlock_cond(ssif_info, flags);
2676 + break;
2677 + }
2678 +
2679 if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
2680 /* Error getting event, probably done. */
2681 msg->done(msg);
2682 @@ -869,6 +877,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
2683 break;
2684
2685 case SSIF_GETTING_MESSAGES:
2686 + if (!msg) {
2687 + /* Should never happen, but just in case. */
2688 + dev_warn(&ssif_info->client->dev,
2689 + "No message set while getting messages\n");
2690 + ipmi_ssif_unlock_cond(ssif_info, flags);
2691 + break;
2692 + }
2693 +
2694 if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
2695 /* Error getting event, probably done. */
2696 msg->done(msg);
2697 @@ -892,6 +908,13 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
2698 deliver_recv_msg(ssif_info, msg);
2699 }
2700 break;
2701 +
2702 + default:
2703 + /* Should never happen, but just in case. */
2704 + dev_warn(&ssif_info->client->dev,
2705 + "Invalid state in message done handling: %d\n",
2706 + ssif_info->ssif_state);
2707 + ipmi_ssif_unlock_cond(ssif_info, flags);
2708 }
2709
2710 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
2711 diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
2712 index 56c0cc32d0ac6..d514b44e67dd1 100644
2713 --- a/drivers/clocksource/timer-oxnas-rps.c
2714 +++ b/drivers/clocksource/timer-oxnas-rps.c
2715 @@ -236,7 +236,7 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
2716 }
2717
2718 rps->irq = irq_of_parse_and_map(np, 0);
2719 - if (rps->irq < 0) {
2720 + if (!rps->irq) {
2721 ret = -EINVAL;
2722 goto err_iomap;
2723 }
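The oxnas-rps fix corrects the error check: irq_of_parse_and_map() returns an unsigned virq number and signals failure with 0, never a negative value, so "irq < 0" can never trigger. A sketch of the correct check (illustrative wrapper, not the patch's code):

#include <linux/of_irq.h>
#include <linux/errno.h>

static int demo_map_irq(struct device_node *np)
{
	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (!irq)		/* 0 means "no mapping found"; there is no -errno here */
		return -EINVAL;
	return irq;
}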
2724 diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
2725 index 4b04ffbe5e7e9..e3be5c2f57b8e 100644
2726 --- a/drivers/clocksource/timer-riscv.c
2727 +++ b/drivers/clocksource/timer-riscv.c
2728 @@ -26,7 +26,7 @@ static int riscv_clock_next_event(unsigned long delta,
2729
2730 static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
2731 .name = "riscv_timer_clockevent",
2732 - .features = CLOCK_EVT_FEAT_ONESHOT,
2733 + .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
2734 .rating = 100,
2735 .set_next_event = riscv_clock_next_event,
2736 };
2737 diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
2738 index 9c841980eed13..c9aa0498fb840 100644
2739 --- a/drivers/clocksource/timer-sp804.c
2740 +++ b/drivers/clocksource/timer-sp804.c
2741 @@ -215,6 +215,11 @@ static int __init sp804_of_init(struct device_node *np)
2742 struct clk *clk1, *clk2;
2743 const char *name = of_get_property(np, "compatible", NULL);
2744
2745 + if (initialized) {
2746 + pr_debug("%pOF: skipping further SP804 timer device\n", np);
2747 + return 0;
2748 + }
2749 +
2750 base = of_iomap(np, 0);
2751 if (!base)
2752 return -ENXIO;
2753 @@ -223,11 +228,6 @@ static int __init sp804_of_init(struct device_node *np)
2754 writel(0, base + TIMER_CTRL);
2755 writel(0, base + TIMER_2_BASE + TIMER_CTRL);
2756
2757 - if (initialized || !of_device_is_available(np)) {
2758 - ret = -EINVAL;
2759 - goto err;
2760 - }
2761 -
2762 clk1 = of_clk_get(np, 0);
2763 if (IS_ERR(clk1))
2764 clk1 = NULL;
2765 diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
2766 index 84ceddfee76b4..708dc63b2f099 100644
2767 --- a/drivers/crypto/marvell/cipher.c
2768 +++ b/drivers/crypto/marvell/cipher.c
2769 @@ -610,7 +610,6 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
2770 .decrypt = mv_cesa_ecb_des3_ede_decrypt,
2771 .min_keysize = DES3_EDE_KEY_SIZE,
2772 .max_keysize = DES3_EDE_KEY_SIZE,
2773 - .ivsize = DES3_EDE_BLOCK_SIZE,
2774 .base = {
2775 .cra_name = "ecb(des3_ede)",
2776 .cra_driver_name = "mv-ecb-des3-ede",
2777 diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
2778 index 027769e39f9b8..a491dcfa1dd07 100644
2779 --- a/drivers/devfreq/rk3399_dmc.c
2780 +++ b/drivers/devfreq/rk3399_dmc.c
2781 @@ -485,6 +485,8 @@ static int rk3399_dmcfreq_remove(struct platform_device *pdev)
2782 {
2783 struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
2784
2785 + devfreq_event_disable_edev(dmcfreq->edev);
2786 +
2787 /*
2788 * Before remove the opp table we need to unregister the opp notifier.
2789 */
2790 diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
2791 index a05355d1292e8..c902c24806404 100644
2792 --- a/drivers/dma/stm32-mdma.c
2793 +++ b/drivers/dma/stm32-mdma.c
2794 @@ -40,7 +40,6 @@
2795 STM32_MDMA_SHIFT(mask))
2796
2797 #define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
2798 -#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */
2799
2800 /* MDMA Channel x interrupt/status register */
2801 #define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */
2802 @@ -196,7 +195,7 @@
2803
2804 #define STM32_MDMA_MAX_BUF_LEN 128
2805 #define STM32_MDMA_MAX_BLOCK_LEN 65536
2806 -#define STM32_MDMA_MAX_CHANNELS 63
2807 +#define STM32_MDMA_MAX_CHANNELS 32
2808 #define STM32_MDMA_MAX_REQUESTS 256
2809 #define STM32_MDMA_MAX_BURST 128
2810 #define STM32_MDMA_VERY_HIGH_PRIORITY 0x11
2811 @@ -1351,21 +1350,11 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
2812
2813 /* Find out which channel generates the interrupt */
2814 status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
2815 - if (status) {
2816 - id = __ffs(status);
2817 - } else {
2818 - status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
2819 - if (!status) {
2820 - dev_dbg(mdma2dev(dmadev), "spurious it\n");
2821 - return IRQ_NONE;
2822 - }
2823 - id = __ffs(status);
2824 - /*
2825 - * As GISR0 provides status for channel id from 0 to 31,
2826 - * so GISR1 provides status for channel id from 32 to 62
2827 - */
2828 - id += 32;
2829 + if (!status) {
2830 + dev_dbg(mdma2dev(dmadev), "spurious it\n");
2831 + return IRQ_NONE;
2832 }
2833 + id = __ffs(status);
2834
2835 chan = &dmadev->chan[id];
2836 if (!chan) {
2837 diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
2838 index 84009c5e0f330..b61d0c79dffb6 100644
2839 --- a/drivers/dma/xilinx/zynqmp_dma.c
2840 +++ b/drivers/dma/xilinx/zynqmp_dma.c
2841 @@ -232,7 +232,7 @@ struct zynqmp_dma_chan {
2842 bool is_dmacoherent;
2843 struct tasklet_struct tasklet;
2844 bool idle;
2845 - u32 desc_size;
2846 + size_t desc_size;
2847 bool err;
2848 u32 bus_width;
2849 u32 src_burst_len;
2850 @@ -489,7 +489,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
2851 }
2852
2853 chan->desc_pool_v = dma_alloc_coherent(chan->dev,
2854 - (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
2855 + (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
2856 + ZYNQMP_DMA_NUM_DESCS),
2857 &chan->desc_pool_p, GFP_KERNEL);
2858 if (!chan->desc_pool_v)
2859 return -ENOMEM;
2860 diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
2861 index 5c9e156cd0862..6b905c3d30f4f 100644
2862 --- a/drivers/extcon/extcon.c
2863 +++ b/drivers/extcon/extcon.c
2864 @@ -1230,19 +1230,14 @@ int extcon_dev_register(struct extcon_dev *edev)
2865 edev->dev.type = &edev->extcon_dev_type;
2866 }
2867
2868 - ret = device_register(&edev->dev);
2869 - if (ret) {
2870 - put_device(&edev->dev);
2871 - goto err_dev;
2872 - }
2873 -
2874 spin_lock_init(&edev->lock);
2875 - edev->nh = devm_kcalloc(&edev->dev, edev->max_supported,
2876 - sizeof(*edev->nh), GFP_KERNEL);
2877 - if (!edev->nh) {
2878 - ret = -ENOMEM;
2879 - device_unregister(&edev->dev);
2880 - goto err_dev;
2881 + if (edev->max_supported) {
2882 + edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh),
2883 + GFP_KERNEL);
2884 + if (!edev->nh) {
2885 + ret = -ENOMEM;
2886 + goto err_alloc_nh;
2887 + }
2888 }
2889
2890 for (index = 0; index < edev->max_supported; index++)
2891 @@ -1253,6 +1248,12 @@ int extcon_dev_register(struct extcon_dev *edev)
2892 dev_set_drvdata(&edev->dev, edev);
2893 edev->state = 0;
2894
2895 + ret = device_register(&edev->dev);
2896 + if (ret) {
2897 + put_device(&edev->dev);
2898 + goto err_dev;
2899 + }
2900 +
2901 mutex_lock(&extcon_dev_list_lock);
2902 list_add(&edev->entry, &extcon_dev_list);
2903 mutex_unlock(&extcon_dev_list_lock);
2904 @@ -1260,6 +1261,9 @@ int extcon_dev_register(struct extcon_dev *edev)
2905 return 0;
2906
2907 err_dev:
2908 + if (edev->max_supported)
2909 + kfree(edev->nh);
2910 +err_alloc_nh:
2911 if (edev->max_supported)
2912 kfree(edev->extcon_dev_type.groups);
2913 err_alloc_groups:
2914 @@ -1320,6 +1324,7 @@ void extcon_dev_unregister(struct extcon_dev *edev)
2915 if (edev->max_supported) {
2916 kfree(edev->extcon_dev_type.groups);
2917 kfree(edev->cables);
2918 + kfree(edev->nh);
2919 }
2920
2921 put_device(&edev->dev);
2922 diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
2923 index f986ee8919f03..2be32e86445f4 100644
2924 --- a/drivers/firmware/arm_scmi/base.c
2925 +++ b/drivers/firmware/arm_scmi/base.c
2926 @@ -164,7 +164,7 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
2927 break;
2928
2929 loop_num_ret = le32_to_cpu(*num_ret);
2930 - if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) {
2931 + if (loop_num_ret > MAX_PROTOCOLS_IMP - tot_num_ret) {
2932 dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP");
2933 break;
2934 }
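The SCMI hunk rewrites the bound check so it cannot wrap: since tot_num_ret is already known to be at most MAX_PROTOCOLS_IMP, comparing loop_num_ret against the remaining headroom is equivalent to the old sum-based test but the addition that could overflow never happens. A reduced sketch of the idiom, with an illustrative limit rather than the patch's constants:

#include <linux/types.h>

#define DEMO_MAX	256U	/* illustrative limit */

/* true if adding @add to @total would exceed DEMO_MAX; assumes total <= DEMO_MAX */
static bool demo_would_exceed(u32 total, u32 add)
{
	return add > DEMO_MAX - total;
}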
2935 diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
2936 index b6180023eba7c..2858e05636e98 100644
2937 --- a/drivers/firmware/dmi-sysfs.c
2938 +++ b/drivers/firmware/dmi-sysfs.c
2939 @@ -603,7 +603,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
2940 "%d-%d", dh->type, entry->instance);
2941
2942 if (*ret) {
2943 - kfree(entry);
2944 + kobject_put(&entry->kobj);
2945 return;
2946 }
2947
2948 diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
2949 index b2b4ba240fb11..08c422380a00d 100644
2950 --- a/drivers/firmware/stratix10-svc.c
2951 +++ b/drivers/firmware/stratix10-svc.c
2952 @@ -934,17 +934,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
2953 void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
2954 {
2955 struct stratix10_svc_data_mem *pmem;
2956 - size_t size = 0;
2957
2958 list_for_each_entry(pmem, &svc_data_mem, node)
2959 if (pmem->vaddr == kaddr) {
2960 - size = pmem->size;
2961 - break;
2962 + gen_pool_free(chan->ctrl->genpool,
2963 + (unsigned long)kaddr, pmem->size);
2964 + pmem->vaddr = NULL;
2965 + list_del(&pmem->node);
2966 + return;
2967 }
2968
2969 - gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size);
2970 - pmem->vaddr = NULL;
2971 - list_del(&pmem->node);
2972 + list_del(&svc_data_mem);
2973 }
2974 EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
2975
2976 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
2977 index fddeea2b17e50..7eeb98fe50ed7 100644
2978 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
2979 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
2980 @@ -114,7 +114,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
2981 int ret;
2982
2983 if (cs->in.num_chunks == 0)
2984 - return 0;
2985 + return -EINVAL;
2986
2987 chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
2988 if (!chunk_array)
2989 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
2990 index 3a6115ad01965..f3250db7f9c27 100644
2991 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
2992 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
2993 @@ -568,8 +568,7 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
2994
2995 void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
2996 {
2997 - if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
2998 - amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
2999 + amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
3000 &adev->firmware.fw_buf_mc,
3001 &adev->firmware.fw_buf_ptr);
3002 }
3003 diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
3004 index 4b3faaccecb94..c8a5a5698edd9 100644
3005 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
3006 +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
3007 @@ -1609,19 +1609,7 @@ static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
3008
3009 static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
3010 {
3011 - u8 i;
3012 - struct amdgpu_clock_voltage_dependency_table *table =
3013 - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
3014 -
3015 - for (i = 0; i < table->count; i++) {
3016 - if (table->entries[i].clk >= 0) /* XXX */
3017 - break;
3018 - }
3019 -
3020 - if (i >= table->count)
3021 - i = table->count - 1;
3022 -
3023 - return i;
3024 + return 0;
3025 }
3026
3027 static void kv_update_acp_boot_level(struct amdgpu_device *adev)
3028 diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
3029 index 4cb4c891120b2..9931d5c17cfb6 100644
3030 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
3031 +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
3032 @@ -7250,17 +7250,15 @@ static int si_parse_power_table(struct amdgpu_device *adev)
3033 if (!adev->pm.dpm.ps)
3034 return -ENOMEM;
3035 power_state_offset = (u8 *)state_array->states;
3036 - for (i = 0; i < state_array->ucNumEntries; i++) {
3037 + for (adev->pm.dpm.num_ps = 0, i = 0; i < state_array->ucNumEntries; i++) {
3038 u8 *idx;
3039 power_state = (union pplib_power_state *)power_state_offset;
3040 non_clock_array_index = power_state->v2.nonClockInfoIndex;
3041 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
3042 &non_clock_info_array->nonClockInfo[non_clock_array_index];
3043 ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
3044 - if (ps == NULL) {
3045 - kfree(adev->pm.dpm.ps);
3046 + if (ps == NULL)
3047 return -ENOMEM;
3048 - }
3049 adev->pm.dpm.ps[i].ps_priv = ps;
3050 si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
3051 non_clock_info,
3052 @@ -7282,8 +7280,8 @@ static int si_parse_power_table(struct amdgpu_device *adev)
3053 k++;
3054 }
3055 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
3056 + adev->pm.dpm.num_ps++;
3057 }
3058 - adev->pm.dpm.num_ps = state_array->ucNumEntries;
3059
3060 /* fill in the vce power states */
3061 for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
3062 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
3063 index 98e915e325ddf..bc3f42e915e91 100644
3064 --- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
3065 +++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
3066 @@ -264,6 +264,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
3067
3068 formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
3069 layer->layer_type, &n_formats);
3070 + if (!formats) {
3071 + kfree(kplane);
3072 + return -ENOMEM;
3073 + }
3074
3075 err = drm_universal_plane_init(&kms->base, plane,
3076 get_possible_crtcs(kms, c->pipeline),
3077 @@ -274,8 +278,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
3078
3079 komeda_put_fourcc_list(formats);
3080
3081 - if (err)
3082 - goto cleanup;
3083 + if (err) {
3084 + kfree(kplane);
3085 + return err;
3086 + }
3087
3088 drm_plane_helper_add(plane, &komeda_plane_helper_funcs);
3089
3090 diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
3091 index 587d94798f5c2..af729094260c4 100644
3092 --- a/drivers/gpu/drm/arm/malidp_crtc.c
3093 +++ b/drivers/gpu/drm/arm/malidp_crtc.c
3094 @@ -483,7 +483,10 @@ static void malidp_crtc_reset(struct drm_crtc *crtc)
3095 if (crtc->state)
3096 malidp_crtc_destroy_state(crtc, crtc->state);
3097
3098 - __drm_atomic_helper_crtc_reset(crtc, &state->base);
3099 + if (state)
3100 + __drm_atomic_helper_crtc_reset(crtc, &state->base);
3101 + else
3102 + __drm_atomic_helper_crtc_reset(crtc, NULL);
3103 }
3104
3105 static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
3106 diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
3107 index 9e13e466e72c0..e7bf32f234d71 100644
3108 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
3109 +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
3110 @@ -1225,6 +1225,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
3111 return 0;
3112
3113 err_unregister_cec:
3114 + cec_unregister_adapter(adv7511->cec_adap);
3115 i2c_unregister_device(adv7511->i2c_cec);
3116 if (adv7511->cec_clk)
3117 clk_disable_unprepare(adv7511->cec_clk);
3118 diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
3119 index 1f26890a8da6e..c6a51d1c7ec9e 100644
3120 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
3121 +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
3122 @@ -1630,8 +1630,19 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
3123 struct drm_dp_aux_msg *msg)
3124 {
3125 struct analogix_dp_device *dp = to_dp(aux);
3126 + int ret;
3127 +
3128 + pm_runtime_get_sync(dp->dev);
3129 +
3130 + ret = analogix_dp_detect_hpd(dp);
3131 + if (ret)
3132 + goto out;
3133
3134 - return analogix_dp_transfer(dp, msg);
3135 + ret = analogix_dp_transfer(dp, msg);
3136 +out:
3137 + pm_runtime_put(dp->dev);
3138 +
3139 + return ret;
3140 }
3141
3142 struct analogix_dp_device *
3143 @@ -1696,8 +1707,10 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
3144 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3145
3146 dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
3147 - if (IS_ERR(dp->reg_base))
3148 - return ERR_CAST(dp->reg_base);
3149 + if (IS_ERR(dp->reg_base)) {
3150 + ret = PTR_ERR(dp->reg_base);
3151 + goto err_disable_clk;
3152 + }
3153
3154 dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
3155
3156 @@ -1709,7 +1722,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
3157 if (IS_ERR(dp->hpd_gpiod)) {
3158 dev_err(dev, "error getting HDP GPIO: %ld\n",
3159 PTR_ERR(dp->hpd_gpiod));
3160 - return ERR_CAST(dp->hpd_gpiod);
3161 + ret = PTR_ERR(dp->hpd_gpiod);
3162 + goto err_disable_clk;
3163 }
3164
3165 if (dp->hpd_gpiod) {
3166 @@ -1729,7 +1743,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
3167
3168 if (dp->irq == -ENXIO) {
3169 dev_err(&pdev->dev, "failed to get irq\n");
3170 - return ERR_PTR(-ENODEV);
3171 + ret = -ENODEV;
3172 + goto err_disable_clk;
3173 }
3174
3175 ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
3176 @@ -1738,11 +1753,15 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
3177 irq_flags, "analogix-dp", dp);
3178 if (ret) {
3179 dev_err(&pdev->dev, "failed to request irq\n");
3180 - return ERR_PTR(ret);
3181 + goto err_disable_clk;
3182 }
3183 disable_irq(dp->irq);
3184
3185 return dp;
3186 +
3187 +err_disable_clk:
3188 + clk_disable_unprepare(dp->clock);
3189 + return ERR_PTR(ret);
3190 }
3191 EXPORT_SYMBOL_GPL(analogix_dp_probe);
3192
3193 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
3194 index aeeab1b57aad3..2dc6dd6230d76 100644
3195 --- a/drivers/gpu/drm/drm_edid.c
3196 +++ b/drivers/gpu/drm/drm_edid.c
3197 @@ -1702,9 +1702,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
3198
3199 connector_bad_edid(connector, edid, edid[0x7e] + 1);
3200
3201 - edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
3202 - edid[0x7e] = valid_extensions;
3203 -
3204 new = kmalloc_array(valid_extensions + 1, EDID_LENGTH,
3205 GFP_KERNEL);
3206 if (!new)
3207 @@ -1721,6 +1718,9 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
3208 base += EDID_LENGTH;
3209 }
3210
3211 + new[EDID_LENGTH - 1] += new[0x7e] - valid_extensions;
3212 + new[0x7e] = valid_extensions;
3213 +
3214 kfree(edid);
3215 edid = new;
3216 }
3217 diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
3218 index d6ad60ab0d389..6bdebcca56905 100644
3219 --- a/drivers/gpu/drm/drm_plane.c
3220 +++ b/drivers/gpu/drm/drm_plane.c
3221 @@ -186,6 +186,13 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
3222 if (WARN_ON(config->num_total_plane >= 32))
3223 return -EINVAL;
3224
3225 + /*
3226 + * First driver to need more than 64 formats needs to fix this. Each
3227 + * format is encoded as a bit and the current code only supports a u64.
3228 + */
3229 + if (WARN_ON(format_count > 64))
3230 + return -EINVAL;
3231 +
3232 WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
3233 (!funcs->atomic_destroy_state ||
3234 !funcs->atomic_duplicate_state));
3235 @@ -207,13 +214,6 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
3236 return -ENOMEM;
3237 }
3238
3239 - /*
3240 - * First driver to need more than 64 formats needs to fix this. Each
3241 - * format is encoded as a bit and the current code only supports a u64.
3242 - */
3243 - if (WARN_ON(format_count > 64))
3244 - return -EINVAL;
3245 -
3246 if (format_modifiers) {
3247 const uint64_t *temp_modifiers = format_modifiers;
3248 while (*temp_modifiers++ != DRM_FORMAT_MOD_INVALID)
3249 diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
3250 index 707f5c1a58740..790cbb20aaeba 100644
3251 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
3252 +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
3253 @@ -289,6 +289,12 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
3254
3255 mutex_lock(&context->lock);
3256
3257 + /* Bail if the mapping has been reaped by another thread */
3258 + if (!mapping->context) {
3259 + mutex_unlock(&context->lock);
3260 + return;
3261 + }
3262 +
3263 /* If the vram node is on the mm, unmap and remove the node */
3264 if (mapping->vram_node.mm == &context->mm)
3265 etnaviv_iommu_remove_mapping(context, mapping);
3266 diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
3267 index 4256410535f06..65e67e12a0a1a 100644
3268 --- a/drivers/gpu/drm/gma500/psb_intel_display.c
3269 +++ b/drivers/gpu/drm/gma500/psb_intel_display.c
3270 @@ -532,14 +532,15 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
3271
3272 struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
3273 {
3274 - struct drm_crtc *crtc = NULL;
3275 + struct drm_crtc *crtc;
3276
3277 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3278 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
3279 +
3280 if (gma_crtc->pipe == pipe)
3281 - break;
3282 + return crtc;
3283 }
3284 - return crtc;
3285 + return NULL;
3286 }
3287
3288 int gma_connector_clones(struct drm_device *dev, int type_mask)
3289 diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
3290 index 2256c9789fc2c..f19264e91d4db 100644
3291 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c
3292 +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
3293 @@ -68,7 +68,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
3294 drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
3295 if (plane == &ipu_crtc->plane[0]->base)
3296 disable_full = true;
3297 - if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
3298 + if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
3299 disable_partial = true;
3300 }
3301
3302 diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
3303 index cb29b649fcdba..12bf937694977 100644
3304 --- a/drivers/gpu/drm/mediatek/mtk_cec.c
3305 +++ b/drivers/gpu/drm/mediatek/mtk_cec.c
3306 @@ -84,7 +84,7 @@ static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset,
3307 u32 tmp = readl(cec->regs + offset) & ~mask;
3308
3309 tmp |= val & mask;
3310 - writel(val, cec->regs + offset);
3311 + writel(tmp, cec->regs + offset);
3312 }
3313
3314 void mtk_cec_set_hpd_event(struct device *dev,
3315 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
3316 index df2656e579917..a3ae6c1d341bf 100644
3317 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
3318 +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
3319 @@ -891,6 +891,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
3320 BUG_ON(!node);
3321
3322 ret = a6xx_gmu_init(a6xx_gpu, node);
3323 + of_node_put(node);
3324 if (ret) {
3325 a6xx_destroy(&(a6xx_gpu->base.base));
3326 return ERR_PTR(ret);
3327 diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
3328 index 72f487692adbb..c08c67338d73d 100644
3329 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
3330 +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
3331 @@ -599,8 +599,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
3332 for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
3333 u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
3334
3335 - if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
3336 + if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
3337 dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
3338 + dpu_kms->hw_vbif[vbif_idx] = NULL;
3339 + }
3340 }
3341 }
3342
3343 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
3344 index 395146884a222..03d60eb092577 100644
3345 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
3346 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
3347 @@ -534,9 +534,15 @@ int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
3348 if (ret)
3349 return ret;
3350
3351 - mdp5_mixer_release(new_crtc_state->state, old_mixer);
3352 + ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
3353 + if (ret)
3354 + return ret;
3355 +
3356 if (old_r_mixer) {
3357 - mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
3358 + ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
3359 + if (ret)
3360 + return ret;
3361 +
3362 if (!need_right_mixer)
3363 pipeline->r_mixer = NULL;
3364 }
3365 @@ -903,8 +909,10 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
3366
3367 ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
3368 &mdp5_crtc->cursor.iova);
3369 - if (ret)
3370 + if (ret) {
3371 + drm_gem_object_put(cursor_bo);
3372 return -EINVAL;
3373 + }
3374
3375 pm_runtime_get_sync(&pdev->dev);
3376
3377 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
3378 index 77823ccdd0f8f..39d0082eedcca 100644
3379 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
3380 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
3381 @@ -698,9 +698,9 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
3382 pdev = mdp5_kms->pdev;
3383
3384 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
3385 - if (irq < 0) {
3386 - ret = irq;
3387 - DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
3388 + if (!irq) {
3389 + ret = -EINVAL;
3390 + DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
3391 goto fail;
3392 }
3393
3394 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
3395 index 954db683ae444..2536def2a0005 100644
3396 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
3397 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
3398 @@ -116,21 +116,28 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
3399 return 0;
3400 }
3401
3402 -void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
3403 +int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
3404 {
3405 struct mdp5_global_state *global_state = mdp5_get_global_state(s);
3406 - struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;
3407 + struct mdp5_hw_mixer_state *new_state;
3408
3409 if (!mixer)
3410 - return;
3411 + return 0;
3412 +
3413 + if (IS_ERR(global_state))
3414 + return PTR_ERR(global_state);
3415 +
3416 + new_state = &global_state->hwmixer;
3417
3418 if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
3419 - return;
3420 + return -EINVAL;
3421
3422 DBG("%s: release from crtc %s", mixer->name,
3423 new_state->hwmixer_to_crtc[mixer->idx]->name);
3424
3425 new_state->hwmixer_to_crtc[mixer->idx] = NULL;
3426 +
3427 + return 0;
3428 }
3429
3430 void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
3431 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
3432 index 43c9ba43ce185..545ee223b9d74 100644
3433 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
3434 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
3435 @@ -30,7 +30,7 @@ void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
3436 int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
3437 uint32_t caps, struct mdp5_hw_mixer **mixer,
3438 struct mdp5_hw_mixer **r_mixer);
3439 -void mdp5_mixer_release(struct drm_atomic_state *s,
3440 - struct mdp5_hw_mixer *mixer);
3441 +int mdp5_mixer_release(struct drm_atomic_state *s,
3442 + struct mdp5_hw_mixer *mixer);
3443
3444 #endif /* __MDP5_LM_H__ */
3445 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
3446 index ba6695963aa66..a4f5cb90f3e80 100644
3447 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
3448 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
3449 @@ -119,18 +119,23 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
3450 return 0;
3451 }
3452
3453 -void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
3454 +int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
3455 {
3456 struct msm_drm_private *priv = s->dev->dev_private;
3457 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
3458 struct mdp5_global_state *state = mdp5_get_global_state(s);
3459 - struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
3460 + struct mdp5_hw_pipe_state *new_state;
3461
3462 if (!hwpipe)
3463 - return;
3464 + return 0;
3465 +
3466 + if (IS_ERR(state))
3467 + return PTR_ERR(state);
3468 +
3469 + new_state = &state->hwpipe;
3470
3471 if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
3472 - return;
3473 + return -EINVAL;
3474
3475 DBG("%s: release from plane %s", hwpipe->name,
3476 new_state->hwpipe_to_plane[hwpipe->idx]->name);
3477 @@ -141,6 +146,8 @@ void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
3478 }
3479
3480 new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
3481 +
3482 + return 0;
3483 }
3484
3485 void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
3486 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
3487 index 9b26d0761bd4f..cca67938cab21 100644
3488 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
3489 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
3490 @@ -37,7 +37,7 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
3491 uint32_t caps, uint32_t blkcfg,
3492 struct mdp5_hw_pipe **hwpipe,
3493 struct mdp5_hw_pipe **r_hwpipe);
3494 -void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
3495 +int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
3496
3497 struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
3498 uint32_t reg_offset, uint32_t caps);
3499 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
3500 index da07993339702..0dc23c86747e8 100644
3501 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
3502 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
3503 @@ -393,12 +393,24 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
3504 mdp5_state->r_hwpipe = NULL;
3505
3506
3507 - mdp5_pipe_release(state->state, old_hwpipe);
3508 - mdp5_pipe_release(state->state, old_right_hwpipe);
3509 + ret = mdp5_pipe_release(state->state, old_hwpipe);
3510 + if (ret)
3511 + return ret;
3512 +
3513 + ret = mdp5_pipe_release(state->state, old_right_hwpipe);
3514 + if (ret)
3515 + return ret;
3516 +
3517 }
3518 } else {
3519 - mdp5_pipe_release(state->state, mdp5_state->hwpipe);
3520 - mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
3521 + ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
3522 + if (ret)
3523 + return ret;
3524 +
3525 + ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
3526 + if (ret)
3527 + return ret;
3528 +
3529 mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
3530 }
3531
3532 diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
3533 index 423c4ae2be10d..743142e15b4c1 100644
3534 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c
3535 +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
3536 @@ -1348,10 +1348,10 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
3537 dsi_get_bpp(msm_host->format) / 8;
3538
3539 len = dsi_cmd_dma_add(msm_host, msg);
3540 - if (!len) {
3541 + if (len < 0) {
3542 pr_err("%s: failed to add cmd type = 0x%x\n",
3543 __func__, msg->type);
3544 - return -EINVAL;
3545 + return len;
3546 }
3547
3548 /* for video mode, do not send cmds more than
3549 @@ -1370,10 +1370,14 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
3550 }
3551
3552 ret = dsi_cmd_dma_tx(msm_host, len);
3553 - if (ret < len) {
3554 - pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
3555 - __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
3556 - return -ECOMM;
3557 + if (ret < 0) {
3558 + pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n",
3559 + __func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret);
3560 + return ret;
3561 + } else if (ret < len) {
3562 + pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n",
3563 + __func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len);
3564 + return -EIO;
3565 }
3566
3567 return len;
3568 @@ -2099,9 +2103,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
3569 }
3570
3571 ret = dsi_cmds2buf_tx(msm_host, msg);
3572 - if (ret < msg->tx_len) {
3573 + if (ret < 0) {
3574 pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
3575 return ret;
3576 + } else if (ret < msg->tx_len) {
3577 + pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret);
3578 + return -ECOMM;
3579 }
3580
3581 /*
3582 diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
3583 index 1a7e77373407f..e4c9ff934e5b8 100644
3584 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c
3585 +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
3586 @@ -142,6 +142,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
3587 /* HDCP needs physical address of hdmi register */
3588 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3589 config->mmio_name);
3590 + if (!res) {
3591 + ret = -EINVAL;
3592 + goto fail;
3593 + }
3594 hdmi->mmio_phy_addr = res->start;
3595
3596 hdmi->qfprom_mmio = msm_ioremap(pdev,
3597 @@ -311,9 +315,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
3598 }
3599
3600 hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
3601 - if (hdmi->irq < 0) {
3602 - ret = hdmi->irq;
3603 - DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
3604 + if (!hdmi->irq) {
3605 + ret = -EINVAL;
3606 + DRM_DEV_ERROR(dev->dev, "failed to get irq\n");
3607 goto fail;
3608 }
3609
3610 diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
3611 index d7c8948427fe0..705a834ba1e66 100644
3612 --- a/drivers/gpu/drm/msm/msm_gem_prime.c
3613 +++ b/drivers/gpu/drm/msm/msm_gem_prime.c
3614 @@ -17,7 +17,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
3615 int npages = obj->size >> PAGE_SHIFT;
3616
3617 if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
3618 - return NULL;
3619 + return ERR_PTR(-ENOMEM);
3620
3621 return drm_prime_pages_to_sg(msm_obj->pages, npages);
3622 }
3623 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
3624 index 40e564524b7a9..93a49cbfb81d6 100644
3625 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
3626 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
3627 @@ -135,10 +135,10 @@ nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
3628
3629 list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
3630 if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
3631 - break;
3632 + return cstate;
3633 }
3634
3635 - return cstate;
3636 + return NULL;
3637 }
3638
3639 static struct nvkm_cstate *
3640 @@ -169,6 +169,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
3641 if (!list_empty(&pstate->list)) {
3642 cstate = nvkm_cstate_get(clk, pstate, cstatei);
3643 cstate = nvkm_cstate_find_best(clk, pstate, cstate);
3644 + if (!cstate)
3645 + return -EINVAL;
3646 } else {
3647 cstate = &pstate->base;
3648 }
3649 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
3650 index bc63f4cecf5d5..ca6ccd69424e0 100644
3651 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
3652 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
3653 @@ -477,6 +477,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
3654 native_mode->vdisplay != 0 &&
3655 native_mode->clock != 0) {
3656 mode = drm_mode_duplicate(dev, native_mode);
3657 + if (!mode)
3658 + return NULL;
3659 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
3660 drm_mode_set_name(mode);
3661
3662 @@ -491,6 +493,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
3663 * simpler.
3664 */
3665 mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
3666 + if (!mode)
3667 + return NULL;
3668 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
3669 DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
3670 }
3671 diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
3672 index 84e3decb17b1f..2e4e1933a43c1 100644
3673 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
3674 +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
3675 @@ -1848,10 +1848,10 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
3676 vop_win_init(vop);
3677
3678 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3679 - vop->len = resource_size(res);
3680 vop->regs = devm_ioremap_resource(dev, res);
3681 if (IS_ERR(vop->regs))
3682 return PTR_ERR(vop->regs);
3683 + vop->len = resource_size(res);
3684
3685 vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
3686 if (!vop->regsbak)
3687 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
3688 index 43d756b7810ee..67e23317c7ded 100644
3689 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
3690 +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
3691 @@ -58,11 +58,13 @@ struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
3692 int tilcdc_add_component_encoder(struct drm_device *ddev)
3693 {
3694 struct tilcdc_drm_private *priv = ddev->dev_private;
3695 - struct drm_encoder *encoder;
3696 + struct drm_encoder *encoder = NULL, *iter;
3697
3698 - list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head)
3699 - if (encoder->possible_crtcs & (1 << priv->crtc->index))
3700 + list_for_each_entry(iter, &ddev->mode_config.encoder_list, head)
3701 + if (iter->possible_crtcs & (1 << priv->crtc->index)) {
3702 + encoder = iter;
3703 break;
3704 + }
3705
3706 if (!encoder) {
3707 dev_err(ddev->dev, "%s: No suitable encoder found\n", __func__);
3708 diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
3709 index bf720206727f0..0d9263f65d95b 100644
3710 --- a/drivers/gpu/drm/vc4/vc4_txp.c
3711 +++ b/drivers/gpu/drm/vc4/vc4_txp.c
3712 @@ -285,12 +285,18 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
3713 if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
3714 return;
3715
3716 - ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI |
3717 + ctrl = TXP_GO | TXP_EI |
3718 VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
3719 VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
3720
3721 if (fb->format->has_alpha)
3722 ctrl |= TXP_ALPHA_ENABLE;
3723 + else
3724 + /*
3725 + * If TXP_ALPHA_ENABLE isn't set and TXP_ALPHA_INVERT is, the
3726 + * hardware will force the output padding to be 0xff.
3727 + */
3728 + ctrl |= TXP_ALPHA_INVERT;
3729
3730 gem = drm_fb_cma_get_gem_obj(fb, 0);
3731 TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
3732 diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
3733 index e622485ae8267..7e34307eb075e 100644
3734 --- a/drivers/gpu/drm/virtio/virtgpu_display.c
3735 +++ b/drivers/gpu/drm/virtio/virtgpu_display.c
3736 @@ -174,6 +174,8 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
3737 DRM_DEBUG("add mode: %dx%d\n", width, height);
3738 mode = drm_cvt_mode(connector->dev, width, height, 60,
3739 false, false, false);
3740 + if (!mode)
3741 + return count;
3742 mode->type |= DRM_MODE_TYPE_PREFERRED;
3743 drm_mode_probed_add(connector, mode);
3744 count++;
3745 diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
3746 index 74ad8bf98bfd5..e8c5e3ac9fff1 100644
3747 --- a/drivers/hid/hid-bigbenff.c
3748 +++ b/drivers/hid/hid-bigbenff.c
3749 @@ -347,6 +347,12 @@ static int bigben_probe(struct hid_device *hid,
3750 bigben->report = list_entry(report_list->next,
3751 struct hid_report, list);
3752
3753 + if (list_empty(&hid->inputs)) {
3754 + hid_err(hid, "no inputs found\n");
3755 + error = -ENODEV;
3756 + goto error_hw_stop;
3757 + }
3758 +
3759 hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
3760 set_bit(FF_RUMBLE, hidinput->input->ffbit);
3761
3762 diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
3763 index 0e8f424025fea..838673303f77f 100644
3764 --- a/drivers/hid/hid-elan.c
3765 +++ b/drivers/hid/hid-elan.c
3766 @@ -188,7 +188,6 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
3767 ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER);
3768 if (ret) {
3769 hid_err(hdev, "Failed to init elan MT slots: %d\n", ret);
3770 - input_free_device(input);
3771 return ret;
3772 }
3773
3774 @@ -200,7 +199,6 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
3775 hid_err(hdev, "Failed to register elan input device: %d\n",
3776 ret);
3777 input_mt_destroy_slots(input);
3778 - input_free_device(input);
3779 return ret;
3780 }
3781
3782 diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c
3783 index c2c66ceca1327..7d82f8d426bbc 100644
3784 --- a/drivers/hid/hid-led.c
3785 +++ b/drivers/hid/hid-led.c
3786 @@ -366,7 +366,7 @@ static const struct hidled_config hidled_configs[] = {
3787 .type = DREAM_CHEEKY,
3788 .name = "Dream Cheeky Webmail Notifier",
3789 .short_name = "dream_cheeky",
3790 - .max_brightness = 31,
3791 + .max_brightness = 63,
3792 .num_leds = 1,
3793 .report_size = 9,
3794 .report_type = RAW_REQUEST,
3795 diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
3796 index a2175394cd253..c73b93b9bb87d 100644
3797 --- a/drivers/hwmon/hwmon.c
3798 +++ b/drivers/hwmon/hwmon.c
3799 @@ -715,11 +715,12 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
3800
3801 /**
3802 * hwmon_device_register_with_info - register w/ hwmon
3803 - * @dev: the parent device
3804 - * @name: hwmon name attribute
3805 - * @drvdata: driver data to attach to created device
3806 - * @chip: pointer to hwmon chip information
3807 + * @dev: the parent device (mandatory)
3808 + * @name: hwmon name attribute (mandatory)
3809 + * @drvdata: driver data to attach to created device (optional)
3810 + * @chip: pointer to hwmon chip information (mandatory)
3811 * @extra_groups: pointer to list of additional non-standard attribute groups
3812 + * (optional)
3813 *
3814 * hwmon_device_unregister() must be called when the device is no
3815 * longer needed.
3816 @@ -732,13 +733,10 @@ hwmon_device_register_with_info(struct device *dev, const char *name,
3817 const struct hwmon_chip_info *chip,
3818 const struct attribute_group **extra_groups)
3819 {
3820 - if (!name)
3821 - return ERR_PTR(-EINVAL);
3822 -
3823 - if (chip && (!chip->ops || !chip->ops->is_visible || !chip->info))
3824 + if (!dev || !name || !chip)
3825 return ERR_PTR(-EINVAL);
3826
3827 - if (chip && !dev)
3828 + if (!chip->ops || !chip->ops->is_visible || !chip->info)
3829 return ERR_PTR(-EINVAL);
3830
3831 return __hwmon_device_register(dev, name, drvdata, chip, extra_groups);
3832 diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
3833 index 96544b348c273..ebe34fd6adb0a 100644
3834 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
3835 +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
3836 @@ -379,9 +379,10 @@ static int debug_notifier_call(struct notifier_block *self,
3837 int cpu;
3838 struct debug_drvdata *drvdata;
3839
3840 - mutex_lock(&debug_lock);
3841 + /* Bail out if we can't acquire the mutex or the functionality is off */
3842 + if (!mutex_trylock(&debug_lock))
3843 + return NOTIFY_DONE;
3844
3845 - /* Bail out if the functionality is disabled */
3846 if (!debug_enable)
3847 goto skip_dump;
3848
3849 @@ -400,7 +401,7 @@ static int debug_notifier_call(struct notifier_block *self,
3850
3851 skip_dump:
3852 mutex_unlock(&debug_lock);
3853 - return 0;
3854 + return NOTIFY_DONE;
3855 }
3856
3857 static struct notifier_block debug_notifier = {
3858 diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
3859 index a3fcc35ffd3b6..f74d5ad2f1faa 100644
3860 --- a/drivers/i2c/busses/i2c-at91-master.c
3861 +++ b/drivers/i2c/busses/i2c-at91-master.c
3862 @@ -609,6 +609,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
3863 unsigned int_addr_flag = 0;
3864 struct i2c_msg *m_start = msg;
3865 bool is_read;
3866 + u8 *dma_buf = NULL;
3867
3868 dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
3869
3870 @@ -656,7 +657,17 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
3871 dev->msg = m_start;
3872 dev->recv_len_abort = false;
3873
3874 + if (dev->use_dma) {
3875 + dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1);
3876 + if (!dma_buf) {
3877 + ret = -ENOMEM;
3878 + goto out;
3879 + }
3880 + dev->buf = dma_buf;
3881 + }
3882 +
3883 ret = at91_do_twi_transfer(dev);
3884 + i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret);
3885
3886 ret = (ret < 0) ? ret : num;
3887 out:
3888 diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
3889 index 17f0dd1f891e2..8a3a0991bc1c5 100644
3890 --- a/drivers/i2c/busses/i2c-cadence.c
3891 +++ b/drivers/i2c/busses/i2c-cadence.c
3892 @@ -506,7 +506,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
3893 static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
3894 struct i2c_adapter *adap)
3895 {
3896 - unsigned long time_left;
3897 + unsigned long time_left, msg_timeout;
3898 u32 reg;
3899
3900 id->p_msg = msg;
3901 @@ -531,8 +531,16 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
3902 else
3903 cdns_i2c_msend(id);
3904
3905 + /* Minimal time to execute this message */
3906 + msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk);
3907 + /* Plus some wiggle room */
3908 + msg_timeout += msecs_to_jiffies(500);
3909 +
3910 + if (msg_timeout < adap->timeout)
3911 + msg_timeout = adap->timeout;
3912 +
3913 /* Wait for the signal of completion */
3914 - time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
3915 + time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout);
3916 if (time_left == 0) {
3917 cdns_i2c_master_reset(adap);
3918 dev_err(id->adap.dev.parent,
3919 diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
3920 index 635cc1e7b1234..793a803919c52 100644
3921 --- a/drivers/iio/adc/ad7124.c
3922 +++ b/drivers/iio/adc/ad7124.c
3923 @@ -142,7 +142,6 @@ static const struct iio_chan_spec ad7124_channel_template = {
3924 .sign = 'u',
3925 .realbits = 24,
3926 .storagebits = 32,
3927 - .shift = 8,
3928 .endianness = IIO_BE,
3929 },
3930 };
3931 diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
3932 index a6c046575ec3a..5b79c8b9ccde1 100644
3933 --- a/drivers/iio/adc/sc27xx_adc.c
3934 +++ b/drivers/iio/adc/sc27xx_adc.c
3935 @@ -36,8 +36,8 @@
3936
3937 /* Bits and mask definition for SC27XX_ADC_CH_CFG register */
3938 #define SC27XX_ADC_CHN_ID_MASK GENMASK(4, 0)
3939 -#define SC27XX_ADC_SCALE_MASK GENMASK(10, 8)
3940 -#define SC27XX_ADC_SCALE_SHIFT 8
3941 +#define SC27XX_ADC_SCALE_MASK GENMASK(10, 9)
3942 +#define SC27XX_ADC_SCALE_SHIFT 9
3943
3944 /* Bits definitions for SC27XX_ADC_INT_EN registers */
3945 #define SC27XX_ADC_IRQ_EN BIT(0)
3946 @@ -103,14 +103,14 @@ static struct sc27xx_adc_linear_graph small_scale_graph = {
3947 100, 341,
3948 };
3949
3950 -static const struct sc27xx_adc_linear_graph big_scale_graph_calib = {
3951 - 4200, 856,
3952 - 3600, 733,
3953 +static const struct sc27xx_adc_linear_graph sc2731_big_scale_graph_calib = {
3954 + 4200, 850,
3955 + 3600, 728,
3956 };
3957
3958 -static const struct sc27xx_adc_linear_graph small_scale_graph_calib = {
3959 - 1000, 833,
3960 - 100, 80,
3961 +static const struct sc27xx_adc_linear_graph sc2731_small_scale_graph_calib = {
3962 + 1000, 838,
3963 + 100, 84,
3964 };
3965
3966 static int sc27xx_adc_get_calib_data(u32 calib_data, int calib_adc)
3967 @@ -130,11 +130,11 @@ static int sc27xx_adc_scale_calibration(struct sc27xx_adc_data *data,
3968 size_t len;
3969
3970 if (big_scale) {
3971 - calib_graph = &big_scale_graph_calib;
3972 + calib_graph = &sc2731_big_scale_graph_calib;
3973 graph = &big_scale_graph;
3974 cell_name = "big_scale_calib";
3975 } else {
3976 - calib_graph = &small_scale_graph_calib;
3977 + calib_graph = &sc2731_small_scale_graph_calib;
3978 graph = &small_scale_graph;
3979 cell_name = "small_scale_calib";
3980 }
3981 diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
3982 index bd72727fc417a..35ae801c4d35f 100644
3983 --- a/drivers/iio/adc/stmpe-adc.c
3984 +++ b/drivers/iio/adc/stmpe-adc.c
3985 @@ -61,7 +61,7 @@ struct stmpe_adc {
3986 static int stmpe_read_voltage(struct stmpe_adc *info,
3987 struct iio_chan_spec const *chan, int *val)
3988 {
3989 - long ret;
3990 + unsigned long ret;
3991
3992 mutex_lock(&info->lock);
3993
3994 @@ -79,7 +79,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
3995
3996 ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
3997
3998 - if (ret <= 0) {
3999 + if (ret == 0) {
4000 stmpe_reg_write(info->stmpe, STMPE_REG_ADC_INT_STA,
4001 STMPE_ADC_CH(info->channel));
4002 mutex_unlock(&info->lock);
4003 @@ -96,7 +96,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
4004 static int stmpe_read_temp(struct stmpe_adc *info,
4005 struct iio_chan_spec const *chan, int *val)
4006 {
4007 - long ret;
4008 + unsigned long ret;
4009
4010 mutex_lock(&info->lock);
4011
4012 @@ -114,7 +114,7 @@ static int stmpe_read_temp(struct stmpe_adc *info,
4013
4014 ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
4015
4016 - if (ret <= 0) {
4017 + if (ret == 0) {
4018 mutex_unlock(&info->lock);
4019 return -ETIMEDOUT;
4020 }
4021 diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
4022 index 364683783ae52..c25b0bc89b0c2 100644
4023 --- a/drivers/iio/common/st_sensors/st_sensors_core.c
4024 +++ b/drivers/iio/common/st_sensors/st_sensors_core.c
4025 @@ -76,16 +76,18 @@ st_sensors_match_odr_error:
4026
4027 int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
4028 {
4029 - int err;
4030 + int err = 0;
4031 struct st_sensor_odr_avl odr_out = {0, 0};
4032 struct st_sensor_data *sdata = iio_priv(indio_dev);
4033
4034 + mutex_lock(&sdata->odr_lock);
4035 +
4036 if (!sdata->sensor_settings->odr.mask)
4037 - return 0;
4038 + goto unlock_mutex;
4039
4040 err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
4041 if (err < 0)
4042 - goto st_sensors_match_odr_error;
4043 + goto unlock_mutex;
4044
4045 if ((sdata->sensor_settings->odr.addr ==
4046 sdata->sensor_settings->pw.addr) &&
4047 @@ -108,7 +110,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
4048 if (err >= 0)
4049 sdata->odr = odr_out.hz;
4050
4051 -st_sensors_match_odr_error:
4052 +unlock_mutex:
4053 + mutex_unlock(&sdata->odr_lock);
4054 +
4055 return err;
4056 }
4057 EXPORT_SYMBOL(st_sensors_set_odr);
4058 @@ -384,6 +388,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
4059 struct st_sensors_platform_data *of_pdata;
4060 int err = 0;
4061
4062 + mutex_init(&sdata->odr_lock);
4063 +
4064 /* If OF/DT pdata exists, it will take precedence of anything else */
4065 of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata);
4066 if (of_pdata)
4067 @@ -575,18 +581,24 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
4068 err = -EBUSY;
4069 goto out;
4070 } else {
4071 + mutex_lock(&sdata->odr_lock);
4072 err = st_sensors_set_enable(indio_dev, true);
4073 - if (err < 0)
4074 + if (err < 0) {
4075 + mutex_unlock(&sdata->odr_lock);
4076 goto out;
4077 + }
4078
4079 msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
4080 err = st_sensors_read_axis_data(indio_dev, ch, val);
4081 - if (err < 0)
4082 + if (err < 0) {
4083 + mutex_unlock(&sdata->odr_lock);
4084 goto out;
4085 + }
4086
4087 *val = *val >> ch->scan_type.shift;
4088
4089 err = st_sensors_set_enable(indio_dev, false);
4090 + mutex_unlock(&sdata->odr_lock);
4091 }
4092 out:
4093 mutex_unlock(&indio_dev->mlock);
4094 diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
4095 index 6cb02299a2152..18cfe1cb7a408 100644
4096 --- a/drivers/iio/dummy/iio_simple_dummy.c
4097 +++ b/drivers/iio/dummy/iio_simple_dummy.c
4098 @@ -568,10 +568,9 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
4099 struct iio_sw_device *swd;
4100
4101 swd = kzalloc(sizeof(*swd), GFP_KERNEL);
4102 - if (!swd) {
4103 - ret = -ENOMEM;
4104 - goto error_kzalloc;
4105 - }
4106 + if (!swd)
4107 + return ERR_PTR(-ENOMEM);
4108 +
4109 /*
4110 * Allocate an IIO device.
4111 *
4112 @@ -583,7 +582,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
4113 indio_dev = iio_device_alloc(sizeof(*st));
4114 if (!indio_dev) {
4115 ret = -ENOMEM;
4116 - goto error_ret;
4117 + goto error_free_swd;
4118 }
4119
4120 st = iio_priv(indio_dev);
4121 @@ -614,6 +613,10 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
4122 * indio_dev->name = spi_get_device_id(spi)->name;
4123 */
4124 indio_dev->name = kstrdup(name, GFP_KERNEL);
4125 + if (!indio_dev->name) {
4126 + ret = -ENOMEM;
4127 + goto error_free_device;
4128 + }
4129
4130 /* Provide description of available channels */
4131 indio_dev->channels = iio_dummy_channels;
4132 @@ -630,7 +633,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
4133
4134 ret = iio_simple_dummy_events_register(indio_dev);
4135 if (ret < 0)
4136 - goto error_free_device;
4137 + goto error_free_name;
4138
4139 ret = iio_simple_dummy_configure_buffer(indio_dev);
4140 if (ret < 0)
4141 @@ -647,11 +650,12 @@ error_unconfigure_buffer:
4142 iio_simple_dummy_unconfigure_buffer(indio_dev);
4143 error_unregister_events:
4144 iio_simple_dummy_events_unregister(indio_dev);
4145 +error_free_name:
4146 + kfree(indio_dev->name);
4147 error_free_device:
4148 iio_device_free(indio_dev);
4149 -error_ret:
4150 +error_free_swd:
4151 kfree(swd);
4152 -error_kzalloc:
4153 return ERR_PTR(ret);
4154 }
4155
4156 diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
4157 index 89e1dfd07a1bf..8c7ba7bad42b9 100644
4158 --- a/drivers/infiniband/hw/hfi1/file_ops.c
4159 +++ b/drivers/infiniband/hw/hfi1/file_ops.c
4160 @@ -308,6 +308,8 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
4161 unsigned long dim = from->nr_segs;
4162 int idx;
4163
4164 + if (!HFI1_CAP_IS_KSET(SDMA))
4165 + return -EINVAL;
4166 idx = srcu_read_lock(&fd->pq_srcu);
4167 pq = srcu_dereference(fd->pq, &fd->pq_srcu);
4168 if (!cq || !pq) {
4169 diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
4170 index 85583f51124e2..d698c26282ea1 100644
4171 --- a/drivers/infiniband/hw/hfi1/init.c
4172 +++ b/drivers/infiniband/hw/hfi1/init.c
4173 @@ -543,7 +543,7 @@ void set_link_ipg(struct hfi1_pportdata *ppd)
4174 u16 shift, mult;
4175 u64 src;
4176 u32 current_egress_rate; /* Mbits /sec */
4177 - u32 max_pkt_time;
4178 + u64 max_pkt_time;
4179 /*
4180 * max_pkt_time is the maximum packet egress time in units
4181 * of the fabric clock period 1/(805 MHz).
4182 diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
4183 index 248be21acdbed..2a684fc6056e1 100644
4184 --- a/drivers/infiniband/hw/hfi1/sdma.c
4185 +++ b/drivers/infiniband/hw/hfi1/sdma.c
4186 @@ -1329,11 +1329,13 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
4187 kvfree(sde->tx_ring);
4188 sde->tx_ring = NULL;
4189 }
4190 - spin_lock_irq(&dd->sde_map_lock);
4191 - sdma_map_free(rcu_access_pointer(dd->sdma_map));
4192 - RCU_INIT_POINTER(dd->sdma_map, NULL);
4193 - spin_unlock_irq(&dd->sde_map_lock);
4194 - synchronize_rcu();
4195 + if (rcu_access_pointer(dd->sdma_map)) {
4196 + spin_lock_irq(&dd->sde_map_lock);
4197 + sdma_map_free(rcu_access_pointer(dd->sdma_map));
4198 + RCU_INIT_POINTER(dd->sdma_map, NULL);
4199 + spin_unlock_irq(&dd->sde_map_lock);
4200 + synchronize_rcu();
4201 + }
4202 kfree(dd->per_sdma);
4203 dd->per_sdma = NULL;
4204
4205 diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
4206 index 48e8612c1bc8d..e97c13967174c 100644
4207 --- a/drivers/infiniband/sw/rdmavt/qp.c
4208 +++ b/drivers/infiniband/sw/rdmavt/qp.c
4209 @@ -2812,7 +2812,7 @@ void rvt_qp_iter(struct rvt_dev_info *rdi,
4210 EXPORT_SYMBOL(rvt_qp_iter);
4211
4212 /*
4213 - * This should be called with s_lock held.
4214 + * This should be called with s_lock and r_lock held.
4215 */
4216 void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
4217 enum ib_wc_status status)
4218 @@ -3171,7 +3171,9 @@ send_comp:
4219 rvp->n_loop_pkts++;
4220 flush_send:
4221 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
4222 + spin_lock(&sqp->r_lock);
4223 rvt_send_complete(sqp, wqe, send_status);
4224 + spin_unlock(&sqp->r_lock);
4225 if (local_ops) {
4226 atomic_dec(&sqp->local_ops_pending);
4227 local_ops = 0;
4228 @@ -3225,7 +3227,9 @@ serr:
4229 spin_unlock_irqrestore(&qp->r_lock, flags);
4230 serr_no_r_lock:
4231 spin_lock_irqsave(&sqp->s_lock, flags);
4232 + spin_lock(&sqp->r_lock);
4233 rvt_send_complete(sqp, wqe, send_status);
4234 + spin_unlock(&sqp->r_lock);
4235 if (sqp->ibqp.qp_type == IB_QPT_RC) {
4236 int lastwqe;
4237
4238 diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
4239 index a4d6e0b7901e9..87702478eb99b 100644
4240 --- a/drivers/infiniband/sw/rxe/rxe_req.c
4241 +++ b/drivers/infiniband/sw/rxe/rxe_req.c
4242 @@ -680,7 +680,7 @@ next_wqe:
4243 opcode = next_opcode(qp, wqe, wqe->wr.opcode);
4244 if (unlikely(opcode < 0)) {
4245 wqe->status = IB_WC_LOC_QP_OP_ERR;
4246 - goto exit;
4247 + goto err;
4248 }
4249
4250 mask = rxe_opcode[opcode].mask;
4251 diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
4252 index fe43e5557ed72..cdcb7737c46aa 100644
4253 --- a/drivers/input/misc/sparcspkr.c
4254 +++ b/drivers/input/misc/sparcspkr.c
4255 @@ -205,6 +205,7 @@ static int bbc_beep_probe(struct platform_device *op)
4256
4257 info = &state->u.bbc;
4258 info->clock_freq = of_getintprop_default(dp, "clock-frequency", 0);
4259 + of_node_put(dp);
4260 if (!info->clock_freq)
4261 goto out_free;
4262
4263 diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
4264 index 59a14505b9cd1..ca150618d32f1 100644
4265 --- a/drivers/input/mouse/bcm5974.c
4266 +++ b/drivers/input/mouse/bcm5974.c
4267 @@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface,
4268 if (!dev->tp_data)
4269 goto err_free_bt_buffer;
4270
4271 - if (dev->bt_urb)
4272 + if (dev->bt_urb) {
4273 usb_fill_int_urb(dev->bt_urb, udev,
4274 usb_rcvintpipe(udev, cfg->bt_ep),
4275 dev->bt_data, dev->cfg.bt_datalen,
4276 bcm5974_irq_button, dev, 1);
4277
4278 + dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
4279 + }
4280 +
4281 usb_fill_int_urb(dev->tp_urb, udev,
4282 usb_rcvintpipe(udev, cfg->tp_ep),
4283 dev->tp_data, dev->cfg.tp_datalen,
4284 bcm5974_irq_trackpad, dev, 1);
4285
4286 + dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
4287 +
4288 /* create bcm5974 device */
4289 usb_make_path(udev, dev->phys, sizeof(dev->phys));
4290 strlcat(dev->phys, "/input0", sizeof(dev->phys));
4291 diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
4292 index be1dd504d5b1d..20bc2279a2f24 100644
4293 --- a/drivers/input/touchscreen/stmfts.c
4294 +++ b/drivers/input/touchscreen/stmfts.c
4295 @@ -337,13 +337,15 @@ static int stmfts_input_open(struct input_dev *dev)
4296 struct stmfts_data *sdata = input_get_drvdata(dev);
4297 int err;
4298
4299 - err = pm_runtime_get_sync(&sdata->client->dev);
4300 - if (err < 0)
4301 - goto out;
4302 + err = pm_runtime_resume_and_get(&sdata->client->dev);
4303 + if (err)
4304 + return err;
4305
4306 err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
4307 - if (err)
4308 - goto out;
4309 + if (err) {
4310 + pm_runtime_put_sync(&sdata->client->dev);
4311 + return err;
4312 + }
4313
4314 mutex_lock(&sdata->mutex);
4315 sdata->running = true;
4316 @@ -366,9 +368,7 @@ static int stmfts_input_open(struct input_dev *dev)
4317 "failed to enable touchkey\n");
4318 }
4319
4320 -out:
4321 - pm_runtime_put_noidle(&sdata->client->dev);
4322 - return err;
4323 + return 0;
4324 }
4325
4326 static void stmfts_input_close(struct input_dev *dev)
4327 diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
4328 index 7502fa84e2537..82d0083104182 100644
4329 --- a/drivers/iommu/amd_iommu_init.c
4330 +++ b/drivers/iommu/amd_iommu_init.c
4331 @@ -83,7 +83,7 @@
4332 #define ACPI_DEVFLAG_LINT1 0x80
4333 #define ACPI_DEVFLAG_ATSDIS 0x10000000
4334
4335 -#define LOOP_TIMEOUT 100000
4336 +#define LOOP_TIMEOUT 2000000
4337 /*
4338 * ACPI table definitions
4339 *
4340 diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
4341 index be99d408cf35d..cba0097eba39c 100644
4342 --- a/drivers/iommu/msm_iommu.c
4343 +++ b/drivers/iommu/msm_iommu.c
4344 @@ -636,16 +636,19 @@ static void insert_iommu_master(struct device *dev,
4345 static int qcom_iommu_of_xlate(struct device *dev,
4346 struct of_phandle_args *spec)
4347 {
4348 - struct msm_iommu_dev *iommu;
4349 + struct msm_iommu_dev *iommu = NULL, *iter;
4350 unsigned long flags;
4351 int ret = 0;
4352
4353 spin_lock_irqsave(&msm_iommu_lock, flags);
4354 - list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
4355 - if (iommu->dev->of_node == spec->np)
4356 + list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
4357 + if (iter->dev->of_node == spec->np) {
4358 + iommu = iter;
4359 break;
4360 + }
4361 + }
4362
4363 - if (!iommu || iommu->dev->of_node != spec->np) {
4364 + if (!iommu) {
4365 ret = -ENODEV;
4366 goto fail;
4367 }
4368 diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
4369 index c2f6c78fee444..18d7c818a174c 100644
4370 --- a/drivers/iommu/mtk_iommu.c
4371 +++ b/drivers/iommu/mtk_iommu.c
4372 @@ -769,8 +769,7 @@ static int mtk_iommu_remove(struct platform_device *pdev)
4373 iommu_device_sysfs_remove(&data->iommu);
4374 iommu_device_unregister(&data->iommu);
4375
4376 - if (iommu_present(&platform_bus_type))
4377 - bus_set_iommu(&platform_bus_type, NULL);
4378 + list_del(&data->list);
4379
4380 clk_disable_unprepare(data->bclk);
4381 devm_free_irq(&pdev->dev, data->irq, data);
4382 diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
4383 index 5849ac5a2ad3b..0fd428db3aa4d 100644
4384 --- a/drivers/irqchip/irq-armada-370-xp.c
4385 +++ b/drivers/irqchip/irq-armada-370-xp.c
4386 @@ -392,7 +392,16 @@ static void armada_xp_mpic_smp_cpu_init(void)
4387
4388 static void armada_xp_mpic_perf_init(void)
4389 {
4390 - unsigned long cpuid = cpu_logical_map(smp_processor_id());
4391 + unsigned long cpuid;
4392 +
4393 + /*
4394 + * This Performance Counter Overflow interrupt is specific for
4395 + * Armada 370 and XP. It is not available on Armada 375, 38x and 39x.
4396 + */
4397 + if (!of_machine_is_compatible("marvell,armada-370-xp"))
4398 + return;
4399 +
4400 + cpuid = cpu_logical_map(smp_processor_id());
4401
4402 /* Enable Performance Counter Overflow interrupts */
4403 writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
4404 diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
4405 index 8d591c179f812..3d3210828e9bf 100644
4406 --- a/drivers/irqchip/irq-aspeed-i2c-ic.c
4407 +++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
4408 @@ -79,8 +79,8 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node,
4409 }
4410
4411 i2c_ic->parent_irq = irq_of_parse_and_map(node, 0);
4412 - if (i2c_ic->parent_irq < 0) {
4413 - ret = i2c_ic->parent_irq;
4414 + if (!i2c_ic->parent_irq) {
4415 + ret = -EINVAL;
4416 goto err_iounmap;
4417 }
4418
4419 diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
4420 index abd011fcecf4a..c7db617e1a2f6 100644
4421 --- a/drivers/irqchip/irq-sni-exiu.c
4422 +++ b/drivers/irqchip/irq-sni-exiu.c
4423 @@ -37,11 +37,26 @@ struct exiu_irq_data {
4424 u32 spi_base;
4425 };
4426
4427 -static void exiu_irq_eoi(struct irq_data *d)
4428 +static void exiu_irq_ack(struct irq_data *d)
4429 {
4430 struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
4431
4432 writel(BIT(d->hwirq), data->base + EIREQCLR);
4433 +}
4434 +
4435 +static void exiu_irq_eoi(struct irq_data *d)
4436 +{
4437 + struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
4438 +
4439 + /*
4440 + * Level triggered interrupts are latched and must be cleared during
4441 + * EOI or the interrupt will be jammed on. Of course if a level
4442 + * triggered interrupt is still asserted then the write will not clear
4443 + * the interrupt.
4444 + */
4445 + if (irqd_is_level_type(d))
4446 + writel(BIT(d->hwirq), data->base + EIREQCLR);
4447 +
4448 irq_chip_eoi_parent(d);
4449 }
4450
4451 @@ -91,10 +106,13 @@ static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
4452 writel_relaxed(val, data->base + EILVL);
4453
4454 val = readl_relaxed(data->base + EIEDG);
4455 - if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
4456 + if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) {
4457 val &= ~BIT(d->hwirq);
4458 - else
4459 + irq_set_handler_locked(d, handle_fasteoi_irq);
4460 + } else {
4461 val |= BIT(d->hwirq);
4462 + irq_set_handler_locked(d, handle_fasteoi_ack_irq);
4463 + }
4464 writel_relaxed(val, data->base + EIEDG);
4465
4466 writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
4467 @@ -104,6 +122,7 @@ static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
4468
4469 static struct irq_chip exiu_irq_chip = {
4470 .name = "EXIU",
4471 + .irq_ack = exiu_irq_ack,
4472 .irq_eoi = exiu_irq_eoi,
4473 .irq_enable = exiu_irq_enable,
4474 .irq_mask = exiu_irq_mask,
4475 diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
4476 index 27933338f7b36..8c581c985aa7d 100644
4477 --- a/drivers/irqchip/irq-xtensa-mx.c
4478 +++ b/drivers/irqchip/irq-xtensa-mx.c
4479 @@ -151,14 +151,25 @@ static struct irq_chip xtensa_mx_irq_chip = {
4480 .irq_set_affinity = xtensa_mx_irq_set_affinity,
4481 };
4482
4483 +static void __init xtensa_mx_init_common(struct irq_domain *root_domain)
4484 +{
4485 + unsigned int i;
4486 +
4487 + irq_set_default_host(root_domain);
4488 + secondary_init_irq();
4489 +
4490 + /* Initialize default IRQ routing to CPU 0 */
4491 + for (i = 0; i < XCHAL_NUM_EXTINTERRUPTS; ++i)
4492 + set_er(1, MIROUT(i));
4493 +}
4494 +
4495 int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
4496 {
4497 struct irq_domain *root_domain =
4498 irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
4499 &xtensa_mx_irq_domain_ops,
4500 &xtensa_mx_irq_chip);
4501 - irq_set_default_host(root_domain);
4502 - secondary_init_irq();
4503 + xtensa_mx_init_common(root_domain);
4504 return 0;
4505 }
4506
4507 @@ -168,8 +179,7 @@ static int __init xtensa_mx_init(struct device_node *np,
4508 struct irq_domain *root_domain =
4509 irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
4510 &xtensa_mx_irq_chip);
4511 - irq_set_default_host(root_domain);
4512 - secondary_init_irq();
4513 + xtensa_mx_init_common(root_domain);
4514 return 0;
4515 }
4516 IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
4517 diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
4518 index 574e122ae1050..b5a534206eddd 100644
4519 --- a/drivers/macintosh/Kconfig
4520 +++ b/drivers/macintosh/Kconfig
4521 @@ -44,6 +44,7 @@ config ADB_IOP
4522 config ADB_CUDA
4523 bool "Support for Cuda/Egret based Macs and PowerMacs"
4524 depends on (ADB || PPC_PMAC) && !PPC_PMAC64
4525 + select RTC_LIB
4526 help
4527 This provides support for Cuda/Egret based Macintosh and
4528 Power Macintosh systems. This includes most m68k based Macs,
4529 @@ -57,6 +58,7 @@ config ADB_CUDA
4530 config ADB_PMU
4531 bool "Support for PMU based PowerMacs and PowerBooks"
4532 depends on PPC_PMAC || MAC
4533 + select RTC_LIB
4534 help
4535 On PowerBooks, iBooks, and recent iMacs and Power Macintoshes, the
4536 PMU is an embedded microprocessor whose primary function is to
4537 @@ -67,6 +69,10 @@ config ADB_PMU
4538 this device; you should do so if your machine is one of those
4539 mentioned above.
4540
4541 +config ADB_PMU_EVENT
4542 + def_bool y
4543 + depends on ADB_PMU && INPUT=y
4544 +
4545 config ADB_PMU_LED
4546 bool "Support for the Power/iBook front LED"
4547 depends on PPC_PMAC && ADB_PMU
4548 diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
4549 index 49819b1b6f201..712edcb3e0b08 100644
4550 --- a/drivers/macintosh/Makefile
4551 +++ b/drivers/macintosh/Makefile
4552 @@ -12,7 +12,8 @@ obj-$(CONFIG_MAC_EMUMOUSEBTN) += mac_hid.o
4553 obj-$(CONFIG_INPUT_ADBHID) += adbhid.o
4554 obj-$(CONFIG_ANSLCD) += ans-lcd.o
4555
4556 -obj-$(CONFIG_ADB_PMU) += via-pmu.o via-pmu-event.o
4557 +obj-$(CONFIG_ADB_PMU) += via-pmu.o
4558 +obj-$(CONFIG_ADB_PMU_EVENT) += via-pmu-event.o
4559 obj-$(CONFIG_ADB_PMU_LED) += via-pmu-led.o
4560 obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o
4561 obj-$(CONFIG_ADB_CUDA) += via-cuda.o
4562 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
4563 index 21d532a78fa47..d8b6ac2ec313f 100644
4564 --- a/drivers/macintosh/via-pmu.c
4565 +++ b/drivers/macintosh/via-pmu.c
4566 @@ -1464,7 +1464,7 @@ next:
4567 pmu_pass_intr(data, len);
4568 /* len == 6 is probably a bad check. But how do I
4569 * know what PMU versions send what events here? */
4570 - if (len == 6) {
4571 + if (IS_ENABLED(CONFIG_ADB_PMU_EVENT) && len == 6) {
4572 via_pmu_event(PMU_EVT_POWER, !!(data[1]&8));
4573 via_pmu_event(PMU_EVT_LID, data[1]&1);
4574 }
4575 diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
4576 index 3e7d4b20ab34f..4229b9b5da98f 100644
4577 --- a/drivers/mailbox/mailbox.c
4578 +++ b/drivers/mailbox/mailbox.c
4579 @@ -82,11 +82,11 @@ static void msg_submit(struct mbox_chan *chan)
4580 exit:
4581 spin_unlock_irqrestore(&chan->lock, flags);
4582
4583 - /* kick start the timer immediately to avoid delays */
4584 if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
4585 - /* but only if not already active */
4586 - if (!hrtimer_active(&chan->mbox->poll_hrt))
4587 - hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
4588 + /* kick start the timer immediately to avoid delays */
4589 + spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
4590 + hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
4591 + spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
4592 }
4593 }
4594
4595 @@ -120,20 +120,26 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
4596 container_of(hrtimer, struct mbox_controller, poll_hrt);
4597 bool txdone, resched = false;
4598 int i;
4599 + unsigned long flags;
4600
4601 for (i = 0; i < mbox->num_chans; i++) {
4602 struct mbox_chan *chan = &mbox->chans[i];
4603
4604 if (chan->active_req && chan->cl) {
4605 - resched = true;
4606 txdone = chan->mbox->ops->last_tx_done(chan);
4607 if (txdone)
4608 tx_tick(chan, 0);
4609 + else
4610 + resched = true;
4611 }
4612 }
4613
4614 if (resched) {
4615 - hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
4616 + spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
4617 + if (!hrtimer_is_queued(hrtimer))
4618 + hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
4619 + spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
4620 +
4621 return HRTIMER_RESTART;
4622 }
4623 return HRTIMER_NORESTART;
4624 @@ -500,6 +506,7 @@ int mbox_controller_register(struct mbox_controller *mbox)
4625 hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
4626 HRTIMER_MODE_REL);
4627 mbox->poll_hrt.function = txdone_hrtimer;
4628 + spin_lock_init(&mbox->poll_hrt_lock);
4629 }
4630
4631 for (i = 0; i < mbox->num_chans; i++) {
4632 diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
4633 index 4045ae748f17e..f5d24620d8182 100644
4634 --- a/drivers/md/bcache/request.c
4635 +++ b/drivers/md/bcache/request.c
4636 @@ -1119,6 +1119,12 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
4637 * which would call closure_get(&dc->disk.cl)
4638 */
4639 ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
4640 + if (!ddip) {
4641 + bio->bi_status = BLK_STS_RESOURCE;
4642 + bio->bi_end_io(bio);
4643 + return;
4644 + }
4645 +
4646 ddip->d = d;
4647 ddip->start_time = jiffies;
4648 ddip->bi_end_io = bio->bi_end_io;
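
The bcache hunk adds the missing NULL check after kzalloc(): on allocation failure the bio is now finished with BLK_STS_RESOURCE instead of the driver dereferencing a NULL ddip. A minimal sketch of that "fail the request cleanly when the per-request allocation fails" shape; the struct and the status constant are placeholders, not the block-layer API:

#include <stdio.h>
#include <stdlib.h>

#define STS_OK        0
#define STS_RESOURCE  9         /* placeholder for BLK_STS_RESOURCE */

struct request {
        int status;
        void (*end_io)(struct request *rq);
};

static void request_done(struct request *rq)
{
        printf("request completed, status=%d\n", rq->status);
}

static void submit(struct request *rq)
{
        void *priv = calloc(1, 64);     /* per-request bookkeeping */

        if (!priv) {
                /* Out of memory: finish the request with a resource error
                 * instead of dereferencing the NULL pointer. */
                rq->status = STS_RESOURCE;
                rq->end_io(rq);
                return;
        }

        rq->status = STS_OK;
        rq->end_io(rq);
        free(priv);
}

int main(void)
{
        struct request rq = { .status = -1, .end_io = request_done };

        submit(&rq);
        return 0;
}
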
4649 diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
4650 index d7eef5292ae2f..a95e20c3d0d4f 100644
4651 --- a/drivers/md/md-bitmap.c
4652 +++ b/drivers/md/md-bitmap.c
4653 @@ -642,14 +642,6 @@ re_read:
4654 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
4655 write_behind = le32_to_cpu(sb->write_behind);
4656 sectors_reserved = le32_to_cpu(sb->sectors_reserved);
4657 - /* Setup nodes/clustername only if bitmap version is
4658 - * cluster-compatible
4659 - */
4660 - if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
4661 - nodes = le32_to_cpu(sb->nodes);
4662 - strlcpy(bitmap->mddev->bitmap_info.cluster_name,
4663 - sb->cluster_name, 64);
4664 - }
4665
4666 /* verify that the bitmap-specific fields are valid */
4667 if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
4668 @@ -671,6 +663,16 @@ re_read:
4669 goto out;
4670 }
4671
4672 + /*
4673 + * Setup nodes/clustername only if bitmap version is
4674 + * cluster-compatible
4675 + */
4676 + if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
4677 + nodes = le32_to_cpu(sb->nodes);
4678 + strlcpy(bitmap->mddev->bitmap_info.cluster_name,
4679 + sb->cluster_name, 64);
4680 + }
4681 +
4682 /* keep the array size field of the bitmap superblock up to date */
4683 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
4684
4685 @@ -703,9 +705,9 @@ re_read:
4686
4687 out:
4688 kunmap_atomic(sb);
4689 - /* Assigning chunksize is required for "re_read" */
4690 - bitmap->mddev->bitmap_info.chunksize = chunksize;
4691 if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
4692 + /* Assigning chunksize is required for "re_read" */
4693 + bitmap->mddev->bitmap_info.chunksize = chunksize;
4694 err = md_setup_cluster(bitmap->mddev, nodes);
4695 if (err) {
4696 pr_warn("%s: Could not setup cluster service (%d)\n",
4697 @@ -716,18 +718,18 @@ out:
4698 goto re_read;
4699 }
4700
4701 -
4702 out_no_sb:
4703 - if (test_bit(BITMAP_STALE, &bitmap->flags))
4704 - bitmap->events_cleared = bitmap->mddev->events;
4705 - bitmap->mddev->bitmap_info.chunksize = chunksize;
4706 - bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
4707 - bitmap->mddev->bitmap_info.max_write_behind = write_behind;
4708 - bitmap->mddev->bitmap_info.nodes = nodes;
4709 - if (bitmap->mddev->bitmap_info.space == 0 ||
4710 - bitmap->mddev->bitmap_info.space > sectors_reserved)
4711 - bitmap->mddev->bitmap_info.space = sectors_reserved;
4712 - if (err) {
4713 + if (err == 0) {
4714 + if (test_bit(BITMAP_STALE, &bitmap->flags))
4715 + bitmap->events_cleared = bitmap->mddev->events;
4716 + bitmap->mddev->bitmap_info.chunksize = chunksize;
4717 + bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
4718 + bitmap->mddev->bitmap_info.max_write_behind = write_behind;
4719 + bitmap->mddev->bitmap_info.nodes = nodes;
4720 + if (bitmap->mddev->bitmap_info.space == 0 ||
4721 + bitmap->mddev->bitmap_info.space > sectors_reserved)
4722 + bitmap->mddev->bitmap_info.space = sectors_reserved;
4723 + } else {
4724 md_bitmap_print_sb(bitmap);
4725 if (bitmap->cluster_slot < 0)
4726 md_cluster_stop(bitmap->mddev);
4727 diff --git a/drivers/md/md.c b/drivers/md/md.c
4728 index c178b2f406de3..11fd3b32b5621 100644
4729 --- a/drivers/md/md.c
4730 +++ b/drivers/md/md.c
4731 @@ -2532,14 +2532,16 @@ static void sync_sbs(struct mddev *mddev, int nospares)
4732
4733 static bool does_sb_need_changing(struct mddev *mddev)
4734 {
4735 - struct md_rdev *rdev;
4736 + struct md_rdev *rdev = NULL, *iter;
4737 struct mdp_superblock_1 *sb;
4738 int role;
4739
4740 /* Find a good rdev */
4741 - rdev_for_each(rdev, mddev)
4742 - if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
4743 + rdev_for_each(iter, mddev)
4744 + if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
4745 + rdev = iter;
4746 break;
4747 + }
4748
4749 /* No good device found. */
4750 if (!rdev)
4751 @@ -7775,17 +7777,22 @@ EXPORT_SYMBOL(md_register_thread);
4752
4753 void md_unregister_thread(struct md_thread **threadp)
4754 {
4755 - struct md_thread *thread = *threadp;
4756 - if (!thread)
4757 - return;
4758 - pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
4759 - /* Locking ensures that mddev_unlock does not wake_up a
4760 + struct md_thread *thread;
4761 +
4762 + /*
4763 + * Locking ensures that mddev_unlock does not wake_up a
4764 * non-existent thread
4765 */
4766 spin_lock(&pers_lock);
4767 + thread = *threadp;
4768 + if (!thread) {
4769 + spin_unlock(&pers_lock);
4770 + return;
4771 + }
4772 *threadp = NULL;
4773 spin_unlock(&pers_lock);
4774
4775 + pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
4776 kthread_stop(thread->tsk);
4777 kfree(thread);
4778 }
4779 @@ -9529,16 +9536,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
4780
4781 void md_reload_sb(struct mddev *mddev, int nr)
4782 {
4783 - struct md_rdev *rdev;
4784 + struct md_rdev *rdev = NULL, *iter;
4785 int err;
4786
4787 /* Find the rdev */
4788 - rdev_for_each_rcu(rdev, mddev) {
4789 - if (rdev->desc_nr == nr)
4790 + rdev_for_each_rcu(iter, mddev) {
4791 + if (iter->desc_nr == nr) {
4792 + rdev = iter;
4793 break;
4794 + }
4795 }
4796
4797 - if (!rdev || rdev->desc_nr != nr) {
4798 + if (!rdev) {
4799 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
4800 return;
4801 }
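
Both md.c hunks fix the same idiom: after rdev_for_each()/rdev_for_each_rcu() the loop cursor never ends up NULL, even when no element matched, so testing it afterwards is unreliable; the fix walks the list with a throwaway iterator and assigns the result pointer only on a real match (the uvc_ioctl_enum_input() hunk later in this patch applies the same pattern). A small self-contained illustration of the rule, using a plain array in place of the rdev list:

#include <stdio.h>

/* After a for-each style walk the cursor still points at the last element
 * even when nothing matched; keep a separate result pointer instead. */
static const int *find_even(const int *arr, int n)
{
        const int *found = NULL;
        int i;

        for (i = 0; i < n; i++) {
                if (arr[i] % 2 == 0) {
                        found = &arr[i];        /* assigned only on a match */
                        break;
                }
        }
        return found;                   /* NULL reliably means "not found" */
}

int main(void)
{
        const int odds[]  = { 1, 3, 5 };
        const int mixed[] = { 1, 4, 5 };

        printf("odds:  %s\n", find_even(odds, 3)  ? "found" : "not found");
        printf("mixed: %s\n", find_even(mixed, 3) ? "found" : "not found");
        return 0;
}
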
4802 diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
4803 index 322386ff5d225..0ead5a7887f14 100644
4804 --- a/drivers/md/raid0.c
4805 +++ b/drivers/md/raid0.c
4806 @@ -143,21 +143,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
4807 pr_debug("md/raid0:%s: FINAL %d zones\n",
4808 mdname(mddev), conf->nr_strip_zones);
4809
4810 - if (conf->nr_strip_zones == 1) {
4811 - conf->layout = RAID0_ORIG_LAYOUT;
4812 - } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
4813 - mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
4814 - conf->layout = mddev->layout;
4815 - } else if (default_layout == RAID0_ORIG_LAYOUT ||
4816 - default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
4817 - conf->layout = default_layout;
4818 - } else {
4819 - pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
4820 - mdname(mddev));
4821 - pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
4822 - err = -ENOTSUPP;
4823 - goto abort;
4824 - }
4825 /*
4826 * now since we have the hard sector sizes, we can make sure
4827 * chunk size is a multiple of that sector size
4828 @@ -288,6 +273,22 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
4829 (unsigned long long)smallest->sectors);
4830 }
4831
4832 + if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
4833 + conf->layout = RAID0_ORIG_LAYOUT;
4834 + } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
4835 + mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
4836 + conf->layout = mddev->layout;
4837 + } else if (default_layout == RAID0_ORIG_LAYOUT ||
4838 + default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
4839 + conf->layout = default_layout;
4840 + } else {
4841 + pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
4842 + mdname(mddev));
4843 + pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
4844 + err = -EOPNOTSUPP;
4845 + goto abort;
4846 + }
4847 +
4848 pr_debug("md/raid0:%s: done.\n", mdname(mddev));
4849 *private_conf = conf;
4850
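
The raid0.c hunk defers the layout decision until the strip zones exist, so the single-device-second-zone case can keep the original layout, and it also returns -EOPNOTSUPP instead of -ENOTSUPP. ENOTSUPP (524) is kernel-internal and has no userspace definition, so callers would see an unknown errno, while EOPNOTSUPP maps to the standard "Operation not supported" string. A quick userspace check of the difference; the hard-coded 524 is only for illustration:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* 524 is the value the kernel-internal ENOTSUPP would leak out;
         * EOPNOTSUPP is a proper userspace errno. */
        printf("errno 524:        %s\n", strerror(524));
        printf("errno EOPNOTSUPP: %s\n", strerror(EOPNOTSUPP));
        return 0;
}
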
4851 diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
4852 index 56857ac0a0be2..c665f7d20c448 100644
4853 --- a/drivers/media/cec/cec-adap.c
4854 +++ b/drivers/media/cec/cec-adap.c
4855 @@ -1263,7 +1263,7 @@ static int cec_config_log_addr(struct cec_adapter *adap,
4856 * While trying to poll the physical address was reset
4857 * and the adapter was unconfigured, so bail out.
4858 */
4859 - if (!adap->is_configuring)
4860 + if (adap->phys_addr == CEC_PHYS_ADDR_INVALID)
4861 return -EINTR;
4862
4863 if (err)
4864 @@ -1321,7 +1321,6 @@ static void cec_adap_unconfigure(struct cec_adapter *adap)
4865 adap->phys_addr != CEC_PHYS_ADDR_INVALID)
4866 WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
4867 adap->log_addrs.log_addr_mask = 0;
4868 - adap->is_configuring = false;
4869 adap->is_configured = false;
4870 memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
4871 cec_flush(adap);
4872 @@ -1514,9 +1513,10 @@ unconfigure:
4873 for (i = 0; i < las->num_log_addrs; i++)
4874 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
4875 cec_adap_unconfigure(adap);
4876 + adap->is_configuring = false;
4877 adap->kthread_config = NULL;
4878 - mutex_unlock(&adap->lock);
4879 complete(&adap->config_completion);
4880 + mutex_unlock(&adap->lock);
4881 return 0;
4882 }
4883
4884 diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
4885 index b42b289faaef4..154776d0069ea 100644
4886 --- a/drivers/media/i2c/ov7670.c
4887 +++ b/drivers/media/i2c/ov7670.c
4888 @@ -2000,7 +2000,6 @@ static int ov7670_remove(struct i2c_client *client)
4889 v4l2_async_unregister_subdev(sd);
4890 v4l2_ctrl_handler_free(&info->hdl);
4891 media_entity_cleanup(&info->sd.entity);
4892 - ov7670_power_off(sd);
4893 return 0;
4894 }
4895
4896 diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
4897 index ead0acb7807c8..6747ecb4911b1 100644
4898 --- a/drivers/media/pci/cx23885/cx23885-core.c
4899 +++ b/drivers/media/pci/cx23885/cx23885-core.c
4900 @@ -2154,7 +2154,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
4901 err = pci_set_dma_mask(pci_dev, 0xffffffff);
4902 if (err) {
4903 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
4904 - goto fail_ctrl;
4905 + goto fail_dma_set_mask;
4906 }
4907
4908 err = request_irq(pci_dev->irq, cx23885_irq,
4909 @@ -2162,7 +2162,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
4910 if (err < 0) {
4911 pr_err("%s: can't get IRQ %d\n",
4912 dev->name, pci_dev->irq);
4913 - goto fail_irq;
4914 + goto fail_dma_set_mask;
4915 }
4916
4917 switch (dev->board) {
4918 @@ -2184,7 +2184,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
4919
4920 return 0;
4921
4922 -fail_irq:
4923 +fail_dma_set_mask:
4924 cx23885_dev_unregister(dev);
4925 fail_ctrl:
4926 v4l2_ctrl_handler_free(hdl);
4927 diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
4928 index 44839a6461e88..534829e352d1d 100644
4929 --- a/drivers/media/pci/cx25821/cx25821-core.c
4930 +++ b/drivers/media/pci/cx25821/cx25821-core.c
4931 @@ -1340,11 +1340,11 @@ static void cx25821_finidev(struct pci_dev *pci_dev)
4932 struct cx25821_dev *dev = get_cx25821(v4l2_dev);
4933
4934 cx25821_shutdown(dev);
4935 - pci_disable_device(pci_dev);
4936
4937 /* unregister stuff */
4938 if (pci_dev->irq)
4939 free_irq(pci_dev->irq, dev);
4940 + pci_disable_device(pci_dev);
4941
4942 cx25821_dev_unregister(dev);
4943 v4l2_device_unregister(v4l2_dev);
4944 diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
4945 index c87eddb1c93f7..c3f0b143330a5 100644
4946 --- a/drivers/media/platform/aspeed-video.c
4947 +++ b/drivers/media/platform/aspeed-video.c
4948 @@ -1688,6 +1688,7 @@ static int aspeed_video_probe(struct platform_device *pdev)
4949
4950 rc = aspeed_video_setup_video(video);
4951 if (rc) {
4952 + aspeed_video_free_buf(video, &video->jpeg);
4953 clk_unprepare(video->vclk);
4954 clk_unprepare(video->eclk);
4955 return rc;
4956 @@ -1715,8 +1716,7 @@ static int aspeed_video_remove(struct platform_device *pdev)
4957
4958 v4l2_device_unregister(v4l2_dev);
4959
4960 - dma_free_coherent(video->dev, VE_JPEG_HEADER_SIZE, video->jpeg.virt,
4961 - video->jpeg.dma);
4962 + aspeed_video_free_buf(video, &video->jpeg);
4963
4964 of_reserved_mem_device_release(dev);
4965
4966 diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
4967 index 0adc54832657b..ebe5e44b6fd38 100644
4968 --- a/drivers/media/platform/coda/coda-common.c
4969 +++ b/drivers/media/platform/coda/coda-common.c
4970 @@ -1192,7 +1192,8 @@ static int coda_enum_frameintervals(struct file *file, void *fh,
4971 struct v4l2_frmivalenum *f)
4972 {
4973 struct coda_ctx *ctx = fh_to_ctx(fh);
4974 - int i;
4975 + struct coda_q_data *q_data;
4976 + const struct coda_codec *codec;
4977
4978 if (f->index)
4979 return -EINVAL;
4980 @@ -1201,12 +1202,19 @@ static int coda_enum_frameintervals(struct file *file, void *fh,
4981 if (!ctx->vdoa && f->pixel_format == V4L2_PIX_FMT_YUYV)
4982 return -EINVAL;
4983
4984 - for (i = 0; i < CODA_MAX_FORMATS; i++) {
4985 - if (f->pixel_format == ctx->cvd->src_formats[i] ||
4986 - f->pixel_format == ctx->cvd->dst_formats[i])
4987 - break;
4988 + if (coda_format_normalize_yuv(f->pixel_format) == V4L2_PIX_FMT_YUV420) {
4989 + q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
4990 + codec = coda_find_codec(ctx->dev, f->pixel_format,
4991 + q_data->fourcc);
4992 + } else {
4993 + codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
4994 + f->pixel_format);
4995 }
4996 - if (i == CODA_MAX_FORMATS)
4997 + if (!codec)
4998 + return -EINVAL;
4999 +
5000 + if (f->width < MIN_W || f->width > codec->max_w ||
5001 + f->height < MIN_H || f->height > codec->max_h)
5002 return -EINVAL;
5003
5004 f->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
5005 @@ -2164,8 +2172,8 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
5006 V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET, -12, 12, 1, 0);
5007 v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
5008 V4L2_CID_MPEG_VIDEO_H264_PROFILE,
5009 - V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
5010 - V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
5011 + V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE, 0x0,
5012 + V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE);
5013 if (ctx->dev->devtype->product == CODA_HX4 ||
5014 ctx->dev->devtype->product == CODA_7541) {
5015 v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
5016 @@ -2179,12 +2187,15 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
5017 if (ctx->dev->devtype->product == CODA_960) {
5018 v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
5019 V4L2_CID_MPEG_VIDEO_H264_LEVEL,
5020 - V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
5021 - ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
5022 + V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
5023 + ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
5024 + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
5025 (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
5026 (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
5027 (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
5028 - (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0)),
5029 + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
5030 + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
5031 + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2)),
5032 V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
5033 }
5034 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
5035 @@ -2246,7 +2257,7 @@ static void coda_decode_ctrls(struct coda_ctx *ctx)
5036 ctx->h264_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
5037 &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
5038 V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
5039 - ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
5040 + ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
5041 (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
5042 (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
5043 V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
5044 diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
5045 index 64148b7e0d986..9bb14bb2e4987 100644
5046 --- a/drivers/media/platform/exynos4-is/fimc-is.c
5047 +++ b/drivers/media/platform/exynos4-is/fimc-is.c
5048 @@ -141,7 +141,7 @@ static int fimc_is_enable_clocks(struct fimc_is *is)
5049 dev_err(&is->pdev->dev, "clock %s enable failed\n",
5050 fimc_is_clocks[i]);
5051 for (--i; i >= 0; i--)
5052 - clk_disable(is->clocks[i]);
5053 + clk_disable_unprepare(is->clocks[i]);
5054 return ret;
5055 }
5056 pr_debug("enabled clock: %s\n", fimc_is_clocks[i]);
5057 diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.h b/drivers/media/platform/exynos4-is/fimc-isp-video.h
5058 index edcb3a5e3cb90..2dd4ddbc748a1 100644
5059 --- a/drivers/media/platform/exynos4-is/fimc-isp-video.h
5060 +++ b/drivers/media/platform/exynos4-is/fimc-isp-video.h
5061 @@ -32,7 +32,7 @@ static inline int fimc_isp_video_device_register(struct fimc_isp *isp,
5062 return 0;
5063 }
5064
5065 -void fimc_isp_video_device_unregister(struct fimc_isp *isp,
5066 +static inline void fimc_isp_video_device_unregister(struct fimc_isp *isp,
5067 enum v4l2_buf_type type)
5068 {
5069 }
5070 diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
5071 index 3d8b1284d1f35..68964a80fe619 100644
5072 --- a/drivers/media/platform/qcom/venus/hfi.c
5073 +++ b/drivers/media/platform/qcom/venus/hfi.c
5074 @@ -104,6 +104,9 @@ int hfi_core_deinit(struct venus_core *core, bool blocking)
5075 mutex_lock(&core->lock);
5076 }
5077
5078 + if (!core->ops)
5079 + goto unlock;
5080 +
5081 ret = core->ops->core_deinit(core);
5082
5083 if (!ret)
5084 diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
5085 index 2791107e641bc..29732b49a2cdb 100644
5086 --- a/drivers/media/platform/sti/delta/delta-v4l2.c
5087 +++ b/drivers/media/platform/sti/delta/delta-v4l2.c
5088 @@ -1862,7 +1862,7 @@ static int delta_probe(struct platform_device *pdev)
5089 if (ret) {
5090 dev_err(delta->dev, "%s failed to initialize firmware ipc channel\n",
5091 DELTA_PREFIX);
5092 - goto err;
5093 + goto err_pm_disable;
5094 }
5095
5096 /* register all available decoders */
5097 @@ -1876,7 +1876,7 @@ static int delta_probe(struct platform_device *pdev)
5098 if (ret) {
5099 dev_err(delta->dev, "%s failed to register V4L2 device\n",
5100 DELTA_PREFIX);
5101 - goto err;
5102 + goto err_pm_disable;
5103 }
5104
5105 delta->work_queue = create_workqueue(DELTA_NAME);
5106 @@ -1901,6 +1901,8 @@ err_work_queue:
5107 destroy_workqueue(delta->work_queue);
5108 err_v4l2:
5109 v4l2_device_unregister(&delta->v4l2_dev);
5110 +err_pm_disable:
5111 + pm_runtime_disable(dev);
5112 err:
5113 return ret;
5114 }
5115 diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
5116 index 85587c1b6a373..75083cb234fe3 100644
5117 --- a/drivers/media/platform/vsp1/vsp1_rpf.c
5118 +++ b/drivers/media/platform/vsp1/vsp1_rpf.c
5119 @@ -291,11 +291,11 @@ static void rpf_configure_partition(struct vsp1_entity *entity,
5120 + crop.left * fmtinfo->bpp[0] / 8;
5121
5122 if (format->num_planes > 1) {
5123 + unsigned int bpl = format->plane_fmt[1].bytesperline;
5124 unsigned int offset;
5125
5126 - offset = crop.top * format->plane_fmt[1].bytesperline
5127 - + crop.left / fmtinfo->hsub
5128 - * fmtinfo->bpp[1] / 8;
5129 + offset = crop.top / fmtinfo->vsub * bpl
5130 + + crop.left / fmtinfo->hsub * fmtinfo->bpp[1] / 8;
5131 mem.addr[1] += offset;
5132 mem.addr[2] += offset;
5133 }
5134 diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
5135 index 2f00679f65a0a..11e7fcfc3f195 100644
5136 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
5137 +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
5138 @@ -2570,6 +2570,11 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
5139 } while (0);
5140 mutex_unlock(&pvr2_unit_mtx);
5141
5142 + INIT_WORK(&hdw->workpoll, pvr2_hdw_worker_poll);
5143 +
5144 + if (hdw->unit_number == -1)
5145 + goto fail;
5146 +
5147 cnt1 = 0;
5148 cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"pvrusb2");
5149 cnt1 += cnt2;
5150 @@ -2581,8 +2586,6 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
5151 if (cnt1 >= sizeof(hdw->name)) cnt1 = sizeof(hdw->name)-1;
5152 hdw->name[cnt1] = 0;
5153
5154 - INIT_WORK(&hdw->workpoll,pvr2_hdw_worker_poll);
5155 -
5156 pvr2_trace(PVR2_TRACE_INIT,"Driver unit number is %d, name is %s",
5157 hdw->unit_number,hdw->name);
5158
5159 diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
5160 index 3126ee9e965c9..96ef64b6a232b 100644
5161 --- a/drivers/media/usb/uvc/uvc_v4l2.c
5162 +++ b/drivers/media/usb/uvc/uvc_v4l2.c
5163 @@ -859,29 +859,31 @@ static int uvc_ioctl_enum_input(struct file *file, void *fh,
5164 struct uvc_video_chain *chain = handle->chain;
5165 const struct uvc_entity *selector = chain->selector;
5166 struct uvc_entity *iterm = NULL;
5167 + struct uvc_entity *it;
5168 u32 index = input->index;
5169 - int pin = 0;
5170
5171 if (selector == NULL ||
5172 (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
5173 if (index != 0)
5174 return -EINVAL;
5175 - list_for_each_entry(iterm, &chain->entities, chain) {
5176 - if (UVC_ENTITY_IS_ITERM(iterm))
5177 + list_for_each_entry(it, &chain->entities, chain) {
5178 + if (UVC_ENTITY_IS_ITERM(it)) {
5179 + iterm = it;
5180 break;
5181 + }
5182 }
5183 - pin = iterm->id;
5184 } else if (index < selector->bNrInPins) {
5185 - pin = selector->baSourceID[index];
5186 - list_for_each_entry(iterm, &chain->entities, chain) {
5187 - if (!UVC_ENTITY_IS_ITERM(iterm))
5188 + list_for_each_entry(it, &chain->entities, chain) {
5189 + if (!UVC_ENTITY_IS_ITERM(it))
5190 continue;
5191 - if (iterm->id == pin)
5192 + if (it->id == selector->baSourceID[index]) {
5193 + iterm = it;
5194 break;
5195 + }
5196 }
5197 }
5198
5199 - if (iterm == NULL || iterm->id != pin)
5200 + if (iterm == NULL)
5201 return -EINVAL;
5202
5203 memset(input, 0, sizeof(*input));
5204 diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
5205 index e5c8bc998eb4e..965820481f1e1 100644
5206 --- a/drivers/mfd/davinci_voicecodec.c
5207 +++ b/drivers/mfd/davinci_voicecodec.c
5208 @@ -46,14 +46,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
5209 }
5210 clk_enable(davinci_vc->clk);
5211
5212 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5213 -
5214 - fifo_base = (dma_addr_t)res->start;
5215 - davinci_vc->base = devm_ioremap_resource(&pdev->dev, res);
5216 + davinci_vc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
5217 if (IS_ERR(davinci_vc->base)) {
5218 ret = PTR_ERR(davinci_vc->base);
5219 goto fail;
5220 }
5221 + fifo_base = (dma_addr_t)res->start;
5222
5223 davinci_vc->regmap = devm_regmap_init_mmio(&pdev->dev,
5224 davinci_vc->base,
5225 diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
5226 index a1d9be82734de..88387c7e74433 100644
5227 --- a/drivers/mfd/ipaq-micro.c
5228 +++ b/drivers/mfd/ipaq-micro.c
5229 @@ -407,7 +407,7 @@ static int __init micro_probe(struct platform_device *pdev)
5230 micro_reset_comm(micro);
5231
5232 irq = platform_get_irq(pdev, 0);
5233 - if (!irq)
5234 + if (irq < 0)
5235 return -EINVAL;
5236 ret = devm_request_irq(&pdev->dev, irq, micro_serial_isr,
5237 IRQF_SHARED, "ipaq-micro",
5238 diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
5239 index a328cab110143..4aef33d07cc36 100644
5240 --- a/drivers/misc/cardreader/rtsx_usb.c
5241 +++ b/drivers/misc/cardreader/rtsx_usb.c
5242 @@ -667,6 +667,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
5243 return 0;
5244
5245 out_init_fail:
5246 + usb_set_intfdata(ucr->pusb_intf, NULL);
5247 usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
5248 ucr->iobuf_dma);
5249 return ret;
5250 diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
5251 index e172719dd86d0..4617c63b10260 100644
5252 --- a/drivers/misc/lkdtm/usercopy.c
5253 +++ b/drivers/misc/lkdtm/usercopy.c
5254 @@ -30,12 +30,12 @@ static const unsigned char test_text[] = "This is a test.\n";
5255 */
5256 static noinline unsigned char *trick_compiler(unsigned char *stack)
5257 {
5258 - return stack + 0;
5259 + return stack + unconst;
5260 }
5261
5262 static noinline unsigned char *do_usercopy_stack_callee(int value)
5263 {
5264 - unsigned char buf[32];
5265 + unsigned char buf[128];
5266 int i;
5267
5268 /* Exercise stack to avoid everything living in registers. */
5269 @@ -43,7 +43,12 @@ static noinline unsigned char *do_usercopy_stack_callee(int value)
5270 buf[i] = value & 0xff;
5271 }
5272
5273 - return trick_compiler(buf);
5274 + /*
5275 + * Put the target buffer in the middle of stack allocation
5276 + * so that we don't step on future stack users regardless
5277 + * of stack growth direction.
5278 + */
5279 + return trick_compiler(&buf[(128/2)-32]);
5280 }
5281
5282 static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
5283 @@ -66,6 +71,12 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
5284 bad_stack -= sizeof(unsigned long);
5285 }
5286
5287 +#ifdef ARCH_HAS_CURRENT_STACK_POINTER
5288 + pr_info("stack : %px\n", (void *)current_stack_pointer);
5289 +#endif
5290 + pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
5291 + pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
5292 +
5293 user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
5294 PROT_READ | PROT_WRITE | PROT_EXEC,
5295 MAP_ANONYMOUS | MAP_PRIVATE, 0);
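
The lkdtm change enlarges the callee's buffer to 128 bytes and hands back a 32-byte window from the middle of it, so (per the new comment) the region the test later abuses stays inside the callee's own allocation whichever way the stack grows. The pointer arithmetic by itself, in a standalone sketch with invented names:

#include <stdio.h>

#define BUF_SIZE    128
#define WINDOW_SIZE  32

/* Hand out a WINDOW_SIZE-byte working area from the middle of a larger
 * buffer, leaving slack on both sides of the window. */
static unsigned char *middle_window(unsigned char *buf)
{
        return &buf[(BUF_SIZE / 2) - WINDOW_SIZE];
}

int main(void)
{
        unsigned char buf[BUF_SIZE] = { 0 };
        unsigned char *win = middle_window(buf);

        printf("window occupies offsets %ld..%ld of 0..%d\n",
               (long)(win - buf), (long)(win - buf + WINDOW_SIZE), BUF_SIZE);
        return 0;
}
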
5296 diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
5297 index 4d1b44de14921..c742ab02ae186 100644
5298 --- a/drivers/misc/ocxl/file.c
5299 +++ b/drivers/misc/ocxl/file.c
5300 @@ -558,7 +558,9 @@ int ocxl_file_register_afu(struct ocxl_afu *afu)
5301
5302 err_unregister:
5303 ocxl_sysfs_unregister_afu(info); // safe to call even if register failed
5304 + free_minor(info);
5305 device_unregister(&info->dev);
5306 + return rc;
5307 err_put:
5308 ocxl_afu_put(afu);
5309 free_minor(info);
5310 diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
5311 index 709f117fd5772..482e01ece0b7f 100644
5312 --- a/drivers/mmc/core/block.c
5313 +++ b/drivers/mmc/core/block.c
5314 @@ -1492,8 +1492,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq)
5315 err = mmc_cqe_recovery(host);
5316 if (err)
5317 mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
5318 - else
5319 - mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
5320 + mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
5321
5322 pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
5323 }
5324 diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
5325 index f816c06ef9160..a316c912a118f 100644
5326 --- a/drivers/mmc/host/jz4740_mmc.c
5327 +++ b/drivers/mmc/host/jz4740_mmc.c
5328 @@ -224,6 +224,26 @@ static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
5329 return PTR_ERR(host->dma_rx);
5330 }
5331
5332 + /*
5333 + * Limit the maximum segment size in any SG entry according to
5334 + * the parameters of the DMA engine device.
5335 + */
5336 + if (host->dma_tx) {
5337 + struct device *dev = host->dma_tx->device->dev;
5338 + unsigned int max_seg_size = dma_get_max_seg_size(dev);
5339 +
5340 + if (max_seg_size < host->mmc->max_seg_size)
5341 + host->mmc->max_seg_size = max_seg_size;
5342 + }
5343 +
5344 + if (host->dma_rx) {
5345 + struct device *dev = host->dma_rx->device->dev;
5346 + unsigned int max_seg_size = dma_get_max_seg_size(dev);
5347 +
5348 + if (max_seg_size < host->mmc->max_seg_size)
5349 + host->mmc->max_seg_size = max_seg_size;
5350 + }
5351 +
5352 return 0;
5353 }
5354
5355 diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
5356 index 9c98ddef0097d..006221284d0ae 100644
5357 --- a/drivers/mtd/chips/cfi_cmdset_0002.c
5358 +++ b/drivers/mtd/chips/cfi_cmdset_0002.c
5359 @@ -59,6 +59,10 @@
5360 #define CFI_SR_WBASB BIT(3)
5361 #define CFI_SR_SLSB BIT(1)
5362
5363 +enum cfi_quirks {
5364 + CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
5365 +};
5366 +
5367 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
5368 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
5369 #if !FORCE_WORD_WRITE
5370 @@ -432,6 +436,15 @@ static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
5371 mtd->name);
5372 }
5373
5374 +static void fixup_quirks(struct mtd_info *mtd)
5375 +{
5376 + struct map_info *map = mtd->priv;
5377 + struct cfi_private *cfi = map->fldrv_priv;
5378 +
5379 + if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x0c01)
5380 + cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
5381 +}
5382 +
5383 /* Used to fix CFI-Tables of chips without Extended Query Tables */
5384 static struct cfi_fixup cfi_nopri_fixup_table[] = {
5385 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
5386 @@ -470,6 +483,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
5387 #if !FORCE_WORD_WRITE
5388 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
5389 #endif
5390 + { CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
5391 { 0, 0, NULL }
5392 };
5393 static struct cfi_fixup jedec_fixup_table[] = {
5394 @@ -798,21 +812,25 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
5395 }
5396
5397 /*
5398 - * Return true if the chip is ready.
5399 + * Return true if the chip is ready and has the correct value.
5400 *
5401 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
5402 * non-suspended sector) and is indicated by no toggle bits toggling.
5403 *
5404 + * Errors are indicated by bits toggling, or by bits held at the
5405 + * wrong value.
5406 + *
5407 * Note that anything more complicated than checking if no bits are toggling
5408 * (including checking DQ5 for an error status) is tricky to get working
5409 * correctly and is therefore not done (particularly with interleaved chips
5410 * as each chip must be checked independently of the others).
5411 */
5412 static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
5413 - unsigned long addr)
5414 + unsigned long addr, map_word *expected)
5415 {
5416 struct cfi_private *cfi = map->fldrv_priv;
5417 map_word d, t;
5418 + int ret;
5419
5420 if (cfi_use_status_reg(cfi)) {
5421 map_word ready = CMD(CFI_SR_DRB);
5422 @@ -822,57 +840,32 @@ static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
5423 */
5424 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
5425 cfi->device_type, NULL);
5426 - d = map_read(map, addr);
5427 + t = map_read(map, addr);
5428
5429 - return map_word_andequal(map, d, ready, ready);
5430 + return map_word_andequal(map, t, ready, ready);
5431 }
5432
5433 d = map_read(map, addr);
5434 t = map_read(map, addr);
5435
5436 - return map_word_equal(map, d, t);
5437 + ret = map_word_equal(map, d, t);
5438 +
5439 + if (!ret || !expected)
5440 + return ret;
5441 +
5442 + return map_word_equal(map, t, *expected);
5443 }
5444
5445 -/*
5446 - * Return true if the chip is ready and has the correct value.
5447 - *
5448 - * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
5449 - * non-suspended sector) and it is indicated by no bits toggling.
5450 - *
5451 - * Error are indicated by toggling bits or bits held with the wrong value,
5452 - * or with bits toggling.
5453 - *
5454 - * Note that anything more complicated than checking if no bits are toggling
5455 - * (including checking DQ5 for an error status) is tricky to get working
5456 - * correctly and is therefore not done (particularly with interleaved chips
5457 - * as each chip must be checked independently of the others).
5458 - *
5459 - */
5460 static int __xipram chip_good(struct map_info *map, struct flchip *chip,
5461 - unsigned long addr, map_word expected)
5462 + unsigned long addr, map_word *expected)
5463 {
5464 struct cfi_private *cfi = map->fldrv_priv;
5465 - map_word oldd, curd;
5466 -
5467 - if (cfi_use_status_reg(cfi)) {
5468 - map_word ready = CMD(CFI_SR_DRB);
5469 -
5470 - /*
5471 - * For chips that support status register, check device
5472 - * ready bit
5473 - */
5474 - cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
5475 - cfi->device_type, NULL);
5476 - curd = map_read(map, addr);
5477 -
5478 - return map_word_andequal(map, curd, ready, ready);
5479 - }
5480 + map_word *datum = expected;
5481
5482 - oldd = map_read(map, addr);
5483 - curd = map_read(map, addr);
5484 + if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
5485 + datum = NULL;
5486
5487 - return map_word_equal(map, oldd, curd) &&
5488 - map_word_equal(map, curd, expected);
5489 + return chip_ready(map, chip, addr, datum);
5490 }
5491
5492 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
5493 @@ -889,7 +882,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
5494
5495 case FL_STATUS:
5496 for (;;) {
5497 - if (chip_ready(map, chip, adr))
5498 + if (chip_ready(map, chip, adr, NULL))
5499 break;
5500
5501 if (time_after(jiffies, timeo)) {
5502 @@ -927,7 +920,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
5503 chip->state = FL_ERASE_SUSPENDING;
5504 chip->erase_suspended = 1;
5505 for (;;) {
5506 - if (chip_ready(map, chip, adr))
5507 + if (chip_ready(map, chip, adr, NULL))
5508 break;
5509
5510 if (time_after(jiffies, timeo)) {
5511 @@ -1459,7 +1452,7 @@ static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
5512 /* wait for chip to become ready */
5513 timeo = jiffies + msecs_to_jiffies(2);
5514 for (;;) {
5515 - if (chip_ready(map, chip, adr))
5516 + if (chip_ready(map, chip, adr, NULL))
5517 break;
5518
5519 if (time_after(jiffies, timeo)) {
5520 @@ -1695,7 +1688,7 @@ static int __xipram do_write_oneword_once(struct map_info *map,
5521 * "chip_good" to avoid the failure due to scheduling.
5522 */
5523 if (time_after(jiffies, timeo) &&
5524 - !chip_good(map, chip, adr, datum)) {
5525 + !chip_good(map, chip, adr, &datum)) {
5526 xip_enable(map, chip, adr);
5527 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
5528 xip_disable(map, chip, adr);
5529 @@ -1703,7 +1696,7 @@ static int __xipram do_write_oneword_once(struct map_info *map,
5530 break;
5531 }
5532
5533 - if (chip_good(map, chip, adr, datum)) {
5534 + if (chip_good(map, chip, adr, &datum)) {
5535 if (cfi_check_err_status(map, chip, adr))
5536 ret = -EIO;
5537 break;
5538 @@ -1975,14 +1968,14 @@ static int __xipram do_write_buffer_wait(struct map_info *map,
5539 * "chip_good" to avoid the failure due to scheduling.
5540 */
5541 if (time_after(jiffies, timeo) &&
5542 - !chip_good(map, chip, adr, datum)) {
5543 + !chip_good(map, chip, adr, &datum)) {
5544 pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
5545 __func__, adr);
5546 ret = -EIO;
5547 break;
5548 }
5549
5550 - if (chip_good(map, chip, adr, datum)) {
5551 + if (chip_good(map, chip, adr, &datum)) {
5552 if (cfi_check_err_status(map, chip, adr))
5553 ret = -EIO;
5554 break;
5555 @@ -2191,7 +2184,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
5556 * If the driver thinks the chip is idle, and no toggle bits
5557 * are changing, then the chip is actually idle for sure.
5558 */
5559 - if (chip->state == FL_READY && chip_ready(map, chip, adr))
5560 + if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
5561 return 0;
5562
5563 /*
5564 @@ -2208,7 +2201,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
5565
5566 /* wait for the chip to become ready */
5567 for (i = 0; i < jiffies_to_usecs(timeo); i++) {
5568 - if (chip_ready(map, chip, adr))
5569 + if (chip_ready(map, chip, adr, NULL))
5570 return 0;
5571
5572 udelay(1);
5573 @@ -2272,13 +2265,13 @@ retry:
5574 map_write(map, datum, adr);
5575
5576 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
5577 - if (chip_ready(map, chip, adr))
5578 + if (chip_ready(map, chip, adr, NULL))
5579 break;
5580
5581 udelay(1);
5582 }
5583
5584 - if (!chip_good(map, chip, adr, datum) ||
5585 + if (!chip_ready(map, chip, adr, &datum) ||
5586 cfi_check_err_status(map, chip, adr)) {
5587 /* reset on all failures. */
5588 map_write(map, CMD(0xF0), chip->start);
5589 @@ -2420,6 +2413,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
5590 DECLARE_WAITQUEUE(wait, current);
5591 int ret = 0;
5592 int retry_cnt = 0;
5593 + map_word datum = map_word_ff(map);
5594
5595 adr = cfi->addr_unlock1;
5596
5597 @@ -2474,7 +2468,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
5598 chip->erase_suspended = 0;
5599 }
5600
5601 - if (chip_good(map, chip, adr, map_word_ff(map))) {
5602 + if (chip_ready(map, chip, adr, &datum)) {
5603 if (cfi_check_err_status(map, chip, adr))
5604 ret = -EIO;
5605 break;
5606 @@ -2519,6 +2513,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
5607 DECLARE_WAITQUEUE(wait, current);
5608 int ret = 0;
5609 int retry_cnt = 0;
5610 + map_word datum = map_word_ff(map);
5611
5612 adr += chip->start;
5613
5614 @@ -2573,7 +2568,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
5615 chip->erase_suspended = 0;
5616 }
5617
5618 - if (chip_good(map, chip, adr, map_word_ff(map))) {
5619 + if (chip_ready(map, chip, adr, &datum)) {
5620 if (cfi_check_err_status(map, chip, adr))
5621 ret = -EIO;
5622 break;
5623 @@ -2767,7 +2762,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
5624 */
5625 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
5626 for (;;) {
5627 - if (chip_ready(map, chip, adr))
5628 + if (chip_ready(map, chip, adr, NULL))
5629 break;
5630
5631 if (time_after(jiffies, timeo)) {
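
The cfi_cmdset_0002.c refactor folds chip_good() into chip_ready() by adding an optional expected-value argument: passing NULL keeps the old behaviour (two back-to-back reads must match, i.e. no bits toggling), while a non-NULL pointer additionally requires the stable data to equal the word that was written; the new CFI_QUIRK_DQ_TRUE_DATA quirk simply makes chip_good() pass NULL for the one AMD/Spansion chip ID it is applied to. A simplified, self-contained model of the combined check; read_word() stands in for the two flash reads, and all names here are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Simulated flash reads: two identical values mean "no bits toggling". */
static unsigned int read_word(const unsigned int *addr)
{
        return *addr;
}

/* Ready means two consecutive reads agree; when "expected" is non-NULL the
 * stable data must also match the value that was programmed. */
static bool chip_ready(const unsigned int *addr, const unsigned int *expected)
{
        unsigned int d = read_word(addr);
        unsigned int t = read_word(addr);

        if (d != t)
                return false;
        if (!expected)
                return true;
        return t == *expected;
}

int main(void)
{
        unsigned int cell  = 0xA5A5;
        unsigned int want  = 0xA5A5;
        unsigned int other = 0xFFFF;

        printf("ready, no expectation: %d\n", chip_ready(&cell, NULL));
        printf("ready, data correct:   %d\n", chip_ready(&cell, &want));
        printf("ready, data wrong:     %d\n", chip_ready(&cell, &other));
        return 0;
}
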
5632 diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
5633 index 1bc7b3a056046..6ea95ade4ca6b 100644
5634 --- a/drivers/mtd/ubi/vmt.c
5635 +++ b/drivers/mtd/ubi/vmt.c
5636 @@ -309,7 +309,6 @@ out_mapping:
5637 ubi->volumes[vol_id] = NULL;
5638 ubi->vol_count -= 1;
5639 spin_unlock(&ubi->volumes_lock);
5640 - ubi_eba_destroy_table(eba_tbl);
5641 out_acc:
5642 spin_lock(&ubi->volumes_lock);
5643 ubi->rsvd_pebs -= vol->reserved_pebs;
5644 diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
5645 index 008d3d492bd1c..be3811311db2d 100644
5646 --- a/drivers/net/can/xilinx_can.c
5647 +++ b/drivers/net/can/xilinx_can.c
5648 @@ -239,7 +239,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
5649 };
5650
5651 /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
5652 -static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
5653 +static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
5654 .name = DRIVER_NAME,
5655 .tseg1_min = 1,
5656 .tseg1_max = 16,
5657 @@ -265,7 +265,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
5658 };
5659
5660 /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
5661 -static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
5662 +static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
5663 .name = DRIVER_NAME,
5664 .tseg1_min = 1,
5665 .tseg1_max = 32,
5666 diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
5667 index 0c191d395f8f3..b546002e5fd41 100644
5668 --- a/drivers/net/dsa/lantiq_gswip.c
5669 +++ b/drivers/net/dsa/lantiq_gswip.c
5670 @@ -1958,8 +1958,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
5671 for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
5672 err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
5673 gphy_fw_np, i);
5674 - if (err)
5675 + if (err) {
5676 + of_node_put(gphy_fw_np);
5677 goto remove_gphy;
5678 + }
5679 i++;
5680 }
5681
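
The gswip fix is one of several of_node_put() corrections in this patch (the mv88e6xxx and altera_tse hunks below continue the theme): for_each_available_child_of_node() holds a reference on the node for the current iteration and releases it when it advances, so breaking out of the loop early leaks that reference unless the error path drops it explicitly. A generic refcount sketch of the rule, with invented get/put helpers:

#include <stdio.h>

struct node {
        const char *name;
        int refcount;
};

static struct node *node_get(struct node *n)
{
        n->refcount++;
        return n;
}

static void node_put(struct node *n)
{
        n->refcount--;
}

/* Walk the children; the iterator holds one reference per iteration. */
static int probe_children(struct node *children, int count, int fail_at)
{
        int i;

        for (i = 0; i < count; i++) {
                struct node *child = node_get(&children[i]);

                if (i == fail_at) {
                        node_put(child);  /* the fix: drop it before bailing out */
                        return -1;
                }
                node_put(child);          /* normally dropped when advancing */
        }
        return 0;
}

int main(void)
{
        struct node kids[3] = { { "a", 0 }, { "b", 0 }, { "c", 0 } };
        int i;

        probe_children(kids, 3, 1);
        for (i = 0; i < 3; i++)
                printf("%s refcount=%d\n", kids[i].name, kids[i].refcount);
        return 0;
}
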
5682 diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
5683 index 87d28ef82559e..b336ed071fa89 100644
5684 --- a/drivers/net/dsa/mv88e6xxx/chip.c
5685 +++ b/drivers/net/dsa/mv88e6xxx/chip.c
5686 @@ -2910,6 +2910,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
5687 */
5688 child = of_get_child_by_name(np, "mdio");
5689 err = mv88e6xxx_mdio_register(chip, child, false);
5690 + of_node_put(child);
5691 if (err)
5692 return err;
5693
5694 diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
5695 index 1f8c3b669dc14..f36536114790b 100644
5696 --- a/drivers/net/ethernet/altera/altera_tse_main.c
5697 +++ b/drivers/net/ethernet/altera/altera_tse_main.c
5698 @@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
5699 mdio = mdiobus_alloc();
5700 if (mdio == NULL) {
5701 netdev_err(dev, "Error allocating MDIO bus\n");
5702 - return -ENOMEM;
5703 + ret = -ENOMEM;
5704 + goto put_node;
5705 }
5706
5707 mdio->name = ALTERA_TSE_RESOURCE_NAME;
5708 @@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
5709 mdio->id);
5710 goto out_free_mdio;
5711 }
5712 + of_node_put(mdio_node);
5713
5714 if (netif_msg_drv(priv))
5715 netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
5716 @@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
5717 out_free_mdio:
5718 mdiobus_free(mdio);
5719 mdio = NULL;
5720 +put_node:
5721 + of_node_put(mdio_node);
5722 return ret;
5723 }
5724
5725 diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
5726 index 7046ad6d3d0e3..ac50da49ca770 100644
5727 --- a/drivers/net/ethernet/broadcom/Makefile
5728 +++ b/drivers/net/ethernet/broadcom/Makefile
5729 @@ -16,3 +16,8 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
5730 obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
5731 obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
5732 obj-$(CONFIG_BNXT) += bnxt/
5733 +
5734 +# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
5735 +ifndef KBUILD_EXTRA_WARN
5736 +CFLAGS_tg3.o += -Wno-array-bounds
5737 +endif
5738 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
5739 index 47a920128760e..cf5c2b9465eba 100644
5740 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
5741 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
5742 @@ -1157,9 +1157,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
5743
5744 switch (xcast_mode) {
5745 case IXGBEVF_XCAST_MODE_NONE:
5746 - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
5747 + disable = IXGBE_VMOLR_ROMPE |
5748 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
5749 - enable = 0;
5750 + enable = IXGBE_VMOLR_BAM;
5751 break;
5752 case IXGBEVF_XCAST_MODE_MULTI:
5753 disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
5754 @@ -1181,9 +1181,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
5755 return -EPERM;
5756 }
5757
5758 - disable = 0;
5759 + disable = IXGBE_VMOLR_VPE;
5760 enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
5761 - IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
5762 + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
5763 break;
5764 default:
5765 return -EOPNOTSUPP;
5766 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
5767 index 3351d4f9363af..5dce4cd60f58d 100644
5768 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
5769 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
5770 @@ -1962,6 +1962,9 @@ static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
5771 struct ethtool_rx_flow_spec *fsp =
5772 (struct ethtool_rx_flow_spec *)&cmd->fs;
5773
5774 + if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
5775 + return -EINVAL;
5776 +
5777 /* only tcp dst ipv4 is meaningful, others are meaningless */
5778 fsp->flow_type = TCP_V4_FLOW;
5779 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
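
The mtk_eth_soc hunk rejects an ethtool rule location before it is used to index mac->hwlro_ip[], closing an out-of-bounds read on a user-controlled value. The usual shape, with a kernel-style ARRAY_SIZE() macro; the table and lookup() are illustrative names only:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int table[3] = { 10, 20, 30 };

/* Validate the caller-supplied index before using it. */
static int lookup(unsigned int index, unsigned int *out)
{
        if (index >= ARRAY_SIZE(table))
                return -1;              /* -EINVAL in the kernel */
        *out = table[index];
        return 0;
}

int main(void)
{
        unsigned int v;

        printf("index 2: %s\n", lookup(2, &v) ? "rejected" : "ok");
        printf("index 7: %s\n", lookup(7, &v) ? "rejected" : "ok");
        return 0;
}
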
5780 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
5781 index dd029d91bbc2d..b711148a9d503 100644
5782 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
5783 +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
5784 @@ -2083,7 +2083,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
5785 en_err(priv,
5786 "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
5787 i, offset, ee->len - i, ret);
5788 - return 0;
5789 + return ret;
5790 }
5791
5792 i += ret;
5793 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
5794 index 97359417c6e7f..f8144ce7e476d 100644
5795 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
5796 +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
5797 @@ -673,6 +673,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
5798 if (!tracer->owner)
5799 return;
5800
5801 + if (unlikely(!tracer->str_db.loaded))
5802 + goto arm;
5803 +
5804 block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
5805 start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
5806
5807 @@ -730,6 +733,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
5808 &tmp_trace_block[TRACES_PER_BLOCK - 1]);
5809 }
5810
5811 +arm:
5812 mlx5_fw_tracer_arm(dev);
5813 }
5814
5815 @@ -1084,8 +1088,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
5816 queue_work(tracer->work_queue, &tracer->ownership_change_work);
5817 break;
5818 case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
5819 - if (likely(tracer->str_db.loaded))
5820 - queue_work(tracer->work_queue, &tracer->handle_traces_work);
5821 + queue_work(tracer->work_queue, &tracer->handle_traces_work);
5822 break;
5823 default:
5824 mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
5825 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5826 index 73291051808f9..35630b538c826 100644
5827 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5828 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5829 @@ -4638,6 +4638,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
5830
5831 unlock:
5832 mutex_unlock(&priv->state_lock);
5833 +
5834 + /* Need to fix some features. */
5835 + if (!err)
5836 + netdev_update_features(netdev);
5837 +
5838 return err;
5839 }
5840
5841 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
5842 index 5baf2c666d293..41087c0618c11 100644
5843 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
5844 +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
5845 @@ -1450,9 +1450,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
5846 return NULL;
5847 }
5848
5849 -static bool check_conflicting_actions(u32 action1, u32 action2)
5850 +static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
5851 + const struct mlx5_fs_vlan *vlan1)
5852 {
5853 - u32 xored_actions = action1 ^ action2;
5854 + return vlan0->ethtype != vlan1->ethtype ||
5855 + vlan0->vid != vlan1->vid ||
5856 + vlan0->prio != vlan1->prio;
5857 +}
5858 +
5859 +static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
5860 + const struct mlx5_flow_act *act2)
5861 +{
5862 + u32 action1 = act1->action;
5863 + u32 action2 = act2->action;
5864 + u32 xored_actions;
5865 +
5866 + xored_actions = action1 ^ action2;
5867
5868 /* if one rule only wants to count, it's ok */
5869 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
5870 @@ -1469,6 +1482,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
5871 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
5872 return true;
5873
5874 + if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
5875 + act1->pkt_reformat != act2->pkt_reformat)
5876 + return true;
5877 +
5878 + if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
5879 + act1->modify_hdr != act2->modify_hdr)
5880 + return true;
5881 +
5882 + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
5883 + check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
5884 + return true;
5885 +
5886 + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
5887 + check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
5888 + return true;
5889 +
5890 return false;
5891 }
5892
5893 @@ -1476,7 +1505,7 @@ static int check_conflicting_ftes(struct fs_fte *fte,
5894 const struct mlx5_flow_context *flow_context,
5895 const struct mlx5_flow_act *flow_act)
5896 {
5897 - if (check_conflicting_actions(flow_act->action, fte->action.action)) {
5898 + if (check_conflicting_actions(flow_act, &fte->action)) {
5899 mlx5_core_warn(get_dev(&fte->node),
5900 "Found two FTEs with conflicting actions\n");
5901 return -EEXIST;
5902 @@ -1937,16 +1966,16 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
5903 down_write_ref_node(&fte->node, false);
5904 for (i = handle->num_rules - 1; i >= 0; i--)
5905 tree_remove_node(&handle->rule[i]->node, true);
5906 - if (fte->dests_size) {
5907 - if (fte->modify_mask)
5908 - modify_fte(fte);
5909 - up_write_ref_node(&fte->node, false);
5910 - } else if (list_empty(&fte->node.children)) {
5911 + if (list_empty(&fte->node.children)) {
5912 del_hw_fte(&fte->node);
5913 /* Avoid double call to del_hw_fte */
5914 fte->node.del_hw_func = NULL;
5915 up_write_ref_node(&fte->node, false);
5916 tree_put_node(&fte->node, false);
5917 + } else if (fte->dests_size) {
5918 + if (fte->modify_mask)
5919 + modify_fte(fte);
5920 + up_write_ref_node(&fte->node, false);
5921 } else {
5922 up_write_ref_node(&fte->node, false);
5923 }
5924 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
5925 index 348f02e336f68..d643685067541 100644
5926 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
5927 +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
5928 @@ -43,11 +43,10 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
5929 err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
5930 if (err && action) {
5931 err = mlx5dr_action_destroy(action);
5932 - if (err) {
5933 - action = NULL;
5934 - mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
5935 - err);
5936 - }
5937 + if (err)
5938 + mlx5_core_err(ns->dev,
5939 + "Failed to destroy action (%d)\n", err);
5940 + action = NULL;
5941 }
5942 ft->fs_dr_table.miss_action = action;
5943 if (old_miss_action) {
5944 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
5945 index 21296fa7f7fbf..bf51ed94952c5 100644
5946 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
5947 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
5948 @@ -227,8 +227,6 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
5949 static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
5950 struct dcb_app *app)
5951 {
5952 - int prio;
5953 -
5954 if (app->priority >= IEEE_8021QAZ_MAX_TCS) {
5955 netdev_err(dev, "APP entry with priority value %u is invalid\n",
5956 app->priority);
5957 @@ -242,17 +240,6 @@ static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
5958 app->protocol);
5959 return -EINVAL;
5960 }
5961 -
5962 - /* Warn about any DSCP APP entries with the same PID. */
5963 - prio = fls(dcb_ieee_getapp_mask(dev, app));
5964 - if (prio--) {
5965 - if (prio < app->priority)
5966 - netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n",
5967 - app->priority, app->protocol, prio);
5968 - else if (prio > app->priority)
5969 - netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n",
5970 - app->priority, app->protocol, prio);
5971 - }
5972 break;
5973
5974 case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
5975 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
5976 index 89e578e25ff8f..10857914c552b 100644
5977 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
5978 +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
5979 @@ -266,8 +266,6 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
5980
5981 /* Init to unknowns */
5982 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5983 - ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
5984 - ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
5985 cmd->base.port = PORT_OTHER;
5986 cmd->base.speed = SPEED_UNKNOWN;
5987 cmd->base.duplex = DUPLEX_UNKNOWN;
5988 @@ -275,6 +273,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
5989 port = nfp_port_from_netdev(netdev);
5990 eth_port = nfp_port_get_eth_port(port);
5991 if (eth_port) {
5992 + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
5993 + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
5994 cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
5995 AUTONEG_ENABLE : AUTONEG_DISABLE;
5996 nfp_net_set_fec_link_mode(eth_port, cmd);
5997 diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
5998 index b0a439248ff69..05c24db507a2c 100644
5999 --- a/drivers/net/phy/mdio_bus.c
6000 +++ b/drivers/net/phy/mdio_bus.c
6001 @@ -753,7 +753,6 @@ int __init mdio_bus_init(void)
6002
6003 return ret;
6004 }
6005 -EXPORT_SYMBOL_GPL(mdio_bus_init);
6006
6007 #if IS_ENABLED(CONFIG_PHYLIB)
6008 void mdio_bus_exit(void)
6009 diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
6010 index 18cc5e4280e83..721153dcfd15a 100644
6011 --- a/drivers/net/phy/micrel.c
6012 +++ b/drivers/net/phy/micrel.c
6013 @@ -282,7 +282,7 @@ static int kszphy_config_reset(struct phy_device *phydev)
6014 }
6015 }
6016
6017 - if (priv->led_mode >= 0)
6018 + if (priv->type && priv->led_mode >= 0)
6019 kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
6020
6021 return 0;
6022 @@ -298,10 +298,10 @@ static int kszphy_config_init(struct phy_device *phydev)
6023
6024 type = priv->type;
6025
6026 - if (type->has_broadcast_disable)
6027 + if (type && type->has_broadcast_disable)
6028 kszphy_broadcast_disable(phydev);
6029
6030 - if (type->has_nand_tree_disable)
6031 + if (type && type->has_nand_tree_disable)
6032 kszphy_nand_tree_disable(phydev);
6033
6034 return kszphy_config_reset(phydev);
6035 @@ -939,7 +939,7 @@ static int kszphy_probe(struct phy_device *phydev)
6036
6037 priv->type = type;
6038
6039 - if (type->led_mode_reg) {
6040 + if (type && type->led_mode_reg) {
6041 ret = of_property_read_u32(np, "micrel,led-mode",
6042 &priv->led_mode);
6043 if (ret)
6044 @@ -960,7 +960,8 @@ static int kszphy_probe(struct phy_device *phydev)
6045 unsigned long rate = clk_get_rate(clk);
6046 bool rmii_ref_clk_sel_25_mhz;
6047
6048 - priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
6049 + if (type)
6050 + priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
6051 rmii_ref_clk_sel_25_mhz = of_property_read_bool(np,
6052 "micrel,rmii-reference-clock-select-25-mhz");
6053
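
The micrel.c hunks all add the same guard: on PHYs whose driver entry carries no driver_data, priv->type is NULL, so every type-> access has to be preceded by a NULL check. A minimal sketch of guarding optional per-device capability data; the names are invented:

#include <stdbool.h>
#include <stdio.h>

struct chip_type {
        bool has_broadcast_disable;
        int  led_mode_reg;
};

struct priv {
        const struct chip_type *type;   /* may be NULL for generic devices */
        int led_mode;
};

static void config_init(const struct priv *priv)
{
        const struct chip_type *type = priv->type;

        /* Only consult the capability table when one was provided. */
        if (type && type->has_broadcast_disable)
                printf("disabling broadcast\n");
        if (type && priv->led_mode >= 0)
                printf("LED mode %d via reg 0x%x\n", priv->led_mode,
                       type->led_mode_reg);
}

int main(void)
{
        struct chip_type ksz_like = { .has_broadcast_disable = true,
                                      .led_mode_reg = 0x1e };
        struct priv with_type = { .type = &ksz_like, .led_mode = 1 };
        struct priv generic   = { .type = NULL, .led_mode = 1 };

        config_init(&with_type);
        config_init(&generic);          /* no NULL dereference */
        return 0;
}
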
6054 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
6055 index b0a4ca3559fd8..abed1effd95ca 100644
6056 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
6057 +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
6058 @@ -5615,7 +5615,7 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
6059
6060 static u8 ar9003_get_eepmisc(struct ath_hw *ah)
6061 {
6062 - return ah->eeprom.map4k.baseEepHeader.eepMisc;
6063 + return ah->eeprom.ar9300_eep.baseEepHeader.opCapFlags.eepMisc;
6064 }
6065
6066 const struct eeprom_ops eep_ar9300_ops = {
6067 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
6068 index a171dbb29fbb6..ad949eb02f3d2 100644
6069 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
6070 +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
6071 @@ -720,7 +720,7 @@
6072 #define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
6073 (AR_SREV_9462(ah) ? 0x16290 : 0x16284))
6074 #define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000)
6075 -#define AR_CH0_TOP2_XPABIASLVL_S 12
6076 +#define AR_CH0_TOP2_XPABIASLVL_S (AR_SREV_9561(ah) ? 9 : 12)
6077
6078 #define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
6079 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : \
6080 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
6081 index 628f45c8c06f2..eeaf63de71bfd 100644
6082 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
6083 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
6084 @@ -1005,6 +1005,14 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
6085 goto rx_next;
6086 }
6087
6088 + if (rxstatus->rs_keyix >= ATH_KEYMAX &&
6089 + rxstatus->rs_keyix != ATH9K_RXKEYIX_INVALID) {
6090 + ath_dbg(common, ANY,
6091 + "Invalid keyix, dropping (keyix: %d)\n",
6092 + rxstatus->rs_keyix);
6093 + goto rx_next;
6094 + }
6095 +
6096 /* Get the RX status information */
6097
6098 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
6099 diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
6100 index 2407931440edb..dfab6be1080cb 100644
6101 --- a/drivers/net/wireless/ath/carl9170/tx.c
6102 +++ b/drivers/net/wireless/ath/carl9170/tx.c
6103 @@ -1557,6 +1557,9 @@ static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar)
6104 goto out;
6105 }
6106 } while (ar->beacon_enabled && i--);
6107 +
6108 + /* no entry found in list */
6109 + return NULL;
6110 }
6111
6112 out:
6113 diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
6114 index 32ce1b42ce08b..0ef62ef77af64 100644
6115 --- a/drivers/net/wireless/broadcom/b43/phy_n.c
6116 +++ b/drivers/net/wireless/broadcom/b43/phy_n.c
6117 @@ -582,7 +582,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
6118 u16 data[4];
6119 s16 gain[2];
6120 u16 minmax[2];
6121 - static const u16 lna_gain[4] = { -2, 10, 19, 25 };
6122 + static const s16 lna_gain[4] = { -2, 10, 19, 25 };
6123
6124 if (nphy->hang_avoid)
6125 b43_nphy_stay_in_carrier_search(dev, 1);
6126 diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c
6127 index a659259bc51aa..6e76055e136d2 100644
6128 --- a/drivers/net/wireless/broadcom/b43legacy/phy.c
6129 +++ b/drivers/net/wireless/broadcom/b43legacy/phy.c
6130 @@ -1123,7 +1123,7 @@ void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev)
6131 struct b43legacy_phy *phy = &dev->phy;
6132 u16 regstack[12] = { 0 };
6133 u16 mls;
6134 - u16 fval;
6135 + s16 fval;
6136 int i;
6137 int j;
6138
6139 diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
6140 index d9baa2fa603b2..e4c60caa6543c 100644
6141 --- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
6142 +++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
6143 @@ -383,7 +383,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
6144
6145 /* Each fragment may need to have room for encryption
6146 * pre/postfix */
6147 - if (host_encrypt)
6148 + if (host_encrypt && crypt && crypt->ops)
6149 bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
6150 crypt->ops->extra_mpdu_postfix_len;
6151
6152 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
6153 index 22136e4832ea6..b2a6e9b7d0a10 100644
6154 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
6155 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
6156 @@ -626,6 +626,9 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
6157 struct iwl_power_vifs *power_iterator = _data;
6158 bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
6159
6160 + if (!mvmvif->uploaded)
6161 + return;
6162 +
6163 switch (ieee80211_vif_type_p2p(vif)) {
6164 case NL80211_IFTYPE_P2P_DEVICE:
6165 break;
6166 diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
6167 index 238accfe4f41d..c4176e357b22c 100644
6168 --- a/drivers/net/wireless/marvell/mwifiex/11h.c
6169 +++ b/drivers/net/wireless/marvell/mwifiex/11h.c
6170 @@ -303,5 +303,7 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
6171
6172 mwifiex_dbg(priv->adapter, MSG,
6173 "indicating channel switch completion to kernel\n");
6174 + mutex_lock(&priv->wdev.mtx);
6175 cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
6176 + mutex_unlock(&priv->wdev.mtx);
6177 }
6178 diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
6179 index d5f65372356bf..0b305badae989 100644
6180 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
6181 +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
6182 @@ -460,8 +460,10 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
6183 struct rtl8180_priv *priv = dev->priv;
6184 struct rtl8180_tx_ring *ring;
6185 struct rtl8180_tx_desc *entry;
6186 + unsigned int prio = 0;
6187 unsigned long flags;
6188 - unsigned int idx, prio, hw_prio;
6189 + unsigned int idx, hw_prio;
6190 +
6191 dma_addr_t mapping;
6192 u32 tx_flags;
6193 u8 rc_flags;
6194 @@ -470,7 +472,9 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
6195 /* do arithmetic and then convert to le16 */
6196 u16 frame_duration = 0;
6197
6198 - prio = skb_get_queue_mapping(skb);
6199 + /* rtl8180/rtl8185 only has one useable tx queue */
6200 + if (dev->queues > IEEE80211_AC_BK)
6201 + prio = skb_get_queue_mapping(skb);
6202 ring = &priv->tx_ring[prio];
6203
6204 mapping = pci_map_single(priv->pdev, skb->data,
6205 diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
6206 index bad06939a247c..9bcb187d37dcd 100644
6207 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c
6208 +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
6209 @@ -1013,7 +1013,7 @@ int rtl_usb_probe(struct usb_interface *intf,
6210 hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
6211 sizeof(struct rtl_usb_priv), &rtl_ops);
6212 if (!hw) {
6213 - WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n");
6214 + pr_warn("rtl_usb: ieee80211 alloc failed\n");
6215 return -ENOMEM;
6216 }
6217 rtlpriv = hw->priv;
6218 diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
6219 index a7ab6dab0f32d..ccaace2a5b0e5 100644
6220 --- a/drivers/nfc/st21nfca/se.c
6221 +++ b/drivers/nfc/st21nfca/se.c
6222 @@ -241,7 +241,7 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
6223 }
6224 EXPORT_SYMBOL(st21nfca_hci_se_io);
6225
6226 -static void st21nfca_se_wt_timeout(struct timer_list *t)
6227 +static void st21nfca_se_wt_work(struct work_struct *work)
6228 {
6229 /*
6230 * No answer from the secure element
6231 @@ -254,8 +254,9 @@ static void st21nfca_se_wt_timeout(struct timer_list *t)
6232 */
6233 /* hardware reset managed through VCC_UICC_OUT power supply */
6234 u8 param = 0x01;
6235 - struct st21nfca_hci_info *info = from_timer(info, t,
6236 - se_info.bwi_timer);
6237 + struct st21nfca_hci_info *info = container_of(work,
6238 + struct st21nfca_hci_info,
6239 + se_info.timeout_work);
6240
6241 pr_debug("\n");
6242
6243 @@ -273,6 +274,13 @@ static void st21nfca_se_wt_timeout(struct timer_list *t)
6244 info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
6245 }
6246
6247 +static void st21nfca_se_wt_timeout(struct timer_list *t)
6248 +{
6249 + struct st21nfca_hci_info *info = from_timer(info, t, se_info.bwi_timer);
6250 +
6251 + schedule_work(&info->se_info.timeout_work);
6252 +}
6253 +
6254 static void st21nfca_se_activation_timeout(struct timer_list *t)
6255 {
6256 struct st21nfca_hci_info *info = from_timer(info, t,
6257 @@ -311,7 +319,7 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
6258 * AID 81 5 to 16
6259 * PARAMETERS 82 0 to 255
6260 */
6261 - if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
6262 + if (skb->len < NFC_MIN_AID_LENGTH + 2 ||
6263 skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
6264 return -EPROTO;
6265
6266 @@ -323,22 +331,29 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
6267 transaction->aid_len = skb->data[1];
6268
6269 /* Checking if the length of the AID is valid */
6270 - if (transaction->aid_len > sizeof(transaction->aid))
6271 + if (transaction->aid_len > sizeof(transaction->aid)) {
6272 + devm_kfree(dev, transaction);
6273 return -EINVAL;
6274 + }
6275
6276 memcpy(transaction->aid, &skb->data[2],
6277 transaction->aid_len);
6278
6279 /* Check next byte is PARAMETERS tag (82) */
6280 if (skb->data[transaction->aid_len + 2] !=
6281 - NFC_EVT_TRANSACTION_PARAMS_TAG)
6282 + NFC_EVT_TRANSACTION_PARAMS_TAG) {
6283 + devm_kfree(dev, transaction);
6284 return -EPROTO;
6285 + }
6286
6287 transaction->params_len = skb->data[transaction->aid_len + 3];
6288
6289 /* Total size is allocated (skb->len - 2) minus fixed array members */
6290 - if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction)))
6291 + if (transaction->params_len > ((skb->len - 2) -
6292 + sizeof(struct nfc_evt_transaction))) {
6293 + devm_kfree(dev, transaction);
6294 return -EINVAL;
6295 + }
6296
6297 memcpy(transaction->params, skb->data +
6298 transaction->aid_len + 4, transaction->params_len);
6299 @@ -365,6 +380,7 @@ int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev,
6300 switch (event) {
6301 case ST21NFCA_EVT_TRANSMIT_DATA:
6302 del_timer_sync(&info->se_info.bwi_timer);
6303 + cancel_work_sync(&info->se_info.timeout_work);
6304 info->se_info.bwi_active = false;
6305 r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE,
6306 ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
6307 @@ -394,6 +410,7 @@ void st21nfca_se_init(struct nfc_hci_dev *hdev)
6308 struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
6309
6310 init_completion(&info->se_info.req_completion);
6311 + INIT_WORK(&info->se_info.timeout_work, st21nfca_se_wt_work);
6312 /* initialize timers */
6313 timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0);
6314 info->se_info.bwi_active = false;
6315 @@ -421,6 +438,7 @@ void st21nfca_se_deinit(struct nfc_hci_dev *hdev)
6316 if (info->se_info.se_active)
6317 del_timer_sync(&info->se_info.se_active_timer);
6318
6319 + cancel_work_sync(&info->se_info.timeout_work);
6320 info->se_info.bwi_active = false;
6321 info->se_info.se_active = false;
6322 }
6323 diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
6324 index 5e0de0fef1d4e..0e4a93d11efb7 100644
6325 --- a/drivers/nfc/st21nfca/st21nfca.h
6326 +++ b/drivers/nfc/st21nfca/st21nfca.h
6327 @@ -141,6 +141,7 @@ struct st21nfca_se_info {
6328
6329 se_io_cb_t cb;
6330 void *cb_context;
6331 + struct work_struct timeout_work;
6332 };
6333
6334 struct st21nfca_hci_info {
6335 diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
6336 index 35d265014e1ec..0e23d8c277925 100644
6337 --- a/drivers/nvdimm/security.c
6338 +++ b/drivers/nvdimm/security.c
6339 @@ -379,11 +379,6 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
6340 || !nvdimm->sec.flags)
6341 return -EOPNOTSUPP;
6342
6343 - if (dev->driver == NULL) {
6344 - dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
6345 - return -EINVAL;
6346 - }
6347 -
6348 rc = check_security_state(nvdimm);
6349 if (rc)
6350 return rc;
6351 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
6352 index af516c35afe6f..10fe7a7a2163c 100644
6353 --- a/drivers/nvme/host/pci.c
6354 +++ b/drivers/nvme/host/pci.c
6355 @@ -1674,6 +1674,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
6356 dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
6357 if (IS_ERR(dev->ctrl.admin_q)) {
6358 blk_mq_free_tag_set(&dev->admin_tagset);
6359 + dev->ctrl.admin_q = NULL;
6360 return -ENOMEM;
6361 }
6362 if (!blk_get_queue(dev->ctrl.admin_q)) {
6363 diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
6364 index 1688f576ee8ac..8420ef42d89ea 100644
6365 --- a/drivers/of/overlay.c
6366 +++ b/drivers/of/overlay.c
6367 @@ -170,9 +170,7 @@ static int overlay_notify(struct overlay_changeset *ovcs,
6368
6369 ret = blocking_notifier_call_chain(&overlay_notify_chain,
6370 action, &nd);
6371 - if (ret == NOTIFY_OK || ret == NOTIFY_STOP)
6372 - return 0;
6373 - if (ret) {
6374 + if (notifier_to_errno(ret)) {
6375 ret = notifier_to_errno(ret);
6376 pr_err("overlay changeset %s notifier error %d, target: %pOF\n",
6377 of_overlay_action_name[action], ret, nd.target);
6378 diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
6379 index acfbd34032a86..b34b52b364d5f 100644
6380 --- a/drivers/pci/controller/dwc/pci-imx6.c
6381 +++ b/drivers/pci/controller/dwc/pci-imx6.c
6382 @@ -413,6 +413,11 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
6383 dev_err(dev, "failed to disable vpcie regulator: %d\n",
6384 ret);
6385 }
6386 +
6387 + /* Some boards don't have PCIe reset GPIO. */
6388 + if (gpio_is_valid(imx6_pcie->reset_gpio))
6389 + gpio_set_value_cansleep(imx6_pcie->reset_gpio,
6390 + imx6_pcie->gpio_active_high);
6391 }
6392
6393 static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
6394 @@ -535,15 +540,6 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
6395 /* allow the clocks to stabilize */
6396 usleep_range(200, 500);
6397
6398 - /* Some boards don't have PCIe reset GPIO. */
6399 - if (gpio_is_valid(imx6_pcie->reset_gpio)) {
6400 - gpio_set_value_cansleep(imx6_pcie->reset_gpio,
6401 - imx6_pcie->gpio_active_high);
6402 - msleep(100);
6403 - gpio_set_value_cansleep(imx6_pcie->reset_gpio,
6404 - !imx6_pcie->gpio_active_high);
6405 - }
6406 -
6407 switch (imx6_pcie->drvdata->variant) {
6408 case IMX8MQ:
6409 reset_control_deassert(imx6_pcie->pciephy_reset);
6410 @@ -586,6 +582,15 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
6411 break;
6412 }
6413
6414 + /* Some boards don't have PCIe reset GPIO. */
6415 + if (gpio_is_valid(imx6_pcie->reset_gpio)) {
6416 + msleep(100);
6417 + gpio_set_value_cansleep(imx6_pcie->reset_gpio,
6418 + !imx6_pcie->gpio_active_high);
6419 + /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
6420 + msleep(100);
6421 + }
6422 +
6423 return;
6424
6425 err_ref_clk:
6426 diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
6427 index a8eab4e67af10..17f411772f0ca 100644
6428 --- a/drivers/pci/controller/dwc/pcie-qcom.c
6429 +++ b/drivers/pci/controller/dwc/pcie-qcom.c
6430 @@ -1343,22 +1343,21 @@ static int qcom_pcie_probe(struct platform_device *pdev)
6431 }
6432
6433 ret = phy_init(pcie->phy);
6434 - if (ret) {
6435 - pm_runtime_disable(&pdev->dev);
6436 + if (ret)
6437 goto err_pm_runtime_put;
6438 - }
6439
6440 platform_set_drvdata(pdev, pcie);
6441
6442 ret = dw_pcie_host_init(pp);
6443 if (ret) {
6444 dev_err(dev, "cannot initialize host\n");
6445 - pm_runtime_disable(&pdev->dev);
6446 - goto err_pm_runtime_put;
6447 + goto err_phy_exit;
6448 }
6449
6450 return 0;
6451
6452 +err_phy_exit:
6453 + phy_exit(pcie->phy);
6454 err_pm_runtime_put:
6455 pm_runtime_put(dev);
6456 pm_runtime_disable(dev);
6457 diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c
6458 index def7820cb8247..5e23d575e200a 100644
6459 --- a/drivers/pci/controller/pcie-cadence-ep.c
6460 +++ b/drivers/pci/controller/pcie-cadence-ep.c
6461 @@ -178,8 +178,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
6462 struct cdns_pcie *pcie = &ep->pcie;
6463 u32 r;
6464
6465 - r = find_first_zero_bit(&ep->ob_region_map,
6466 - sizeof(ep->ob_region_map) * BITS_PER_LONG);
6467 + r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
6468 if (r >= ep->max_regions - 1) {
6469 dev_err(&epc->dev, "no free outbound region\n");
6470 return -EINVAL;
6471 diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
6472 index d743b0a489886..b82edefffd15f 100644
6473 --- a/drivers/pci/controller/pcie-rockchip-ep.c
6474 +++ b/drivers/pci/controller/pcie-rockchip-ep.c
6475 @@ -263,8 +263,7 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
6476 struct rockchip_pcie *pcie = &ep->rockchip;
6477 u32 r;
6478
6479 - r = find_first_zero_bit(&ep->ob_region_map,
6480 - sizeof(ep->ob_region_map) * BITS_PER_LONG);
6481 + r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
6482 /*
6483 * Region 0 is reserved for configuration space and shouldn't
6484 * be used elsewhere per TRM, so leave it out.
6485 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
6486 index d539eb379743e..ec741f92246d6 100644
6487 --- a/drivers/pci/pci.c
6488 +++ b/drivers/pci/pci.c
6489 @@ -2613,6 +2613,8 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
6490 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
6491 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
6492 },
6493 + },
6494 + {
6495 /*
6496 * Downstream device is not accessible after putting a root port
6497 * into D3cold and back into D0 on Elo i2.
6498 @@ -4915,18 +4917,18 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
6499
6500 static void pci_dev_lock(struct pci_dev *dev)
6501 {
6502 - pci_cfg_access_lock(dev);
6503 /* block PM suspend, driver probe, etc. */
6504 device_lock(&dev->dev);
6505 + pci_cfg_access_lock(dev);
6506 }
6507
6508 /* Return 1 on successful lock, 0 on contention */
6509 static int pci_dev_trylock(struct pci_dev *dev)
6510 {
6511 - if (pci_cfg_access_trylock(dev)) {
6512 - if (device_trylock(&dev->dev))
6513 + if (device_trylock(&dev->dev)) {
6514 + if (pci_cfg_access_trylock(dev))
6515 return 1;
6516 - pci_cfg_access_unlock(dev);
6517 + device_unlock(&dev->dev);
6518 }
6519
6520 return 0;
6521 @@ -4934,8 +4936,8 @@ static int pci_dev_trylock(struct pci_dev *dev)
6522
6523 static void pci_dev_unlock(struct pci_dev *dev)
6524 {
6525 - device_unlock(&dev->dev);
6526 pci_cfg_access_unlock(dev);
6527 + device_unlock(&dev->dev);
6528 }
6529
6530 static void pci_dev_save_and_disable(struct pci_dev *dev)
6531 diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
6532 index e004d8da03dcb..73df71a142536 100644
6533 --- a/drivers/pcmcia/Kconfig
6534 +++ b/drivers/pcmcia/Kconfig
6535 @@ -151,7 +151,7 @@ config TCIC
6536
6537 config PCMCIA_ALCHEMY_DEVBOARD
6538 tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
6539 - depends on MIPS_ALCHEMY && PCMCIA
6540 + depends on MIPS_DB1XXX && PCMCIA
6541 help
6542 Enable this driver of you want PCMCIA support on your Alchemy
6543 Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300
6544 diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
6545 index 5ddbf9a1f328b..21d40c6658545 100644
6546 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c
6547 +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
6548 @@ -1517,7 +1517,7 @@ static int qcom_qmp_phy_enable(struct phy *phy)
6549 qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
6550 ret = reset_control_deassert(qmp->ufs_reset);
6551 if (ret)
6552 - goto err_lane_rst;
6553 + goto err_pcs_ready;
6554
6555 /*
6556 * Pull out PHY from POWER DOWN state.
6557 @@ -1860,6 +1860,11 @@ static const struct phy_ops qcom_qmp_ufs_ops = {
6558 .owner = THIS_MODULE,
6559 };
6560
6561 +static void qcom_qmp_reset_control_put(void *data)
6562 +{
6563 + reset_control_put(data);
6564 +}
6565 +
6566 static
6567 int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
6568 {
6569 @@ -1929,7 +1934,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
6570 * all phys that don't need this.
6571 */
6572 snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
6573 - qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
6574 + qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
6575 if (IS_ERR(qphy->pipe_clk)) {
6576 if (qmp->cfg->type == PHY_TYPE_PCIE ||
6577 qmp->cfg->type == PHY_TYPE_USB3) {
6578 @@ -1951,6 +1956,10 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
6579 dev_err(dev, "failed to get lane%d reset\n", id);
6580 return PTR_ERR(qphy->lane_rst);
6581 }
6582 + ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put,
6583 + qphy->lane_rst);
6584 + if (ret)
6585 + return ret;
6586 }
6587
6588 if (qmp->cfg->type == PHY_TYPE_UFS)
6589 diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
6590 index f56add78d58ce..359b2ecfcbdb3 100644
6591 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
6592 +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
6593 @@ -773,7 +773,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
6594 for (i = 0; i < nr_irq_parent; i++) {
6595 int irq = irq_of_parse_and_map(np, i);
6596
6597 - if (irq < 0)
6598 + if (!irq)
6599 continue;
6600
6601 gpiochip_set_chained_irqchip(gc, irqchip, irq,
6602 diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
6603 index bf3f14fb5f244..05e4120fd7022 100644
6604 --- a/drivers/pwm/pwm-lp3943.c
6605 +++ b/drivers/pwm/pwm-lp3943.c
6606 @@ -125,6 +125,7 @@ static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
6607 if (err)
6608 return err;
6609
6610 + duty_ns = min(duty_ns, period_ns);
6611 val = (u8)(duty_ns * LP3943_MAX_DUTY / period_ns);
6612
6613 return lp3943_write_byte(lp3943, reg_duty, val);
6614 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
6615 index 7fd793d8536cd..ae2addadb36f2 100644
6616 --- a/drivers/regulator/core.c
6617 +++ b/drivers/regulator/core.c
6618 @@ -1988,10 +1988,13 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
6619 rdev->exclusive = 1;
6620
6621 ret = _regulator_is_enabled(rdev);
6622 - if (ret > 0)
6623 + if (ret > 0) {
6624 rdev->use_count = 1;
6625 - else
6626 + regulator->enable_count = 1;
6627 + } else {
6628 rdev->use_count = 0;
6629 + regulator->enable_count = 0;
6630 + }
6631 }
6632
6633 device_link_add(dev, &rdev->dev, DL_FLAG_STATELESS);
6634 diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
6635 index 44b1da7cc3744..f873d97100e28 100644
6636 --- a/drivers/regulator/pfuze100-regulator.c
6637 +++ b/drivers/regulator/pfuze100-regulator.c
6638 @@ -528,6 +528,7 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
6639 parent = of_get_child_by_name(np, "regulators");
6640 if (!parent) {
6641 dev_err(dev, "regulators node not found\n");
6642 + of_node_put(np);
6643 return -EINVAL;
6644 }
6645
6646 @@ -557,6 +558,7 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
6647 }
6648
6649 of_node_put(parent);
6650 + of_node_put(np);
6651 if (ret < 0) {
6652 dev_err(dev, "Error parsing regulator init data: %d\n",
6653 ret);
6654 diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
6655 index 19903de6268db..a4db9f6100d2f 100644
6656 --- a/drivers/rpmsg/qcom_smd.c
6657 +++ b/drivers/rpmsg/qcom_smd.c
6658 @@ -1388,9 +1388,9 @@ static int qcom_smd_parse_edge(struct device *dev,
6659 edge->name = node->name;
6660
6661 irq = irq_of_parse_and_map(node, 0);
6662 - if (irq < 0) {
6663 + if (!irq) {
6664 dev_err(dev, "required smd interrupt missing\n");
6665 - ret = irq;
6666 + ret = -EINVAL;
6667 goto put_node;
6668 }
6669
6670 diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
6671 index b216bdcba0da4..dd3901b0a4ed2 100644
6672 --- a/drivers/rtc/rtc-mt6397.c
6673 +++ b/drivers/rtc/rtc-mt6397.c
6674 @@ -331,6 +331,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
6675 return -ENOMEM;
6676
6677 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6678 + if (!res)
6679 + return -EINVAL;
6680 rtc->addr_base = res->start;
6681
6682 rtc->irq = platform_get_irq(pdev, 0);
6683 diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
6684 index 5fb06930912a0..c4a6609d8fae1 100644
6685 --- a/drivers/scsi/dc395x.c
6686 +++ b/drivers/scsi/dc395x.c
6687 @@ -3664,10 +3664,19 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
6688 #endif
6689 if (dcb->target_lun != 0) {
6690 /* Copy settings */
6691 - struct DeviceCtlBlk *p;
6692 - list_for_each_entry(p, &acb->dcb_list, list)
6693 - if (p->target_id == dcb->target_id)
6694 + struct DeviceCtlBlk *p = NULL, *iter;
6695 +
6696 + list_for_each_entry(iter, &acb->dcb_list, list)
6697 + if (iter->target_id == dcb->target_id) {
6698 + p = iter;
6699 break;
6700 + }
6701 +
6702 + if (!p) {
6703 + kfree(dcb);
6704 + return NULL;
6705 + }
6706 +
6707 dprintkdbg(DBG_1,
6708 "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
6709 dcb->target_id, dcb->target_lun,
6710 diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
6711 index 07a0dadc75bf5..7ce2a0434e1e5 100644
6712 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
6713 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
6714 @@ -1966,7 +1966,7 @@ EXPORT_SYMBOL(fcoe_ctlr_recv_flogi);
6715 *
6716 * Returns: u64 fc world wide name
6717 */
6718 -u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
6719 +u64 fcoe_wwn_from_mac(unsigned char mac[ETH_ALEN],
6720 unsigned int scheme, unsigned int port)
6721 {
6722 u64 wwn;
6723 diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
6724 index ff6d4aa924213..8b1ba690039b7 100644
6725 --- a/drivers/scsi/megaraid.c
6726 +++ b/drivers/scsi/megaraid.c
6727 @@ -4635,7 +4635,7 @@ static int __init megaraid_init(void)
6728 * major number allocation.
6729 */
6730 major = register_chrdev(0, "megadev_legacy", &megadev_fops);
6731 - if (!major) {
6732 + if (major < 0) {
6733 printk(KERN_WARNING
6734 "megaraid: failed to register char device\n");
6735 }
6736 diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
6737 index 539ac8ce4fcd7..35b32920a94a0 100644
6738 --- a/drivers/scsi/myrb.c
6739 +++ b/drivers/scsi/myrb.c
6740 @@ -1241,7 +1241,8 @@ static void myrb_cleanup(struct myrb_hba *cb)
6741 myrb_unmap(cb);
6742
6743 if (cb->mmio_base) {
6744 - cb->disable_intr(cb->io_base);
6745 + if (cb->disable_intr)
6746 + cb->disable_intr(cb->io_base);
6747 iounmap(cb->mmio_base);
6748 }
6749 if (cb->irq)
6750 @@ -3516,9 +3517,13 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
6751 mutex_init(&cb->dcmd_mutex);
6752 mutex_init(&cb->dma_mutex);
6753 cb->pdev = pdev;
6754 + cb->host = shost;
6755
6756 - if (pci_enable_device(pdev))
6757 - goto failure;
6758 + if (pci_enable_device(pdev)) {
6759 + dev_err(&pdev->dev, "Failed to enable PCI device\n");
6760 + scsi_host_put(shost);
6761 + return NULL;
6762 + }
6763
6764 if (privdata->hw_init == DAC960_PD_hw_init ||
6765 privdata->hw_init == DAC960_P_hw_init) {
6766 diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
6767 index 4f066e3b19af1..7c9664c0c4c4f 100644
6768 --- a/drivers/scsi/ufs/ufs-qcom.c
6769 +++ b/drivers/scsi/ufs/ufs-qcom.c
6770 @@ -781,8 +781,11 @@ static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
6771
6772 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
6773
6774 - /* ensure that ref_clk is enabled/disabled before we return */
6775 - wmb();
6776 + /*
6777 + * Make sure the write to ref_clk reaches the destination and
6778 + * not stored in a Write Buffer (WB).
6779 + */
6780 + readl(host->dev_ref_clk_ctrl_mmio);
6781
6782 /*
6783 * If we call hibern8 exit after this, we need to make sure that
6784 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
6785 index ebf7ae1ef70d4..670f4c7934f85 100644
6786 --- a/drivers/scsi/ufs/ufshcd.c
6787 +++ b/drivers/scsi/ufs/ufshcd.c
6788 @@ -118,8 +118,13 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
6789 if (!regs)
6790 return -ENOMEM;
6791
6792 - for (pos = 0; pos < len; pos += 4)
6793 + for (pos = 0; pos < len; pos += 4) {
6794 + if (offset == 0 &&
6795 + pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
6796 + pos <= REG_UIC_ERROR_CODE_DME)
6797 + continue;
6798 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
6799 + }
6800
6801 ufshcd_hex_dump(prefix, regs, len);
6802 kfree(regs);
6803 diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
6804 index 42e0b8f647aef..d42bcca3b98e2 100644
6805 --- a/drivers/soc/qcom/smp2p.c
6806 +++ b/drivers/soc/qcom/smp2p.c
6807 @@ -420,6 +420,7 @@ static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
6808 }
6809
6810 smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
6811 + of_node_put(syscon);
6812 if (IS_ERR(smp2p->ipc_regmap))
6813 return PTR_ERR(smp2p->ipc_regmap);
6814
6815 diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
6816 index c428d0f78816e..6564f15c53190 100644
6817 --- a/drivers/soc/qcom/smsm.c
6818 +++ b/drivers/soc/qcom/smsm.c
6819 @@ -359,6 +359,7 @@ static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
6820 return 0;
6821
6822 host->ipc_regmap = syscon_node_to_regmap(syscon);
6823 + of_node_put(syscon);
6824 if (IS_ERR(host->ipc_regmap))
6825 return PTR_ERR(host->ipc_regmap);
6826
6827 diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
6828 index 494cf2b5bf7b6..343ff61ccccbb 100644
6829 --- a/drivers/soc/rockchip/grf.c
6830 +++ b/drivers/soc/rockchip/grf.c
6831 @@ -148,12 +148,14 @@ static int __init rockchip_grf_init(void)
6832 return -ENODEV;
6833 if (!match || !match->data) {
6834 pr_err("%s: missing grf data\n", __func__);
6835 + of_node_put(np);
6836 return -EINVAL;
6837 }
6838
6839 grf_info = match->data;
6840
6841 grf = syscon_node_to_regmap(np);
6842 + of_node_put(np);
6843 if (IS_ERR(grf)) {
6844 pr_err("%s: could not get grf syscon\n", __func__);
6845 return PTR_ERR(grf);
6846 diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
6847 index e9ef80983b791..5a6b02843f2bc 100644
6848 --- a/drivers/spi/spi-img-spfi.c
6849 +++ b/drivers/spi/spi-img-spfi.c
6850 @@ -771,7 +771,7 @@ static int img_spfi_resume(struct device *dev)
6851 int ret;
6852
6853 ret = pm_runtime_get_sync(dev);
6854 - if (ret) {
6855 + if (ret < 0) {
6856 pm_runtime_put_noidle(dev);
6857 return ret;
6858 }
6859 diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
6860 index 7222c7689c3c4..0524741d73b90 100644
6861 --- a/drivers/spi/spi-rspi.c
6862 +++ b/drivers/spi/spi-rspi.c
6863 @@ -1044,14 +1044,11 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
6864 }
6865
6866 memset(&cfg, 0, sizeof(cfg));
6867 + cfg.dst_addr = port_addr + RSPI_SPDR;
6868 + cfg.src_addr = port_addr + RSPI_SPDR;
6869 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
6870 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
6871 cfg.direction = dir;
6872 - if (dir == DMA_MEM_TO_DEV) {
6873 - cfg.dst_addr = port_addr;
6874 - cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
6875 - } else {
6876 - cfg.src_addr = port_addr;
6877 - cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
6878 - }
6879
6880 ret = dmaengine_slave_config(chan, &cfg);
6881 if (ret) {
6882 @@ -1082,12 +1079,12 @@ static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
6883 }
6884
6885 ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
6886 - res->start + RSPI_SPDR);
6887 + res->start);
6888 if (!ctlr->dma_tx)
6889 return -ENODEV;
6890
6891 ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
6892 - res->start + RSPI_SPDR);
6893 + res->start);
6894 if (!ctlr->dma_rx) {
6895 dma_release_channel(ctlr->dma_tx);
6896 ctlr->dma_tx = NULL;
6897 diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
6898 index ea77d915216a2..8070b74202170 100644
6899 --- a/drivers/spi/spi-stm32-qspi.c
6900 +++ b/drivers/spi/spi-stm32-qspi.c
6901 @@ -293,7 +293,8 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
6902 if (!op->data.nbytes)
6903 goto wait_nobusy;
6904
6905 - if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
6906 + if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) ||
6907 + qspi->fmode == CCR_FMODE_APM)
6908 goto out;
6909
6910 reinit_completion(&qspi->data_completion);
6911 diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
6912 index 6b6ef89442837..4bbad00244ab8 100644
6913 --- a/drivers/spi/spi-ti-qspi.c
6914 +++ b/drivers/spi/spi-ti-qspi.c
6915 @@ -401,6 +401,7 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
6916 enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
6917 struct dma_async_tx_descriptor *tx;
6918 int ret;
6919 + unsigned long time_left;
6920
6921 tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
6922 if (!tx) {
6923 @@ -420,9 +421,9 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
6924 }
6925
6926 dma_async_issue_pending(chan);
6927 - ret = wait_for_completion_timeout(&qspi->transfer_complete,
6928 + time_left = wait_for_completion_timeout(&qspi->transfer_complete,
6929 msecs_to_jiffies(len));
6930 - if (ret <= 0) {
6931 + if (time_left == 0) {
6932 dmaengine_terminate_sync(chan);
6933 dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
6934 return -ETIMEDOUT;
6935 diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
6936 index f69dc49304571..b7a91bdef6f41 100644
6937 --- a/drivers/staging/fieldbus/anybuss/host.c
6938 +++ b/drivers/staging/fieldbus/anybuss/host.c
6939 @@ -1384,7 +1384,7 @@ anybuss_host_common_probe(struct device *dev,
6940 goto err_device;
6941 return cd;
6942 err_device:
6943 - device_unregister(&cd->client->dev);
6944 + put_device(&cd->client->dev);
6945 err_kthread:
6946 kthread_stop(cd->qthread);
6947 err_reset:
6948 diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
6949 index 3259bf02ba25e..2418fbf1d2ab9 100644
6950 --- a/drivers/staging/greybus/audio_codec.c
6951 +++ b/drivers/staging/greybus/audio_codec.c
6952 @@ -620,8 +620,8 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
6953 break;
6954 }
6955 if (!data) {
6956 - dev_err(dai->dev, "%s:%s DATA connection missing\n",
6957 - dai->name, module->name);
6958 + dev_err(dai->dev, "%s DATA connection missing\n",
6959 + dai->name);
6960 mutex_unlock(&codec->lock);
6961 return -ENODEV;
6962 }
6963 diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
6964 index 4ff8fd694c600..0154f5791b121 100644
6965 --- a/drivers/staging/rtl8192e/rtllib_softmac.c
6966 +++ b/drivers/staging/rtl8192e/rtllib_softmac.c
6967 @@ -651,9 +651,9 @@ static void rtllib_beacons_stop(struct rtllib_device *ieee)
6968 spin_lock_irqsave(&ieee->beacon_lock, flags);
6969
6970 ieee->beacon_txing = 0;
6971 - del_timer_sync(&ieee->beacon_timer);
6972
6973 spin_unlock_irqrestore(&ieee->beacon_lock, flags);
6974 + del_timer_sync(&ieee->beacon_timer);
6975
6976 }
6977
6978 diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
6979 index 33a6af7aad225..a869694337f72 100644
6980 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
6981 +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
6982 @@ -528,9 +528,9 @@ static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
6983 spin_lock_irqsave(&ieee->beacon_lock, flags);
6984
6985 ieee->beacon_txing = 0;
6986 - del_timer_sync(&ieee->beacon_timer);
6987
6988 spin_unlock_irqrestore(&ieee->beacon_lock, flags);
6989 + del_timer_sync(&ieee->beacon_timer);
6990 }
6991
6992 void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
6993 diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
6994 index 49188ab046123..f7c1258eaa394 100644
6995 --- a/drivers/staging/rtl8712/usb_intf.c
6996 +++ b/drivers/staging/rtl8712/usb_intf.c
6997 @@ -539,13 +539,13 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
6998 } else {
6999 AutoloadFail = false;
7000 }
7001 - if (((mac[0] == 0xff) && (mac[1] == 0xff) &&
7002 + if ((!AutoloadFail) ||
7003 + ((mac[0] == 0xff) && (mac[1] == 0xff) &&
7004 (mac[2] == 0xff) && (mac[3] == 0xff) &&
7005 (mac[4] == 0xff) && (mac[5] == 0xff)) ||
7006 ((mac[0] == 0x00) && (mac[1] == 0x00) &&
7007 (mac[2] == 0x00) && (mac[3] == 0x00) &&
7008 - (mac[4] == 0x00) && (mac[5] == 0x00)) ||
7009 - (!AutoloadFail)) {
7010 + (mac[4] == 0x00) && (mac[5] == 0x00))) {
7011 mac[0] = 0x00;
7012 mac[1] = 0xe0;
7013 mac[2] = 0x4c;
7014 diff --git a/drivers/staging/rtl8712/usb_ops.c b/drivers/staging/rtl8712/usb_ops.c
7015 index e64845e6adf3d..af9966d03979c 100644
7016 --- a/drivers/staging/rtl8712/usb_ops.c
7017 +++ b/drivers/staging/rtl8712/usb_ops.c
7018 @@ -29,7 +29,8 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
7019 u16 wvalue;
7020 u16 index;
7021 u16 len;
7022 - __le32 data;
7023 + int status;
7024 + __le32 data = 0;
7025 struct intf_priv *intfpriv = intfhdl->pintfpriv;
7026
7027 request = 0x05;
7028 @@ -37,8 +38,10 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
7029 index = 0;
7030 wvalue = (u16)(addr & 0x0000ffff);
7031 len = 1;
7032 - r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
7033 - requesttype);
7034 + status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
7035 + &data, len, requesttype);
7036 + if (status < 0)
7037 + return 0;
7038 return (u8)(le32_to_cpu(data) & 0x0ff);
7039 }
7040
7041 @@ -49,7 +52,8 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
7042 u16 wvalue;
7043 u16 index;
7044 u16 len;
7045 - __le32 data;
7046 + int status;
7047 + __le32 data = 0;
7048 struct intf_priv *intfpriv = intfhdl->pintfpriv;
7049
7050 request = 0x05;
7051 @@ -57,8 +61,10 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
7052 index = 0;
7053 wvalue = (u16)(addr & 0x0000ffff);
7054 len = 2;
7055 - r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
7056 - requesttype);
7057 + status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
7058 + &data, len, requesttype);
7059 + if (status < 0)
7060 + return 0;
7061 return (u16)(le32_to_cpu(data) & 0xffff);
7062 }
7063
7064 @@ -69,7 +75,8 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
7065 u16 wvalue;
7066 u16 index;
7067 u16 len;
7068 - __le32 data;
7069 + int status;
7070 + __le32 data = 0;
7071 struct intf_priv *intfpriv = intfhdl->pintfpriv;
7072
7073 request = 0x05;
7074 @@ -77,8 +84,10 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
7075 index = 0;
7076 wvalue = (u16)(addr & 0x0000ffff);
7077 len = 4;
7078 - r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
7079 - requesttype);
7080 + status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
7081 + &data, len, requesttype);
7082 + if (status < 0)
7083 + return 0;
7084 return le32_to_cpu(data);
7085 }
7086
7087 diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c
7088 index 475ce29007713..85ab9edd580cc 100644
7089 --- a/drivers/thermal/broadcom/sr-thermal.c
7090 +++ b/drivers/thermal/broadcom/sr-thermal.c
7091 @@ -60,6 +60,9 @@ static int sr_thermal_probe(struct platform_device *pdev)
7092 return -ENOMEM;
7093
7094 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
7095 + if (!res)
7096 + return -ENOENT;
7097 +
7098 sr_thermal->regs = (void __iomem *)devm_memremap(&pdev->dev, res->start,
7099 resource_size(res),
7100 MEMREMAP_WB);
7101 diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
7102 index c8c5cdfc5e199..abc84d84f6386 100644
7103 --- a/drivers/tty/goldfish.c
7104 +++ b/drivers/tty/goldfish.c
7105 @@ -407,6 +407,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
7106 err_tty_register_device_failed:
7107 free_irq(irq, qtty);
7108 err_dec_line_count:
7109 + tty_port_destroy(&qtty->port);
7110 goldfish_tty_current_line_count--;
7111 if (goldfish_tty_current_line_count == 0)
7112 goldfish_tty_delete_driver();
7113 @@ -428,6 +429,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
7114 iounmap(qtty->base);
7115 qtty->base = NULL;
7116 free_irq(qtty->irq, pdev);
7117 + tty_port_destroy(&qtty->port);
7118 goldfish_tty_current_line_count--;
7119 if (goldfish_tty_current_line_count == 0)
7120 goldfish_tty_delete_driver();
7121 diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
7122 index e24161004ddc1..9b1cddbfc75c0 100644
7123 --- a/drivers/tty/serial/8250/8250_fintek.c
7124 +++ b/drivers/tty/serial/8250/8250_fintek.c
7125 @@ -197,12 +197,12 @@ static int fintek_8250_rs485_config(struct uart_port *port,
7126 if (!pdata)
7127 return -EINVAL;
7128
7129 - /* Hardware do not support same RTS level on send and receive */
7130 - if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
7131 - !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
7132 - return -EINVAL;
7133
7134 if (rs485->flags & SER_RS485_ENABLED) {
7135 + /* Hardware do not support same RTS level on send and receive */
7136 + if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
7137 + !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
7138 + return -EINVAL;
7139 memset(rs485->padding, 0, sizeof(rs485->padding));
7140 config |= RS485_URA;
7141 } else {
7142 diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
7143 index 4446c13629b1c..e06967ca62fa6 100644
7144 --- a/drivers/tty/serial/digicolor-usart.c
7145 +++ b/drivers/tty/serial/digicolor-usart.c
7146 @@ -309,6 +309,8 @@ static void digicolor_uart_set_termios(struct uart_port *port,
7147 case CS8:
7148 default:
7149 config |= UA_CONFIG_CHAR_LEN;
7150 + termios->c_cflag &= ~CSIZE;
7151 + termios->c_cflag |= CS8;
7152 break;
7153 }
7154
7155 diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
7156 index 13e705b53217d..4bdc12908146e 100644
7157 --- a/drivers/tty/serial/fsl_lpuart.c
7158 +++ b/drivers/tty/serial/fsl_lpuart.c
7159 @@ -233,8 +233,6 @@
7160 /* IMX lpuart has four extra unused regs located at the beginning */
7161 #define IMX_REG_OFF 0x10
7162
7163 -static DEFINE_IDA(fsl_lpuart_ida);
7164 -
7165 enum lpuart_type {
7166 VF610_LPUART,
7167 LS1021A_LPUART,
7168 @@ -269,7 +267,6 @@ struct lpuart_port {
7169 int rx_dma_rng_buf_len;
7170 unsigned int dma_tx_nents;
7171 wait_queue_head_t dma_wait;
7172 - bool id_allocated;
7173 };
7174
7175 struct lpuart_soc_data {
7176 @@ -2450,23 +2447,18 @@ static int lpuart_probe(struct platform_device *pdev)
7177
7178 ret = of_alias_get_id(np, "serial");
7179 if (ret < 0) {
7180 - ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
7181 - if (ret < 0) {
7182 - dev_err(&pdev->dev, "port line is full, add device failed\n");
7183 - return ret;
7184 - }
7185 - sport->id_allocated = true;
7186 + dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
7187 + return ret;
7188 }
7189 if (ret >= ARRAY_SIZE(lpuart_ports)) {
7190 dev_err(&pdev->dev, "serial%d out of range\n", ret);
7191 - ret = -EINVAL;
7192 - goto failed_out_of_range;
7193 + return -EINVAL;
7194 }
7195 sport->port.line = ret;
7196
7197 ret = lpuart_enable_clks(sport);
7198 if (ret)
7199 - goto failed_clock_enable;
7200 + return ret;
7201 sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
7202
7203 lpuart_ports[sport->port.line] = sport;
7204 @@ -2516,10 +2508,6 @@ static int lpuart_probe(struct platform_device *pdev)
7205 failed_attach_port:
7206 failed_irq_request:
7207 lpuart_disable_clks(sport);
7208 -failed_clock_enable:
7209 -failed_out_of_range:
7210 - if (sport->id_allocated)
7211 - ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
7212 return ret;
7213 }
7214
7215 @@ -2529,9 +2517,6 @@ static int lpuart_remove(struct platform_device *pdev)
7216
7217 uart_remove_one_port(&lpuart_reg, &sport->port);
7218
7219 - if (sport->id_allocated)
7220 - ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
7221 -
7222 lpuart_disable_clks(sport);
7223
7224 if (sport->dma_tx_chan)
7225 @@ -2663,7 +2648,6 @@ static int __init lpuart_serial_init(void)
7226
7227 static void __exit lpuart_serial_exit(void)
7228 {
7229 - ida_destroy(&fsl_lpuart_ida);
7230 platform_driver_unregister(&lpuart_driver);
7231 uart_unregister_driver(&lpuart_reg);
7232 }
7233 diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
7234 index 624f3d541c687..d047380259b53 100644
7235 --- a/drivers/tty/serial/icom.c
7236 +++ b/drivers/tty/serial/icom.c
7237 @@ -1499,7 +1499,7 @@ static int icom_probe(struct pci_dev *dev,
7238 retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg);
7239 if (retval) {
7240 dev_err(&dev->dev, "PCI Config read FAILED\n");
7241 - return retval;
7242 + goto probe_exit0;
7243 }
7244
7245 pci_write_config_dword(dev, PCI_COMMAND,
7246 diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
7247 index fbc5bc022a392..849ce8c1ef392 100644
7248 --- a/drivers/tty/serial/meson_uart.c
7249 +++ b/drivers/tty/serial/meson_uart.c
7250 @@ -256,6 +256,14 @@ static const char *meson_uart_type(struct uart_port *port)
7251 return (port->type == PORT_MESON) ? "meson_uart" : NULL;
7252 }
7253
7254 +/*
7255 + * This function is called only from probe() using a temporary io mapping
7256 + * in order to perform a reset before setting up the device. Since the
7257 + * temporarily mapped region was successfully requested, there can be no
7258 + * console on this port at this time. Hence it is not necessary for this
7259 + * function to acquire the port->lock. (Since there is no console on this
7260 + * port at this time, the port->lock is not initialized yet.)
7261 + */
7262 static void meson_uart_reset(struct uart_port *port)
7263 {
7264 u32 val;
7265 @@ -270,9 +278,12 @@ static void meson_uart_reset(struct uart_port *port)
7266
7267 static int meson_uart_startup(struct uart_port *port)
7268 {
7269 + unsigned long flags;
7270 u32 val;
7271 int ret = 0;
7272
7273 + spin_lock_irqsave(&port->lock, flags);
7274 +
7275 val = readl(port->membase + AML_UART_CONTROL);
7276 val |= AML_UART_CLEAR_ERR;
7277 writel(val, port->membase + AML_UART_CONTROL);
7278 @@ -288,6 +299,8 @@ static int meson_uart_startup(struct uart_port *port)
7279 val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
7280 writel(val, port->membase + AML_UART_MISC);
7281
7282 + spin_unlock_irqrestore(&port->lock, flags);
7283 +
7284 ret = request_irq(port->irq, meson_uart_interrupt, 0,
7285 port->name, port);
7286
7287 diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
7288 index 5129c2dfbe079..aac96659694d6 100644
7289 --- a/drivers/tty/serial/msm_serial.c
7290 +++ b/drivers/tty/serial/msm_serial.c
7291 @@ -1579,6 +1579,7 @@ static inline struct uart_port *msm_get_port_from_line(unsigned int line)
7292 static void __msm_console_write(struct uart_port *port, const char *s,
7293 unsigned int count, bool is_uartdm)
7294 {
7295 + unsigned long flags;
7296 int i;
7297 int num_newlines = 0;
7298 bool replaced = false;
7299 @@ -1596,6 +1597,8 @@ static void __msm_console_write(struct uart_port *port, const char *s,
7300 num_newlines++;
7301 count += num_newlines;
7302
7303 + local_irq_save(flags);
7304 +
7305 if (port->sysrq)
7306 locked = 0;
7307 else if (oops_in_progress)
7308 @@ -1641,6 +1644,8 @@ static void __msm_console_write(struct uart_port *port, const char *s,
7309
7310 if (locked)
7311 spin_unlock(&port->lock);
7312 +
7313 + local_irq_restore(flags);
7314 }
7315
7316 static void msm_console_write(struct console *co, const char *s,
7317 diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
7318 index c55c8507713c3..e87953f8a7685 100644
7319 --- a/drivers/tty/serial/owl-uart.c
7320 +++ b/drivers/tty/serial/owl-uart.c
7321 @@ -695,6 +695,7 @@ static int owl_uart_probe(struct platform_device *pdev)
7322 owl_port->port.uartclk = clk_get_rate(owl_port->clk);
7323 if (owl_port->port.uartclk == 0) {
7324 dev_err(&pdev->dev, "clock rate is zero\n");
7325 + clk_disable_unprepare(owl_port->clk);
7326 return -EINVAL;
7327 }
7328 owl_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_LOW_LATENCY;
7329 diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
7330 index c16234bca78fb..77f18445bb988 100644
7331 --- a/drivers/tty/serial/pch_uart.c
7332 +++ b/drivers/tty/serial/pch_uart.c
7333 @@ -635,22 +635,6 @@ static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
7334 return 0;
7335 }
7336
7337 -static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
7338 -{
7339 - int ret = 0;
7340 - struct uart_port *port = &priv->port;
7341 -
7342 - if (port->x_char) {
7343 - dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n",
7344 - __func__, port->x_char, jiffies);
7345 - buf[0] = port->x_char;
7346 - port->x_char = 0;
7347 - ret = 1;
7348 - }
7349 -
7350 - return ret;
7351 -}
7352 -
7353 static int dma_push_rx(struct eg20t_port *priv, int size)
7354 {
7355 int room;
7356 @@ -900,9 +884,10 @@ static unsigned int handle_tx(struct eg20t_port *priv)
7357
7358 fifo_size = max(priv->fifo_size, 1);
7359 tx_empty = 1;
7360 - if (pop_tx_x(priv, xmit->buf)) {
7361 - pch_uart_hal_write(priv, xmit->buf, 1);
7362 + if (port->x_char) {
7363 + pch_uart_hal_write(priv, &port->x_char, 1);
7364 port->icount.tx++;
7365 + port->x_char = 0;
7366 tx_empty = 0;
7367 fifo_size--;
7368 }
7369 @@ -957,9 +942,11 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
7370 }
7371
7372 fifo_size = max(priv->fifo_size, 1);
7373 - if (pop_tx_x(priv, xmit->buf)) {
7374 - pch_uart_hal_write(priv, xmit->buf, 1);
7375 +
7376 + if (port->x_char) {
7377 + pch_uart_hal_write(priv, &port->x_char, 1);
7378 port->icount.tx++;
7379 + port->x_char = 0;
7380 fifo_size--;
7381 }
7382
7383 diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
7384 index ff9a27d48bca8..877d86ff68190 100644
7385 --- a/drivers/tty/serial/rda-uart.c
7386 +++ b/drivers/tty/serial/rda-uart.c
7387 @@ -262,6 +262,8 @@ static void rda_uart_set_termios(struct uart_port *port,
7388 /* Fall through */
7389 case CS7:
7390 ctrl &= ~RDA_UART_DBITS_8;
7391 + termios->c_cflag &= ~CSIZE;
7392 + termios->c_cflag |= CS7;
7393 break;
7394 default:
7395 ctrl |= RDA_UART_DBITS_8;
7396 diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
7397 index 8e618129e65c9..ff4b44bdf6b67 100644
7398 --- a/drivers/tty/serial/sa1100.c
7399 +++ b/drivers/tty/serial/sa1100.c
7400 @@ -454,6 +454,8 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
7401 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
7402 quot = uart_get_divisor(port, baud);
7403
7404 + del_timer_sync(&sport->timer);
7405 +
7406 spin_lock_irqsave(&sport->port.lock, flags);
7407
7408 sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
7409 @@ -484,8 +486,6 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
7410 UTSR1_TO_SM(UTSR1_ROR);
7411 }
7412
7413 - del_timer_sync(&sport->timer);
7414 -
7415 /*
7416 * Update the per-port timeout.
7417 */
7418 diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
7419 index 8507f18900d09..2783baa5dfe59 100644
7420 --- a/drivers/tty/serial/serial_txx9.c
7421 +++ b/drivers/tty/serial/serial_txx9.c
7422 @@ -648,6 +648,8 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
7423 case CS6: /* not supported */
7424 case CS8:
7425 cval |= TXX9_SILCR_UMODE_8BIT;
7426 + termios->c_cflag &= ~CSIZE;
7427 + termios->c_cflag |= CS8;
7428 break;
7429 }
7430
7431 diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
7432 index ecff9b2088087..c066bb7f07b01 100644
7433 --- a/drivers/tty/serial/sh-sci.c
7434 +++ b/drivers/tty/serial/sh-sci.c
7435 @@ -2395,8 +2395,12 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
7436 int best_clk = -1;
7437 unsigned long flags;
7438
7439 - if ((termios->c_cflag & CSIZE) == CS7)
7440 + if ((termios->c_cflag & CSIZE) == CS7) {
7441 smr_val |= SCSMR_CHR;
7442 + } else {
7443 + termios->c_cflag &= ~CSIZE;
7444 + termios->c_cflag |= CS8;
7445 + }
7446 if (termios->c_cflag & PARENB)
7447 smr_val |= SCSMR_PE;
7448 if (termios->c_cflag & PARODD)
7449 diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
7450 index 6a2dc823ea828..7015632c49905 100644
7451 --- a/drivers/tty/serial/sifive.c
7452 +++ b/drivers/tty/serial/sifive.c
7453 @@ -667,12 +667,16 @@ static void sifive_serial_set_termios(struct uart_port *port,
7454 int rate;
7455 char nstop;
7456
7457 - if ((termios->c_cflag & CSIZE) != CS8)
7458 + if ((termios->c_cflag & CSIZE) != CS8) {
7459 dev_err_once(ssp->port.dev, "only 8-bit words supported\n");
7460 + termios->c_cflag &= ~CSIZE;
7461 + termios->c_cflag |= CS8;
7462 + }
7463 if (termios->c_iflag & (INPCK | PARMRK))
7464 dev_err_once(ssp->port.dev, "parity checking not supported\n");
7465 if (termios->c_iflag & BRKINT)
7466 dev_err_once(ssp->port.dev, "BREAK detection not supported\n");
7467 + termios->c_iflag &= ~(INPCK|PARMRK|BRKINT);
7468
7469 /* Set number of stop bits */
7470 nstop = (termios->c_cflag & CSTOPB) ? 2 : 1;
7471 @@ -973,7 +977,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
7472 /* Set up clock divider */
7473 ssp->clkin_rate = clk_get_rate(ssp->clk);
7474 ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
7475 - ssp->port.uartclk = ssp->baud_rate * 16;
7476 + ssp->port.uartclk = ssp->clkin_rate;
7477 __ssp_update_div(ssp);
7478
7479 platform_set_drvdata(pdev, ssp);
7480 diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
7481 index 7971997cdead7..ce35e3a131b16 100644
7482 --- a/drivers/tty/serial/st-asc.c
7483 +++ b/drivers/tty/serial/st-asc.c
7484 @@ -540,10 +540,14 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
7485 /* set character length */
7486 if ((cflag & CSIZE) == CS7) {
7487 ctrl_val |= ASC_CTL_MODE_7BIT_PAR;
7488 + cflag |= PARENB;
7489 } else {
7490 ctrl_val |= (cflag & PARENB) ? ASC_CTL_MODE_8BIT_PAR :
7491 ASC_CTL_MODE_8BIT;
7492 + cflag &= ~CSIZE;
7493 + cflag |= CS8;
7494 }
7495 + termios->c_cflag = cflag;
7496
7497 /* set stop bit */
7498 ctrl_val |= (cflag & CSTOPB) ? ASC_CTL_STOP_2BIT : ASC_CTL_STOP_1BIT;
7499 diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
7500 index d517b911cd042..d5a084ffde892 100644
7501 --- a/drivers/tty/serial/stm32-usart.c
7502 +++ b/drivers/tty/serial/stm32-usart.c
7503 @@ -745,13 +745,22 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
7504 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
7505 * M0 and M1 already cleared by cr1 initialization.
7506 */
7507 - if (bits == 9)
7508 + if (bits == 9) {
7509 cr1 |= USART_CR1_M0;
7510 - else if ((bits == 7) && cfg->has_7bits_data)
7511 + } else if ((bits == 7) && cfg->has_7bits_data) {
7512 cr1 |= USART_CR1_M1;
7513 - else if (bits != 8)
7514 + } else if (bits != 8) {
7515 dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
7516 , bits);
7517 + cflag &= ~CSIZE;
7518 + cflag |= CS8;
7519 + termios->c_cflag = cflag;
7520 + bits = 8;
7521 + if (cflag & PARENB) {
7522 + bits++;
7523 + cr1 |= USART_CR1_M0;
7524 + }
7525 + }
7526
7527 if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
7528 stm32_port->fifoen)) {
7529 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
7530 index ff345a8e0fcc6..b72471373c71d 100644
7531 --- a/drivers/tty/synclink_gt.c
7532 +++ b/drivers/tty/synclink_gt.c
7533 @@ -1752,6 +1752,8 @@ static int hdlcdev_init(struct slgt_info *info)
7534 */
7535 static void hdlcdev_exit(struct slgt_info *info)
7536 {
7537 + if (!info->netdev)
7538 + return;
7539 unregister_hdlc_device(info->netdev);
7540 free_netdev(info->netdev);
7541 info->netdev = NULL;
7542 diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
7543 index bb148dbfbb88f..47f2370ad85cb 100644
7544 --- a/drivers/tty/tty_buffer.c
7545 +++ b/drivers/tty/tty_buffer.c
7546 @@ -172,7 +172,8 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
7547 have queued and recycle that ? */
7548 if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
7549 return NULL;
7550 - p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
7551 + p = kmalloc(sizeof(struct tty_buffer) + 2 * size,
7552 + GFP_ATOMIC | __GFP_NOWARN);
7553 if (p == NULL)
7554 return NULL;
7555
7556 diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
7557 index 9e26b0143a59a..db16efe293e0b 100644
7558 --- a/drivers/usb/core/hcd-pci.c
7559 +++ b/drivers/usb/core/hcd-pci.c
7560 @@ -604,10 +604,10 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = {
7561 .suspend_noirq = hcd_pci_suspend_noirq,
7562 .resume_noirq = hcd_pci_resume_noirq,
7563 .resume = hcd_pci_resume,
7564 - .freeze = check_root_hub_suspended,
7565 + .freeze = hcd_pci_suspend,
7566 .freeze_noirq = check_root_hub_suspended,
7567 .thaw_noirq = NULL,
7568 - .thaw = NULL,
7569 + .thaw = hcd_pci_resume,
7570 .poweroff = hcd_pci_suspend,
7571 .poweroff_noirq = hcd_pci_suspend_noirq,
7572 .restore_noirq = hcd_pci_resume_noirq,
7573 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
7574 index 39203f2ce6a19..fde211519a973 100644
7575 --- a/drivers/usb/core/hcd.c
7576 +++ b/drivers/usb/core/hcd.c
7577 @@ -2657,6 +2657,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
7578 {
7579 int retval;
7580 struct usb_device *rhdev;
7581 + struct usb_hcd *shared_hcd;
7582
7583 if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
7584 hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
7585 @@ -2813,13 +2814,26 @@ int usb_add_hcd(struct usb_hcd *hcd,
7586 goto err_hcd_driver_start;
7587 }
7588
7589 + /* starting here, usbcore will pay attention to the shared HCD roothub */
7590 + shared_hcd = hcd->shared_hcd;
7591 + if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
7592 + retval = register_root_hub(shared_hcd);
7593 + if (retval != 0)
7594 + goto err_register_root_hub;
7595 +
7596 + if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
7597 + usb_hcd_poll_rh_status(shared_hcd);
7598 + }
7599 +
7600 /* starting here, usbcore will pay attention to this root hub */
7601 - retval = register_root_hub(hcd);
7602 - if (retval != 0)
7603 - goto err_register_root_hub;
7604 + if (!HCD_DEFER_RH_REGISTER(hcd)) {
7605 + retval = register_root_hub(hcd);
7606 + if (retval != 0)
7607 + goto err_register_root_hub;
7608
7609 - if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
7610 - usb_hcd_poll_rh_status(hcd);
7611 + if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
7612 + usb_hcd_poll_rh_status(hcd);
7613 + }
7614
7615 return retval;
7616
7617 @@ -2862,6 +2876,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
7618 void usb_remove_hcd(struct usb_hcd *hcd)
7619 {
7620 struct usb_device *rhdev = hcd->self.root_hub;
7621 + bool rh_registered;
7622
7623 dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
7624
7625 @@ -2872,6 +2887,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
7626
7627 dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
7628 spin_lock_irq (&hcd_root_hub_lock);
7629 + rh_registered = hcd->rh_registered;
7630 hcd->rh_registered = 0;
7631 spin_unlock_irq (&hcd_root_hub_lock);
7632
7633 @@ -2881,7 +2897,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
7634 cancel_work_sync(&hcd->died_work);
7635
7636 mutex_lock(&usb_bus_idr_lock);
7637 - usb_disconnect(&rhdev); /* Sets rhdev to NULL */
7638 + if (rh_registered)
7639 + usb_disconnect(&rhdev); /* Sets rhdev to NULL */
7640 mutex_unlock(&usb_bus_idr_lock);
7641
7642 /*
7643 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
7644 index d5f233fa6f3b4..f8f2de7899a94 100644
7645 --- a/drivers/usb/core/quirks.c
7646 +++ b/drivers/usb/core/quirks.c
7647 @@ -511,6 +511,9 @@ static const struct usb_device_id usb_quirk_list[] = {
7648 /* DJI CineSSD */
7649 { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
7650
7651 + /* DELL USB GEN2 */
7652 + { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
7653 +
7654 /* VCOM device */
7655 { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
7656
7657 diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
7658 index 379bbf27c7ce8..8fd6eefc671c7 100644
7659 --- a/drivers/usb/dwc2/gadget.c
7660 +++ b/drivers/usb/dwc2/gadget.c
7661 @@ -4486,7 +4486,6 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
7662
7663 WARN_ON(hsotg->driver);
7664
7665 - driver->driver.bus = NULL;
7666 hsotg->driver = driver;
7667 hsotg->gadget.dev.of_node = hsotg->dev->of_node;
7668 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
7669 diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
7670 index 99964f96ff747..955bf820f4102 100644
7671 --- a/drivers/usb/dwc3/dwc3-pci.c
7672 +++ b/drivers/usb/dwc3/dwc3-pci.c
7673 @@ -211,7 +211,7 @@ static void dwc3_pci_resume_work(struct work_struct *work)
7674 int ret;
7675
7676 ret = pm_runtime_get_sync(&dwc3->dev);
7677 - if (ret) {
7678 + if (ret < 0) {
7679 pm_runtime_put_sync_autosuspend(&dwc3->dev);
7680 return;
7681 }
7682 diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
7683 index a87c0b26279e7..00a4e12a1f158 100644
7684 --- a/drivers/usb/host/isp116x-hcd.c
7685 +++ b/drivers/usb/host/isp116x-hcd.c
7686 @@ -1541,10 +1541,12 @@ static int isp116x_remove(struct platform_device *pdev)
7687
7688 iounmap(isp116x->data_reg);
7689 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7690 - release_mem_region(res->start, 2);
7691 + if (res)
7692 + release_mem_region(res->start, 2);
7693 iounmap(isp116x->addr_reg);
7694 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
7695 - release_mem_region(res->start, 2);
7696 + if (res)
7697 + release_mem_region(res->start, 2);
7698
7699 usb_put_hcd(hcd);
7700 return 0;
7701 diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
7702 index 65985247fc00f..f05b6f2b08656 100644
7703 --- a/drivers/usb/host/oxu210hp-hcd.c
7704 +++ b/drivers/usb/host/oxu210hp-hcd.c
7705 @@ -3906,8 +3906,10 @@ static int oxu_bus_suspend(struct usb_hcd *hcd)
7706 }
7707 }
7708
7709 + spin_unlock_irq(&oxu->lock);
7710 /* turn off now-idle HC */
7711 del_timer_sync(&oxu->watchdog);
7712 + spin_lock_irq(&oxu->lock);
7713 ehci_halt(oxu);
7714 hcd->state = HC_STATE_SUSPENDED;
7715
7716 diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
7717 index 5c93226e0e20a..8def19fc50250 100644
7718 --- a/drivers/usb/musb/omap2430.c
7719 +++ b/drivers/usb/musb/omap2430.c
7720 @@ -433,6 +433,7 @@ static int omap2430_probe(struct platform_device *pdev)
7721 control_node = of_parse_phandle(np, "ctrl-module", 0);
7722 if (control_node) {
7723 control_pdev = of_find_device_by_node(control_node);
7724 + of_node_put(control_node);
7725 if (!control_pdev) {
7726 dev_err(&pdev->dev, "Failed to get control device\n");
7727 ret = -EINVAL;
7728 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
7729 index 1ba4a72047dcb..62f79fd5257bc 100644
7730 --- a/drivers/usb/serial/option.c
7731 +++ b/drivers/usb/serial/option.c
7732 @@ -1137,6 +1137,8 @@ static const struct usb_device_id option_ids[] = {
7733 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
7734 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
7735 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
7736 + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
7737 + .driver_info = RSVD(3) | ZLP },
7738 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
7739 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
7740 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
7741 diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
7742 index 05cec81dcd3f2..38ddfedef629c 100644
7743 --- a/drivers/usb/storage/karma.c
7744 +++ b/drivers/usb/storage/karma.c
7745 @@ -174,24 +174,25 @@ static void rio_karma_destructor(void *extra)
7746
7747 static int rio_karma_init(struct us_data *us)
7748 {
7749 - int ret = 0;
7750 struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO);
7751
7752 if (!data)
7753 - goto out;
7754 + return -ENOMEM;
7755
7756 data->recv = kmalloc(RIO_RECV_LEN, GFP_NOIO);
7757 if (!data->recv) {
7758 kfree(data);
7759 - goto out;
7760 + return -ENOMEM;
7761 }
7762
7763 us->extra = data;
7764 us->extra_destructor = rio_karma_destructor;
7765 - ret = rio_karma_send_command(RIO_ENTER_STORAGE, us);
7766 - data->in_storage = (ret == 0);
7767 -out:
7768 - return ret;
7769 + if (rio_karma_send_command(RIO_ENTER_STORAGE, us))
7770 + return -EIO;
7771 +
7772 + data->in_storage = 1;
7773 +
7774 + return 0;
7775 }
7776
7777 static struct scsi_host_template karma_host_template;
7778 diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
7779 index d8d3892e5a69a..3c6d452e3bf40 100644
7780 --- a/drivers/usb/usbip/stub_dev.c
7781 +++ b/drivers/usb/usbip/stub_dev.c
7782 @@ -393,7 +393,6 @@ static int stub_probe(struct usb_device *udev)
7783
7784 err_port:
7785 dev_set_drvdata(&udev->dev, NULL);
7786 - usb_put_dev(udev);
7787
7788 /* we already have busid_priv, just lock busid_lock */
7789 spin_lock(&busid_priv->busid_lock);
7790 @@ -408,6 +407,7 @@ call_put_busid_priv:
7791 put_busid_priv(busid_priv);
7792
7793 sdev_free:
7794 + usb_put_dev(udev);
7795 stub_device_free(sdev);
7796
7797 return rc;
7798 diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
7799 index e2b0195322340..d3d360ff0d24e 100644
7800 --- a/drivers/usb/usbip/stub_rx.c
7801 +++ b/drivers/usb/usbip/stub_rx.c
7802 @@ -138,7 +138,9 @@ static int tweak_set_configuration_cmd(struct urb *urb)
7803 req = (struct usb_ctrlrequest *) urb->setup_packet;
7804 config = le16_to_cpu(req->wValue);
7805
7806 + usb_lock_device(sdev->udev);
7807 err = usb_set_configuration(sdev->udev, config);
7808 + usb_unlock_device(sdev->udev);
7809 if (err && err != -ENODEV)
7810 dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
7811 config, err);
7812 diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
7813 index 4653de001e261..264cbe385a63b 100644
7814 --- a/drivers/vhost/vringh.c
7815 +++ b/drivers/vhost/vringh.c
7816 @@ -264,7 +264,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
7817 gfp_t gfp,
7818 int (*copy)(void *dst, const void *src, size_t len))
7819 {
7820 - int err, count = 0, up_next, desc_max;
7821 + int err, count = 0, indirect_count = 0, up_next, desc_max;
7822 struct vring_desc desc, *descs;
7823 struct vringh_range range = { -1ULL, 0 }, slowrange;
7824 bool slow = false;
7825 @@ -321,7 +321,12 @@ __vringh_iov(struct vringh *vrh, u16 i,
7826 continue;
7827 }
7828
7829 - if (count++ == vrh->vring.num) {
7830 + if (up_next == -1)
7831 + count++;
7832 + else
7833 + indirect_count++;
7834 +
7835 + if (count > vrh->vring.num || indirect_count > desc_max) {
7836 vringh_bad("Descriptor loop in %p", descs);
7837 err = -ELOOP;
7838 goto fail;
7839 @@ -383,6 +388,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
7840 i = return_from_indirect(vrh, &up_next,
7841 &descs, &desc_max);
7842 slow = false;
7843 + indirect_count = 0;
7844 } else
7845 break;
7846 }
7847 diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
7848 index 7de43be6ef2c2..3b7a7c74bf0a5 100644
7849 --- a/drivers/video/fbdev/amba-clcd.c
7850 +++ b/drivers/video/fbdev/amba-clcd.c
7851 @@ -774,12 +774,15 @@ static int clcdfb_of_vram_setup(struct clcd_fb *fb)
7852 return -ENODEV;
7853
7854 fb->fb.screen_base = of_iomap(memory, 0);
7855 - if (!fb->fb.screen_base)
7856 + if (!fb->fb.screen_base) {
7857 + of_node_put(memory);
7858 return -ENOMEM;
7859 + }
7860
7861 fb->fb.fix.smem_start = of_translate_address(memory,
7862 of_get_address(memory, 0, &size, NULL));
7863 fb->fb.fix.smem_len = size;
7864 + of_node_put(memory);
7865
7866 return 0;
7867 }
7868 diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
7869 index 75b7705140673..1decded4845f7 100644
7870 --- a/drivers/video/fbdev/core/fbcon.c
7871 +++ b/drivers/video/fbdev/core/fbcon.c
7872 @@ -3286,6 +3286,9 @@ static void fbcon_register_existing_fbs(struct work_struct *work)
7873
7874 console_lock();
7875
7876 + deferred_takeover = false;
7877 + logo_shown = FBCON_LOGO_DONTSHOW;
7878 +
7879 for_each_registered_fb(i)
7880 fbcon_fb_registered(registered_fb[i]);
7881
7882 @@ -3303,8 +3306,6 @@ static int fbcon_output_notifier(struct notifier_block *nb,
7883 pr_info("fbcon: Taking over console\n");
7884
7885 dummycon_unregister_output_notifier(&fbcon_output_nb);
7886 - deferred_takeover = false;
7887 - logo_shown = FBCON_LOGO_DONTSHOW;
7888
7889 /* We may get called in atomic context */
7890 schedule_work(&fbcon_deferred_takeover_work);
7891 diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
7892 index 74ffb446e00c9..7c4694d70dac1 100644
7893 --- a/drivers/video/fbdev/pxa3xx-gcu.c
7894 +++ b/drivers/video/fbdev/pxa3xx-gcu.c
7895 @@ -651,6 +651,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
7896 for (i = 0; i < 8; i++) {
7897 ret = pxa3xx_gcu_add_buffer(dev, priv);
7898 if (ret) {
7899 + pxa3xx_gcu_free_buffers(dev, priv);
7900 dev_err(dev, "failed to allocate DMA memory\n");
7901 goto err_disable_clk;
7902 }
7903 @@ -667,15 +668,15 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
7904 SHARED_SIZE, irq);
7905 return 0;
7906
7907 -err_free_dma:
7908 - dma_free_coherent(dev, SHARED_SIZE,
7909 - priv->shared, priv->shared_phys);
7910 +err_disable_clk:
7911 + clk_disable_unprepare(priv->clk);
7912
7913 err_misc_deregister:
7914 misc_deregister(&priv->misc_dev);
7915
7916 -err_disable_clk:
7917 - clk_disable_unprepare(priv->clk);
7918 +err_free_dma:
7919 + dma_free_coherent(dev, SHARED_SIZE,
7920 + priv->shared, priv->shared_phys);
7921
7922 return ret;
7923 }
7924 @@ -688,6 +689,7 @@ static int pxa3xx_gcu_remove(struct platform_device *pdev)
7925 pxa3xx_gcu_wait_idle(priv);
7926 misc_deregister(&priv->misc_dev);
7927 dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
7928 + clk_disable_unprepare(priv->clk);
7929 pxa3xx_gcu_free_buffers(dev, priv);
7930
7931 return 0;
7932 diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c
7933 index c137ad2bd5c31..0ea554c7cda57 100644
7934 --- a/drivers/watchdog/ts4800_wdt.c
7935 +++ b/drivers/watchdog/ts4800_wdt.c
7936 @@ -125,13 +125,16 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
7937 ret = of_property_read_u32_index(np, "syscon", 1, &reg);
7938 if (ret < 0) {
7939 dev_err(dev, "no offset in syscon\n");
7940 + of_node_put(syscon_np);
7941 return ret;
7942 }
7943
7944 /* allocate memory for watchdog struct */
7945 wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
7946 - if (!wdt)
7947 + if (!wdt) {
7948 + of_node_put(syscon_np);
7949 return -ENOMEM;
7950 + }
7951
7952 /* set regmap and offset to know where to write */
7953 wdt->feed_offset = reg;
7954 diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
7955 index 88c5e6361aa05..fddbb39433bee 100644
7956 --- a/drivers/watchdog/wdat_wdt.c
7957 +++ b/drivers/watchdog/wdat_wdt.c
7958 @@ -462,6 +462,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
7959 return ret;
7960
7961 watchdog_set_nowayout(&wdat->wdd, nowayout);
7962 + watchdog_stop_on_reboot(&wdat->wdd);
7963 return devm_watchdog_register_device(dev, &wdat->wdd);
7964 }
7965
7966 diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
7967 index 7b1077f0abcb0..c8aa4f5f85db1 100644
7968 --- a/drivers/xen/xlate_mmu.c
7969 +++ b/drivers/xen/xlate_mmu.c
7970 @@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
7971
7972 return 0;
7973 }
7974 -EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
7975
7976 struct remap_pfn {
7977 struct mm_struct *mm;
7978 diff --git a/fs/afs/dir.c b/fs/afs/dir.c
7979 index 8c39533d122a5..3a355a209919b 100644
7980 --- a/fs/afs/dir.c
7981 +++ b/fs/afs/dir.c
7982 @@ -415,8 +415,11 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
7983 }
7984
7985 /* skip if starts before the current position */
7986 - if (offset < curr)
7987 + if (offset < curr) {
7988 + if (next > curr)
7989 + ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
7990 continue;
7991 + }
7992
7993 /* found the next entry */
7994 if (!dir_emit(ctx, dire->u.name, nlen,
7995 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
7996 index 196f9f64d075c..c999bc0c0691f 100644
7997 --- a/fs/binfmt_flat.c
7998 +++ b/fs/binfmt_flat.c
7999 @@ -422,6 +422,30 @@ static void old_reloc(unsigned long rl)
8000
8001 /****************************************************************************/
8002
8003 +static inline u32 __user *skip_got_header(u32 __user *rp)
8004 +{
8005 + if (IS_ENABLED(CONFIG_RISCV)) {
8006 + /*
8007 + * RISC-V has a 16 byte GOT PLT header for elf64-riscv
8008 + * and 8 byte GOT PLT header for elf32-riscv.
8009 + * Skip the whole GOT PLT header, since it is reserved
8010 + * for the dynamic linker (ld.so).
8011 + */
8012 + u32 rp_val0, rp_val1;
8013 +
8014 + if (get_user(rp_val0, rp))
8015 + return rp;
8016 + if (get_user(rp_val1, rp + 1))
8017 + return rp;
8018 +
8019 + if (rp_val0 == 0xffffffff && rp_val1 == 0xffffffff)
8020 + rp += 4;
8021 + else if (rp_val0 == 0xffffffff)
8022 + rp += 2;
8023 + }
8024 + return rp;
8025 +}
8026 +
8027 static int load_flat_file(struct linux_binprm *bprm,
8028 struct lib_info *libinfo, int id, unsigned long *extra_stack)
8029 {
8030 @@ -769,7 +793,8 @@ static int load_flat_file(struct linux_binprm *bprm,
8031 * image.
8032 */
8033 if (flags & FLAT_FLAG_GOTPIC) {
8034 - for (rp = (u32 __user *)datapos; ; rp++) {
8035 + rp = skip_got_header((u32 __user *) datapos);
8036 + for (; ; rp++) {
8037 u32 addr, rp_val;
8038 if (get_user(rp_val, rp))
8039 return -EFAULT;
8040 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
8041 index f18c6d97932ed..a4b3e6f6bf021 100644
8042 --- a/fs/btrfs/disk-io.c
8043 +++ b/fs/btrfs/disk-io.c
8044 @@ -2927,7 +2927,7 @@ int open_ctree(struct super_block *sb,
8045 ~BTRFS_FEATURE_INCOMPAT_SUPP;
8046 if (features) {
8047 btrfs_err(fs_info,
8048 - "cannot mount because of unsupported optional features (%llx)",
8049 + "cannot mount because of unsupported optional features (0x%llx)",
8050 features);
8051 err = -EINVAL;
8052 goto fail_csum;
8053 @@ -2965,7 +2965,7 @@ int open_ctree(struct super_block *sb,
8054 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
8055 if (!sb_rdonly(sb) && features) {
8056 btrfs_err(fs_info,
8057 - "cannot mount read-write because of unsupported optional features (%llx)",
8058 + "cannot mount read-write because of unsupported optional features (0x%llx)",
8059 features);
8060 err = -EINVAL;
8061 goto fail_csum;
8062 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
8063 index 8898682c91038..c7706a769de12 100644
8064 --- a/fs/btrfs/volumes.c
8065 +++ b/fs/btrfs/volumes.c
8066 @@ -7383,12 +7383,12 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
8067 * do another round of validation checks.
8068 */
8069 if (total_dev != fs_info->fs_devices->total_devices) {
8070 - btrfs_err(fs_info,
8071 - "super_num_devices %llu mismatch with num_devices %llu found here",
8072 + btrfs_warn(fs_info,
8073 +"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
8074 btrfs_super_num_devices(fs_info->super_copy),
8075 total_dev);
8076 - ret = -EINVAL;
8077 - goto error;
8078 + fs_info->fs_devices->total_devices = total_dev;
8079 + btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
8080 }
8081 if (btrfs_super_total_bytes(fs_info->super_copy) <
8082 fs_info->fs_devices->total_rw_bytes) {
8083 diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
8084 index cb18ee637cb7b..4bcf0226818dc 100644
8085 --- a/fs/ceph/xattr.c
8086 +++ b/fs/ceph/xattr.c
8087 @@ -316,6 +316,14 @@ static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
8088 }
8089 #define XATTR_RSTAT_FIELD(_type, _name) \
8090 XATTR_NAME_CEPH(_type, _name, VXATTR_FLAG_RSTAT)
8091 +#define XATTR_RSTAT_FIELD_UPDATABLE(_type, _name) \
8092 + { \
8093 + .name = CEPH_XATTR_NAME(_type, _name), \
8094 + .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
8095 + .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
8096 + .exists_cb = NULL, \
8097 + .flags = VXATTR_FLAG_RSTAT, \
8098 + }
8099 #define XATTR_LAYOUT_FIELD(_type, _name, _field) \
8100 { \
8101 .name = CEPH_XATTR_NAME2(_type, _name, _field), \
8102 @@ -353,7 +361,7 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = {
8103 XATTR_RSTAT_FIELD(dir, rfiles),
8104 XATTR_RSTAT_FIELD(dir, rsubdirs),
8105 XATTR_RSTAT_FIELD(dir, rbytes),
8106 - XATTR_RSTAT_FIELD(dir, rctime),
8107 + XATTR_RSTAT_FIELD_UPDATABLE(dir, rctime),
8108 {
8109 .name = "ceph.dir.pin",
8110 .name_size = sizeof("ceph.dir.pin"),
8111 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
8112 index 9c0e348cb00f7..414936989255a 100644
8113 --- a/fs/cifs/cifsglob.h
8114 +++ b/fs/cifs/cifsglob.h
8115 @@ -1930,11 +1930,13 @@ extern mempool_t *cifs_mid_poolp;
8116
8117 /* Operations for different SMB versions */
8118 #define SMB1_VERSION_STRING "1.0"
8119 +#define SMB20_VERSION_STRING "2.0"
8120 +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
8121 extern struct smb_version_operations smb1_operations;
8122 extern struct smb_version_values smb1_values;
8123 -#define SMB20_VERSION_STRING "2.0"
8124 extern struct smb_version_operations smb20_operations;
8125 extern struct smb_version_values smb20_values;
8126 +#endif /* CIFS_ALLOW_INSECURE_LEGACY */
8127 #define SMB21_VERSION_STRING "2.1"
8128 extern struct smb_version_operations smb21_operations;
8129 extern struct smb_version_values smb21_values;
8130 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
8131 index 7985fe25850b7..57164563eec69 100644
8132 --- a/fs/cifs/smb2ops.c
8133 +++ b/fs/cifs/smb2ops.c
8134 @@ -3487,11 +3487,13 @@ smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
8135 }
8136 }
8137
8138 +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
8139 static bool
8140 smb2_is_read_op(__u32 oplock)
8141 {
8142 return oplock == SMB2_OPLOCK_LEVEL_II;
8143 }
8144 +#endif /* CIFS_ALLOW_INSECURE_LEGACY */
8145
8146 static bool
8147 smb21_is_read_op(__u32 oplock)
8148 @@ -4573,7 +4575,7 @@ out:
8149 return rc;
8150 }
8151
8152 -
8153 +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
8154 struct smb_version_operations smb20_operations = {
8155 .compare_fids = smb2_compare_fids,
8156 .setup_request = smb2_setup_request,
8157 @@ -4670,6 +4672,7 @@ struct smb_version_operations smb20_operations = {
8158 .fiemap = smb3_fiemap,
8159 .llseek = smb3_llseek,
8160 };
8161 +#endif /* CIFS_ALLOW_INSECURE_LEGACY */
8162
8163 struct smb_version_operations smb21_operations = {
8164 .compare_fids = smb2_compare_fids,
8165 @@ -4987,6 +4990,7 @@ struct smb_version_operations smb311_operations = {
8166 .llseek = smb3_llseek,
8167 };
8168
8169 +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
8170 struct smb_version_values smb20_values = {
8171 .version_string = SMB20_VERSION_STRING,
8172 .protocol_id = SMB20_PROT_ID,
8173 @@ -5007,6 +5011,7 @@ struct smb_version_values smb20_values = {
8174 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
8175 .create_lease_size = sizeof(struct create_lease),
8176 };
8177 +#endif /* ALLOW_INSECURE_LEGACY */
8178
8179 struct smb_version_values smb21_values = {
8180 .version_string = SMB21_VERSION_STRING,
8181 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
8182 index e068f82ffeddf..0857eb7a95e28 100644
8183 --- a/fs/cifs/smb2pdu.c
8184 +++ b/fs/cifs/smb2pdu.c
8185 @@ -356,6 +356,9 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
8186 rc = -EHOSTDOWN;
8187 mutex_unlock(&tcon->ses->session_mutex);
8188 goto failed;
8189 + } else if (rc) {
8190 + mutex_unlock(&ses->session_mutex);
8191 + goto out;
8192 }
8193 }
8194 if (rc || !tcon->need_reconnect) {
8195 diff --git a/fs/dax.c b/fs/dax.c
8196 index 12953e892bb25..bcb7c6b43fb2b 100644
8197 --- a/fs/dax.c
8198 +++ b/fs/dax.c
8199 @@ -819,7 +819,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
8200 if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
8201 goto unlock_pmd;
8202
8203 - flush_cache_page(vma, address, pfn);
8204 + flush_cache_range(vma, address,
8205 + address + HPAGE_PMD_SIZE);
8206 pmd = pmdp_invalidate(vma, address, pmdp);
8207 pmd = pmd_wrprotect(pmd);
8208 pmd = pmd_mkclean(pmd);
8209 diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
8210 index 53500b555bfa8..4ae8becdb51db 100644
8211 --- a/fs/dlm/lock.c
8212 +++ b/fs/dlm/lock.c
8213 @@ -1551,6 +1551,7 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
8214 lkb->lkb_wait_type = 0;
8215 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
8216 lkb->lkb_wait_count--;
8217 + unhold_lkb(lkb);
8218 goto out_del;
8219 }
8220
8221 @@ -1577,6 +1578,7 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
8222 log_error(ls, "remwait error %x reply %d wait_type %d overlap",
8223 lkb->lkb_id, mstype, lkb->lkb_wait_type);
8224 lkb->lkb_wait_count--;
8225 + unhold_lkb(lkb);
8226 lkb->lkb_wait_type = 0;
8227 }
8228
8229 @@ -5312,11 +5314,16 @@ int dlm_recover_waiters_post(struct dlm_ls *ls)
8230 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
8231 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
8232 lkb->lkb_wait_type = 0;
8233 - lkb->lkb_wait_count = 0;
8234 + /* drop all wait_count references we still
8235 + * hold a reference for this iteration.
8236 + */
8237 + while (lkb->lkb_wait_count) {
8238 + lkb->lkb_wait_count--;
8239 + unhold_lkb(lkb);
8240 + }
8241 mutex_lock(&ls->ls_waiters_mutex);
8242 list_del_init(&lkb->lkb_wait_reply);
8243 mutex_unlock(&ls->ls_waiters_mutex);
8244 - unhold_lkb(lkb); /* for waiters list */
8245
8246 if (oc || ou) {
8247 /* do an unlock or cancel instead of resending */
8248 diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
8249 index c38b2b8ffd1d3..a10d2bcfe75a8 100644
8250 --- a/fs/dlm/plock.c
8251 +++ b/fs/dlm/plock.c
8252 @@ -23,11 +23,11 @@ struct plock_op {
8253 struct list_head list;
8254 int done;
8255 struct dlm_plock_info info;
8256 + int (*callback)(struct file_lock *fl, int result);
8257 };
8258
8259 struct plock_xop {
8260 struct plock_op xop;
8261 - int (*callback)(struct file_lock *fl, int result);
8262 void *fl;
8263 void *file;
8264 struct file_lock flc;
8265 @@ -129,19 +129,18 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
8266 /* fl_owner is lockd which doesn't distinguish
8267 processes on the nfs client */
8268 op->info.owner = (__u64) fl->fl_pid;
8269 - xop->callback = fl->fl_lmops->lm_grant;
8270 + op->callback = fl->fl_lmops->lm_grant;
8271 locks_init_lock(&xop->flc);
8272 locks_copy_lock(&xop->flc, fl);
8273 xop->fl = fl;
8274 xop->file = file;
8275 } else {
8276 op->info.owner = (__u64)(long) fl->fl_owner;
8277 - xop->callback = NULL;
8278 }
8279
8280 send_op(op);
8281
8282 - if (xop->callback == NULL) {
8283 + if (!op->callback) {
8284 rv = wait_event_interruptible(recv_wq, (op->done != 0));
8285 if (rv == -ERESTARTSYS) {
8286 log_debug(ls, "dlm_posix_lock: wait killed %llx",
8287 @@ -203,7 +202,7 @@ static int dlm_plock_callback(struct plock_op *op)
8288 file = xop->file;
8289 flc = &xop->flc;
8290 fl = xop->fl;
8291 - notify = xop->callback;
8292 + notify = op->callback;
8293
8294 if (op->info.rv) {
8295 notify(fl, op->info.rv);
8296 @@ -436,10 +435,9 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
8297 if (op->info.fsid == info.fsid &&
8298 op->info.number == info.number &&
8299 op->info.owner == info.owner) {
8300 - struct plock_xop *xop = (struct plock_xop *)op;
8301 list_del_init(&op->list);
8302 memcpy(&op->info, &info, sizeof(info));
8303 - if (xop->callback)
8304 + if (op->callback)
8305 do_callback = 1;
8306 else
8307 op->done = 1;
8308 diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
8309 index 8f665aa1d706e..62384ae77a78f 100644
8310 --- a/fs/ext4/inline.c
8311 +++ b/fs/ext4/inline.c
8312 @@ -2013,6 +2013,18 @@ int ext4_convert_inline_data(struct inode *inode)
8313 if (!ext4_has_inline_data(inode)) {
8314 ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
8315 return 0;
8316 + } else if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
8317 + /*
8318 + * Inode has inline data but EXT4_STATE_MAY_INLINE_DATA is
8319 + * cleared. This means we are in the middle of moving of
8320 + * inline data to delay allocated block. Just force writeout
8321 + * here to finish conversion.
8322 + */
8323 + error = filemap_flush(inode->i_mapping);
8324 + if (error)
8325 + return error;
8326 + if (!ext4_has_inline_data(inode))
8327 + return 0;
8328 }
8329
8330 needed_blocks = ext4_writepage_trans_blocks(inode);
8331 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
8332 index 00686fbe3c27d..1cac574911a79 100644
8333 --- a/fs/ext4/inode.c
8334 +++ b/fs/ext4/inode.c
8335 @@ -5668,6 +5668,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
8336 if (attr->ia_valid & ATTR_SIZE) {
8337 handle_t *handle;
8338 loff_t oldsize = inode->i_size;
8339 + loff_t old_disksize;
8340 int shrink = (attr->ia_size < inode->i_size);
8341
8342 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
8343 @@ -5723,6 +5724,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
8344 inode->i_ctime = inode->i_mtime;
8345 }
8346 down_write(&EXT4_I(inode)->i_data_sem);
8347 + old_disksize = EXT4_I(inode)->i_disksize;
8348 EXT4_I(inode)->i_disksize = attr->ia_size;
8349 rc = ext4_mark_inode_dirty(handle, inode);
8350 if (!error)
8351 @@ -5734,6 +5736,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
8352 */
8353 if (!error)
8354 i_size_write(inode, attr->ia_size);
8355 + else
8356 + EXT4_I(inode)->i_disksize = old_disksize;
8357 up_write(&EXT4_I(inode)->i_data_sem);
8358 ext4_journal_stop(handle);
8359 if (error)
8360 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
8361 index f10307215d583..b01059bb562c0 100644
8362 --- a/fs/ext4/namei.c
8363 +++ b/fs/ext4/namei.c
8364 @@ -273,9 +273,9 @@ static struct dx_frame *dx_probe(struct ext4_filename *fname,
8365 struct dx_hash_info *hinfo,
8366 struct dx_frame *frame);
8367 static void dx_release(struct dx_frame *frames);
8368 -static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
8369 - unsigned blocksize, struct dx_hash_info *hinfo,
8370 - struct dx_map_entry map[]);
8371 +static int dx_make_map(struct inode *dir, struct buffer_head *bh,
8372 + struct dx_hash_info *hinfo,
8373 + struct dx_map_entry *map_tail);
8374 static void dx_sort_map(struct dx_map_entry *map, unsigned count);
8375 static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
8376 struct dx_map_entry *offsets, int count, unsigned blocksize);
8377 @@ -750,12 +750,14 @@ static struct dx_frame *
8378 dx_probe(struct ext4_filename *fname, struct inode *dir,
8379 struct dx_hash_info *hinfo, struct dx_frame *frame_in)
8380 {
8381 - unsigned count, indirect;
8382 + unsigned count, indirect, level, i;
8383 struct dx_entry *at, *entries, *p, *q, *m;
8384 struct dx_root *root;
8385 struct dx_frame *frame = frame_in;
8386 struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
8387 u32 hash;
8388 + ext4_lblk_t block;
8389 + ext4_lblk_t blocks[EXT4_HTREE_LEVEL];
8390
8391 memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
8392 frame->bh = ext4_read_dirblock(dir, 0, INDEX);
8393 @@ -811,6 +813,8 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
8394 }
8395
8396 dxtrace(printk("Look up %x", hash));
8397 + level = 0;
8398 + blocks[0] = 0;
8399 while (1) {
8400 count = dx_get_count(entries);
8401 if (!count || count > dx_get_limit(entries)) {
8402 @@ -852,15 +856,27 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
8403 dx_get_block(at)));
8404 frame->entries = entries;
8405 frame->at = at;
8406 - if (!indirect--)
8407 +
8408 + block = dx_get_block(at);
8409 + for (i = 0; i <= level; i++) {
8410 + if (blocks[i] == block) {
8411 + ext4_warning_inode(dir,
8412 + "dx entry: tree cycle block %u points back to block %u",
8413 + blocks[level], block);
8414 + goto fail;
8415 + }
8416 + }
8417 + if (++level > indirect)
8418 return frame;
8419 + blocks[level] = block;
8420 frame++;
8421 - frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
8422 + frame->bh = ext4_read_dirblock(dir, block, INDEX);
8423 if (IS_ERR(frame->bh)) {
8424 ret_err = (struct dx_frame *) frame->bh;
8425 frame->bh = NULL;
8426 goto fail;
8427 }
8428 +
8429 entries = ((struct dx_node *) frame->bh->b_data)->entries;
8430
8431 if (dx_get_limit(entries) != dx_node_limit(dir)) {
8432 @@ -1205,15 +1221,23 @@ static inline int search_dirblock(struct buffer_head *bh,
8433 * Create map of hash values, offsets, and sizes, stored at end of block.
8434 * Returns number of entries mapped.
8435 */
8436 -static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
8437 - unsigned blocksize, struct dx_hash_info *hinfo,
8438 +static int dx_make_map(struct inode *dir, struct buffer_head *bh,
8439 + struct dx_hash_info *hinfo,
8440 struct dx_map_entry *map_tail)
8441 {
8442 int count = 0;
8443 - char *base = (char *) de;
8444 + struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)bh->b_data;
8445 + unsigned int buflen = bh->b_size;
8446 + char *base = bh->b_data;
8447 struct dx_hash_info h = *hinfo;
8448
8449 - while ((char *) de < base + blocksize) {
8450 + if (ext4_has_metadata_csum(dir->i_sb))
8451 + buflen -= sizeof(struct ext4_dir_entry_tail);
8452 +
8453 + while ((char *) de < base + buflen) {
8454 + if (ext4_check_dir_entry(dir, NULL, de, bh, base, buflen,
8455 + ((char *)de) - base))
8456 + return -EFSCORRUPTED;
8457 if (de->name_len && de->inode) {
8458 ext4fs_dirhash(dir, de->name, de->name_len, &h);
8459 map_tail--;
8460 @@ -1223,8 +1247,7 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
8461 count++;
8462 cond_resched();
8463 }
8464 - /* XXX: do we need to check rec_len == 0 case? -Chris */
8465 - de = ext4_next_entry(de, blocksize);
8466 + de = ext4_next_entry(de, dir->i_sb->s_blocksize);
8467 }
8468 return count;
8469 }
8470 @@ -1848,8 +1871,11 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
8471
8472 /* create map in the end of data2 block */
8473 map = (struct dx_map_entry *) (data2 + blocksize);
8474 - count = dx_make_map(dir, (struct ext4_dir_entry_2 *) data1,
8475 - blocksize, hinfo, map);
8476 + count = dx_make_map(dir, *bh, hinfo, map);
8477 + if (count < 0) {
8478 + err = count;
8479 + goto journal_error;
8480 + }
8481 map -= count;
8482 dx_sort_map(map, count);
8483 /* Ensure that neither split block is over half full */
8484 @@ -3442,6 +3468,9 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
8485 struct buffer_head *bh;
8486
8487 if (!ext4_has_inline_data(inode)) {
8488 + struct ext4_dir_entry_2 *de;
8489 + unsigned int offset;
8490 +
8491 /* The first directory block must not be a hole, so
8492 * treat it as DIRENT_HTREE
8493 */
8494 @@ -3450,9 +3479,30 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
8495 *retval = PTR_ERR(bh);
8496 return NULL;
8497 }
8498 - *parent_de = ext4_next_entry(
8499 - (struct ext4_dir_entry_2 *)bh->b_data,
8500 - inode->i_sb->s_blocksize);
8501 +
8502 + de = (struct ext4_dir_entry_2 *) bh->b_data;
8503 + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data,
8504 + bh->b_size, 0) ||
8505 + le32_to_cpu(de->inode) != inode->i_ino ||
8506 + strcmp(".", de->name)) {
8507 + EXT4_ERROR_INODE(inode, "directory missing '.'");
8508 + brelse(bh);
8509 + *retval = -EFSCORRUPTED;
8510 + return NULL;
8511 + }
8512 + offset = ext4_rec_len_from_disk(de->rec_len,
8513 + inode->i_sb->s_blocksize);
8514 + de = ext4_next_entry(de, inode->i_sb->s_blocksize);
8515 + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data,
8516 + bh->b_size, offset) ||
8517 + le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
8518 + EXT4_ERROR_INODE(inode, "directory missing '..'");
8519 + brelse(bh);
8520 + *retval = -EFSCORRUPTED;
8521 + return NULL;
8522 + }
8523 + *parent_de = de;
8524 +
8525 return bh;
8526 }
8527
8528 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
8529 index c13879bd21683..eba2506f43991 100644
8530 --- a/fs/ext4/super.c
8531 +++ b/fs/ext4/super.c
8532 @@ -1703,6 +1703,7 @@ static const struct mount_opts {
8533 MOPT_EXT4_ONLY | MOPT_CLEAR},
8534 {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
8535 {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
8536 + {Opt_commit, 0, MOPT_NO_EXT2},
8537 {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
8538 MOPT_EXT4_ONLY | MOPT_CLEAR},
8539 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
8540 diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
8541 index 54f0d2c4c7d87..44c5110e18f04 100644
8542 --- a/fs/f2fs/checkpoint.c
8543 +++ b/fs/f2fs/checkpoint.c
8544 @@ -149,7 +149,7 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
8545 f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
8546 blkaddr, exist);
8547 set_sbi_flag(sbi, SBI_NEED_FSCK);
8548 - WARN_ON(1);
8549 + dump_stack();
8550 }
8551 return exist;
8552 }
8553 @@ -187,7 +187,7 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
8554 f2fs_warn(sbi, "access invalid blkaddr:%u",
8555 blkaddr);
8556 set_sbi_flag(sbi, SBI_NEED_FSCK);
8557 - WARN_ON(1);
8558 + dump_stack();
8559 return false;
8560 } else {
8561 return __is_bitmap_valid(sbi, blkaddr, type);
8562 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
8563 index 5645502c156df..c73a1638c18b4 100644
8564 --- a/fs/f2fs/f2fs.h
8565 +++ b/fs/f2fs/f2fs.h
8566 @@ -2100,11 +2100,17 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
8567 {
8568 spin_lock(&sbi->stat_lock);
8569
8570 - f2fs_bug_on(sbi, !sbi->total_valid_block_count);
8571 - f2fs_bug_on(sbi, !sbi->total_valid_node_count);
8572 + if (unlikely(!sbi->total_valid_block_count ||
8573 + !sbi->total_valid_node_count)) {
8574 + f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
8575 + sbi->total_valid_block_count,
8576 + sbi->total_valid_node_count);
8577 + set_sbi_flag(sbi, SBI_NEED_FSCK);
8578 + } else {
8579 + sbi->total_valid_block_count--;
8580 + sbi->total_valid_node_count--;
8581 + }
8582
8583 - sbi->total_valid_node_count--;
8584 - sbi->total_valid_block_count--;
8585 if (sbi->reserved_blocks &&
8586 sbi->current_reserved_blocks < sbi->reserved_blocks)
8587 sbi->current_reserved_blocks++;
8588 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
8589 index 516007bb1ced1..ef08ef0170306 100644
8590 --- a/fs/f2fs/file.c
8591 +++ b/fs/f2fs/file.c
8592 @@ -1320,11 +1320,19 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
8593 ret = -ENOSPC;
8594 break;
8595 }
8596 - if (dn->data_blkaddr != NEW_ADDR) {
8597 - f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
8598 - dn->data_blkaddr = NEW_ADDR;
8599 - f2fs_set_data_blkaddr(dn);
8600 +
8601 + if (dn->data_blkaddr == NEW_ADDR)
8602 + continue;
8603 +
8604 + if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
8605 + DATA_GENERIC_ENHANCE)) {
8606 + ret = -EFSCORRUPTED;
8607 + break;
8608 }
8609 +
8610 + f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
8611 + dn->data_blkaddr = NEW_ADDR;
8612 + f2fs_set_data_blkaddr(dn);
8613 }
8614
8615 f2fs_update_extent_cache_range(dn, start, 0, index - start);
8616 @@ -1600,6 +1608,10 @@ static long f2fs_fallocate(struct file *file, int mode,
8617
8618 inode_lock(inode);
8619
8620 + ret = file_modified(file);
8621 + if (ret)
8622 + goto out;
8623 +
8624 if (mode & FALLOC_FL_PUNCH_HOLE) {
8625 if (offset >= inode->i_size)
8626 goto out;
8627 diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
8628 index 264c19e177797..b5536570707c2 100644
8629 --- a/fs/f2fs/inode.c
8630 +++ b/fs/f2fs/inode.c
8631 @@ -689,8 +689,22 @@ retry:
8632 f2fs_lock_op(sbi);
8633 err = f2fs_remove_inode_page(inode);
8634 f2fs_unlock_op(sbi);
8635 - if (err == -ENOENT)
8636 + if (err == -ENOENT) {
8637 err = 0;
8638 +
8639 + /*
8640 + * in fuzzed image, another node may has the same
8641 + * block address as inode's, if it was truncated
8642 + * previously, truncation of inode node will fail.
8643 + */
8644 + if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
8645 + f2fs_warn(F2FS_I_SB(inode),
8646 + "f2fs_evict_inode: inconsistent node id, ino:%lu",
8647 + inode->i_ino);
8648 + f2fs_inode_synced(inode);
8649 + set_sbi_flag(sbi, SBI_NEED_FSCK);
8650 + }
8651 + }
8652 }
8653
8654 /* give more chances, if ENOMEM case */
8655 diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
8656 index 78c54bb7898df..7759323bd7751 100644
8657 --- a/fs/f2fs/segment.c
8658 +++ b/fs/f2fs/segment.c
8659 @@ -352,16 +352,19 @@ void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
8660 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
8661 struct list_head *head = &fi->inmem_pages;
8662 struct inmem_pages *cur = NULL;
8663 + struct inmem_pages *tmp;
8664
8665 f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
8666
8667 mutex_lock(&fi->inmem_lock);
8668 - list_for_each_entry(cur, head, list) {
8669 - if (cur->page == page)
8670 + list_for_each_entry(tmp, head, list) {
8671 + if (tmp->page == page) {
8672 + cur = tmp;
8673 break;
8674 + }
8675 }
8676
8677 - f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
8678 + f2fs_bug_on(sbi, !cur);
8679 list_del(&cur->list);
8680 mutex_unlock(&fi->inmem_lock);
8681
8682 diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
8683 index 15b343f656093..f39620f475425 100644
8684 --- a/fs/f2fs/segment.h
8685 +++ b/fs/f2fs/segment.h
8686 @@ -542,11 +542,10 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
8687 return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
8688 }
8689
8690 -static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
8691 +static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
8692 + unsigned int node_blocks, unsigned int dent_blocks)
8693 {
8694 - unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
8695 - get_pages(sbi, F2FS_DIRTY_DENTS);
8696 - unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
8697 +
8698 unsigned int segno, left_blocks;
8699 int i;
8700
8701 @@ -572,19 +571,28 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
8702 static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
8703 int freed, int needed)
8704 {
8705 - int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
8706 - int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
8707 - int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
8708 + unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
8709 + get_pages(sbi, F2FS_DIRTY_DENTS) +
8710 + get_pages(sbi, F2FS_DIRTY_IMETA);
8711 + unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
8712 + unsigned int node_secs = total_node_blocks / BLKS_PER_SEC(sbi);
8713 + unsigned int dent_secs = total_dent_blocks / BLKS_PER_SEC(sbi);
8714 + unsigned int node_blocks = total_node_blocks % BLKS_PER_SEC(sbi);
8715 + unsigned int dent_blocks = total_dent_blocks % BLKS_PER_SEC(sbi);
8716 + unsigned int free, need_lower, need_upper;
8717
8718 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
8719 return false;
8720
8721 - if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
8722 - has_curseg_enough_space(sbi))
8723 + free = free_sections(sbi) + freed;
8724 + need_lower = node_secs + dent_secs + reserved_sections(sbi) + needed;
8725 + need_upper = need_lower + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);
8726 +
8727 + if (free > need_upper)
8728 return false;
8729 - return (free_sections(sbi) + freed) <=
8730 - (node_secs + 2 * dent_secs + imeta_secs +
8731 - reserved_sections(sbi) + needed);
8732 + else if (free <= need_lower)
8733 + return true;
8734 + return !has_curseg_enough_space(sbi, node_blocks, dent_blocks);
8735 }
8736
8737 static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
8738 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
8739 index 6bd8a944902ef..232c99e4a1ee9 100644
8740 --- a/fs/f2fs/super.c
8741 +++ b/fs/f2fs/super.c
8742 @@ -2080,7 +2080,8 @@ int f2fs_quota_sync(struct super_block *sb, int type)
8743 if (!sb_has_quota_active(sb, cnt))
8744 continue;
8745
8746 - inode_lock(dqopt->files[cnt]);
8747 + if (!f2fs_sb_has_quota_ino(sbi))
8748 + inode_lock(dqopt->files[cnt]);
8749
8750 /*
8751 * do_quotactl
8752 @@ -2099,7 +2100,8 @@ int f2fs_quota_sync(struct super_block *sb, int type)
8753 up_read(&sbi->quota_sem);
8754 f2fs_unlock_op(sbi);
8755
8756 - inode_unlock(dqopt->files[cnt]);
8757 + if (!f2fs_sb_has_quota_ino(sbi))
8758 + inode_unlock(dqopt->files[cnt]);
8759
8760 if (ret)
8761 break;
8762 diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
8763 index 3647c65a0f482..0191eb1dc7f66 100644
8764 --- a/fs/fat/fatent.c
8765 +++ b/fs/fat/fatent.c
8766 @@ -93,7 +93,8 @@ static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
8767 err_brelse:
8768 brelse(bhs[0]);
8769 err:
8770 - fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
8771 + fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
8772 + (llu)blocknr);
8773 return -EIO;
8774 }
8775
8776 @@ -106,8 +107,8 @@ static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
8777 fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
8778 fatent->bhs[0] = sb_bread(sb, blocknr);
8779 if (!fatent->bhs[0]) {
8780 - fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
8781 - (llu)blocknr);
8782 + fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
8783 + (llu)blocknr);
8784 return -EIO;
8785 }
8786 fatent->nr_bhs = 1;
8787 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
8788 index 22e9c88f3960a..5b3a288e0f14b 100644
8789 --- a/fs/fs-writeback.c
8790 +++ b/fs/fs-writeback.c
8791 @@ -1650,11 +1650,12 @@ static long writeback_sb_inodes(struct super_block *sb,
8792 };
8793 unsigned long start_time = jiffies;
8794 long write_chunk;
8795 - long wrote = 0; /* count both pages and inodes */
8796 + long total_wrote = 0; /* count both pages and inodes */
8797
8798 while (!list_empty(&wb->b_io)) {
8799 struct inode *inode = wb_inode(wb->b_io.prev);
8800 struct bdi_writeback *tmp_wb;
8801 + long wrote;
8802
8803 if (inode->i_sb != sb) {
8804 if (work->sb) {
8805 @@ -1730,7 +1731,9 @@ static long writeback_sb_inodes(struct super_block *sb,
8806
8807 wbc_detach_inode(&wbc);
8808 work->nr_pages -= write_chunk - wbc.nr_to_write;
8809 - wrote += write_chunk - wbc.nr_to_write;
8810 + wrote = write_chunk - wbc.nr_to_write - wbc.pages_skipped;
8811 + wrote = wrote < 0 ? 0 : wrote;
8812 + total_wrote += wrote;
8813
8814 if (need_resched()) {
8815 /*
8816 @@ -1752,7 +1755,7 @@ static long writeback_sb_inodes(struct super_block *sb,
8817 tmp_wb = inode_to_wb_and_lock_list(inode);
8818 spin_lock(&inode->i_lock);
8819 if (!(inode->i_state & I_DIRTY_ALL))
8820 - wrote++;
8821 + total_wrote++;
8822 requeue_inode(inode, tmp_wb, &wbc);
8823 inode_sync_complete(inode);
8824 spin_unlock(&inode->i_lock);
8825 @@ -1766,14 +1769,14 @@ static long writeback_sb_inodes(struct super_block *sb,
8826 * bail out to wb_writeback() often enough to check
8827 * background threshold and other termination conditions.
8828 */
8829 - if (wrote) {
8830 + if (total_wrote) {
8831 if (time_is_before_jiffies(start_time + HZ / 10UL))
8832 break;
8833 if (work->nr_pages <= 0)
8834 break;
8835 }
8836 }
8837 - return wrote;
8838 + return total_wrote;
8839 }
8840
8841 static long __writeback_inodes_wb(struct bdi_writeback *wb,
8842 diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
8843 index 5c73751adb2d3..53cd7b2bb580b 100644
8844 --- a/fs/iomap/buffered-io.c
8845 +++ b/fs/iomap/buffered-io.c
8846 @@ -535,7 +535,8 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
8847 * write started inside the existing inode size.
8848 */
8849 if (pos + len > i_size)
8850 - truncate_pagecache_range(inode, max(pos, i_size), pos + len);
8851 + truncate_pagecache_range(inode, max(pos, i_size),
8852 + pos + len - 1);
8853 }
8854
8855 static int
8856 diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
8857 index ad1eba809e7e1..ee2282b8c7a73 100644
8858 --- a/fs/jffs2/fs.c
8859 +++ b/fs/jffs2/fs.c
8860 @@ -603,6 +603,7 @@ out_root:
8861 jffs2_free_raw_node_refs(c);
8862 kvfree(c->blocks);
8863 jffs2_clear_xattr_subsystem(c);
8864 + jffs2_sum_exit(c);
8865 out_inohash:
8866 kfree(c->inocache_list);
8867 out_wbuf:
8868 diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
8869 index 79f3440e204b6..d3cb27487c706 100644
8870 --- a/fs/jfs/jfs_dmap.c
8871 +++ b/fs/jfs/jfs_dmap.c
8872 @@ -385,7 +385,8 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
8873 }
8874
8875 /* write the last buffer. */
8876 - write_metapage(mp);
8877 + if (mp)
8878 + write_metapage(mp);
8879
8880 IREAD_UNLOCK(ipbmap);
8881
8882 diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
8883 index 7d4af6cea2a63..99ee657596b5f 100644
8884 --- a/fs/kernfs/dir.c
8885 +++ b/fs/kernfs/dir.c
8886 @@ -19,7 +19,15 @@
8887
8888 DEFINE_MUTEX(kernfs_mutex);
8889 static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */
8890 -static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by rename_lock */
8891 +/*
8892 + * Don't use rename_lock to piggy back on pr_cont_buf. We don't want to
8893 + * call pr_cont() while holding rename_lock. Because sometimes pr_cont()
8894 + * will perform wakeups when releasing console_sem. Holding rename_lock
8895 + * will introduce deadlock if the scheduler reads the kernfs_name in the
8896 + * wakeup path.
8897 + */
8898 +static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
8899 +static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by pr_cont_lock */
8900 static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
8901
8902 #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
8903 @@ -230,12 +238,12 @@ void pr_cont_kernfs_name(struct kernfs_node *kn)
8904 {
8905 unsigned long flags;
8906
8907 - spin_lock_irqsave(&kernfs_rename_lock, flags);
8908 + spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
8909
8910 - kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
8911 + kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
8912 pr_cont("%s", kernfs_pr_cont_buf);
8913
8914 - spin_unlock_irqrestore(&kernfs_rename_lock, flags);
8915 + spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
8916 }
8917
8918 /**
8919 @@ -249,10 +257,10 @@ void pr_cont_kernfs_path(struct kernfs_node *kn)
8920 unsigned long flags;
8921 int sz;
8922
8923 - spin_lock_irqsave(&kernfs_rename_lock, flags);
8924 + spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
8925
8926 - sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf,
8927 - sizeof(kernfs_pr_cont_buf));
8928 + sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf,
8929 + sizeof(kernfs_pr_cont_buf));
8930 if (sz < 0) {
8931 pr_cont("(error)");
8932 goto out;
8933 @@ -266,7 +274,7 @@ void pr_cont_kernfs_path(struct kernfs_node *kn)
8934 pr_cont("%s", kernfs_pr_cont_buf);
8935
8936 out:
8937 - spin_unlock_irqrestore(&kernfs_rename_lock, flags);
8938 + spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
8939 }
8940
8941 /**
8942 @@ -870,13 +878,12 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
8943
8944 lockdep_assert_held(&kernfs_mutex);
8945
8946 - /* grab kernfs_rename_lock to piggy back on kernfs_pr_cont_buf */
8947 - spin_lock_irq(&kernfs_rename_lock);
8948 + spin_lock_irq(&kernfs_pr_cont_lock);
8949
8950 len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
8951
8952 if (len >= sizeof(kernfs_pr_cont_buf)) {
8953 - spin_unlock_irq(&kernfs_rename_lock);
8954 + spin_unlock_irq(&kernfs_pr_cont_lock);
8955 return NULL;
8956 }
8957
8958 @@ -888,7 +895,7 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
8959 parent = kernfs_find_ns(parent, name, ns);
8960 }
8961
8962 - spin_unlock_irq(&kernfs_rename_lock);
8963 + spin_unlock_irq(&kernfs_pr_cont_lock);
8964
8965 return parent;
8966 }
8967 diff --git a/fs/nfs/file.c b/fs/nfs/file.c
8968 index 73415970af381..3233da79d49a4 100644
8969 --- a/fs/nfs/file.c
8970 +++ b/fs/nfs/file.c
8971 @@ -394,11 +394,8 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
8972 return status;
8973 NFS_I(mapping->host)->write_io += copied;
8974
8975 - if (nfs_ctx_key_to_expire(ctx, mapping->host)) {
8976 - status = nfs_wb_all(mapping->host);
8977 - if (status < 0)
8978 - return status;
8979 - }
8980 + if (nfs_ctx_key_to_expire(ctx, mapping->host))
8981 + nfs_wb_all(mapping->host);
8982
8983 return copied;
8984 }
8985 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
8986 index cf3b00751ff65..ba4a03a69fbf0 100644
8987 --- a/fs/nfs/nfs4proc.c
8988 +++ b/fs/nfs/nfs4proc.c
8989 @@ -3041,6 +3041,10 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
8990 }
8991
8992 out:
8993 + if (opendata->lgp) {
8994 + nfs4_lgopen_release(opendata->lgp);
8995 + opendata->lgp = NULL;
8996 + }
8997 if (!opendata->cancelled)
8998 nfs4_sequence_free_slot(&opendata->o_res.seq_res);
8999 return ret;
9000 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
9001 index 0471b6e0da16f..2fe48982fbb48 100644
9002 --- a/fs/nfs/pnfs.c
9003 +++ b/fs/nfs/pnfs.c
9004 @@ -1961,6 +1961,7 @@ lookup_again:
9005 lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
9006 if (lo == NULL) {
9007 spin_unlock(&ino->i_lock);
9008 + lseg = ERR_PTR(-ENOMEM);
9009 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
9010 PNFS_UPDATE_LAYOUT_NOMEM);
9011 goto out;
9012 @@ -2090,6 +2091,7 @@ lookup_again:
9013
9014 lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
9015 if (!lgp) {
9016 + lseg = ERR_PTR(-ENOMEM);
9017 trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
9018 PNFS_UPDATE_LAYOUT_NOMEM);
9019 nfs_layoutget_end(lo);
9020 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
9021 index 30d8e7bc1cef3..10ce264a64567 100644
9022 --- a/fs/nfs/write.c
9023 +++ b/fs/nfs/write.c
9024 @@ -692,11 +692,7 @@ static int nfs_writepage_locked(struct page *page,
9025 err = nfs_do_writepage(page, wbc, &pgio);
9026 pgio.pg_error = 0;
9027 nfs_pageio_complete(&pgio);
9028 - if (err < 0)
9029 - return err;
9030 - if (nfs_error_is_fatal(pgio.pg_error))
9031 - return pgio.pg_error;
9032 - return 0;
9033 + return err;
9034 }
9035
9036 int nfs_writepage(struct page *page, struct writeback_control *wbc)
9037 @@ -747,9 +743,6 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
9038
9039 if (err < 0)
9040 goto out_err;
9041 - err = pgio.pg_error;
9042 - if (nfs_error_is_fatal(err))
9043 - goto out_err;
9044 return 0;
9045 out_err:
9046 return err;
9047 @@ -1429,7 +1422,7 @@ static void nfs_async_write_error(struct list_head *head, int error)
9048 while (!list_empty(head)) {
9049 req = nfs_list_entry(head->next);
9050 nfs_list_remove_request(req);
9051 - if (nfs_error_is_fatal(error))
9052 + if (nfs_error_is_fatal_on_server(error))
9053 nfs_write_error(req, error);
9054 else
9055 nfs_redirty_request(req);
9056 diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
9057 index 1e2bfd26b3521..7df9ad4d84338 100644
9058 --- a/fs/notify/fdinfo.c
9059 +++ b/fs/notify/fdinfo.c
9060 @@ -84,16 +84,9 @@ static void inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
9061 inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark);
9062 inode = igrab(fsnotify_conn_inode(mark->connector));
9063 if (inode) {
9064 - /*
9065 - * IN_ALL_EVENTS represents all of the mask bits
9066 - * that we expose to userspace. There is at
9067 - * least one bit (FS_EVENT_ON_CHILD) which is
9068 - * used only internally to the kernel.
9069 - */
9070 - u32 mask = mark->mask & IN_ALL_EVENTS;
9071 - seq_printf(m, "inotify wd:%x ino:%lx sdev:%x mask:%x ignored_mask:%x ",
9072 + seq_printf(m, "inotify wd:%x ino:%lx sdev:%x mask:%x ignored_mask:0 ",
9073 inode_mark->wd, inode->i_ino, inode->i_sb->s_dev,
9074 - mask, mark->ignored_mask);
9075 + inotify_mark_user_mask(mark));
9076 show_mark_fhandle(m, inode);
9077 seq_putc(m, '\n');
9078 iput(inode);
9079 diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h
9080 index 3f246f7b8a92b..8b8bf52dd08b0 100644
9081 --- a/fs/notify/inotify/inotify.h
9082 +++ b/fs/notify/inotify/inotify.h
9083 @@ -22,6 +22,18 @@ static inline struct inotify_event_info *INOTIFY_E(struct fsnotify_event *fse)
9084 return container_of(fse, struct inotify_event_info, fse);
9085 }
9086
9087 +/*
9088 + * INOTIFY_USER_FLAGS represents all of the mask bits that we expose to
9089 + * userspace. There is at least one bit (FS_EVENT_ON_CHILD) which is
9090 + * used only internally to the kernel.
9091 + */
9092 +#define INOTIFY_USER_MASK (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK)
9093 +
9094 +static inline __u32 inotify_mark_user_mask(struct fsnotify_mark *fsn_mark)
9095 +{
9096 + return fsn_mark->mask & INOTIFY_USER_MASK;
9097 +}
9098 +
9099 extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
9100 struct fsnotify_group *group);
9101 extern int inotify_handle_event(struct fsnotify_group *group,
9102 diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
9103 index 81ffc8629fc4b..b949b2c02f4be 100644
9104 --- a/fs/notify/inotify/inotify_user.c
9105 +++ b/fs/notify/inotify/inotify_user.c
9106 @@ -86,7 +86,7 @@ static inline __u32 inotify_arg_to_mask(u32 arg)
9107 mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
9108
9109 /* mask off the flags used to open the fd */
9110 - mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
9111 + mask |= (arg & INOTIFY_USER_MASK);
9112
9113 return mask;
9114 }
9115 diff --git a/fs/notify/mark.c b/fs/notify/mark.c
9116 index 1d96216dffd19..fdf8e03bf3df7 100644
9117 --- a/fs/notify/mark.c
9118 +++ b/fs/notify/mark.c
9119 @@ -426,7 +426,7 @@ void fsnotify_free_mark(struct fsnotify_mark *mark)
9120 void fsnotify_destroy_mark(struct fsnotify_mark *mark,
9121 struct fsnotify_group *group)
9122 {
9123 - mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
9124 + mutex_lock(&group->mark_mutex);
9125 fsnotify_detach_mark(mark);
9126 mutex_unlock(&group->mark_mutex);
9127 fsnotify_free_mark(mark);
9128 @@ -738,7 +738,7 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
9129 * move marks to free to to_free list in one go and then free marks in
9130 * to_free list one by one.
9131 */
9132 - mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
9133 + mutex_lock(&group->mark_mutex);
9134 list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
9135 if ((1U << mark->connector->type) & type_mask)
9136 list_move(&mark->g_list, &to_free);
9137 @@ -747,7 +747,7 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
9138
9139 clear:
9140 while (1) {
9141 - mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
9142 + mutex_lock(&group->mark_mutex);
9143 if (list_empty(head)) {
9144 mutex_unlock(&group->mark_mutex);
9145 break;
9146 diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
9147 index 3df5be25bfb1f..d23bc720753ed 100644
9148 --- a/fs/ocfs2/dlmfs/userdlm.c
9149 +++ b/fs/ocfs2/dlmfs/userdlm.c
9150 @@ -435,6 +435,11 @@ again:
9151 }
9152
9153 spin_lock(&lockres->l_lock);
9154 + if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
9155 + spin_unlock(&lockres->l_lock);
9156 + status = -EAGAIN;
9157 + goto bail;
9158 + }
9159
9160 /* We only compare against the currently granted level
9161 * here. If the lock is blocked waiting on a downconvert,
9162 @@ -601,7 +606,7 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
9163 spin_lock(&lockres->l_lock);
9164 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
9165 spin_unlock(&lockres->l_lock);
9166 - return 0;
9167 + goto bail;
9168 }
9169
9170 lockres->l_flags |= USER_LOCK_IN_TEARDOWN;
9171 @@ -615,12 +620,17 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
9172 }
9173
9174 if (lockres->l_ro_holders || lockres->l_ex_holders) {
9175 + lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN;
9176 spin_unlock(&lockres->l_lock);
9177 goto bail;
9178 }
9179
9180 status = 0;
9181 if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
9182 + /*
9183 + * lock is never requested, leave USER_LOCK_IN_TEARDOWN set
9184 + * to avoid new lock request coming in.
9185 + */
9186 spin_unlock(&lockres->l_lock);
9187 goto bail;
9188 }
9189 @@ -631,6 +641,10 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
9190
9191 status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
9192 if (status) {
9193 + spin_lock(&lockres->l_lock);
9194 + lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN;
9195 + lockres->l_flags &= ~USER_LOCK_BUSY;
9196 + spin_unlock(&lockres->l_lock);
9197 user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
9198 goto bail;
9199 }
9200 diff --git a/fs/proc/generic.c b/fs/proc/generic.c
9201 index 8c3dbe13e647c..372b4dad4863e 100644
9202 --- a/fs/proc/generic.c
9203 +++ b/fs/proc/generic.c
9204 @@ -446,6 +446,9 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
9205 proc_set_user(ent, (*parent)->uid, (*parent)->gid);
9206
9207 ent->proc_dops = &proc_misc_dentry_ops;
9208 + /* Revalidate everything under /proc/${pid}/net */
9209 + if ((*parent)->proc_dops == &proc_net_dentry_ops)
9210 + pde_force_lookup(ent);
9211
9212 out:
9213 return ent;
9214 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
9215 index 313b7c751867f..9cd5b47199cba 100644
9216 --- a/fs/proc/proc_net.c
9217 +++ b/fs/proc/proc_net.c
9218 @@ -343,6 +343,9 @@ static __net_init int proc_net_ns_init(struct net *net)
9219
9220 proc_set_user(netd, uid, gid);
9221
9222 + /* Seed dentry revalidation for /proc/${pid}/net */
9223 + pde_force_lookup(netd);
9224 +
9225 err = -EEXIST;
9226 net_statd = proc_net_mkdir(net, "stat", netd);
9227 if (!net_statd)
9228 diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
9229 index b9719418c3d26..f40a97417b682 100644
9230 --- a/include/drm/drm_edid.h
9231 +++ b/include/drm/drm_edid.h
9232 @@ -116,7 +116,7 @@ struct detailed_data_monitor_range {
9233 u8 supported_scalings;
9234 u8 preferred_refresh;
9235 } __attribute__((packed)) cvt;
9236 - } formula;
9237 + } __attribute__((packed)) formula;
9238 } __attribute__((packed));
9239
9240 struct detailed_data_wpindex {
9241 @@ -149,7 +149,7 @@ struct detailed_non_pixel {
9242 struct detailed_data_wpindex color;
9243 struct std_timing timings[6];
9244 struct cvt_timing cvt[4];
9245 - } data;
9246 + } __attribute__((packed)) data;
9247 } __attribute__((packed));
9248
9249 #define EDID_DETAIL_EST_TIMINGS 0xf7
9250 @@ -167,7 +167,7 @@ struct detailed_timing {
9251 union {
9252 struct detailed_pixel_timing pixel_data;
9253 struct detailed_non_pixel other_data;
9254 - } data;
9255 + } __attribute__((packed)) data;
9256 } __attribute__((packed));
9257
9258 #define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
9259 diff --git a/include/linux/bpf.h b/include/linux/bpf.h
9260 index a73ca7c9c7d0e..5705cda3c4c4d 100644
9261 --- a/include/linux/bpf.h
9262 +++ b/include/linux/bpf.h
9263 @@ -929,6 +929,8 @@ void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
9264 struct net_device *netdev);
9265 bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
9266
9267 +void unpriv_ebpf_notify(int new_state);
9268 +
9269 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
9270 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
9271
9272 diff --git a/include/linux/efi.h b/include/linux/efi.h
9273 index c82ef0eba4f84..f9b9f9a2fd4a5 100644
9274 --- a/include/linux/efi.h
9275 +++ b/include/linux/efi.h
9276 @@ -165,6 +165,8 @@ struct capsule_info {
9277 size_t page_bytes_remain;
9278 };
9279
9280 +int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
9281 + size_t hdr_bytes);
9282 int __efi_capsule_setup_info(struct capsule_info *cap_info);
9283
9284 /*
9285 diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
9286 index 686be532f4cb7..7816bf070f835 100644
9287 --- a/include/linux/iio/common/st_sensors.h
9288 +++ b/include/linux/iio/common/st_sensors.h
9289 @@ -228,6 +228,7 @@ struct st_sensor_settings {
9290 * @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
9291 * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
9292 * @buffer_data: Data used by buffer part.
9293 + * @odr_lock: Local lock for preventing concurrent ODR accesses/changes
9294 */
9295 struct st_sensor_data {
9296 struct device *dev;
9297 @@ -253,6 +254,8 @@ struct st_sensor_data {
9298 s64 hw_timestamp;
9299
9300 char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
9301 +
9302 + struct mutex odr_lock;
9303 };
9304
9305 #ifdef CONFIG_IIO_BUFFER
9306 diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
9307 index 36d6ce673503c..6fee33cb52f58 100644
9308 --- a/include/linux/mailbox_controller.h
9309 +++ b/include/linux/mailbox_controller.h
9310 @@ -83,6 +83,7 @@ struct mbox_controller {
9311 const struct of_phandle_args *sp);
9312 /* Internal to API */
9313 struct hrtimer poll_hrt;
9314 + spinlock_t poll_hrt_lock;
9315 struct list_head node;
9316 };
9317
9318 diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
9319 index c98a211086880..f3c149073c213 100644
9320 --- a/include/linux/mtd/cfi.h
9321 +++ b/include/linux/mtd/cfi.h
9322 @@ -286,6 +286,7 @@ struct cfi_private {
9323 map_word sector_erase_cmd;
9324 unsigned long chipshift; /* Because they're of the same type */
9325 const char *im_name; /* inter_module name for cmdset_setup */
9326 + unsigned long quirks;
9327 struct flchip chips[0]; /* per-chip data structure for each chip */
9328 };
9329
9330 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
9331 index 27e7fa36f707f..a8d79f5b9a52d 100644
9332 --- a/include/linux/nodemask.h
9333 +++ b/include/linux/nodemask.h
9334 @@ -42,11 +42,11 @@
9335 * void nodes_shift_right(dst, src, n) Shift right
9336 * void nodes_shift_left(dst, src, n) Shift left
9337 *
9338 - * int first_node(mask) Number lowest set bit, or MAX_NUMNODES
9339 - * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
9340 - * int next_node_in(node, mask) Next node past 'node', or wrap to first,
9341 + * unsigned int first_node(mask) Number lowest set bit, or MAX_NUMNODES
9342 + * unsigned int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
9343 + * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
9344 * or MAX_NUMNODES
9345 - * int first_unset_node(mask) First node not set in mask, or
9346 + * unsigned int first_unset_node(mask) First node not set in mask, or
9347 * MAX_NUMNODES
9348 *
9349 * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
9350 @@ -153,7 +153,7 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
9351
9352 #define node_test_and_set(node, nodemask) \
9353 __node_test_and_set((node), &(nodemask))
9354 -static inline int __node_test_and_set(int node, nodemask_t *addr)
9355 +static inline bool __node_test_and_set(int node, nodemask_t *addr)
9356 {
9357 return test_and_set_bit(node, addr->bits);
9358 }
9359 @@ -200,7 +200,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
9360
9361 #define nodes_equal(src1, src2) \
9362 __nodes_equal(&(src1), &(src2), MAX_NUMNODES)
9363 -static inline int __nodes_equal(const nodemask_t *src1p,
9364 +static inline bool __nodes_equal(const nodemask_t *src1p,
9365 const nodemask_t *src2p, unsigned int nbits)
9366 {
9367 return bitmap_equal(src1p->bits, src2p->bits, nbits);
9368 @@ -208,7 +208,7 @@ static inline int __nodes_equal(const nodemask_t *src1p,
9369
9370 #define nodes_intersects(src1, src2) \
9371 __nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
9372 -static inline int __nodes_intersects(const nodemask_t *src1p,
9373 +static inline bool __nodes_intersects(const nodemask_t *src1p,
9374 const nodemask_t *src2p, unsigned int nbits)
9375 {
9376 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
9377 @@ -216,20 +216,20 @@ static inline int __nodes_intersects(const nodemask_t *src1p,
9378
9379 #define nodes_subset(src1, src2) \
9380 __nodes_subset(&(src1), &(src2), MAX_NUMNODES)
9381 -static inline int __nodes_subset(const nodemask_t *src1p,
9382 +static inline bool __nodes_subset(const nodemask_t *src1p,
9383 const nodemask_t *src2p, unsigned int nbits)
9384 {
9385 return bitmap_subset(src1p->bits, src2p->bits, nbits);
9386 }
9387
9388 #define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
9389 -static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
9390 +static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
9391 {
9392 return bitmap_empty(srcp->bits, nbits);
9393 }
9394
9395 #define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
9396 -static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
9397 +static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
9398 {
9399 return bitmap_full(srcp->bits, nbits);
9400 }
9401 @@ -260,15 +260,15 @@ static inline void __nodes_shift_left(nodemask_t *dstp,
9402 > MAX_NUMNODES, then the silly min_ts could be dropped. */
9403
9404 #define first_node(src) __first_node(&(src))
9405 -static inline int __first_node(const nodemask_t *srcp)
9406 +static inline unsigned int __first_node(const nodemask_t *srcp)
9407 {
9408 - return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
9409 + return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
9410 }
9411
9412 #define next_node(n, src) __next_node((n), &(src))
9413 -static inline int __next_node(int n, const nodemask_t *srcp)
9414 +static inline unsigned int __next_node(int n, const nodemask_t *srcp)
9415 {
9416 - return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
9417 + return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
9418 }
9419
9420 /*
9421 @@ -276,7 +276,7 @@ static inline int __next_node(int n, const nodemask_t *srcp)
9422 * the first node in src if needed. Returns MAX_NUMNODES if src is empty.
9423 */
9424 #define next_node_in(n, src) __next_node_in((n), &(src))
9425 -int __next_node_in(int node, const nodemask_t *srcp);
9426 +unsigned int __next_node_in(int node, const nodemask_t *srcp);
9427
9428 static inline void init_nodemask_of_node(nodemask_t *mask, int node)
9429 {
9430 @@ -296,9 +296,9 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node)
9431 })
9432
9433 #define first_unset_node(mask) __first_unset_node(&(mask))
9434 -static inline int __first_unset_node(const nodemask_t *maskp)
9435 +static inline unsigned int __first_unset_node(const nodemask_t *maskp)
9436 {
9437 - return min_t(int,MAX_NUMNODES,
9438 + return min_t(unsigned int, MAX_NUMNODES,
9439 find_first_zero_bit(maskp->bits, MAX_NUMNODES));
9440 }
9441
9442 @@ -375,14 +375,13 @@ static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
9443 }
9444
9445 #if MAX_NUMNODES > 1
9446 -#define for_each_node_mask(node, mask) \
9447 - for ((node) = first_node(mask); \
9448 - (node) < MAX_NUMNODES; \
9449 - (node) = next_node((node), (mask)))
9450 +#define for_each_node_mask(node, mask) \
9451 + for ((node) = first_node(mask); \
9452 + (node >= 0) && (node) < MAX_NUMNODES; \
9453 + (node) = next_node((node), (mask)))
9454 #else /* MAX_NUMNODES == 1 */
9455 -#define for_each_node_mask(node, mask) \
9456 - if (!nodes_empty(mask)) \
9457 - for ((node) = 0; (node) < 1; (node)++)
9458 +#define for_each_node_mask(node, mask) \
9459 + for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++)
9460 #endif /* MAX_NUMNODES */
9461
9462 /*
9463 @@ -435,11 +434,11 @@ static inline int num_node_state(enum node_states state)
9464
9465 #define first_online_node first_node(node_states[N_ONLINE])
9466 #define first_memory_node first_node(node_states[N_MEMORY])
9467 -static inline int next_online_node(int nid)
9468 +static inline unsigned int next_online_node(int nid)
9469 {
9470 return next_node(nid, node_states[N_ONLINE]);
9471 }
9472 -static inline int next_memory_node(int nid)
9473 +static inline unsigned int next_memory_node(int nid)
9474 {
9475 return next_node(nid, node_states[N_MEMORY]);
9476 }
9477 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
9478 index 2a9df80ea8876..ae7dbdfa3d832 100644
9479 --- a/include/linux/ptrace.h
9480 +++ b/include/linux/ptrace.h
9481 @@ -30,7 +30,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
9482
9483 #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
9484 #define PT_PTRACED 0x00000001
9485 -#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
9486
9487 #define PT_OPT_FLAG_SHIFT 3
9488 /* PT_TRACE_* event enable flags */
9489 @@ -47,12 +46,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
9490 #define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
9491 #define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
9492
9493 -/* single stepping state bits (used on ARM and PA-RISC) */
9494 -#define PT_SINGLESTEP_BIT 31
9495 -#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
9496 -#define PT_BLOCKSTEP_BIT 30
9497 -#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)
9498 -
9499 extern long arch_ptrace(struct task_struct *child, long request,
9500 unsigned long addr, unsigned long data);
9501 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
9502 diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
9503 index 712b2a603645f..c0eb85b2981e0 100644
9504 --- a/include/linux/usb/hcd.h
9505 +++ b/include/linux/usb/hcd.h
9506 @@ -124,6 +124,7 @@ struct usb_hcd {
9507 #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
9508 #define HCD_FLAG_DEAD 6 /* controller has died? */
9509 #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
9510 +#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */
9511
9512 /* The flags can be tested using these macros; they are likely to
9513 * be slightly faster than test_bit().
9514 @@ -134,6 +135,7 @@ struct usb_hcd {
9515 #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
9516 #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
9517 #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
9518 +#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
9519
9520 /*
9521 * Specifies if interfaces are authorized by default
9522 diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
9523 index a01981d7108f9..f6d614926e9e9 100644
9524 --- a/include/net/if_inet6.h
9525 +++ b/include/net/if_inet6.h
9526 @@ -64,6 +64,14 @@ struct inet6_ifaddr {
9527
9528 struct hlist_node addr_lst;
9529 struct list_head if_list;
9530 + /*
9531 + * Used to safely traverse idev->addr_list in process context
9532 + * if the idev->lock needed to protect idev->addr_list cannot be held.
9533 + * In that case, add the items to this list temporarily and iterate
9534 + * without holding idev->lock.
9535 + * See addrconf_ifdown and dev_forward_change.
9536 + */
9537 + struct list_head if_list_aux;
9538
9539 struct list_head tmp_list;
9540 struct inet6_ifaddr *ifpub;
9541 diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
9542 index ae69059ba76d4..1ee396ce0eda8 100644
9543 --- a/include/net/sch_generic.h
9544 +++ b/include/net/sch_generic.h
9545 @@ -160,37 +160,17 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
9546 if (spin_trylock(&qdisc->seqlock))
9547 goto nolock_empty;
9548
9549 - /* Paired with smp_mb__after_atomic() to make sure
9550 - * STATE_MISSED checking is synchronized with clearing
9551 - * in pfifo_fast_dequeue().
9552 + /* No need to insist if the MISSED flag was already set.
9553 + * Note that test_and_set_bit() also gives us memory ordering
9554 + * guarantees wrt potential earlier enqueue() and below
9555 + * spin_trylock(), both of which are necessary to prevent races
9556 */
9557 - smp_mb__before_atomic();
9558 -
9559 - /* If the MISSED flag is set, it means other thread has
9560 - * set the MISSED flag before second spin_trylock(), so
9561 - * we can return false here to avoid multi cpus doing
9562 - * the set_bit() and second spin_trylock() concurrently.
9563 - */
9564 - if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
9565 + if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
9566 return false;
9567
9568 - /* Set the MISSED flag before the second spin_trylock(),
9569 - * if the second spin_trylock() return false, it means
9570 - * other cpu holding the lock will do dequeuing for us
9571 - * or it will see the MISSED flag set after releasing
9572 - * lock and reschedule the net_tx_action() to do the
9573 - * dequeuing.
9574 - */
9575 - set_bit(__QDISC_STATE_MISSED, &qdisc->state);
9576 -
9577 - /* spin_trylock() only has load-acquire semantic, so use
9578 - * smp_mb__after_atomic() to ensure STATE_MISSED is set
9579 - * before doing the second spin_trylock().
9580 - */
9581 - smp_mb__after_atomic();
9582 -
9583 - /* Retry again in case other CPU may not see the new flag
9584 - * after it releases the lock at the end of qdisc_run_end().
9585 + /* Try to take the lock again to make sure that we will either
9586 + * grab it or the CPU that still has it will see MISSED set
9587 + * when testing it in qdisc_run_end()
9588 */
9589 if (!spin_trylock(&qdisc->seqlock))
9590 return false;
9591 @@ -214,6 +194,12 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
9592 if (qdisc->flags & TCQ_F_NOLOCK) {
9593 spin_unlock(&qdisc->seqlock);
9594
9595 + /* spin_unlock() only has store-release semantic. The unlock
9596 + * and test_bit() ordering is a store-load ordering, so a full
9597 + * memory barrier is needed here.
9598 + */
9599 + smp_mb();
9600 +
9601 if (unlikely(test_bit(__QDISC_STATE_MISSED,
9602 &qdisc->state))) {
9603 clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
9604 diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
9605 index fac8e89aed81d..310e0dbffda99 100644
9606 --- a/include/scsi/libfcoe.h
9607 +++ b/include/scsi/libfcoe.h
9608 @@ -249,7 +249,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
9609 struct fc_frame *);
9610
9611 /* libfcoe funcs */
9612 -u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int);
9613 +u64 fcoe_wwn_from_mac(unsigned char mac[ETH_ALEN], unsigned int scheme,
9614 + unsigned int port);
9615 int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
9616 const struct libfc_function_template *, int init_fcp);
9617 u32 fcoe_fc_crc(struct fc_frame *fp);
9618 diff --git a/include/sound/jack.h b/include/sound/jack.h
9619 index 9eb2b5ec1ec41..78f3619f3de94 100644
9620 --- a/include/sound/jack.h
9621 +++ b/include/sound/jack.h
9622 @@ -62,6 +62,7 @@ struct snd_jack {
9623 const char *id;
9624 #ifdef CONFIG_SND_JACK_INPUT_DEV
9625 struct input_dev *input_dev;
9626 + struct mutex input_dev_lock;
9627 int registered;
9628 int type;
9629 char name[100];
9630 diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
9631 index 059b6e45a0283..839bb07b93a71 100644
9632 --- a/include/trace/events/rxrpc.h
9633 +++ b/include/trace/events/rxrpc.h
9634 @@ -1511,7 +1511,7 @@ TRACE_EVENT(rxrpc_call_reset,
9635 __entry->call_serial = call->rx_serial;
9636 __entry->conn_serial = call->conn->hi_serial;
9637 __entry->tx_seq = call->tx_hard_ack;
9638 - __entry->rx_seq = call->ackr_seen;
9639 + __entry->rx_seq = call->rx_hard_ack;
9640 ),
9641
9642 TP_printk("c=%08x %08x:%08x r=%08x/%08x tx=%08x rx=%08x",
9643 diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
9644 index a5ab2973e8dc3..57184c02e3b93 100644
9645 --- a/include/trace/events/vmscan.h
9646 +++ b/include/trace/events/vmscan.h
9647 @@ -283,7 +283,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
9648 __field(unsigned long, nr_scanned)
9649 __field(unsigned long, nr_skipped)
9650 __field(unsigned long, nr_taken)
9651 - __field(isolate_mode_t, isolate_mode)
9652 + __field(unsigned int, isolate_mode)
9653 __field(int, lru)
9654 ),
9655
9656 @@ -294,7 +294,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
9657 __entry->nr_scanned = nr_scanned;
9658 __entry->nr_skipped = nr_skipped;
9659 __entry->nr_taken = nr_taken;
9660 - __entry->isolate_mode = isolate_mode;
9661 + __entry->isolate_mode = (__force unsigned int)isolate_mode;
9662 __entry->lru = lru;
9663 ),
9664
9665 diff --git a/init/Kconfig b/init/Kconfig
9666 index e6216dc2a1d1c..74f44b753d61d 100644
9667 --- a/init/Kconfig
9668 +++ b/init/Kconfig
9669 @@ -33,6 +33,15 @@ config CC_CAN_LINK
9670 config CC_HAS_ASM_GOTO
9671 def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
9672
9673 +config CC_HAS_ASM_GOTO_TIED_OUTPUT
9674 + depends on CC_HAS_ASM_GOTO_OUTPUT
9675 + # Detect buggy gcc and clang, fixed in gcc-11 clang-14.
9676 + def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .\n": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
9677 +
9678 +config CC_HAS_ASM_GOTO_OUTPUT
9679 + depends on CC_HAS_ASM_GOTO
9680 + def_bool $(success,echo 'int foo(int x) { asm goto ("": "=r"(x) ::: bar); return x; bar: return 0; }' | $(CC) -x c - -c -o /dev/null)
9681 +
9682 config TOOLS_SUPPORT_RELR
9683 def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
9684
9685 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
9686 index 2ea0c08188e67..12519bf5f330d 100644
9687 --- a/ipc/mqueue.c
9688 +++ b/ipc/mqueue.c
9689 @@ -45,6 +45,7 @@
9690
9691 struct mqueue_fs_context {
9692 struct ipc_namespace *ipc_ns;
9693 + bool newns; /* Set if newly created ipc namespace */
9694 };
9695
9696 #define MQUEUE_MAGIC 0x19800202
9697 @@ -365,6 +366,14 @@ static int mqueue_get_tree(struct fs_context *fc)
9698 {
9699 struct mqueue_fs_context *ctx = fc->fs_private;
9700
9701 + /*
9702 + * With a newly created ipc namespace, we don't need to do a search
9703 + * for an ipc namespace match, but we still need to set s_fs_info.
9704 + */
9705 + if (ctx->newns) {
9706 + fc->s_fs_info = ctx->ipc_ns;
9707 + return get_tree_nodev(fc, mqueue_fill_super);
9708 + }
9709 return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
9710 }
9711
9712 @@ -392,6 +401,10 @@ static int mqueue_init_fs_context(struct fs_context *fc)
9713 return 0;
9714 }
9715
9716 +/*
9717 + * mq_init_ns() is currently the only caller of mq_create_mount().
9718 + * So the ns parameter is always a newly created ipc namespace.
9719 + */
9720 static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
9721 {
9722 struct mqueue_fs_context *ctx;
9723 @@ -403,6 +416,7 @@ static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
9724 return ERR_CAST(fc);
9725
9726 ctx = fc->fs_private;
9727 + ctx->newns = true;
9728 put_ipc_ns(ctx->ipc_ns);
9729 ctx->ipc_ns = get_ipc_ns(ns);
9730 put_user_ns(fc->user_ns);
9731 diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
9732 index 49c7a09d688d7..768ffd6037875 100644
9733 --- a/kernel/bpf/stackmap.c
9734 +++ b/kernel/bpf/stackmap.c
9735 @@ -117,7 +117,6 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
9736 return ERR_PTR(-E2BIG);
9737
9738 cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
9739 - cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
9740 err = bpf_map_charge_init(&mem, cost);
9741 if (err)
9742 return ERR_PTR(err);
9743 diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
9744 index 4dc3bbfd3e3f3..1c133f610f592 100644
9745 --- a/kernel/dma/debug.c
9746 +++ b/kernel/dma/debug.c
9747 @@ -450,7 +450,7 @@ void debug_dma_dump_mappings(struct device *dev)
9748 * At any time debug_dma_assert_idle() can be called to trigger a
9749 * warning if any cachelines in the given page are in the active set.
9750 */
9751 -static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
9752 +static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
9753 static DEFINE_SPINLOCK(radix_lock);
9754 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
9755 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
9756 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
9757 index d99f73f83bf5f..aab480e24bd60 100644
9758 --- a/kernel/ptrace.c
9759 +++ b/kernel/ptrace.c
9760 @@ -1219,9 +1219,8 @@ int ptrace_request(struct task_struct *child, long request,
9761 return ptrace_resume(child, request, data);
9762
9763 case PTRACE_KILL:
9764 - if (child->exit_state) /* already dead */
9765 - return 0;
9766 - return ptrace_resume(child, request, SIGKILL);
9767 + send_sig_info(SIGKILL, SEND_SIG_NOINFO, child);
9768 + return 0;
9769
9770 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
9771 case PTRACE_GETREGSET:
9772 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
9773 index 87d9fad9d01d6..d2a68ae7596ec 100644
9774 --- a/kernel/sched/fair.c
9775 +++ b/kernel/sched/fair.c
9776 @@ -4485,8 +4485,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
9777
9778 cfs_rq->throttle_count--;
9779 if (!cfs_rq->throttle_count) {
9780 - cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
9781 - cfs_rq->throttled_clock_task;
9782 + cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
9783 + cfs_rq->throttled_clock_pelt;
9784
9785 /* Add cfs_rq with already running entity in the list */
9786 if (cfs_rq->nr_running >= 1)
9787 @@ -4503,7 +4503,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
9788
9789 /* group is entering throttled state, stop time */
9790 if (!cfs_rq->throttle_count) {
9791 - cfs_rq->throttled_clock_task = rq_clock_task(rq);
9792 + cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
9793 list_del_leaf_cfs_rq(cfs_rq);
9794 }
9795 cfs_rq->throttle_count++;
9796 @@ -4932,7 +4932,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
9797 pcfs_rq = tg->parent->cfs_rq[cpu];
9798
9799 cfs_rq->throttle_count = pcfs_rq->throttle_count;
9800 - cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
9801 + cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
9802 }
9803
9804 /* conditionally throttle active cfs_rq's from put_prev_entity() */
9805 diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
9806 index afff644da0650..43e2a47489fae 100644
9807 --- a/kernel/sched/pelt.h
9808 +++ b/kernel/sched/pelt.h
9809 @@ -127,9 +127,9 @@ static inline u64 rq_clock_pelt(struct rq *rq)
9810 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
9811 {
9812 if (unlikely(cfs_rq->throttle_count))
9813 - return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
9814 + return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
9815
9816 - return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
9817 + return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
9818 }
9819 #else
9820 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
9821 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
9822 index fe755c1a0af95..b8a3db59e3267 100644
9823 --- a/kernel/sched/sched.h
9824 +++ b/kernel/sched/sched.h
9825 @@ -570,8 +570,8 @@ struct cfs_rq {
9826 s64 runtime_remaining;
9827
9828 u64 throttled_clock;
9829 - u64 throttled_clock_task;
9830 - u64 throttled_clock_task_time;
9831 + u64 throttled_clock_pelt;
9832 + u64 throttled_clock_pelt_time;
9833 int throttled;
9834 int throttle_count;
9835 struct list_head throttled_list;
9836 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
9837 index 56619766e9103..55da88f18342f 100644
9838 --- a/kernel/trace/trace.c
9839 +++ b/kernel/trace/trace.c
9840 @@ -2537,7 +2537,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
9841 }
9842 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
9843
9844 -static DEFINE_SPINLOCK(tracepoint_iter_lock);
9845 +static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
9846 static DEFINE_MUTEX(tracepoint_printk_mutex);
9847
9848 static void output_printk(struct trace_event_buffer *fbuffer)
9849 @@ -2558,14 +2558,14 @@ static void output_printk(struct trace_event_buffer *fbuffer)
9850
9851 event = &fbuffer->trace_file->event_call->event;
9852
9853 - spin_lock_irqsave(&tracepoint_iter_lock, flags);
9854 + raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
9855 trace_seq_init(&iter->seq);
9856 iter->ent = fbuffer->entry;
9857 event_call->event.funcs->trace(iter, 0, event);
9858 trace_seq_putc(&iter->seq, 0);
9859 printk("%s", iter->seq.buffer);
9860
9861 - spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
9862 + raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
9863 }
9864
9865 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
9866 @@ -5638,12 +5638,18 @@ static void tracing_set_nop(struct trace_array *tr)
9867 tr->current_trace = &nop_trace;
9868 }
9869
9870 +static bool tracer_options_updated;
9871 +
9872 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
9873 {
9874 /* Only enable if the directory has been created already. */
9875 if (!tr->dir)
9876 return;
9877
9878 + /* Only create trace option files after update_tracer_options finish */
9879 + if (!tracer_options_updated)
9880 + return;
9881 +
9882 create_trace_option_files(tr, t);
9883 }
9884
9885 @@ -8391,6 +8397,7 @@ static void __update_tracer_options(struct trace_array *tr)
9886 static void update_tracer_options(struct trace_array *tr)
9887 {
9888 mutex_lock(&trace_types_lock);
9889 + tracer_options_updated = true;
9890 __update_tracer_options(tr);
9891 mutex_unlock(&trace_types_lock);
9892 }
9893 diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
9894 index 413da11260f89..83e1810556853 100644
9895 --- a/kernel/trace/trace_events_hist.c
9896 +++ b/kernel/trace/trace_events_hist.c
9897 @@ -2695,8 +2695,11 @@ static int init_var_ref(struct hist_field *ref_field,
9898 return err;
9899 free:
9900 kfree(ref_field->system);
9901 + ref_field->system = NULL;
9902 kfree(ref_field->event_name);
9903 + ref_field->event_name = NULL;
9904 kfree(ref_field->name);
9905 + ref_field->name = NULL;
9906
9907 goto out;
9908 }
9909 diff --git a/lib/nodemask.c b/lib/nodemask.c
9910 index 3aa454c54c0de..e22647f5181b3 100644
9911 --- a/lib/nodemask.c
9912 +++ b/lib/nodemask.c
9913 @@ -3,9 +3,9 @@
9914 #include <linux/module.h>
9915 #include <linux/random.h>
9916
9917 -int __next_node_in(int node, const nodemask_t *srcp)
9918 +unsigned int __next_node_in(int node, const nodemask_t *srcp)
9919 {
9920 - int ret = __next_node(node, srcp);
9921 + unsigned int ret = __next_node(node, srcp);
9922
9923 if (ret == MAX_NUMNODES)
9924 ret = __first_node(srcp);
9925 diff --git a/mm/compaction.c b/mm/compaction.c
9926 index d686887856fee..0758afd6325da 100644
9927 --- a/mm/compaction.c
9928 +++ b/mm/compaction.c
9929 @@ -1709,6 +1709,8 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
9930
9931 update_fast_start_pfn(cc, free_pfn);
9932 pfn = pageblock_start_pfn(free_pfn);
9933 + if (pfn < cc->zone->zone_start_pfn)
9934 + pfn = cc->zone->zone_start_pfn;
9935 cc->fast_search_fail = 0;
9936 found_block = true;
9937 set_pageblock_skip(freepage);
9938 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
9939 index 20da6ede77041..b6f029a1059f1 100644
9940 --- a/mm/hugetlb.c
9941 +++ b/mm/hugetlb.c
9942 @@ -5033,7 +5033,14 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
9943 pud_clear(pud);
9944 put_page(virt_to_page(ptep));
9945 mm_dec_nr_pmds(mm);
9946 - *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
9947 + /*
9948 + * This update of passed address optimizes loops sequentially
9949 + * processing addresses in increments of huge page size (PMD_SIZE
9950 + * in this case). By clearing the pud, a PUD_SIZE area is unmapped.
9951 + * Update address to the 'last page' in the cleared area so that
9952 + * calling loop can move to first page past this area.
9953 + */
9954 + *addr |= PUD_SIZE - PMD_SIZE;
9955 return 1;
9956 }
9957 #define want_pmd_share() (1)
9958 diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
9959 index 2c616c1c62958..fbfb12e430101 100644
9960 --- a/net/bluetooth/sco.c
9961 +++ b/net/bluetooth/sco.c
9962 @@ -563,19 +563,24 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
9963 addr->sa_family != AF_BLUETOOTH)
9964 return -EINVAL;
9965
9966 - if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
9967 - return -EBADFD;
9968 + lock_sock(sk);
9969 + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
9970 + err = -EBADFD;
9971 + goto done;
9972 + }
9973
9974 - if (sk->sk_type != SOCK_SEQPACKET)
9975 - return -EINVAL;
9976 + if (sk->sk_type != SOCK_SEQPACKET) {
9977 + err = -EINVAL;
9978 + goto done;
9979 + }
9980
9981 hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR);
9982 - if (!hdev)
9983 - return -EHOSTUNREACH;
9984 + if (!hdev) {
9985 + err = -EHOSTUNREACH;
9986 + goto done;
9987 + }
9988 hci_dev_lock(hdev);
9989
9990 - lock_sock(sk);
9991 -
9992 /* Set destination address and psm */
9993 bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
9994
9995 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
9996 index 5b38d03f6d79a..614410a6db44b 100644
9997 --- a/net/ipv4/ip_gre.c
9998 +++ b/net/ipv4/ip_gre.c
9999 @@ -602,21 +602,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
10000 }
10001
10002 if (dev->header_ops) {
10003 - const int pull_len = tunnel->hlen + sizeof(struct iphdr);
10004 -
10005 if (skb_cow_head(skb, 0))
10006 goto free_skb;
10007
10008 tnl_params = (const struct iphdr *)skb->data;
10009
10010 - if (pull_len > skb_transport_offset(skb))
10011 - goto free_skb;
10012 -
10013 /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
10014 * to gre header.
10015 */
10016 - skb_pull(skb, pull_len);
10017 + skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
10018 skb_reset_mac_header(skb);
10019 +
10020 + if (skb->ip_summed == CHECKSUM_PARTIAL &&
10021 + skb_checksum_start(skb) < skb->data)
10022 + goto free_skb;
10023 } else {
10024 if (skb_cow_head(skb, dev->needed_headroom))
10025 goto free_skb;
10026 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
10027 index b0e6fc2c5e108..0808110451a0f 100644
10028 --- a/net/ipv4/tcp_input.c
10029 +++ b/net/ipv4/tcp_input.c
10030 @@ -2578,12 +2578,15 @@ static void tcp_mtup_probe_success(struct sock *sk)
10031 {
10032 struct tcp_sock *tp = tcp_sk(sk);
10033 struct inet_connection_sock *icsk = inet_csk(sk);
10034 + u64 val;
10035
10036 - /* FIXME: breaks with very large cwnd */
10037 tp->prior_ssthresh = tcp_current_ssthresh(sk);
10038 - tp->snd_cwnd = tp->snd_cwnd *
10039 - tcp_mss_to_mtu(sk, tp->mss_cache) /
10040 - icsk->icsk_mtup.probe_size;
10041 +
10042 + val = (u64)tp->snd_cwnd * tcp_mss_to_mtu(sk, tp->mss_cache);
10043 + do_div(val, icsk->icsk_mtup.probe_size);
10044 + WARN_ON_ONCE((u32)val != val);
10045 + tp->snd_cwnd = max_t(u32, 1U, val);
10046 +
10047 tp->snd_cwnd_cnt = 0;
10048 tp->snd_cwnd_stamp = tcp_jiffies32;
10049 tp->snd_ssthresh = tcp_current_ssthresh(sk);
10050 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
10051 index 67493ec6318ad..739fc69cdcc62 100644
10052 --- a/net/ipv4/tcp_output.c
10053 +++ b/net/ipv4/tcp_output.c
10054 @@ -3869,8 +3869,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
10055 tcp_rsk(req)->txhash = net_tx_rndhash();
10056 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
10057 if (!res) {
10058 - __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
10059 - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
10060 + TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
10061 + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
10062 if (unlikely(tcp_passive_fastopen(sk)))
10063 tcp_sk(sk)->total_retrans++;
10064 trace_tcp_retransmit_synack(sk, req);
10065 diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
10066 index 8a4285712808e..9031b7732fece 100644
10067 --- a/net/ipv4/xfrm4_protocol.c
10068 +++ b/net/ipv4/xfrm4_protocol.c
10069 @@ -298,4 +298,3 @@ void __init xfrm4_protocol_init(void)
10070 {
10071 xfrm_input_register_afinfo(&xfrm4_input_afinfo);
10072 }
10073 -EXPORT_SYMBOL(xfrm4_protocol_init);
10074 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
10075 index 92b32d131e1c3..e29553e4f4ee3 100644
10076 --- a/net/ipv6/addrconf.c
10077 +++ b/net/ipv6/addrconf.c
10078 @@ -789,6 +789,7 @@ static void dev_forward_change(struct inet6_dev *idev)
10079 {
10080 struct net_device *dev;
10081 struct inet6_ifaddr *ifa;
10082 + LIST_HEAD(tmp_addr_list);
10083
10084 if (!idev)
10085 return;
10086 @@ -807,14 +808,24 @@ static void dev_forward_change(struct inet6_dev *idev)
10087 }
10088 }
10089
10090 + read_lock_bh(&idev->lock);
10091 list_for_each_entry(ifa, &idev->addr_list, if_list) {
10092 if (ifa->flags&IFA_F_TENTATIVE)
10093 continue;
10094 + list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
10095 + }
10096 + read_unlock_bh(&idev->lock);
10097 +
10098 + while (!list_empty(&tmp_addr_list)) {
10099 + ifa = list_first_entry(&tmp_addr_list,
10100 + struct inet6_ifaddr, if_list_aux);
10101 + list_del(&ifa->if_list_aux);
10102 if (idev->cnf.forwarding)
10103 addrconf_join_anycast(ifa);
10104 else
10105 addrconf_leave_anycast(ifa);
10106 }
10107 +
10108 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
10109 NETCONFA_FORWARDING,
10110 dev->ifindex, &idev->cnf);
10111 @@ -3713,7 +3724,8 @@ static int addrconf_ifdown(struct net_device *dev, int how)
10112 unsigned long event = how ? NETDEV_UNREGISTER : NETDEV_DOWN;
10113 struct net *net = dev_net(dev);
10114 struct inet6_dev *idev;
10115 - struct inet6_ifaddr *ifa, *tmp;
10116 + struct inet6_ifaddr *ifa;
10117 + LIST_HEAD(tmp_addr_list);
10118 bool keep_addr = false;
10119 bool was_ready;
10120 int state, i;
10121 @@ -3805,16 +3817,23 @@ restart:
10122 write_lock_bh(&idev->lock);
10123 }
10124
10125 - list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
10126 + list_for_each_entry(ifa, &idev->addr_list, if_list)
10127 + list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
10128 + write_unlock_bh(&idev->lock);
10129 +
10130 + while (!list_empty(&tmp_addr_list)) {
10131 struct fib6_info *rt = NULL;
10132 bool keep;
10133
10134 + ifa = list_first_entry(&tmp_addr_list,
10135 + struct inet6_ifaddr, if_list_aux);
10136 + list_del(&ifa->if_list_aux);
10137 +
10138 addrconf_del_dad_work(ifa);
10139
10140 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
10141 !addr_is_local(&ifa->addr);
10142
10143 - write_unlock_bh(&idev->lock);
10144 spin_lock_bh(&ifa->lock);
10145
10146 if (keep) {
10147 @@ -3845,15 +3864,14 @@ restart:
10148 addrconf_leave_solict(ifa->idev, &ifa->addr);
10149 }
10150
10151 - write_lock_bh(&idev->lock);
10152 if (!keep) {
10153 + write_lock_bh(&idev->lock);
10154 list_del_rcu(&ifa->if_list);
10155 + write_unlock_bh(&idev->lock);
10156 in6_ifa_put(ifa);
10157 }
10158 }
10159
10160 - write_unlock_bh(&idev->lock);
10161 -
10162 /* Step 5: Discard anycast and multicast list */
10163 if (how) {
10164 ipv6_ac_destroy_dev(idev);
10165 @@ -4184,7 +4202,8 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
10166 send_rs = send_mld &&
10167 ipv6_accept_ra(ifp->idev) &&
10168 ifp->idev->cnf.rtr_solicits != 0 &&
10169 - (dev->flags&IFF_LOOPBACK) == 0;
10170 + (dev->flags & IFF_LOOPBACK) == 0 &&
10171 + (dev->type != ARPHRD_TUNNEL);
10172 read_unlock_bh(&ifp->idev->lock);
10173
10174 /* While dad is in progress mld report's source address is in6_addrany.
10175 diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
10176 index ffcfcd2b128f3..a4cad71c42047 100644
10177 --- a/net/ipv6/seg6_hmac.c
10178 +++ b/net/ipv6/seg6_hmac.c
10179 @@ -401,7 +401,6 @@ int __init seg6_hmac_init(void)
10180 {
10181 return seg6_hmac_init_algo();
10182 }
10183 -EXPORT_SYMBOL(seg6_hmac_init);
10184
10185 int __net_init seg6_hmac_net_init(struct net *net)
10186 {
10187 diff --git a/net/key/af_key.c b/net/key/af_key.c
10188 index dd064d5eff6ed..32fe99cd01fc8 100644
10189 --- a/net/key/af_key.c
10190 +++ b/net/key/af_key.c
10191 @@ -2830,10 +2830,12 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
10192 void *ext_hdrs[SADB_EXT_MAX];
10193 int err;
10194
10195 - err = pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
10196 - BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
10197 - if (err)
10198 - return err;
10199 + /* Non-zero return value of pfkey_broadcast() does not always signal
10200 + * an error and even on an actual error we may still want to process
10201 + * the message so rather ignore the return value.
10202 + */
10203 + pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
10204 + BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
10205
10206 memset(ext_hdrs, 0, sizeof(ext_hdrs));
10207 err = parse_exthdrs(skb, hdr, ext_hdrs);
10208 diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
10209 index 9c94baaf693cb..15f47918cbacd 100644
10210 --- a/net/mac80211/chan.c
10211 +++ b/net/mac80211/chan.c
10212 @@ -1639,12 +1639,9 @@ int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata)
10213
10214 if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACE_NONE) {
10215 if (old_ctx)
10216 - err = ieee80211_vif_use_reserved_reassign(sdata);
10217 - else
10218 - err = ieee80211_vif_use_reserved_assign(sdata);
10219 + return ieee80211_vif_use_reserved_reassign(sdata);
10220
10221 - if (err)
10222 - return err;
10223 + return ieee80211_vif_use_reserved_assign(sdata);
10224 }
10225
10226 /*
10227 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
10228 index e574fbf6745a4..7747a6f46d299 100644
10229 --- a/net/mac80211/ieee80211_i.h
10230 +++ b/net/mac80211/ieee80211_i.h
10231 @@ -1082,6 +1082,9 @@ struct tpt_led_trigger {
10232 * a scan complete for an aborted scan.
10233 * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
10234 * cancelled.
10235 + * @SCAN_BEACON_WAIT: Set whenever we're passive scanning because of radar/no-IR
10236 + * and could send a probe request after receiving a beacon.
10237 + * @SCAN_BEACON_DONE: Beacon received, we can now send a probe request
10238 */
10239 enum {
10240 SCAN_SW_SCANNING,
10241 @@ -1090,6 +1093,8 @@ enum {
10242 SCAN_COMPLETED,
10243 SCAN_ABORTED,
10244 SCAN_HW_CANCELLED,
10245 + SCAN_BEACON_WAIT,
10246 + SCAN_BEACON_DONE,
10247 };
10248
10249 /**
10250 diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
10251 index 4d31d9688dc23..344b2c22e75b5 100644
10252 --- a/net/mac80211/scan.c
10253 +++ b/net/mac80211/scan.c
10254 @@ -252,6 +252,16 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
10255 if (likely(!sdata1 && !sdata2))
10256 return;
10257
10258 + if (test_and_clear_bit(SCAN_BEACON_WAIT, &local->scanning)) {
10259 + /*
10260 + * we were passive scanning because of radar/no-IR, but
10261 + * the beacon/proberesp rx gives us an opportunity to upgrade
10262 + * to active scan
10263 + */
10264 + set_bit(SCAN_BEACON_DONE, &local->scanning);
10265 + ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
10266 + }
10267 +
10268 if (ieee80211_is_probe_resp(mgmt->frame_control)) {
10269 struct cfg80211_scan_request *scan_req;
10270 struct cfg80211_sched_scan_request *sched_scan_req;
10271 @@ -753,6 +763,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
10272 IEEE80211_CHAN_RADAR)) ||
10273 !req->n_ssids) {
10274 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
10275 + if (req->n_ssids)
10276 + set_bit(SCAN_BEACON_WAIT, &local->scanning);
10277 } else {
10278 ieee80211_scan_state_send_probe(local, &next_delay);
10279 next_delay = IEEE80211_CHANNEL_TIME;
10280 @@ -945,6 +957,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
10281 !scan_req->n_ssids) {
10282 *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
10283 local->next_scan_state = SCAN_DECISION;
10284 + if (scan_req->n_ssids)
10285 + set_bit(SCAN_BEACON_WAIT, &local->scanning);
10286 return;
10287 }
10288
10289 @@ -1037,6 +1051,8 @@ void ieee80211_scan_work(struct work_struct *work)
10290 goto out;
10291 }
10292
10293 + clear_bit(SCAN_BEACON_WAIT, &local->scanning);
10294 +
10295 /*
10296 * as long as no delay is required advance immediately
10297 * without scheduling a new work
10298 @@ -1047,6 +1063,10 @@ void ieee80211_scan_work(struct work_struct *work)
10299 goto out_complete;
10300 }
10301
10302 + if (test_and_clear_bit(SCAN_BEACON_DONE, &local->scanning) &&
10303 + local->next_scan_state == SCAN_DECISION)
10304 + local->next_scan_state = SCAN_SEND_PROBE;
10305 +
10306 switch (local->next_scan_state) {
10307 case SCAN_DECISION:
10308 /* if no more bands/channels left, complete scan */
10309 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
10310 index 545da270e8020..58a7d89719b1d 100644
10311 --- a/net/netfilter/nf_tables_api.c
10312 +++ b/net/netfilter/nf_tables_api.c
10313 @@ -2267,27 +2267,31 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
10314
10315 err = nf_tables_expr_parse(ctx, nla, &info);
10316 if (err < 0)
10317 - goto err1;
10318 + goto err_expr_parse;
10319 +
10320 + err = -EOPNOTSUPP;
10321 + if (!(info.ops->type->flags & NFT_EXPR_STATEFUL))
10322 + goto err_expr_stateful;
10323
10324 err = -ENOMEM;
10325 expr = kzalloc(info.ops->size, GFP_KERNEL);
10326 if (expr == NULL)
10327 - goto err2;
10328 + goto err_expr_stateful;
10329
10330 err = nf_tables_newexpr(ctx, &info, expr);
10331 if (err < 0)
10332 - goto err3;
10333 + goto err_expr_new;
10334
10335 return expr;
10336 -err3:
10337 +err_expr_new:
10338 kfree(expr);
10339 -err2:
10340 +err_expr_stateful:
10341 owner = info.ops->type->owner;
10342 if (info.ops->type->release_ops)
10343 info.ops->type->release_ops(info.ops);
10344
10345 module_put(owner);
10346 -err1:
10347 +err_expr_parse:
10348 return ERR_PTR(err);
10349 }
10350
10351 @@ -6566,6 +6570,9 @@ static void nft_commit_release(struct nft_trans *trans)
10352 nf_tables_chain_destroy(&trans->ctx);
10353 break;
10354 case NFT_MSG_DELRULE:
10355 + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
10356 + nft_flow_rule_destroy(nft_trans_flow_rule(trans));
10357 +
10358 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
10359 break;
10360 case NFT_MSG_DELSET:
10361 @@ -6887,6 +6894,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
10362 nf_tables_rule_notify(&trans->ctx,
10363 nft_trans_rule(trans),
10364 NFT_MSG_NEWRULE);
10365 + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
10366 + nft_flow_rule_destroy(nft_trans_flow_rule(trans));
10367 +
10368 nft_trans_destroy(trans);
10369 break;
10370 case NFT_MSG_DELRULE:
10371 diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
10372 index 6fdea0e57db8a..6bcc18124e5bd 100644
10373 --- a/net/netfilter/nft_dynset.c
10374 +++ b/net/netfilter/nft_dynset.c
10375 @@ -204,9 +204,6 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
10376 return PTR_ERR(priv->expr);
10377
10378 err = -EOPNOTSUPP;
10379 - if (!(priv->expr->ops->type->flags & NFT_EXPR_STATEFUL))
10380 - goto err1;
10381 -
10382 if (priv->expr->ops->type->flags & NFT_EXPR_GC) {
10383 if (set->flags & NFT_SET_TIMEOUT)
10384 goto err1;
10385 diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
10386 index 17c0f75dfcdb7..0c5bc3c37ecf4 100644
10387 --- a/net/netfilter/nft_nat.c
10388 +++ b/net/netfilter/nft_nat.c
10389 @@ -283,7 +283,8 @@ static void nft_nat_inet_eval(const struct nft_expr *expr,
10390 {
10391 const struct nft_nat *priv = nft_expr_priv(expr);
10392
10393 - if (priv->family == nft_pf(pkt))
10394 + if (priv->family == nft_pf(pkt) ||
10395 + priv->family == NFPROTO_INET)
10396 nft_nat_eval(expr, regs, pkt);
10397 }
10398
10399 diff --git a/net/nfc/core.c b/net/nfc/core.c
10400 index 63701a980ee12..2d4729d1f0eb9 100644
10401 --- a/net/nfc/core.c
10402 +++ b/net/nfc/core.c
10403 @@ -1159,6 +1159,7 @@ void nfc_unregister_device(struct nfc_dev *dev)
10404 if (dev->rfkill) {
10405 rfkill_unregister(dev->rfkill);
10406 rfkill_destroy(dev->rfkill);
10407 + dev->rfkill = NULL;
10408 }
10409 dev->shutting_down = true;
10410 device_unlock(&dev->dev);
10411 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
10412 index 9fe264bec70ce..cb174f699665b 100644
10413 --- a/net/rxrpc/ar-internal.h
10414 +++ b/net/rxrpc/ar-internal.h
10415 @@ -665,20 +665,21 @@ struct rxrpc_call {
10416
10417 spinlock_t input_lock; /* Lock for packet input to this call */
10418
10419 - /* receive-phase ACK management */
10420 + /* Receive-phase ACK management (ACKs we send). */
10421 u8 ackr_reason; /* reason to ACK */
10422 rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
10423 - rxrpc_serial_t ackr_first_seq; /* first sequence number received */
10424 - rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
10425 - rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
10426 - rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
10427 + rxrpc_seq_t ackr_highest_seq; /* Highest sequence number received */
10428 + atomic_t ackr_nr_unacked; /* Number of unacked packets */
10429 + atomic_t ackr_nr_consumed; /* Number of packets needing hard ACK */
10430
10431 /* ping management */
10432 rxrpc_serial_t ping_serial; /* Last ping sent */
10433 ktime_t ping_time; /* Time last ping sent */
10434
10435 - /* transmission-phase ACK management */
10436 + /* Transmission-phase ACK management (ACKs we've received). */
10437 ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
10438 + rxrpc_seq_t acks_first_seq; /* first sequence number received */
10439 + rxrpc_seq_t acks_prev_seq; /* Highest previousPacket received */
10440 rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
10441 rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */
10442 rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
10443 diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
10444 index 80e15310f1b29..8574e7066d94c 100644
10445 --- a/net/rxrpc/call_event.c
10446 +++ b/net/rxrpc/call_event.c
10447 @@ -407,7 +407,8 @@ recheck_state:
10448 goto recheck_state;
10449 }
10450
10451 - if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
10452 + if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) &&
10453 + call->state != RXRPC_CALL_CLIENT_RECV_REPLY) {
10454 rxrpc_resend(call, now);
10455 goto recheck_state;
10456 }
10457 diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
10458 index 916d1f455b218..5cf64cf8debf7 100644
10459 --- a/net/rxrpc/input.c
10460 +++ b/net/rxrpc/input.c
10461 @@ -413,8 +413,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
10462 {
10463 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
10464 enum rxrpc_call_state state;
10465 - unsigned int j, nr_subpackets;
10466 - rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
10467 + unsigned int j, nr_subpackets, nr_unacked = 0;
10468 + rxrpc_serial_t serial = sp->hdr.serial, ack_serial = serial;
10469 rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
10470 bool immediate_ack = false, jumbo_bad = false;
10471 u8 ack = 0;
10472 @@ -454,7 +454,6 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
10473 !rxrpc_receiving_reply(call))
10474 goto unlock;
10475
10476 - call->ackr_prev_seq = seq0;
10477 hard_ack = READ_ONCE(call->rx_hard_ack);
10478
10479 nr_subpackets = sp->nr_subpackets;
10480 @@ -535,6 +534,9 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
10481 ack_serial = serial;
10482 }
10483
10484 + if (after(seq0, call->ackr_highest_seq))
10485 + call->ackr_highest_seq = seq0;
10486 +
10487 /* Queue the packet. We use a couple of memory barriers here as need
10488 * to make sure that rx_top is perceived to be set after the buffer
10489 * pointer and that the buffer pointer is set after the annotation and
10490 @@ -568,6 +570,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
10491 sp = NULL;
10492 }
10493
10494 + nr_unacked++;
10495 +
10496 if (last) {
10497 set_bit(RXRPC_CALL_RX_LAST, &call->flags);
10498 if (!ack) {
10499 @@ -587,9 +591,14 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
10500 }
10501 call->rx_expect_next = seq + 1;
10502 }
10503 + if (!ack)
10504 + ack_serial = serial;
10505 }
10506
10507 ack:
10508 + if (atomic_add_return(nr_unacked, &call->ackr_nr_unacked) > 2 && !ack)
10509 + ack = RXRPC_ACK_IDLE;
10510 +
10511 if (ack)
10512 rxrpc_propose_ACK(call, ack, ack_serial,
10513 immediate_ack, true,
10514 @@ -808,7 +817,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
10515 static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
10516 rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
10517 {
10518 - rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq);
10519 + rxrpc_seq_t base = READ_ONCE(call->acks_first_seq);
10520
10521 if (after(first_pkt, base))
10522 return true; /* The window advanced */
10523 @@ -816,7 +825,7 @@ static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
10524 if (before(first_pkt, base))
10525 return false; /* firstPacket regressed */
10526
10527 - if (after_eq(prev_pkt, call->ackr_prev_seq))
10528 + if (after_eq(prev_pkt, call->acks_prev_seq))
10529 return true; /* previousPacket hasn't regressed. */
10530
10531 /* Some rx implementations put a serial number in previousPacket. */
10532 @@ -891,8 +900,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
10533 /* Discard any out-of-order or duplicate ACKs (outside lock). */
10534 if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
10535 trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
10536 - first_soft_ack, call->ackr_first_seq,
10537 - prev_pkt, call->ackr_prev_seq);
10538 + first_soft_ack, call->acks_first_seq,
10539 + prev_pkt, call->acks_prev_seq);
10540 return;
10541 }
10542
10543 @@ -907,14 +916,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
10544 /* Discard any out-of-order or duplicate ACKs (inside lock). */
10545 if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
10546 trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
10547 - first_soft_ack, call->ackr_first_seq,
10548 - prev_pkt, call->ackr_prev_seq);
10549 + first_soft_ack, call->acks_first_seq,
10550 + prev_pkt, call->acks_prev_seq);
10551 goto out;
10552 }
10553 call->acks_latest_ts = skb->tstamp;
10554
10555 - call->ackr_first_seq = first_soft_ack;
10556 - call->ackr_prev_seq = prev_pkt;
10557 + call->acks_first_seq = first_soft_ack;
10558 + call->acks_prev_seq = prev_pkt;
10559
10560 /* Parse rwind and mtu sizes if provided. */
10561 if (buf.info.rxMTU)
10562 diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
10563 index a4a6f8ee07201..6202d2e32914a 100644
10564 --- a/net/rxrpc/output.c
10565 +++ b/net/rxrpc/output.c
10566 @@ -74,11 +74,18 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
10567 u8 reason)
10568 {
10569 rxrpc_serial_t serial;
10570 + unsigned int tmp;
10571 rxrpc_seq_t hard_ack, top, seq;
10572 int ix;
10573 u32 mtu, jmax;
10574 u8 *ackp = pkt->acks;
10575
10576 + tmp = atomic_xchg(&call->ackr_nr_unacked, 0);
10577 + tmp |= atomic_xchg(&call->ackr_nr_consumed, 0);
10578 + if (!tmp && (reason == RXRPC_ACK_DELAY ||
10579 + reason == RXRPC_ACK_IDLE))
10580 + return 0;
10581 +
10582 /* Barrier against rxrpc_input_data(). */
10583 serial = call->ackr_serial;
10584 hard_ack = READ_ONCE(call->rx_hard_ack);
10585 @@ -89,7 +96,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
10586 pkt->ack.bufferSpace = htons(8);
10587 pkt->ack.maxSkew = htons(0);
10588 pkt->ack.firstPacket = htonl(hard_ack + 1);
10589 - pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
10590 + pkt->ack.previousPacket = htonl(call->ackr_highest_seq);
10591 pkt->ack.serial = htonl(serial);
10592 pkt->ack.reason = reason;
10593 pkt->ack.nAcks = top - hard_ack;
10594 @@ -180,6 +187,10 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
10595 n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);
10596
10597 spin_unlock_bh(&call->lock);
10598 + if (n == 0) {
10599 + kfree(pkt);
10600 + return 0;
10601 + }
10602
10603 iov[0].iov_base = pkt;
10604 iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
10605 @@ -227,13 +238,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
10606 ntohl(pkt->ack.serial),
10607 false, true,
10608 rxrpc_propose_ack_retry_tx);
10609 - } else {
10610 - spin_lock_bh(&call->lock);
10611 - if (after(hard_ack, call->ackr_consumed))
10612 - call->ackr_consumed = hard_ack;
10613 - if (after(top, call->ackr_seen))
10614 - call->ackr_seen = top;
10615 - spin_unlock_bh(&call->lock);
10616 }
10617
10618 rxrpc_set_keepalive(call);
10619 diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
10620 index 4f48e3bdd4b4b..c75789ebc514d 100644
10621 --- a/net/rxrpc/recvmsg.c
10622 +++ b/net/rxrpc/recvmsg.c
10623 @@ -212,11 +212,9 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
10624 rxrpc_end_rx_phase(call, serial);
10625 } else {
10626 /* Check to see if there's an ACK that needs sending. */
10627 - if (after_eq(hard_ack, call->ackr_consumed + 2) ||
10628 - after_eq(top, call->ackr_seen + 2) ||
10629 - (hard_ack == top && after(hard_ack, call->ackr_consumed)))
10630 - rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
10631 - true, true,
10632 + if (atomic_inc_return(&call->ackr_nr_consumed) > 2)
10633 + rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial,
10634 + true, false,
10635 rxrpc_propose_ack_rotate_rx);
10636 if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
10637 rxrpc_send_ack_packet(call, false, NULL);
10638 diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
10639 index 1a340eb0abf7c..22f020099214d 100644
10640 --- a/net/rxrpc/sendmsg.c
10641 +++ b/net/rxrpc/sendmsg.c
10642 @@ -463,6 +463,12 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
10643
10644 success:
10645 ret = copied;
10646 + if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) {
10647 + read_lock_bh(&call->state_lock);
10648 + if (call->error < 0)
10649 + ret = call->error;
10650 + read_unlock_bh(&call->state_lock);
10651 + }
10652 out:
10653 call->tx_pending = skb;
10654 _leave(" = %d", ret);
10655 diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
10656 index 18dade4e6f9a0..8fc4190725058 100644
10657 --- a/net/rxrpc/sysctl.c
10658 +++ b/net/rxrpc/sysctl.c
10659 @@ -12,7 +12,7 @@
10660
10661 static struct ctl_table_header *rxrpc_sysctl_reg_table;
10662 static const unsigned int four = 4;
10663 -static const unsigned int thirtytwo = 32;
10664 +static const unsigned int max_backlog = RXRPC_BACKLOG_MAX - 1;
10665 static const unsigned int n_65535 = 65535;
10666 static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
10667 static const unsigned long one_jiffy = 1;
10668 @@ -97,7 +97,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
10669 .mode = 0644,
10670 .proc_handler = proc_dointvec_minmax,
10671 .extra1 = (void *)&four,
10672 - .extra2 = (void *)&thirtytwo,
10673 + .extra2 = (void *)&max_backlog,
10674 },
10675 {
10676 .procname = "rx_window_size",
10677 diff --git a/net/sctp/input.c b/net/sctp/input.c
10678 index 9616b600a8766..c306cb25f5246 100644
10679 --- a/net/sctp/input.c
10680 +++ b/net/sctp/input.c
10681 @@ -92,6 +92,7 @@ int sctp_rcv(struct sk_buff *skb)
10682 struct sctp_chunk *chunk;
10683 union sctp_addr src;
10684 union sctp_addr dest;
10685 + int bound_dev_if;
10686 int family;
10687 struct sctp_af *af;
10688 struct net *net = dev_net(skb->dev);
10689 @@ -169,7 +170,8 @@ int sctp_rcv(struct sk_buff *skb)
10690 * If a frame arrives on an interface and the receiving socket is
10691 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
10692 */
10693 - if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
10694 + bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
10695 + if (bound_dev_if && (bound_dev_if != af->skb_iif(skb))) {
10696 if (transport) {
10697 sctp_transport_put(transport);
10698 asoc = NULL;
10699 diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
10700 index a5a8cca46bd5f..394491692a078 100644
10701 --- a/net/smc/af_smc.c
10702 +++ b/net/smc/af_smc.c
10703 @@ -877,9 +877,9 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
10704 if (rc && rc != -EINPROGRESS)
10705 goto out;
10706
10707 - sock_hold(&smc->sk); /* sock put in passive closing */
10708 if (smc->use_fallback)
10709 goto out;
10710 + sock_hold(&smc->sk); /* sock put in passive closing */
10711 if (flags & O_NONBLOCK) {
10712 if (schedule_work(&smc->connect_work))
10713 smc->connect_nonblock = 1;
10714 diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
10715 index 7ef37054071f8..cb8740d156336 100644
10716 --- a/net/sunrpc/xdr.c
10717 +++ b/net/sunrpc/xdr.c
10718 @@ -608,7 +608,11 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
10719 */
10720 xdr->p = (void *)p + frag2bytes;
10721 space_left = xdr->buf->buflen - xdr->buf->len;
10722 - xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
10723 + if (space_left - nbytes >= PAGE_SIZE)
10724 + xdr->end = (void *)p + PAGE_SIZE;
10725 + else
10726 + xdr->end = (void *)p + space_left - frag1bytes;
10727 +
10728 xdr->buf->page_len += frag2bytes;
10729 xdr->buf->len += nbytes;
10730 return p;
10731 diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
10732 index c091417bd799e..60aaed9457e44 100644
10733 --- a/net/sunrpc/xprtrdma/rpc_rdma.c
10734 +++ b/net/sunrpc/xprtrdma/rpc_rdma.c
10735 @@ -1042,6 +1042,7 @@ static bool
10736 rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
10737 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
10738 {
10739 + struct rpc_xprt *xprt = &r_xprt->rx_xprt;
10740 struct xdr_stream *xdr = &rep->rr_stream;
10741 __be32 *p;
10742
10743 @@ -1065,6 +1066,10 @@ rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
10744 if (*p != cpu_to_be32(RPC_CALL))
10745 return false;
10746
10747 + /* No bc service. */
10748 + if (xprt->bc_serv == NULL)
10749 + return false;
10750 +
10751 /* Now that we are sure this is a backchannel call,
10752 * advance to the RPC header.
10753 */
10754 diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
10755 index 8bd2454cc89dc..577f71dd63fb4 100644
10756 --- a/net/tipc/bearer.c
10757 +++ b/net/tipc/bearer.c
10758 @@ -248,9 +248,8 @@ static int tipc_enable_bearer(struct net *net, const char *name,
10759 u32 i;
10760
10761 if (!bearer_name_validate(name, &b_names)) {
10762 - errstr = "illegal name";
10763 NL_SET_ERR_MSG(extack, "Illegal name");
10764 - goto rejected;
10765 + return res;
10766 }
10767
10768 if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
10769 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
10770 index 05470ca91bd94..f33e90bd0683b 100644
10771 --- a/net/unix/af_unix.c
10772 +++ b/net/unix/af_unix.c
10773 @@ -440,7 +440,7 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
10774 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
10775 * to other and its full, we will hang waiting for POLLOUT.
10776 */
10777 - if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD))
10778 + if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
10779 return 1;
10780
10781 if (connected)
10782 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
10783 index d3e2b97d5d051..8459f5b6002e1 100644
10784 --- a/net/wireless/nl80211.c
10785 +++ b/net/wireless/nl80211.c
10786 @@ -3240,6 +3240,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
10787 wdev_lock(wdev);
10788 switch (wdev->iftype) {
10789 case NL80211_IFTYPE_AP:
10790 + case NL80211_IFTYPE_P2P_GO:
10791 if (wdev->ssid_len &&
10792 nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
10793 goto nla_put_failure_locked;
10794 diff --git a/scripts/faddr2line b/scripts/faddr2line
10795 index 6c6439f69a725..0e6268d598835 100755
10796 --- a/scripts/faddr2line
10797 +++ b/scripts/faddr2line
10798 @@ -44,17 +44,6 @@
10799 set -o errexit
10800 set -o nounset
10801
10802 -READELF="${CROSS_COMPILE:-}readelf"
10803 -ADDR2LINE="${CROSS_COMPILE:-}addr2line"
10804 -SIZE="${CROSS_COMPILE:-}size"
10805 -NM="${CROSS_COMPILE:-}nm"
10806 -
10807 -command -v awk >/dev/null 2>&1 || die "awk isn't installed"
10808 -command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
10809 -command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed"
10810 -command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed"
10811 -command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed"
10812 -
10813 usage() {
10814 echo "usage: faddr2line [--list] <object file> <func+offset> <func+offset>..." >&2
10815 exit 1
10816 @@ -69,6 +58,14 @@ die() {
10817 exit 1
10818 }
10819
10820 +READELF="${CROSS_COMPILE:-}readelf"
10821 +ADDR2LINE="${CROSS_COMPILE:-}addr2line"
10822 +AWK="awk"
10823 +
10824 +command -v ${AWK} >/dev/null 2>&1 || die "${AWK} isn't installed"
10825 +command -v ${READELF} >/dev/null 2>&1 || die "${READELF} isn't installed"
10826 +command -v ${ADDR2LINE} >/dev/null 2>&1 || die "${ADDR2LINE} isn't installed"
10827 +
10828 # Try to figure out the source directory prefix so we can remove it from the
10829 # addr2line output. HACK ALERT: This assumes that start_kernel() is in
10830 # init/main.c! This only works for vmlinux. Otherwise it falls back to
10831 @@ -76,7 +73,7 @@ die() {
10832 find_dir_prefix() {
10833 local objfile=$1
10834
10835 - local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
10836 + local start_kernel_addr=$(${READELF} --symbols --wide $objfile | ${AWK} '$8 == "start_kernel" {printf "0x%s", $2}')
10837 [[ -z $start_kernel_addr ]] && return
10838
10839 local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
10840 @@ -97,86 +94,133 @@ __faddr2line() {
10841 local dir_prefix=$3
10842 local print_warnings=$4
10843
10844 - local func=${func_addr%+*}
10845 + local sym_name=${func_addr%+*}
10846 local offset=${func_addr#*+}
10847 offset=${offset%/*}
10848 - local size=
10849 - [[ $func_addr =~ "/" ]] && size=${func_addr#*/}
10850 + local user_size=
10851 + [[ $func_addr =~ "/" ]] && user_size=${func_addr#*/}
10852
10853 - if [[ -z $func ]] || [[ -z $offset ]] || [[ $func = $func_addr ]]; then
10854 + if [[ -z $sym_name ]] || [[ -z $offset ]] || [[ $sym_name = $func_addr ]]; then
10855 warn "bad func+offset $func_addr"
10856 DONE=1
10857 return
10858 fi
10859
10860 # Go through each of the object's symbols which match the func name.
10861 - # In rare cases there might be duplicates.
10862 - file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}')
10863 - while read symbol; do
10864 - local fields=($symbol)
10865 - local sym_base=0x${fields[0]}
10866 - local sym_type=${fields[1]}
10867 - local sym_end=${fields[3]}
10868 -
10869 - # calculate the size
10870 - local sym_size=$(($sym_end - $sym_base))
10871 + # In rare cases there might be duplicates, in which case we print all
10872 + # matches.
10873 + while read line; do
10874 + local fields=($line)
10875 + local sym_addr=0x${fields[1]}
10876 + local sym_elf_size=${fields[2]}
10877 + local sym_sec=${fields[6]}
10878 +
10879 + # Get the section size:
10880 + local sec_size=$(${READELF} --section-headers --wide $objfile |
10881 + sed 's/\[ /\[/' |
10882 + ${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print "0x" $6; exit }')
10883 +
10884 + if [[ -z $sec_size ]]; then
10885 + warn "bad section size: section: $sym_sec"
10886 + DONE=1
10887 + return
10888 + fi
10889 +
10890 + # Calculate the symbol size.
10891 + #
10892 + # Unfortunately we can't use the ELF size, because kallsyms
10893 + # also includes the padding bytes in its size calculation. For
10894 + # kallsyms, the size calculation is the distance between the
10895 + # symbol and the next symbol in a sorted list.
10896 + local sym_size
10897 + local cur_sym_addr
10898 + local found=0
10899 + while read line; do
10900 + local fields=($line)
10901 + cur_sym_addr=0x${fields[1]}
10902 + local cur_sym_elf_size=${fields[2]}
10903 + local cur_sym_name=${fields[7]:-}
10904 +
10905 + if [[ $cur_sym_addr = $sym_addr ]] &&
10906 + [[ $cur_sym_elf_size = $sym_elf_size ]] &&
10907 + [[ $cur_sym_name = $sym_name ]]; then
10908 + found=1
10909 + continue
10910 + fi
10911 +
10912 + if [[ $found = 1 ]]; then
10913 + sym_size=$(($cur_sym_addr - $sym_addr))
10914 + [[ $sym_size -lt $sym_elf_size ]] && continue;
10915 + found=2
10916 + break
10917 + fi
10918 + done < <(${READELF} --symbols --wide $objfile | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2)
10919 +
10920 + if [[ $found = 0 ]]; then
10921 + warn "can't find symbol: sym_name: $sym_name sym_sec: $sym_sec sym_addr: $sym_addr sym_elf_size: $sym_elf_size"
10922 + DONE=1
10923 + return
10924 + fi
10925 +
10926 + # If nothing was found after the symbol, assume it's the last
10927 + # symbol in the section.
10928 + [[ $found = 1 ]] && sym_size=$(($sec_size - $sym_addr))
10929 +
10930 if [[ -z $sym_size ]] || [[ $sym_size -le 0 ]]; then
10931 - warn "bad symbol size: base: $sym_base end: $sym_end"
10932 + warn "bad symbol size: sym_addr: $sym_addr cur_sym_addr: $cur_sym_addr"
10933 DONE=1
10934 return
10935 fi
10936 +
10937 sym_size=0x$(printf %x $sym_size)
10938
10939 - # calculate the address
10940 - local addr=$(($sym_base + $offset))
10941 + # Calculate the section address from user-supplied offset:
10942 + local addr=$(($sym_addr + $offset))
10943 if [[ -z $addr ]] || [[ $addr = 0 ]]; then
10944 - warn "bad address: $sym_base + $offset"
10945 + warn "bad address: $sym_addr + $offset"
10946 DONE=1
10947 return
10948 fi
10949 addr=0x$(printf %x $addr)
10950
10951 - # weed out non-function symbols
10952 - if [[ $sym_type != t ]] && [[ $sym_type != T ]]; then
10953 - [[ $print_warnings = 1 ]] &&
10954 - echo "skipping $func address at $addr due to non-function symbol of type '$sym_type'"
10955 - continue
10956 - fi
10957 -
10958 - # if the user provided a size, make sure it matches the symbol's size
10959 - if [[ -n $size ]] && [[ $size -ne $sym_size ]]; then
10960 + # If the user provided a size, make sure it matches the symbol's size:
10961 + if [[ -n $user_size ]] && [[ $user_size -ne $sym_size ]]; then
10962 [[ $print_warnings = 1 ]] &&
10963 - echo "skipping $func address at $addr due to size mismatch ($size != $sym_size)"
10964 + echo "skipping $sym_name address at $addr due to size mismatch ($user_size != $sym_size)"
10965 continue;
10966 fi
10967
10968 - # make sure the provided offset is within the symbol's range
10969 + # Make sure the provided offset is within the symbol's range:
10970 if [[ $offset -gt $sym_size ]]; then
10971 [[ $print_warnings = 1 ]] &&
10972 - echo "skipping $func address at $addr due to size mismatch ($offset > $sym_size)"
10973 + echo "skipping $sym_name address at $addr due to size mismatch ($offset > $sym_size)"
10974 continue
10975 fi
10976
10977 - # separate multiple entries with a blank line
10978 + # In case of duplicates or multiple addresses specified on the
10979 + # cmdline, separate multiple entries with a blank line:
10980 [[ $FIRST = 0 ]] && echo
10981 FIRST=0
10982
10983 - # pass real address to addr2line
10984 - echo "$func+$offset/$sym_size:"
10985 - local file_lines=$(${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;")
10986 - [[ -z $file_lines ]] && return
10987 + echo "$sym_name+$offset/$sym_size:"
10988
10989 + # Pass section address to addr2line and strip absolute paths
10990 + # from the output:
10991 + local output=$(${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;")
10992 + [[ -z $output ]] && continue
10993 +
10994 + # Default output (non --list):
10995 if [[ $LIST = 0 ]]; then
10996 - echo "$file_lines" | while read -r line
10997 + echo "$output" | while read -r line
10998 do
10999 echo $line
11000 done
11001 DONE=1;
11002 - return
11003 + continue
11004 fi
11005
11006 - # show each line with context
11007 - echo "$file_lines" | while read -r line
11008 + # For --list, show each line with its corresponding source code:
11009 + echo "$output" | while read -r line
11010 do
11011 echo
11012 echo $line
11013 @@ -184,12 +228,12 @@ __faddr2line() {
11014 n1=$[$n-5]
11015 n2=$[$n+5]
11016 f=$(echo $line | sed 's/.*at \(.\+\):.*/\1/g')
11017 - awk 'NR>=strtonum("'$n1'") && NR<=strtonum("'$n2'") { if (NR=='$n') printf(">%d<", NR); else printf(" %d ", NR); printf("\t%s\n", $0)}' $f
11018 + ${AWK} 'NR>=strtonum("'$n1'") && NR<=strtonum("'$n2'") { if (NR=='$n') printf(">%d<", NR); else printf(" %d ", NR); printf("\t%s\n", $0)}' $f
11019 done
11020
11021 DONE=1
11022
11023 - done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
11024 + done < <(${READELF} --symbols --wide $objfile | ${AWK} -v fn=$sym_name '$4 == "FUNC" && $8 == fn')
11025 }
11026
11027 [[ $# -lt 2 ]] && usage
11028 diff --git a/scripts/gdb/linux/config.py b/scripts/gdb/linux/config.py
11029 index 90e1565b19671..8843ab3cbaddc 100644
11030 --- a/scripts/gdb/linux/config.py
11031 +++ b/scripts/gdb/linux/config.py
11032 @@ -24,9 +24,9 @@ class LxConfigDump(gdb.Command):
11033 filename = arg
11034
11035 try:
11036 - py_config_ptr = gdb.parse_and_eval("kernel_config_data + 8")
11037 - py_config_size = gdb.parse_and_eval(
11038 - "sizeof(kernel_config_data) - 1 - 8 * 2")
11039 + py_config_ptr = gdb.parse_and_eval("&kernel_config_data")
11040 + py_config_ptr_end = gdb.parse_and_eval("&kernel_config_data_end")
11041 + py_config_size = py_config_ptr_end - py_config_ptr
11042 except gdb.error as e:
11043 raise gdb.GdbError("Can't find config, enable CONFIG_IKCONFIG?")
11044
11045 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
11046 index 13cda6aa26880..59011ddf8bb80 100644
11047 --- a/scripts/mod/modpost.c
11048 +++ b/scripts/mod/modpost.c
11049 @@ -1283,7 +1283,8 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
11050
11051 static inline int is_arm_mapping_symbol(const char *str)
11052 {
11053 - return str[0] == '$' && strchr("axtd", str[1])
11054 + return str[0] == '$' &&
11055 + (str[1] == 'a' || str[1] == 'd' || str[1] == 't' || str[1] == 'x')
11056 && (str[2] == '\0' || str[2] == '.');
11057 }
11058
11059 @@ -1998,7 +1999,7 @@ static char *remove_dot(char *s)
11060
11061 if (n && s[n]) {
11062 size_t m = strspn(s + n + 1, "0123456789");
11063 - if (m && (s[n + m] == '.' || s[n + m] == 0))
11064 + if (m && (s[n + m + 1] == '.' || s[n + m + 1] == 0))
11065 s[n] = 0;
11066 }
11067 return s;
11068 diff --git a/security/integrity/platform_certs/keyring_handler.h b/security/integrity/platform_certs/keyring_handler.h
11069 index 2462bfa08fe34..cd06bd6072be2 100644
11070 --- a/security/integrity/platform_certs/keyring_handler.h
11071 +++ b/security/integrity/platform_certs/keyring_handler.h
11072 @@ -30,3 +30,11 @@ efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type);
11073 efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type);
11074
11075 #endif
11076 +
11077 +#ifndef UEFI_QUIRK_SKIP_CERT
11078 +#define UEFI_QUIRK_SKIP_CERT(vendor, product) \
11079 + .matches = { \
11080 + DMI_MATCH(DMI_BOARD_VENDOR, vendor), \
11081 + DMI_MATCH(DMI_PRODUCT_NAME, product), \
11082 + },
11083 +#endif
11084 diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
11085 index f0c908241966a..452011428d119 100644
11086 --- a/security/integrity/platform_certs/load_uefi.c
11087 +++ b/security/integrity/platform_certs/load_uefi.c
11088 @@ -3,6 +3,7 @@
11089 #include <linux/kernel.h>
11090 #include <linux/sched.h>
11091 #include <linux/cred.h>
11092 +#include <linux/dmi.h>
11093 #include <linux/err.h>
11094 #include <linux/efi.h>
11095 #include <linux/slab.h>
11096 @@ -11,6 +12,31 @@
11097 #include "../integrity.h"
11098 #include "keyring_handler.h"
11099
11100 +/*
11101 + * On T2 Macs reading the db and dbx efi variables to load UEFI Secure Boot
11102 + * certificates causes a page fault in Apple's firmware and a crash that
11103 + * disables EFI runtime services. The following quirk skips reading
11104 + * these variables.
11105 + */
11106 +static const struct dmi_system_id uefi_skip_cert[] = {
11107 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,1") },
11108 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,2") },
11109 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,3") },
11110 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,4") },
11111 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,1") },
11112 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,2") },
11113 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,3") },
11114 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,4") },
11115 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,1") },
11116 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,2") },
11117 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir9,1") },
11118 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacMini8,1") },
11119 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacPro7,1") },
11120 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,1") },
11121 + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,2") },
11122 + { }
11123 +};
11124 +
11125 /*
11126 * Look to see if a UEFI variable called MokIgnoreDB exists and return true if
11127 * it does.
11128 @@ -78,6 +104,13 @@ static int __init load_uefi_certs(void)
11129 unsigned long dbsize = 0, dbxsize = 0, moksize = 0;
11130 efi_status_t status;
11131 int rc = 0;
11132 + const struct dmi_system_id *dmi_id;
11133 +
11134 + dmi_id = dmi_first_match(uefi_skip_cert);
11135 + if (dmi_id) {
11136 + pr_err("Reading UEFI Secure Boot Certs is not supported on T2 Macs.\n");
11137 + return false;
11138 + }
11139
11140 if (!efi.get_variable)
11141 return false;
11142 diff --git a/sound/core/jack.c b/sound/core/jack.c
11143 index b00ae6f39f054..e7ac82d468216 100644
11144 --- a/sound/core/jack.c
11145 +++ b/sound/core/jack.c
11146 @@ -34,8 +34,11 @@ static int snd_jack_dev_disconnect(struct snd_device *device)
11147 #ifdef CONFIG_SND_JACK_INPUT_DEV
11148 struct snd_jack *jack = device->device_data;
11149
11150 - if (!jack->input_dev)
11151 + mutex_lock(&jack->input_dev_lock);
11152 + if (!jack->input_dev) {
11153 + mutex_unlock(&jack->input_dev_lock);
11154 return 0;
11155 + }
11156
11157 /* If the input device is registered with the input subsystem
11158 * then we need to use a different deallocator. */
11159 @@ -44,6 +47,7 @@ static int snd_jack_dev_disconnect(struct snd_device *device)
11160 else
11161 input_free_device(jack->input_dev);
11162 jack->input_dev = NULL;
11163 + mutex_unlock(&jack->input_dev_lock);
11164 #endif /* CONFIG_SND_JACK_INPUT_DEV */
11165 return 0;
11166 }
11167 @@ -82,8 +86,11 @@ static int snd_jack_dev_register(struct snd_device *device)
11168 snprintf(jack->name, sizeof(jack->name), "%s %s",
11169 card->shortname, jack->id);
11170
11171 - if (!jack->input_dev)
11172 + mutex_lock(&jack->input_dev_lock);
11173 + if (!jack->input_dev) {
11174 + mutex_unlock(&jack->input_dev_lock);
11175 return 0;
11176 + }
11177
11178 jack->input_dev->name = jack->name;
11179
11180 @@ -108,6 +115,7 @@ static int snd_jack_dev_register(struct snd_device *device)
11181 if (err == 0)
11182 jack->registered = 1;
11183
11184 + mutex_unlock(&jack->input_dev_lock);
11185 return err;
11186 }
11187 #endif /* CONFIG_SND_JACK_INPUT_DEV */
11188 @@ -228,9 +236,11 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
11189 return -ENOMEM;
11190 }
11191
11192 - /* don't creat input device for phantom jack */
11193 - if (!phantom_jack) {
11194 #ifdef CONFIG_SND_JACK_INPUT_DEV
11195 + mutex_init(&jack->input_dev_lock);
11196 +
11197 + /* don't create input device for phantom jack */
11198 + if (!phantom_jack) {
11199 int i;
11200
11201 jack->input_dev = input_allocate_device();
11202 @@ -248,8 +258,8 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
11203 input_set_capability(jack->input_dev, EV_SW,
11204 jack_switch_types[i]);
11205
11206 -#endif /* CONFIG_SND_JACK_INPUT_DEV */
11207 }
11208 +#endif /* CONFIG_SND_JACK_INPUT_DEV */
11209
11210 err = snd_device_new(card, SNDRV_DEV_JACK, jack, &ops);
11211 if (err < 0)
11212 @@ -289,10 +299,14 @@ EXPORT_SYMBOL(snd_jack_new);
11213 void snd_jack_set_parent(struct snd_jack *jack, struct device *parent)
11214 {
11215 WARN_ON(jack->registered);
11216 - if (!jack->input_dev)
11217 + mutex_lock(&jack->input_dev_lock);
11218 + if (!jack->input_dev) {
11219 + mutex_unlock(&jack->input_dev_lock);
11220 return;
11221 + }
11222
11223 jack->input_dev->dev.parent = parent;
11224 + mutex_unlock(&jack->input_dev_lock);
11225 }
11226 EXPORT_SYMBOL(snd_jack_set_parent);
11227
11228 @@ -340,6 +354,8 @@ EXPORT_SYMBOL(snd_jack_set_key);
11229
11230 /**
11231 * snd_jack_report - Report the current status of a jack
11232 + * Note: This function uses mutexes and should be called from a
11233 + * context which can sleep (such as a workqueue).
11234 *
11235 * @jack: The jack to report status for
11236 * @status: The current status of the jack
11237 @@ -359,8 +375,11 @@ void snd_jack_report(struct snd_jack *jack, int status)
11238 status & jack_kctl->mask_bits);
11239
11240 #ifdef CONFIG_SND_JACK_INPUT_DEV
11241 - if (!jack->input_dev)
11242 + mutex_lock(&jack->input_dev_lock);
11243 + if (!jack->input_dev) {
11244 + mutex_unlock(&jack->input_dev_lock);
11245 return;
11246 + }
11247
11248 for (i = 0; i < ARRAY_SIZE(jack->key); i++) {
11249 int testbit = SND_JACK_BTN_0 >> i;
11250 @@ -379,6 +398,7 @@ void snd_jack_report(struct snd_jack *jack, int status)
11251 }
11252
11253 input_sync(jack->input_dev);
11254 + mutex_unlock(&jack->input_dev_lock);
11255 #endif /* CONFIG_SND_JACK_INPUT_DEV */
11256 }
11257 EXPORT_SYMBOL(snd_jack_report);
11258 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
11259 index 5e2fadb264e4d..c0b6881b06729 100644
11260 --- a/sound/pci/hda/patch_conexant.c
11261 +++ b/sound/pci/hda/patch_conexant.c
11262 @@ -1012,6 +1012,13 @@ static int patch_conexant_auto(struct hda_codec *codec)
11263 snd_hda_pick_fixup(codec, cxt5051_fixup_models,
11264 cxt5051_fixups, cxt_fixups);
11265 break;
11266 + case 0x14f15098:
11267 + codec->pin_amp_workaround = 1;
11268 + spec->gen.mixer_nid = 0x22;
11269 + spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
11270 + snd_hda_pick_fixup(codec, cxt5066_fixup_models,
11271 + cxt5066_fixups, cxt_fixups);
11272 + break;
11273 case 0x14f150f2:
11274 codec->power_save_node = 1;
11275 /* Fall through */
11276 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
11277 index 78b5a0f22a415..8a221866ab01b 100644
11278 --- a/sound/pci/hda/patch_realtek.c
11279 +++ b/sound/pci/hda/patch_realtek.c
11280 @@ -1932,6 +1932,7 @@ enum {
11281 ALC1220_FIXUP_CLEVO_PB51ED_PINS,
11282 ALC887_FIXUP_ASUS_AUDIO,
11283 ALC887_FIXUP_ASUS_HMIC,
11284 + ALCS1200A_FIXUP_MIC_VREF,
11285 };
11286
11287 static void alc889_fixup_coef(struct hda_codec *codec,
11288 @@ -2477,6 +2478,14 @@ static const struct hda_fixup alc882_fixups[] = {
11289 .chained = true,
11290 .chain_id = ALC887_FIXUP_ASUS_AUDIO,
11291 },
11292 + [ALCS1200A_FIXUP_MIC_VREF] = {
11293 + .type = HDA_FIXUP_PINCTLS,
11294 + .v.pins = (const struct hda_pintbl[]) {
11295 + { 0x18, PIN_VREF50 }, /* rear mic */
11296 + { 0x19, PIN_VREF50 }, /* front mic */
11297 + {}
11298 + }
11299 + },
11300 };
11301
11302 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
11303 @@ -2514,6 +2523,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
11304 SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
11305 SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
11306 SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
11307 + SND_PCI_QUIRK(0x1043, 0x8797, "ASUS TUF B550M-PLUS", ALCS1200A_FIXUP_MIC_VREF),
11308 SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
11309 SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
11310 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
11311 diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
11312 index 466dc67799f4c..dfc536cd9d2fc 100644
11313 --- a/sound/soc/codecs/Kconfig
11314 +++ b/sound/soc/codecs/Kconfig
11315 @@ -759,7 +759,6 @@ config SND_SOC_MAX98095
11316
11317 config SND_SOC_MAX98357A
11318 tristate "Maxim MAX98357A CODEC"
11319 - depends on GPIOLIB
11320
11321 config SND_SOC_MAX98371
11322 tristate
11323 diff --git a/sound/soc/codecs/rk3328_codec.c b/sound/soc/codecs/rk3328_codec.c
11324 index 514ebe16bbfad..4e71ecf54af7b 100644
11325 --- a/sound/soc/codecs/rk3328_codec.c
11326 +++ b/sound/soc/codecs/rk3328_codec.c
11327 @@ -479,7 +479,7 @@ static int rk3328_platform_probe(struct platform_device *pdev)
11328 ret = clk_prepare_enable(rk3328->pclk);
11329 if (ret < 0) {
11330 dev_err(&pdev->dev, "failed to enable acodec pclk\n");
11331 - return ret;
11332 + goto err_unprepare_mclk;
11333 }
11334
11335 base = devm_platform_ioremap_resource(pdev, 0);
11336 diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
11337 index 7081142a355e1..c444a56df95ba 100644
11338 --- a/sound/soc/codecs/rt5514.c
11339 +++ b/sound/soc/codecs/rt5514.c
11340 @@ -419,7 +419,7 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
11341 }
11342 }
11343
11344 - return 0;
11345 + return 1;
11346 }
11347
11348 static const struct snd_kcontrol_new rt5514_snd_controls[] = {
11349 diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
11350 index c83f7f5da96b7..a66e93a3af745 100644
11351 --- a/sound/soc/codecs/rt5645.c
11352 +++ b/sound/soc/codecs/rt5645.c
11353 @@ -4074,9 +4074,14 @@ static int rt5645_i2c_remove(struct i2c_client *i2c)
11354 if (i2c->irq)
11355 free_irq(i2c->irq, rt5645);
11356
11357 + /*
11358 + * Since the rt5645_btn_check_callback() can queue jack_detect_work,
11359 + * the timer need to be delted first
11360 + * the timer needs to be deleted first
11361 + del_timer_sync(&rt5645->btn_check_timer);
11362 +
11363 cancel_delayed_work_sync(&rt5645->jack_detect_work);
11364 cancel_delayed_work_sync(&rt5645->rcclock_work);
11365 - del_timer_sync(&rt5645->btn_check_timer);
11366
11367 regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
11368
11369 diff --git a/sound/soc/codecs/tscs454.c b/sound/soc/codecs/tscs454.c
11370 index c3587af9985c0..3d981441b8d1a 100644
11371 --- a/sound/soc/codecs/tscs454.c
11372 +++ b/sound/soc/codecs/tscs454.c
11373 @@ -3128,18 +3128,17 @@ static int set_aif_sample_format(struct snd_soc_component *component,
11374 unsigned int width;
11375 int ret;
11376
11377 - switch (format) {
11378 - case SNDRV_PCM_FORMAT_S16_LE:
11379 + switch (snd_pcm_format_width(format)) {
11380 + case 16:
11381 width = FV_WL_16;
11382 break;
11383 - case SNDRV_PCM_FORMAT_S20_3LE:
11384 + case 20:
11385 width = FV_WL_20;
11386 break;
11387 - case SNDRV_PCM_FORMAT_S24_3LE:
11388 + case 24:
11389 width = FV_WL_24;
11390 break;
11391 - case SNDRV_PCM_FORMAT_S24_LE:
11392 - case SNDRV_PCM_FORMAT_S32_LE:
11393 + case 32:
11394 width = FV_WL_32;
11395 break;
11396 default:
11397 @@ -3337,6 +3336,7 @@ static const struct snd_soc_component_driver soc_component_dev_tscs454 = {
11398 .num_dapm_routes = ARRAY_SIZE(tscs454_intercon),
11399 .controls = tscs454_snd_controls,
11400 .num_controls = ARRAY_SIZE(tscs454_snd_controls),
11401 + .endianness = 1,
11402 };
11403
11404 #define TSCS454_RATES SNDRV_PCM_RATE_8000_96000
11405 diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
11406 index 72e165cc64439..97ece3114b3dc 100644
11407 --- a/sound/soc/codecs/wm2000.c
11408 +++ b/sound/soc/codecs/wm2000.c
11409 @@ -536,7 +536,7 @@ static int wm2000_anc_transition(struct wm2000_priv *wm2000,
11410 {
11411 struct i2c_client *i2c = wm2000->i2c;
11412 int i, j;
11413 - int ret;
11414 + int ret = 0;
11415
11416 if (wm2000->anc_mode == mode)
11417 return 0;
11418 @@ -566,13 +566,13 @@ static int wm2000_anc_transition(struct wm2000_priv *wm2000,
11419 ret = anc_transitions[i].step[j](i2c,
11420 anc_transitions[i].analogue);
11421 if (ret != 0)
11422 - return ret;
11423 + break;
11424 }
11425
11426 if (anc_transitions[i].dest == ANC_OFF)
11427 clk_disable_unprepare(wm2000->mclk);
11428
11429 - return 0;
11430 + return ret;
11431 }
11432
11433 static int wm2000_anc_set_mode(struct wm2000_priv *wm2000)
11434 diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
11435 index 677ecfc1ec68f..afaef20272342 100644
11436 --- a/sound/soc/fsl/fsl_sai.h
11437 +++ b/sound/soc/fsl/fsl_sai.h
11438 @@ -67,8 +67,8 @@
11439 #define FSL_SAI_xCR3(tx, ofs) (tx ? FSL_SAI_TCR3(ofs) : FSL_SAI_RCR3(ofs))
11440 #define FSL_SAI_xCR4(tx, ofs) (tx ? FSL_SAI_TCR4(ofs) : FSL_SAI_RCR4(ofs))
11441 #define FSL_SAI_xCR5(tx, ofs) (tx ? FSL_SAI_TCR5(ofs) : FSL_SAI_RCR5(ofs))
11442 -#define FSL_SAI_xDR(tx, ofs) (tx ? FSL_SAI_TDR(ofs) : FSL_SAI_RDR(ofs))
11443 -#define FSL_SAI_xFR(tx, ofs) (tx ? FSL_SAI_TFR(ofs) : FSL_SAI_RFR(ofs))
11444 +#define FSL_SAI_xDR0(tx) (tx ? FSL_SAI_TDR0 : FSL_SAI_RDR0)
11445 +#define FSL_SAI_xFR0(tx) (tx ? FSL_SAI_TFR0 : FSL_SAI_RFR0)
11446 #define FSL_SAI_xMR(tx) (tx ? FSL_SAI_TMR : FSL_SAI_RMR)
11447
11448 /* SAI Transmit/Receive Control Register */
11449 diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
11450 index 15e8b9343c354..7106d56a3346c 100644
11451 --- a/sound/soc/fsl/imx-sgtl5000.c
11452 +++ b/sound/soc/fsl/imx-sgtl5000.c
11453 @@ -120,19 +120,19 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
11454 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
11455 if (!data) {
11456 ret = -ENOMEM;
11457 - goto fail;
11458 + goto put_device;
11459 }
11460
11461 comp = devm_kzalloc(&pdev->dev, 3 * sizeof(*comp), GFP_KERNEL);
11462 if (!comp) {
11463 ret = -ENOMEM;
11464 - goto fail;
11465 + goto put_device;
11466 }
11467
11468 data->codec_clk = clk_get(&codec_dev->dev, NULL);
11469 if (IS_ERR(data->codec_clk)) {
11470 ret = PTR_ERR(data->codec_clk);
11471 - goto fail;
11472 + goto put_device;
11473 }
11474
11475 data->clk_frequency = clk_get_rate(data->codec_clk);
11476 @@ -158,10 +158,10 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
11477 data->card.dev = &pdev->dev;
11478 ret = snd_soc_of_parse_card_name(&data->card, "model");
11479 if (ret)
11480 - goto fail;
11481 + goto put_device;
11482 ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
11483 if (ret)
11484 - goto fail;
11485 + goto put_device;
11486 data->card.num_links = 1;
11487 data->card.owner = THIS_MODULE;
11488 data->card.dai_link = &data->dai;
11489 @@ -176,7 +176,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
11490 if (ret != -EPROBE_DEFER)
11491 dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
11492 ret);
11493 - goto fail;
11494 + goto put_device;
11495 }
11496
11497 of_node_put(ssi_np);
11498 @@ -184,6 +184,8 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
11499
11500 return 0;
11501
11502 +put_device:
11503 + put_device(&codec_dev->dev);
11504 fail:
11505 if (data && !IS_ERR(data->codec_clk))
11506 clk_put(data->codec_clk);
11507 diff --git a/sound/soc/mediatek/mt2701/mt2701-wm8960.c b/sound/soc/mediatek/mt2701/mt2701-wm8960.c
11508 index 8c4c89e4c616f..b9ad42112ea18 100644
11509 --- a/sound/soc/mediatek/mt2701/mt2701-wm8960.c
11510 +++ b/sound/soc/mediatek/mt2701/mt2701-wm8960.c
11511 @@ -129,7 +129,8 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev)
11512 if (!codec_node) {
11513 dev_err(&pdev->dev,
11514 "Property 'audio-codec' missing or invalid\n");
11515 - return -EINVAL;
11516 + ret = -EINVAL;
11517 + goto put_platform_node;
11518 }
11519 for_each_card_prelinks(card, i, dai_link) {
11520 if (dai_link->codecs->name)
11521 @@ -140,7 +141,7 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev)
11522 ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
11523 if (ret) {
11524 dev_err(&pdev->dev, "failed to parse audio-routing: %d\n", ret);
11525 - return ret;
11526 + goto put_codec_node;
11527 }
11528
11529 ret = devm_snd_soc_register_card(&pdev->dev, card);
11530 @@ -148,6 +149,10 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev)
11531 dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
11532 __func__, ret);
11533
11534 +put_codec_node:
11535 + of_node_put(codec_node);
11536 +put_platform_node:
11537 + of_node_put(platform_node);
11538 return ret;
11539 }
11540
11541 diff --git a/sound/soc/mediatek/mt8173/mt8173-max98090.c b/sound/soc/mediatek/mt8173/mt8173-max98090.c
11542 index de1410c2c446f..32df181801146 100644
11543 --- a/sound/soc/mediatek/mt8173/mt8173-max98090.c
11544 +++ b/sound/soc/mediatek/mt8173/mt8173-max98090.c
11545 @@ -167,7 +167,8 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev)
11546 if (!codec_node) {
11547 dev_err(&pdev->dev,
11548 "Property 'audio-codec' missing or invalid\n");
11549 - return -EINVAL;
11550 + ret = -EINVAL;
11551 + goto put_platform_node;
11552 }
11553 for_each_card_prelinks(card, i, dai_link) {
11554 if (dai_link->codecs->name)
11555 @@ -182,6 +183,8 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev)
11556 __func__, ret);
11557
11558 of_node_put(codec_node);
11559 +
11560 +put_platform_node:
11561 of_node_put(platform_node);
11562 return ret;
11563 }
11564 diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
11565 index cb1b525cbe9de..c899a05e896f3 100644
11566 --- a/sound/soc/mxs/mxs-saif.c
11567 +++ b/sound/soc/mxs/mxs-saif.c
11568 @@ -767,6 +767,7 @@ static int mxs_saif_probe(struct platform_device *pdev)
11569 saif->master_id = saif->id;
11570 } else {
11571 ret = of_alias_get_id(master, "saif");
11572 + of_node_put(master);
11573 if (ret < 0)
11574 return ret;
11575 else
11576 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
11577 index 1c09dfb0c0f09..56c9c4189f269 100644
11578 --- a/sound/soc/soc-dapm.c
11579 +++ b/sound/soc/soc-dapm.c
11580 @@ -3421,7 +3421,6 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
11581 update.val = val;
11582 card->update = &update;
11583 }
11584 - change |= reg_change;
11585
11586 ret = soc_dapm_mixer_update_power(card, kcontrol, connect,
11587 rconnect);
11588 @@ -3527,7 +3526,6 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
11589 update.val = val;
11590 card->update = &update;
11591 }
11592 - change |= reg_change;
11593
11594 ret = soc_dapm_mux_update_power(card, kcontrol, item[0], e);
11595
11596 diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
11597 index f2e9d2b1b913d..29d460c301767 100644
11598 --- a/tools/perf/builtin-c2c.c
11599 +++ b/tools/perf/builtin-c2c.c
11600 @@ -953,8 +953,8 @@ percent_rmt_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
11601 double per_left;
11602 double per_right;
11603
11604 - per_left = PERCENT(left, lcl_hitm);
11605 - per_right = PERCENT(right, lcl_hitm);
11606 + per_left = PERCENT(left, rmt_hitm);
11607 + per_right = PERCENT(right, rmt_hitm);
11608
11609 return per_left - per_right;
11610 }
11611 @@ -2733,9 +2733,7 @@ static int perf_c2c__report(int argc, const char **argv)
11612 "the input file to process"),
11613 OPT_INCR('N', "node-info", &c2c.node_info,
11614 "show extra node info in report (repeat for more info)"),
11615 -#ifdef HAVE_SLANG_SUPPORT
11616 OPT_BOOLEAN(0, "stdio", &c2c.use_stdio, "Use the stdio interface"),
11617 -#endif
11618 OPT_BOOLEAN(0, "stats", &c2c.stats_only,
11619 "Display only statistic tables (implies --stdio)"),
11620 OPT_BOOLEAN(0, "full-symbols", &c2c.symbol_full,
11621 @@ -2762,6 +2760,10 @@ static int perf_c2c__report(int argc, const char **argv)
11622 if (argc)
11623 usage_with_options(report_c2c_usage, options);
11624
11625 +#ifndef HAVE_SLANG_SUPPORT
11626 + c2c.use_stdio = true;
11627 +#endif
11628 +
11629 if (c2c.stats_only)
11630 c2c.use_stdio = true;
11631
11632 diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
11633 index 47f57f5829d3a..a4244bf242e6a 100644
11634 --- a/tools/perf/pmu-events/jevents.c
11635 +++ b/tools/perf/pmu-events/jevents.c
11636 @@ -567,7 +567,7 @@ int json_events(const char *fn,
11637 } else if (json_streq(map, field, "ExtSel")) {
11638 char *code = NULL;
11639 addfield(map, &code, "", "", val);
11640 - eventcode |= strtoul(code, NULL, 0) << 21;
11641 + eventcode |= strtoul(code, NULL, 0) << 8;
11642 free(code);
11643 } else if (json_streq(map, field, "EventName")) {
11644 addfield(map, &name, "", "", val);
11645 diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
11646 index 259868a390198..252d990712496 100644
11647 --- a/tools/perf/util/data.h
11648 +++ b/tools/perf/util/data.h
11649 @@ -3,6 +3,7 @@
11650 #define __PERF_DATA_H
11651
11652 #include <stdbool.h>
11653 +#include <linux/types.h>
11654
11655 enum perf_data_mode {
11656 PERF_DATA_MODE_WRITE,
11657 diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
11658 index 988326b67a916..8bf6b01b35608 100644
11659 --- a/tools/power/x86/turbostat/turbostat.c
11660 +++ b/tools/power/x86/turbostat/turbostat.c
11661 @@ -3865,6 +3865,7 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units)
11662 case INTEL_FAM6_HASWELL_X: /* HSX */
11663 case INTEL_FAM6_BROADWELL_X: /* BDX */
11664 case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
11665 + case INTEL_FAM6_ICELAKE_X: /* ICX */
11666 return (rapl_dram_energy_units = 15.3 / 1000000);
11667 default:
11668 return (rapl_energy_units);
11669 diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
11670 index d4a02fe44a126..0620580a5c16c 100644
11671 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
11672 +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
11673 @@ -94,7 +94,7 @@ typedef void (* (*signal_t)(int, void (*)(int)))(int);
11674
11675 typedef char * (*fn_ptr_arr1_t[10])(int **);
11676
11677 -typedef char * (* const (* const fn_ptr_arr2_t[5])())(char * (*)(int));
11678 +typedef char * (* (* const fn_ptr_arr2_t[5])())(char * (*)(int));
11679
11680 struct struct_w_typedefs {
11681 int_t a;
11682 diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
11683 index d7e07f4c3d7fc..4e15e81673104 100755
11684 --- a/tools/testing/selftests/netfilter/nft_nat.sh
11685 +++ b/tools/testing/selftests/netfilter/nft_nat.sh
11686 @@ -374,6 +374,45 @@ EOF
11687 return $lret
11688 }
11689
11690 +test_local_dnat_portonly()
11691 +{
11692 + local family=$1
11693 + local daddr=$2
11694 + local lret=0
11695 + local sr_s
11696 + local sr_r
11697 +
11698 +ip netns exec "$ns0" nft -f /dev/stdin <<EOF
11699 +table $family nat {
11700 + chain output {
11701 + type nat hook output priority 0; policy accept;
11702 + meta l4proto tcp dnat to :2000
11703 +
11704 + }
11705 +}
11706 +EOF
11707 + if [ $? -ne 0 ]; then
11708 + if [ $family = "inet" ];then
11709 + echo "SKIP: inet port test"
11710 + test_inet_nat=false
11711 + return
11712 + fi
11713 + echo "SKIP: Could not add $family dnat hook"
11714 + return
11715 + fi
11716 +
11717 + echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 &
11718 + sc_s=$!
11719 +
11720 + result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT)
11721 +
11722 + if [ "$result" = "SERVER-inet" ];then
11723 + echo "PASS: inet port rewrite without l3 address"
11724 + else
11725 + echo "ERROR: inet port rewrite"
11726 + ret=1
11727 + fi
11728 +}
11729
11730 test_masquerade6()
11731 {
11732 @@ -841,6 +880,10 @@ fi
11733 reset_counters
11734 test_local_dnat ip
11735 test_local_dnat6 ip6
11736 +
11737 +reset_counters
11738 +test_local_dnat_portonly inet 10.0.1.99
11739 +
11740 reset_counters
11741 $test_inet_nat && test_local_dnat inet
11742 $test_inet_nat && test_local_dnat6 inet