Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0150-4.14.51-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 211479 bytes
-added up to patches-4.14.79
1 niro 3238 diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.txt b/Documentation/devicetree/bindings/display/panel/panel-common.txt
2     index ec52c472c845..0603af877155 100644
3     --- a/Documentation/devicetree/bindings/display/panel/panel-common.txt
4     +++ b/Documentation/devicetree/bindings/display/panel/panel-common.txt
5     @@ -38,7 +38,7 @@ Display Timings
6     require specific display timings. The panel-timing subnode expresses those
7     timings as specified in the timing subnode section of the display timing
8     bindings defined in
9     - Documentation/devicetree/bindings/display/display-timing.txt.
10     + Documentation/devicetree/bindings/display/panel/display-timing.txt.
11    
12    
13     Connectivity
14     diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
15     index 891db41e9420..98d7898fcd78 100644
16     --- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
17     +++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
18     @@ -25,6 +25,7 @@ Required Properties:
19     - "renesas,dmac-r8a7794" (R-Car E2)
20     - "renesas,dmac-r8a7795" (R-Car H3)
21     - "renesas,dmac-r8a7796" (R-Car M3-W)
22     + - "renesas,dmac-r8a77965" (R-Car M3-N)
23     - "renesas,dmac-r8a77970" (R-Car V3M)
24    
25     - reg: base address and length of the registers block for the DMAC
26     diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
27     index 6f2ec9af0de2..dee9520224a9 100644
28     --- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
29     +++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
30     @@ -55,9 +55,9 @@ pins it needs, and how they should be configured, with regard to muxer
31     configuration, drive strength and pullups. If one of these options is
32     not set, its actual value will be unspecified.
33    
34     -This driver supports the generic pin multiplexing and configuration
35     -bindings. For details on each properties, you can refer to
36     -./pinctrl-bindings.txt.
37     +Allwinner A1X Pin Controller supports the generic pin multiplexing and
38     +configuration bindings. For details on each properties, you can refer to
39     + ./pinctrl-bindings.txt.
40    
41     Required sub-node properties:
42     - pins
43     diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
44     index 8ff65fa632fd..c06c045126fc 100644
45     --- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
46     +++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
47     @@ -21,7 +21,7 @@ Required properties:
48     - interrupts : identifier to the device interrupt
49     - clocks : a list of phandle + clock-specifier pairs, one for each
50     entry in clock names.
51     -- clocks-names :
52     +- clock-names :
53     * "xtal" for external xtal clock identifier
54     * "pclk" for the bus core clock, either the clk81 clock or the gate clock
55     * "baud" for the source of the baudrate generator, can be either the xtal
56     diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
57     index cf504d0380ae..88f947c47adc 100644
58     --- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
59     +++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
60     @@ -41,6 +41,8 @@ Required properties:
61     - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
62     - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
63     - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
64     + - "renesas,scif-r8a77965" for R8A77965 (R-Car M3-N) SCIF compatible UART.
65     + - "renesas,hscif-r8a77965" for R8A77965 (R-Car M3-N) HSCIF compatible UART.
66     - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
67     - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART.
68     - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART.
69     diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
70     index 1afd298eddd7..f4a98c85340a 100644
71     --- a/Documentation/devicetree/bindings/vendor-prefixes.txt
72     +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
73     @@ -172,6 +172,7 @@ karo Ka-Ro electronics GmbH
74     keithkoep Keith & Koep GmbH
75     keymile Keymile GmbH
76     khadas Khadas
77     +kiebackpeter Kieback & Peter GmbH
78     kinetic Kinetic Technologies
79     kingnovel Kingnovel Technology Co., Ltd.
80     kosagi Sutajio Ko-Usagi PTE Ltd.
81     diff --git a/Makefile b/Makefile
82     index 84374c5ba60e..a33376204c17 100644
83     --- a/Makefile
84     +++ b/Makefile
85     @@ -1,7 +1,7 @@
86     # SPDX-License-Identifier: GPL-2.0
87     VERSION = 4
88     PATCHLEVEL = 14
89     -SUBLEVEL = 50
90     +SUBLEVEL = 51
91     EXTRAVERSION =
92     NAME = Petit Gorille
93    
94     diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
95     index 8a756870c238..5f687ba1eaa7 100644
96     --- a/arch/arm/boot/compressed/head.S
97     +++ b/arch/arm/boot/compressed/head.S
98     @@ -29,19 +29,19 @@
99     #if defined(CONFIG_DEBUG_ICEDCC)
100    
101     #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
102     - .macro loadsp, rb, tmp
103     + .macro loadsp, rb, tmp1, tmp2
104     .endm
105     .macro writeb, ch, rb
106     mcr p14, 0, \ch, c0, c5, 0
107     .endm
108     #elif defined(CONFIG_CPU_XSCALE)
109     - .macro loadsp, rb, tmp
110     + .macro loadsp, rb, tmp1, tmp2
111     .endm
112     .macro writeb, ch, rb
113     mcr p14, 0, \ch, c8, c0, 0
114     .endm
115     #else
116     - .macro loadsp, rb, tmp
117     + .macro loadsp, rb, tmp1, tmp2
118     .endm
119     .macro writeb, ch, rb
120     mcr p14, 0, \ch, c1, c0, 0
121     @@ -57,7 +57,7 @@
122     .endm
123    
124     #if defined(CONFIG_ARCH_SA1100)
125     - .macro loadsp, rb, tmp
126     + .macro loadsp, rb, tmp1, tmp2
127     mov \rb, #0x80000000 @ physical base address
128     #ifdef CONFIG_DEBUG_LL_SER3
129     add \rb, \rb, #0x00050000 @ Ser3
130     @@ -66,8 +66,8 @@
131     #endif
132     .endm
133     #else
134     - .macro loadsp, rb, tmp
135     - addruart \rb, \tmp
136     + .macro loadsp, rb, tmp1, tmp2
137     + addruart \rb, \tmp1, \tmp2
138     .endm
139     #endif
140     #endif
141     @@ -559,8 +559,6 @@ not_relocated: mov r0, #0
142     bl decompress_kernel
143     bl cache_clean_flush
144     bl cache_off
145     - mov r1, r7 @ restore architecture number
146     - mov r2, r8 @ restore atags pointer
147    
148     #ifdef CONFIG_ARM_VIRT_EXT
149     mrs r0, spsr @ Get saved CPU boot mode
150     @@ -1295,7 +1293,7 @@ phex: adr r3, phexbuf
151     b 1b
152    
153     @ puts corrupts {r0, r1, r2, r3}
154     -puts: loadsp r3, r1
155     +puts: loadsp r3, r2, r1
156     1: ldrb r2, [r0], #1
157     teq r2, #0
158     moveq pc, lr
159     @@ -1312,8 +1310,8 @@ puts: loadsp r3, r1
160     @ putc corrupts {r0, r1, r2, r3}
161     putc:
162     mov r2, r0
163     + loadsp r3, r1, r0
164     mov r0, #0
165     - loadsp r3, r1
166     b 2b
167    
168     @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
169     @@ -1363,6 +1361,8 @@ __hyp_reentry_vectors:
170    
171     __enter_kernel:
172     mov r0, #0 @ must be 0
173     + mov r1, r7 @ restore architecture number
174     + mov r2, r8 @ restore atags pointer
175     ARM( mov pc, r4 ) @ call kernel
176     M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class
177     THUMB( bx r4 ) @ entry point is always ARM for A/R classes
178     diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
179     index 7c957ea06c66..9a9902974b1b 100644
180     --- a/arch/arm/boot/dts/bcm-cygnus.dtsi
181     +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
182     @@ -69,7 +69,7 @@
183     timer@20200 {
184     compatible = "arm,cortex-a9-global-timer";
185     reg = <0x20200 0x100>;
186     - interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
187     + interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
188     clocks = <&periph_clk>;
189     };
190    
191     diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
192     index af68ef7b0caa..8a15f7193c82 100644
193     --- a/arch/arm/boot/dts/da850.dtsi
194     +++ b/arch/arm/boot/dts/da850.dtsi
195     @@ -34,8 +34,6 @@
196     pmx_core: pinmux@14120 {
197     compatible = "pinctrl-single";
198     reg = <0x14120 0x50>;
199     - #address-cells = <1>;
200     - #size-cells = <0>;
201     #pinctrl-cells = <2>;
202     pinctrl-single,bit-per-mux;
203     pinctrl-single,register-width = <32>;
204     diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
205     index e262fa9ef334..c335b923753a 100644
206     --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
207     +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
208     @@ -26,7 +26,7 @@
209     gpio = <&gpio1 3 0>; /* gpio_3 */
210     startup-delay-us = <70000>;
211     enable-active-high;
212     - vin-supply = <&vmmc2>;
213     + vin-supply = <&vaux3>;
214     };
215    
216     /* HS USB Host PHY on PORT 1 */
217     @@ -108,6 +108,7 @@
218     twl_audio: audio {
219     compatible = "ti,twl4030-audio";
220     codec {
221     + ti,hs_extmute_gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>;
222     };
223     };
224     };
225     @@ -221,6 +222,7 @@
226     pinctrl-single,pins = <
227     OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
228     OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
229     + OMAP3_CORE1_IOPAD(0x20ba, PIN_OUTPUT | MUX_MODE4) /* gpmc_ncs6.gpio_57 */
230     >;
231     };
232     };
233     @@ -235,7 +237,7 @@
234     };
235     wl127x_gpio: pinmux_wl127x_gpio_pin {
236     pinctrl-single,pins = <
237     - OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */
238     + OMAP3_WKUP_IOPAD(0x2a0a, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */
239     OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
240     >;
241     };
242     @@ -270,6 +272,11 @@
243     #include "twl4030.dtsi"
244     #include "twl4030_omap3.dtsi"
245    
246     +&vaux3 {
247     + regulator-min-microvolt = <2800000>;
248     + regulator-max-microvolt = <2800000>;
249     +};
250     +
251     &twl {
252     twl_power: power {
253     compatible = "ti,twl4030-power-idle-osc-off", "ti,twl4030-power-idle";
254     diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
255     index 6b38d7a634c1..c15318431986 100644
256     --- a/arch/arm/kernel/machine_kexec.c
257     +++ b/arch/arm/kernel/machine_kexec.c
258     @@ -95,6 +95,27 @@ void machine_crash_nonpanic_core(void *unused)
259     cpu_relax();
260     }
261    
262     +void crash_smp_send_stop(void)
263     +{
264     + static int cpus_stopped;
265     + unsigned long msecs;
266     +
267     + if (cpus_stopped)
268     + return;
269     +
270     + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
271     + smp_call_function(machine_crash_nonpanic_core, NULL, false);
272     + msecs = 1000; /* Wait at most a second for the other cpus to stop */
273     + while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
274     + mdelay(1);
275     + msecs--;
276     + }
277     + if (atomic_read(&waiting_for_crash_ipi) > 0)
278     + pr_warn("Non-crashing CPUs did not react to IPI\n");
279     +
280     + cpus_stopped = 1;
281     +}
282     +
283     static void machine_kexec_mask_interrupts(void)
284     {
285     unsigned int i;
286     @@ -120,19 +141,8 @@ static void machine_kexec_mask_interrupts(void)
287    
288     void machine_crash_shutdown(struct pt_regs *regs)
289     {
290     - unsigned long msecs;
291     -
292     local_irq_disable();
293     -
294     - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
295     - smp_call_function(machine_crash_nonpanic_core, NULL, false);
296     - msecs = 1000; /* Wait at most a second for the other cpus to stop */
297     - while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
298     - mdelay(1);
299     - msecs--;
300     - }
301     - if (atomic_read(&waiting_for_crash_ipi) > 0)
302     - pr_warn("Non-crashing CPUs did not react to IPI\n");
303     + crash_smp_send_stop();
304    
305     crash_save_cpu(regs, smp_processor_id());
306     machine_kexec_mask_interrupts();
307     diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
308     index f673cd7a6766..fb7c44cdadcb 100644
309     --- a/arch/arm/mach-davinci/board-da830-evm.c
310     +++ b/arch/arm/mach-davinci/board-da830-evm.c
311     @@ -205,12 +205,17 @@ static const short da830_evm_mmc_sd_pins[] = {
312     -1
313     };
314    
315     +#define DA830_MMCSD_WP_PIN GPIO_TO_PIN(2, 1)
316     +#define DA830_MMCSD_CD_PIN GPIO_TO_PIN(2, 2)
317     +
318     static struct gpiod_lookup_table mmc_gpios_table = {
319     .dev_id = "da830-mmc.0",
320     .table = {
321     /* gpio chip 1 contains gpio range 32-63 */
322     - GPIO_LOOKUP("davinci_gpio.1", 2, "cd", GPIO_ACTIVE_LOW),
323     - GPIO_LOOKUP("davinci_gpio.1", 1, "wp", GPIO_ACTIVE_LOW),
324     + GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd",
325     + GPIO_ACTIVE_LOW),
326     + GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp",
327     + GPIO_ACTIVE_LOW),
328     },
329     };
330    
331     diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
332     index cbde0030c092..25f12118c364 100644
333     --- a/arch/arm/mach-davinci/board-da850-evm.c
334     +++ b/arch/arm/mach-davinci/board-da850-evm.c
335     @@ -763,12 +763,17 @@ static const short da850_evm_mcasp_pins[] __initconst = {
336     -1
337     };
338    
339     +#define DA850_MMCSD_CD_PIN GPIO_TO_PIN(4, 0)
340     +#define DA850_MMCSD_WP_PIN GPIO_TO_PIN(4, 1)
341     +
342     static struct gpiod_lookup_table mmc_gpios_table = {
343     .dev_id = "da830-mmc.0",
344     .table = {
345     /* gpio chip 2 contains gpio range 64-95 */
346     - GPIO_LOOKUP("davinci_gpio.2", 0, "cd", GPIO_ACTIVE_LOW),
347     - GPIO_LOOKUP("davinci_gpio.2", 1, "wp", GPIO_ACTIVE_LOW),
348     + GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
349     + GPIO_ACTIVE_LOW),
350     + GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
351     + GPIO_ACTIVE_LOW),
352     },
353     };
354    
355     diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
356     index 62e7bc3018f0..8e64685b1941 100644
357     --- a/arch/arm/mach-davinci/board-dm355-evm.c
358     +++ b/arch/arm/mach-davinci/board-dm355-evm.c
359     @@ -18,6 +18,7 @@
360     #include <linux/i2c.h>
361     #include <linux/gpio.h>
362     #include <linux/clk.h>
363     +#include <linux/dm9000.h>
364     #include <linux/videodev2.h>
365     #include <media/i2c/tvp514x.h>
366     #include <linux/spi/spi.h>
367     @@ -168,11 +169,16 @@ static struct resource dm355evm_dm9000_rsrc[] = {
368     },
369     };
370    
371     +static struct dm9000_plat_data dm335evm_dm9000_platdata;
372     +
373     static struct platform_device dm355evm_dm9000 = {
374     .name = "dm9000",
375     .id = -1,
376     .resource = dm355evm_dm9000_rsrc,
377     .num_resources = ARRAY_SIZE(dm355evm_dm9000_rsrc),
378     + .dev = {
379     + .platform_data = &dm335evm_dm9000_platdata,
380     + },
381     };
382    
383     static struct tvp514x_platform_data tvp5146_pdata = {
384     diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
385     index cb0a41e83582..4c458f714101 100644
386     --- a/arch/arm/mach-davinci/board-dm646x-evm.c
387     +++ b/arch/arm/mach-davinci/board-dm646x-evm.c
388     @@ -534,11 +534,12 @@ static struct vpif_display_config dm646x_vpif_display_config = {
389     .set_clock = set_vpif_clock,
390     .subdevinfo = dm646x_vpif_subdev,
391     .subdev_count = ARRAY_SIZE(dm646x_vpif_subdev),
392     + .i2c_adapter_id = 1,
393     .chan_config[0] = {
394     .outputs = dm6467_ch0_outputs,
395     .output_count = ARRAY_SIZE(dm6467_ch0_outputs),
396     },
397     - .card_name = "DM646x EVM",
398     + .card_name = "DM646x EVM Video Display",
399     };
400    
401     /**
402     @@ -676,6 +677,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = {
403     .setup_input_channel_mode = setup_vpif_input_channel_mode,
404     .subdev_info = vpif_capture_sdev_info,
405     .subdev_count = ARRAY_SIZE(vpif_capture_sdev_info),
406     + .i2c_adapter_id = 1,
407     .chan_config[0] = {
408     .inputs = dm6467_ch0_inputs,
409     .input_count = ARRAY_SIZE(dm6467_ch0_inputs),
410     @@ -696,6 +698,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = {
411     .fid_pol = 0,
412     },
413     },
414     + .card_name = "DM646x EVM Video Capture",
415     };
416    
417     static void __init evm_init_video(void)
418     diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
419     index 62eb7d668890..10a027253250 100644
420     --- a/arch/arm/mach-davinci/board-omapl138-hawk.c
421     +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
422     @@ -123,12 +123,16 @@ static const short hawk_mmcsd0_pins[] = {
423     -1
424     };
425    
426     +#define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12)
427     +#define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13)
428     +
429     static struct gpiod_lookup_table mmc_gpios_table = {
430     .dev_id = "da830-mmc.0",
431     .table = {
432     - /* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/
433     - GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW),
434     - GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW),
435     + GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd",
436     + GPIO_ACTIVE_LOW),
437     + GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp",
438     + GPIO_ACTIVE_LOW),
439     },
440     };
441    
442     diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
443     index da21353cac45..d869369ca2bc 100644
444     --- a/arch/arm/mach-davinci/dm646x.c
445     +++ b/arch/arm/mach-davinci/dm646x.c
446     @@ -495,7 +495,8 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
447     [IRQ_DM646X_MCASP0TXINT] = 7,
448     [IRQ_DM646X_MCASP0RXINT] = 7,
449     [IRQ_DM646X_RESERVED_3] = 7,
450     - [IRQ_DM646X_MCASP1TXINT] = 7, /* clockevent */
451     + [IRQ_DM646X_MCASP1TXINT] = 7,
452     + [IRQ_TINT0_TINT12] = 7, /* clockevent */
453     [IRQ_TINT0_TINT34] = 7, /* clocksource */
454     [IRQ_TINT1_TINT12] = 7, /* DSP timer */
455     [IRQ_TINT1_TINT34] = 7, /* system tick */
456     diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c
457     index fe57e2692629..abca83d22ff3 100644
458     --- a/arch/arm/mach-keystone/pm_domain.c
459     +++ b/arch/arm/mach-keystone/pm_domain.c
460     @@ -29,6 +29,7 @@ static struct dev_pm_domain keystone_pm_domain = {
461    
462     static struct pm_clk_notifier_block platform_domain_notifier = {
463     .pm_domain = &keystone_pm_domain,
464     + .con_ids = { NULL },
465     };
466    
467     static const struct of_device_id of_keystone_table[] = {
468     diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
469     index 793a24a53c52..d7ca9e2b40d2 100644
470     --- a/arch/arm/mach-omap1/ams-delta-fiq.c
471     +++ b/arch/arm/mach-omap1/ams-delta-fiq.c
472     @@ -58,22 +58,24 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
473     irq_num = gpio_to_irq(gpio);
474     fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio];
475    
476     - while (irq_counter[gpio] < fiq_count) {
477     - if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
478     - struct irq_data *d = irq_get_irq_data(irq_num);
479     -
480     - /*
481     - * It looks like handle_edge_irq() that
482     - * OMAP GPIO edge interrupts default to,
483     - * expects interrupt already unmasked.
484     - */
485     - if (irq_chip && irq_chip->irq_unmask)
486     + if (irq_counter[gpio] < fiq_count &&
487     + gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
488     + struct irq_data *d = irq_get_irq_data(irq_num);
489     +
490     + /*
491     + * handle_simple_irq() that OMAP GPIO edge
492     + * interrupts default to since commit 80ac93c27441
493     + * requires interrupt already acked and unmasked.
494     + */
495     + if (irq_chip) {
496     + if (irq_chip->irq_ack)
497     + irq_chip->irq_ack(d);
498     + if (irq_chip->irq_unmask)
499     irq_chip->irq_unmask(d);
500     }
501     - generic_handle_irq(irq_num);
502     -
503     - irq_counter[gpio]++;
504     }
505     + for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
506     + generic_handle_irq(irq_num);
507     }
508     return IRQ_HANDLED;
509     }
510     diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
511     index 76eb6ec5f157..1e6a967cd2d5 100644
512     --- a/arch/arm/mach-omap2/powerdomain.c
513     +++ b/arch/arm/mach-omap2/powerdomain.c
514     @@ -188,7 +188,7 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
515     ((prev & OMAP_POWERSTATE_MASK) << 0));
516     trace_power_domain_target_rcuidle(pwrdm->name,
517     trace_state,
518     - smp_processor_id());
519     + raw_smp_processor_id());
520     }
521     break;
522     default:
523     @@ -518,7 +518,7 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
524     if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
525     /* Trace the pwrdm desired target state */
526     trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
527     - smp_processor_id());
528     + raw_smp_processor_id());
529     /* Program the pwrdm desired target state */
530     ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
531     }
532     diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
533     index 54f418d05e15..2306b1a0c09a 100644
534     --- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
535     +++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
536     @@ -46,7 +46,7 @@
537     compatible = "ethernet-phy-ieee802.3-c22";
538     reg = <0x0>;
539     interrupt-parent = <&gpio>;
540     - interrupts = <TEGRA_MAIN_GPIO(M, 5) IRQ_TYPE_LEVEL_HIGH>;
541     + interrupts = <TEGRA_MAIN_GPIO(M, 5) IRQ_TYPE_LEVEL_LOW>;
542     };
543     };
544     };
545     diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
546     index 30da0918d046..04569aa267fd 100644
547     --- a/arch/arm64/include/asm/cputype.h
548     +++ b/arch/arm64/include/asm/cputype.h
549     @@ -75,6 +75,7 @@
550     #define ARM_CPU_IMP_CAVIUM 0x43
551     #define ARM_CPU_IMP_BRCM 0x42
552     #define ARM_CPU_IMP_QCOM 0x51
553     +#define ARM_CPU_IMP_NVIDIA 0x4E
554    
555     #define ARM_CPU_PART_AEM_V8 0xD0F
556     #define ARM_CPU_PART_FOUNDATION 0xD00
557     @@ -98,6 +99,9 @@
558     #define QCOM_CPU_PART_FALKOR 0xC00
559     #define QCOM_CPU_PART_KRYO 0x200
560    
561     +#define NVIDIA_CPU_PART_DENVER 0x003
562     +#define NVIDIA_CPU_PART_CARMEL 0x004
563     +
564     #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
565     #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
566     #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
567     @@ -112,6 +116,8 @@
568     #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
569     #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
570     #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
571     +#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
572     +#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
573    
574     #ifndef __ASSEMBLY__
575    
576     diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
577     index 9cbb6123208f..edaf346d13d5 100644
578     --- a/arch/arm64/kernel/ptrace.c
579     +++ b/arch/arm64/kernel/ptrace.c
580     @@ -25,6 +25,7 @@
581     #include <linux/sched/signal.h>
582     #include <linux/sched/task_stack.h>
583     #include <linux/mm.h>
584     +#include <linux/nospec.h>
585     #include <linux/smp.h>
586     #include <linux/ptrace.h>
587     #include <linux/user.h>
588     @@ -247,15 +248,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
589    
590     switch (note_type) {
591     case NT_ARM_HW_BREAK:
592     - if (idx < ARM_MAX_BRP)
593     - bp = tsk->thread.debug.hbp_break[idx];
594     + if (idx >= ARM_MAX_BRP)
595     + goto out;
596     + idx = array_index_nospec(idx, ARM_MAX_BRP);
597     + bp = tsk->thread.debug.hbp_break[idx];
598     break;
599     case NT_ARM_HW_WATCH:
600     - if (idx < ARM_MAX_WRP)
601     - bp = tsk->thread.debug.hbp_watch[idx];
602     + if (idx >= ARM_MAX_WRP)
603     + goto out;
604     + idx = array_index_nospec(idx, ARM_MAX_WRP);
605     + bp = tsk->thread.debug.hbp_watch[idx];
606     break;
607     }
608    
609     +out:
610     return bp;
611     }
612    
613     @@ -1194,9 +1200,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
614     {
615     int ret;
616     u32 kdata;
617     - mm_segment_t old_fs = get_fs();
618    
619     - set_fs(KERNEL_DS);
620     /* Watchpoint */
621     if (num < 0) {
622     ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
623     @@ -1207,7 +1211,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
624     } else {
625     ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
626     }
627     - set_fs(old_fs);
628    
629     if (!ret)
630     ret = put_user(kdata, data);
631     @@ -1220,7 +1223,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
632     {
633     int ret;
634     u32 kdata = 0;
635     - mm_segment_t old_fs = get_fs();
636    
637     if (num == 0)
638     return 0;
639     @@ -1229,12 +1231,10 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
640     if (ret)
641     return ret;
642    
643     - set_fs(KERNEL_DS);
644     if (num < 0)
645     ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
646     else
647     ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
648     - set_fs(old_fs);
649    
650     return ret;
651     }
652     diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
653     index 66f5e9a61efc..7288e31d3713 100644
654     --- a/arch/hexagon/include/asm/io.h
655     +++ b/arch/hexagon/include/asm/io.h
656     @@ -216,6 +216,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
657     memcpy((void *) dst, src, count);
658     }
659    
660     +static inline void memset_io(volatile void __iomem *addr, int value,
661     + size_t size)
662     +{
663     + memset((void __force *)addr, value, size);
664     +}
665     +
666     #define PCI_IO_ADDR (volatile void __iomem *)
667    
668     /*
669     diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c
670     index 617506d1a559..7cd0a2259269 100644
671     --- a/arch/hexagon/lib/checksum.c
672     +++ b/arch/hexagon/lib/checksum.c
673     @@ -199,3 +199,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
674     memcpy(dst, src, len);
675     return csum_partial(dst, len, sum);
676     }
677     +EXPORT_SYMBOL(csum_partial_copy_nocheck);
678     diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
679     index 2cd49b60e030..f7aad80c69ab 100644
680     --- a/arch/mips/boot/dts/img/boston.dts
681     +++ b/arch/mips/boot/dts/img/boston.dts
682     @@ -51,6 +51,8 @@
683     ranges = <0x02000000 0 0x40000000
684     0x40000000 0 0x40000000>;
685    
686     + bus-range = <0x00 0xff>;
687     +
688     interrupt-map-mask = <0 0 0 7>;
689     interrupt-map = <0 0 0 1 &pci0_intc 1>,
690     <0 0 0 2 &pci0_intc 2>,
691     @@ -79,6 +81,8 @@
692     ranges = <0x02000000 0 0x20000000
693     0x20000000 0 0x20000000>;
694    
695     + bus-range = <0x00 0xff>;
696     +
697     interrupt-map-mask = <0 0 0 7>;
698     interrupt-map = <0 0 0 1 &pci1_intc 1>,
699     <0 0 0 2 &pci1_intc 2>,
700     @@ -107,6 +111,8 @@
701     ranges = <0x02000000 0 0x16000000
702     0x16000000 0 0x100000>;
703    
704     + bus-range = <0x00 0xff>;
705     +
706     interrupt-map-mask = <0 0 0 7>;
707     interrupt-map = <0 0 0 1 &pci2_intc 1>,
708     <0 0 0 2 &pci2_intc 2>,
709     diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
710     index 0cbf3af37eca..a7d0b836f2f7 100644
711     --- a/arch/mips/include/asm/io.h
712     +++ b/arch/mips/include/asm/io.h
713     @@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr)
714     #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
715     #define war_io_reorder_wmb() wmb()
716     #else
717     -#define war_io_reorder_wmb() do { } while (0)
718     +#define war_io_reorder_wmb() barrier()
719     #endif
720    
721     #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
722     @@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
723     BUG(); \
724     } \
725     \
726     + /* prevent prefetching of coherent DMA data prematurely */ \
727     + rmb(); \
728     return pfx##ioswab##bwlq(__mem, __val); \
729     }
730    
731     diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
732     index 513826a43efd..6a71d3151a23 100644
733     --- a/arch/parisc/kernel/drivers.c
734     +++ b/arch/parisc/kernel/drivers.c
735     @@ -448,7 +448,8 @@ static int match_by_id(struct device * dev, void * data)
736     * Checks all the children of @parent for a matching @id. If none
737     * found, it allocates a new device and returns it.
738     */
739     -static struct parisc_device * alloc_tree_node(struct device *parent, char id)
740     +static struct parisc_device * __init alloc_tree_node(
741     + struct device *parent, char id)
742     {
743     struct match_id_data d = {
744     .id = id,
745     @@ -825,8 +826,8 @@ void walk_lower_bus(struct parisc_device *dev)
746     * devices which are not physically connected (such as extra serial &
747     * keyboard ports). This problem is not yet solved.
748     */
749     -static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
750     - struct device *parent)
751     +static void __init walk_native_bus(unsigned long io_io_low,
752     + unsigned long io_io_high, struct device *parent)
753     {
754     int i, devices_found = 0;
755     unsigned long hpa = io_io_low;
756     diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
757     index 30c28ab14540..ab4d5580bb02 100644
758     --- a/arch/parisc/kernel/smp.c
759     +++ b/arch/parisc/kernel/smp.c
760     @@ -418,8 +418,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
761     }
762    
763     #ifdef CONFIG_PROC_FS
764     -int __init
765     -setup_profiling_timer(unsigned int multiplier)
766     +int setup_profiling_timer(unsigned int multiplier)
767     {
768     return -EINVAL;
769     }
770     diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
771     index f7e684560186..42a873226a04 100644
772     --- a/arch/parisc/kernel/time.c
773     +++ b/arch/parisc/kernel/time.c
774     @@ -205,7 +205,7 @@ static int __init rtc_init(void)
775     device_initcall(rtc_init);
776     #endif
777    
778     -void read_persistent_clock(struct timespec *ts)
779     +void read_persistent_clock64(struct timespec64 *ts)
780     {
781     static struct pdc_tod tod_data;
782     if (pdc_tod_read(&tod_data) == 0) {
783     diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
784     index de470caf0784..fc222a0c2ac4 100644
785     --- a/arch/powerpc/platforms/powernv/memtrace.c
786     +++ b/arch/powerpc/platforms/powernv/memtrace.c
787     @@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = {
788     .open = simple_open,
789     };
790    
791     -static void flush_memory_region(u64 base, u64 size)
792     -{
793     - unsigned long line_size = ppc64_caches.l1d.size;
794     - u64 end = base + size;
795     - u64 addr;
796     -
797     - base = round_down(base, line_size);
798     - end = round_up(end, line_size);
799     -
800     - for (addr = base; addr < end; addr += line_size)
801     - asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory");
802     -}
803     -
804     static int check_memblock_online(struct memory_block *mem, void *arg)
805     {
806     if (mem->state != MEM_ONLINE)
807     @@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
808     walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
809     change_memblock_state);
810    
811     - /* RCU grace period? */
812     - flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT),
813     - nr_pages << PAGE_SHIFT);
814     -
815     lock_device_hotplug();
816     remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
817     unlock_device_hotplug();
818     diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
819     index 4205f6d42b69..a5bd03642678 100644
820     --- a/arch/sh/kernel/cpu/sh2/probe.c
821     +++ b/arch/sh/kernel/cpu/sh2/probe.c
822     @@ -43,7 +43,11 @@ void __ref cpu_probe(void)
823     #endif
824    
825     #if defined(CONFIG_CPU_J2)
826     +#if defined(CONFIG_SMP)
827     unsigned cpu = hard_smp_processor_id();
828     +#else
829     + unsigned cpu = 0;
830     +#endif
831     if (cpu == 0) of_scan_flat_dt(scan_cache, NULL);
832     if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu);
833     if (cpu != 0) return;
834     diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
835     index 6965ee8c4b8a..228732654cfe 100644
836     --- a/arch/x86/events/intel/core.c
837     +++ b/arch/x86/events/intel/core.c
838     @@ -3331,7 +3331,8 @@ static void intel_pmu_cpu_starting(int cpu)
839    
840     cpuc->lbr_sel = NULL;
841    
842     - flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
843     + if (x86_pmu.version > 1)
844     + flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
845    
846     if (!cpuc->shared_regs)
847     return;
848     @@ -3494,6 +3495,8 @@ static __initconst const struct x86_pmu core_pmu = {
849     .cpu_dying = intel_pmu_cpu_dying,
850     };
851    
852     +static struct attribute *intel_pmu_attrs[];
853     +
854     static __initconst const struct x86_pmu intel_pmu = {
855     .name = "Intel",
856     .handle_irq = intel_pmu_handle_irq,
857     @@ -3524,6 +3527,8 @@ static __initconst const struct x86_pmu intel_pmu = {
858     .format_attrs = intel_arch3_formats_attr,
859     .events_sysfs_show = intel_event_sysfs_show,
860    
861     + .attrs = intel_pmu_attrs,
862     +
863     .cpu_prepare = intel_pmu_cpu_prepare,
864     .cpu_starting = intel_pmu_cpu_starting,
865     .cpu_dying = intel_pmu_cpu_dying,
866     @@ -3902,8 +3907,6 @@ __init int intel_pmu_init(void)
867    
868     x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
869    
870     -
871     - x86_pmu.attrs = intel_pmu_attrs;
872     /*
873     * Quirk: v2 perfmon does not report fixed-purpose events, so
874     * assume at least 3 events, when not running in a hypervisor:
875     diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
876     index b3e32b010ab1..c2c01f84df75 100644
877     --- a/arch/x86/include/asm/insn.h
878     +++ b/arch/x86/include/asm/insn.h
879     @@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
880     return insn_offset_displacement(insn) + insn->displacement.nbytes;
881     }
882    
883     +#define POP_SS_OPCODE 0x1f
884     +#define MOV_SREG_OPCODE 0x8e
885     +
886     +/*
887     + * Intel SDM Vol.3A 6.8.3 states;
888     + * "Any single-step trap that would be delivered following the MOV to SS
889     + * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
890     + * suppressed."
891     + * This function returns true if @insn is MOV SS or POP SS. On these
892     + * instructions, single stepping is suppressed.
893     + */
894     +static inline int insn_masking_exception(struct insn *insn)
895     +{
896     + return insn->opcode.bytes[0] == POP_SS_OPCODE ||
897     + (insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
898     + X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
899     +}
900     +
901     #endif /* _ASM_X86_INSN_H */
902     diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
903     index c895f38a7a5e..0b2330e19169 100644
904     --- a/arch/x86/kernel/cpu/intel.c
905     +++ b/arch/x86/kernel/cpu/intel.c
906     @@ -751,6 +751,9 @@ static const struct _tlb_table intel_tlb_table[] = {
907     { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
908     { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
909     { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
910     + { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
911     + { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
912     + { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
913     { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
914     { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
915     { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
916     diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
917     index fb095ba0c02f..f24cd9f1799a 100644
918     --- a/arch/x86/kernel/kexec-bzimage64.c
919     +++ b/arch/x86/kernel/kexec-bzimage64.c
920     @@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
921     * little bit simple
922     */
923     efi_map_sz = efi_get_runtime_map_size();
924     - efi_map_sz = ALIGN(efi_map_sz, 16);
925     params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
926     MAX_ELFCOREHDR_STR_LEN;
927     params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
928     - kbuf.bufsz = params_cmdline_sz + efi_map_sz +
929     + kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) +
930     sizeof(struct setup_data) +
931     sizeof(struct efi_setup_data);
932    
933     @@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
934     if (!params)
935     return ERR_PTR(-ENOMEM);
936     efi_map_offset = params_cmdline_sz;
937     - efi_setup_data_offset = efi_map_offset + efi_map_sz;
938     + efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16);
939    
940     /* Copy setup header onto bootparams. Documentation/x86/boot.txt */
941     setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset;
942     diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
943     index ce06ec9c2323..f1030c522e06 100644
944     --- a/arch/x86/kernel/kprobes/core.c
945     +++ b/arch/x86/kernel/kprobes/core.c
946     @@ -369,6 +369,10 @@ int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
947     if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
948     return 0;
949    
950     + /* We should not singlestep on the exception masking instructions */
951     + if (insn_masking_exception(insn))
952     + return 0;
953     +
954     #ifdef CONFIG_X86_64
955     /* Only x86_64 has RIP relative instructions */
956     if (insn_rip_relative(insn)) {
957     diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
958     index 495c776de4b4..e1ea13ae53b9 100644
959     --- a/arch/x86/kernel/uprobes.c
960     +++ b/arch/x86/kernel/uprobes.c
961     @@ -296,6 +296,10 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
962     if (is_prefix_bad(insn))
963     return -ENOTSUPP;
964    
965     + /* We should not singlestep on the exception masking instructions */
966     + if (insn_masking_exception(insn))
967     + return -ENOTSUPP;
968     +
969     if (x86_64)
970     good_insns = good_insns_64;
971     else
972     diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
973     index dc97f2544b6f..5d13abecb384 100644
974     --- a/arch/x86/kvm/hyperv.c
975     +++ b/arch/x86/kvm/hyperv.c
976     @@ -1223,7 +1223,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
977     struct kvm_run *run = vcpu->run;
978    
979     kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
980     - return 1;
981     + return kvm_skip_emulated_instruction(vcpu);
982     }
983    
984     int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
985     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
986     index 029aa1318874..cfa155078ebb 100644
987     --- a/arch/x86/kvm/svm.c
988     +++ b/arch/x86/kvm/svm.c
989     @@ -4756,9 +4756,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
990     }
991    
992     if (!ret && svm) {
993     - trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
994     - host_irq, e->gsi,
995     - vcpu_info.vector,
996     + trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
997     + e->gsi, vcpu_info.vector,
998     vcpu_info.pi_desc_addr, set);
999     }
1000    
1001     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1002     index b1556166a06d..90747865205d 100644
1003     --- a/arch/x86/kvm/vmx.c
1004     +++ b/arch/x86/kvm/vmx.c
1005     @@ -10318,6 +10318,16 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
1006     return true;
1007     }
1008    
1009     +static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
1010     + struct vmcs12 *vmcs12)
1011     +{
1012     + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
1013     + !page_address_valid(vcpu, vmcs12->apic_access_addr))
1014     + return -EINVAL;
1015     + else
1016     + return 0;
1017     +}
1018     +
1019     static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
1020     struct vmcs12 *vmcs12)
1021     {
1022     @@ -10961,6 +10971,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
1023     if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
1024     return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
1025    
1026     + if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
1027     + return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
1028     +
1029     if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
1030     return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
1031    
1032     @@ -12171,7 +12184,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
1033     vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
1034     vcpu_info.vector = irq.vector;
1035    
1036     - trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi,
1037     + trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
1038     vcpu_info.vector, vcpu_info.pi_desc_addr, set);
1039    
1040     if (set)
1041     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1042     index b62328cd4cb0..2f3fe25639b3 100644
1043     --- a/arch/x86/kvm/x86.c
1044     +++ b/arch/x86/kvm/x86.c
1045     @@ -6297,12 +6297,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
1046     int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
1047     {
1048     unsigned long nr, a0, a1, a2, a3, ret;
1049     - int op_64_bit, r;
1050     + int op_64_bit;
1051    
1052     - r = kvm_skip_emulated_instruction(vcpu);
1053     -
1054     - if (kvm_hv_hypercall_enabled(vcpu->kvm))
1055     - return kvm_hv_hypercall(vcpu);
1056     + if (kvm_hv_hypercall_enabled(vcpu->kvm)) {
1057     + if (!kvm_hv_hypercall(vcpu))
1058     + return 0;
1059     + goto out;
1060     + }
1061    
1062     nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
1063     a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
1064     @@ -6323,7 +6324,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
1065    
1066     if (kvm_x86_ops->get_cpl(vcpu) != 0) {
1067     ret = -KVM_EPERM;
1068     - goto out;
1069     + goto out_error;
1070     }
1071    
1072     switch (nr) {
1073     @@ -6343,12 +6344,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
1074     ret = -KVM_ENOSYS;
1075     break;
1076     }
1077     -out:
1078     +out_error:
1079     if (!op_64_bit)
1080     ret = (u32)ret;
1081     kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
1082     +
1083     +out:
1084     ++vcpu->stat.hypercalls;
1085     - return r;
1086     + return kvm_skip_emulated_instruction(vcpu);
1087     }
1088     EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
1089    
1090     diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
1091     index bb77606d04e0..a9deb2b0397d 100644
1092     --- a/arch/x86/net/bpf_jit_comp.c
1093     +++ b/arch/x86/net/bpf_jit_comp.c
1094     @@ -1159,6 +1159,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1095     for (pass = 0; pass < 20 || image; pass++) {
1096     proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
1097     if (proglen <= 0) {
1098     +out_image:
1099     image = NULL;
1100     if (header)
1101     bpf_jit_binary_free(header);
1102     @@ -1169,8 +1170,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1103     if (proglen != oldproglen) {
1104     pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
1105     proglen, oldproglen);
1106     - prog = orig_prog;
1107     - goto out_addrs;
1108     + goto out_image;
1109     }
1110     break;
1111     }
1112     diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
1113     index 754d5391d9fa..854508b00bbb 100644
1114     --- a/arch/x86/xen/enlighten_hvm.c
1115     +++ b/arch/x86/xen/enlighten_hvm.c
1116     @@ -64,6 +64,19 @@ static void __init xen_hvm_init_mem_mapping(void)
1117     {
1118     early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
1119     HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
1120     +
1121     + /*
1122     + * The virtual address of the shared_info page has changed, so
1123     + * the vcpu_info pointer for VCPU 0 is now stale.
1124     + *
1125     + * The prepare_boot_cpu callback will re-initialize it via
1126     + * xen_vcpu_setup, but we can't rely on that to be called for
1127     + * old Xen versions (xen_have_vector_callback == 0).
1128     + *
1129     + * It is, in any case, bad to have a stale vcpu_info pointer
1130     + * so reset it now.
1131     + */
1132     + xen_vcpu_info_reset(0);
1133     }
1134    
1135     static void __init init_hvm_pv_info(void)
1136     diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
1137     index d3f56baee936..3dc7c0b4adcb 100644
1138     --- a/block/blk-cgroup.c
1139     +++ b/block/blk-cgroup.c
1140     @@ -1149,18 +1149,16 @@ int blkcg_init_queue(struct request_queue *q)
1141     rcu_read_lock();
1142     spin_lock_irq(q->queue_lock);
1143     blkg = blkg_create(&blkcg_root, q, new_blkg);
1144     + if (IS_ERR(blkg))
1145     + goto err_unlock;
1146     + q->root_blkg = blkg;
1147     + q->root_rl.blkg = blkg;
1148     spin_unlock_irq(q->queue_lock);
1149     rcu_read_unlock();
1150    
1151     if (preloaded)
1152     radix_tree_preload_end();
1153    
1154     - if (IS_ERR(blkg))
1155     - return PTR_ERR(blkg);
1156     -
1157     - q->root_blkg = blkg;
1158     - q->root_rl.blkg = blkg;
1159     -
1160     ret = blk_throtl_init(q);
1161     if (ret) {
1162     spin_lock_irq(q->queue_lock);
1163     @@ -1168,6 +1166,13 @@ int blkcg_init_queue(struct request_queue *q)
1164     spin_unlock_irq(q->queue_lock);
1165     }
1166     return ret;
1167     +
1168     +err_unlock:
1169     + spin_unlock_irq(q->queue_lock);
1170     + rcu_read_unlock();
1171     + if (preloaded)
1172     + radix_tree_preload_end();
1173     + return PTR_ERR(blkg);
1174     }
1175    
1176     /**
1177     @@ -1374,17 +1379,12 @@ void blkcg_deactivate_policy(struct request_queue *q,
1178     __clear_bit(pol->plid, q->blkcg_pols);
1179    
1180     list_for_each_entry(blkg, &q->blkg_list, q_node) {
1181     - /* grab blkcg lock too while removing @pd from @blkg */
1182     - spin_lock(&blkg->blkcg->lock);
1183     -
1184     if (blkg->pd[pol->plid]) {
1185     if (pol->pd_offline_fn)
1186     pol->pd_offline_fn(blkg->pd[pol->plid]);
1187     pol->pd_free_fn(blkg->pd[pol->plid]);
1188     blkg->pd[pol->plid] = NULL;
1189     }
1190     -
1191     - spin_unlock(&blkg->blkcg->lock);
1192     }
1193    
1194     spin_unlock_irq(q->queue_lock);
1195     diff --git a/block/blk-mq.c b/block/blk-mq.c
1196     index 007f96611364..74c35513ada5 100644
1197     --- a/block/blk-mq.c
1198     +++ b/block/blk-mq.c
1199     @@ -118,6 +118,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
1200     blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
1201     }
1202    
1203     +static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
1204     + struct request *rq, void *priv,
1205     + bool reserved)
1206     +{
1207     + struct mq_inflight *mi = priv;
1208     +
1209     + if (rq->part == mi->part)
1210     + mi->inflight[rq_data_dir(rq)]++;
1211     +}
1212     +
1213     +void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
1214     + unsigned int inflight[2])
1215     +{
1216     + struct mq_inflight mi = { .part = part, .inflight = inflight, };
1217     +
1218     + inflight[0] = inflight[1] = 0;
1219     + blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
1220     +}
1221     +
1222     void blk_freeze_queue_start(struct request_queue *q)
1223     {
1224     int freeze_depth;
1225     diff --git a/block/blk-mq.h b/block/blk-mq.h
1226     index 4933af9d61f7..877237e09083 100644
1227     --- a/block/blk-mq.h
1228     +++ b/block/blk-mq.h
1229     @@ -136,6 +136,8 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
1230     }
1231    
1232     void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
1233     - unsigned int inflight[2]);
1234     + unsigned int inflight[2]);
1235     +void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
1236     + unsigned int inflight[2]);
1237    
1238     #endif
1239     diff --git a/block/genhd.c b/block/genhd.c
1240     index dd305c65ffb0..449ef56bba70 100644
1241     --- a/block/genhd.c
1242     +++ b/block/genhd.c
1243     @@ -82,6 +82,18 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
1244     }
1245     }
1246    
1247     +void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
1248     + unsigned int inflight[2])
1249     +{
1250     + if (q->mq_ops) {
1251     + blk_mq_in_flight_rw(q, part, inflight);
1252     + return;
1253     + }
1254     +
1255     + inflight[0] = atomic_read(&part->in_flight[0]);
1256     + inflight[1] = atomic_read(&part->in_flight[1]);
1257     +}
1258     +
1259     struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
1260     {
1261     struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
1262     diff --git a/block/partition-generic.c b/block/partition-generic.c
1263     index 08dabcd8b6ae..db57cced9b98 100644
1264     --- a/block/partition-generic.c
1265     +++ b/block/partition-generic.c
1266     @@ -145,13 +145,15 @@ ssize_t part_stat_show(struct device *dev,
1267     jiffies_to_msecs(part_stat_read(p, time_in_queue)));
1268     }
1269    
1270     -ssize_t part_inflight_show(struct device *dev,
1271     - struct device_attribute *attr, char *buf)
1272     +ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
1273     + char *buf)
1274     {
1275     struct hd_struct *p = dev_to_part(dev);
1276     + struct request_queue *q = part_to_disk(p)->queue;
1277     + unsigned int inflight[2];
1278    
1279     - return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
1280     - atomic_read(&p->in_flight[1]));
1281     + part_in_flight_rw(q, p, inflight);
1282     + return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
1283     }
1284    
1285     #ifdef CONFIG_FAIL_MAKE_REQUEST
1286     diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
1287     index ebb626ffb5fa..4bde16fb97d8 100644
1288     --- a/drivers/acpi/acpi_watchdog.c
1289     +++ b/drivers/acpi/acpi_watchdog.c
1290     @@ -12,23 +12,64 @@
1291     #define pr_fmt(fmt) "ACPI: watchdog: " fmt
1292    
1293     #include <linux/acpi.h>
1294     +#include <linux/dmi.h>
1295     #include <linux/ioport.h>
1296     #include <linux/platform_device.h>
1297    
1298     #include "internal.h"
1299    
1300     +static const struct dmi_system_id acpi_watchdog_skip[] = {
1301     + {
1302     + /*
1303     + * On Lenovo Z50-70 there are two issues with the WDAT
1304     + * table. First some of the instructions use RTC SRAM
1305     + * to store persistent information. This does not work well
1306     + * with Linux RTC driver. Second, more important thing is
1307     + * that the instructions do not actually reset the system.
1308     + *
1309     + * On this particular system iTCO_wdt seems to work just
1310     + * fine so we prefer that over WDAT for now.
1311     + *
1312     + * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
1313     + */
1314     + .ident = "Lenovo Z50-70",
1315     + .matches = {
1316     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1317     + DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
1318     + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
1319     + },
1320     + },
1321     + {}
1322     +};
1323     +
1324     +static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
1325     +{
1326     + const struct acpi_table_wdat *wdat = NULL;
1327     + acpi_status status;
1328     +
1329     + if (acpi_disabled)
1330     + return NULL;
1331     +
1332     + if (dmi_check_system(acpi_watchdog_skip))
1333     + return NULL;
1334     +
1335     + status = acpi_get_table(ACPI_SIG_WDAT, 0,
1336     + (struct acpi_table_header **)&wdat);
1337     + if (ACPI_FAILURE(status)) {
1338     + /* It is fine if there is no WDAT */
1339     + return NULL;
1340     + }
1341     +
1342     + return wdat;
1343     +}
1344     +
1345     /**
1346     * Returns true if this system should prefer ACPI based watchdog instead of
1347     * the native one (which are typically the same hardware).
1348     */
1349     bool acpi_has_watchdog(void)
1350     {
1351     - struct acpi_table_header hdr;
1352     -
1353     - if (acpi_disabled)
1354     - return false;
1355     -
1356     - return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
1357     + return !!acpi_watchdog_get_wdat();
1358     }
1359     EXPORT_SYMBOL_GPL(acpi_has_watchdog);
1360    
1361     @@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void)
1362     struct platform_device *pdev;
1363     struct resource *resources;
1364     size_t nresources = 0;
1365     - acpi_status status;
1366     int i;
1367    
1368     - status = acpi_get_table(ACPI_SIG_WDAT, 0,
1369     - (struct acpi_table_header **)&wdat);
1370     - if (ACPI_FAILURE(status)) {
1371     + wdat = acpi_watchdog_get_wdat();
1372     + if (!wdat) {
1373     /* It is fine if there is no WDAT */
1374     return;
1375     }
1376     diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
1377     index 8082871b409a..2ef0ad6a33d6 100644
1378     --- a/drivers/acpi/sleep.c
1379     +++ b/drivers/acpi/sleep.c
1380     @@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
1381     DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
1382     },
1383     },
1384     + /*
1385     + * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
1386     + * the Low Power S0 Idle firmware interface (see
1387     + * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
1388     + */
1389     + {
1390     + .callback = init_no_lps0,
1391     + .ident = "ThinkPad X1 Tablet(2016)",
1392     + .matches = {
1393     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1394     + DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
1395     + },
1396     + },
1397     {},
1398     };
1399    
1400     diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1401     index 18391d0c0cd7..75eb50041c99 100644
1402     --- a/drivers/ata/ahci.c
1403     +++ b/drivers/ata/ahci.c
1404     @@ -686,7 +686,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1405    
1406     DPRINTK("ENTER\n");
1407    
1408     - ahci_stop_engine(ap);
1409     + hpriv->stop_engine(ap);
1410    
1411     rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1412     deadline, &online, NULL);
1413     @@ -712,7 +712,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1414     bool online;
1415     int rc;
1416    
1417     - ahci_stop_engine(ap);
1418     + hpriv->stop_engine(ap);
1419    
1420     /* clear D2H reception area to properly wait for D2H FIS */
1421     ata_tf_init(link->device, &tf);
1422     @@ -776,7 +776,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
1423    
1424     DPRINTK("ENTER\n");
1425    
1426     - ahci_stop_engine(ap);
1427     + hpriv->stop_engine(ap);
1428    
1429     for (i = 0; i < 2; i++) {
1430     u16 val;
1431     diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
1432     index 8b61123d2c3c..781b898e5785 100644
1433     --- a/drivers/ata/ahci.h
1434     +++ b/drivers/ata/ahci.h
1435     @@ -361,6 +361,13 @@ struct ahci_host_priv {
1436     * be overridden anytime before the host is activated.
1437     */
1438     void (*start_engine)(struct ata_port *ap);
1439     + /*
1440     + * Optional ahci_stop_engine override, if not set this gets set to the
1441     + * default ahci_stop_engine during ahci_save_initial_config, this can
1442     + * be overridden anytime before the host is activated.
1443     + */
1444     + int (*stop_engine)(struct ata_port *ap);
1445     +
1446     irqreturn_t (*irq_handler)(int irq, void *dev_instance);
1447    
1448     /* only required for per-port MSI(-X) support */
1449     diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
1450     index de7128d81e9c..0045dacd814b 100644
1451     --- a/drivers/ata/ahci_mvebu.c
1452     +++ b/drivers/ata/ahci_mvebu.c
1453     @@ -62,6 +62,60 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
1454     writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
1455     }
1456    
1457     +/**
1458     + * ahci_mvebu_stop_engine
1459     + *
1460     + * @ap: Target ata port
1461     + *
1462     + * Errata Ref#226 - SATA Disk HOT swap issue when connected through
1463     + * Port Multiplier in FIS-based Switching mode.
1464     + *
1465     + * To avoid the issue, according to design, the bits[11:8, 0] of
1466     + * register PxFBS are cleared when Port Command and Status (0x18) bit[0]
1467     + * changes its value from 1 to 0, i.e. falling edge of Port
1468     + * Command and Status bit[0] sends PULSE that resets PxFBS
1469     + * bits[11:8; 0].
1470     + *
1471     + * This function is used to override function of "ahci_stop_engine"
1472     + * from libahci.c by adding the mvebu work around(WA) to save PxFBS
1473     + * value before the PxCMD ST write of 0, then restore PxFBS value.
1474     + *
1475     + * Return: 0 on success; Error code otherwise.
1476     + */
1477     +int ahci_mvebu_stop_engine(struct ata_port *ap)
1478     +{
1479     + void __iomem *port_mmio = ahci_port_base(ap);
1480     + u32 tmp, port_fbs;
1481     +
1482     + tmp = readl(port_mmio + PORT_CMD);
1483     +
1484     + /* check if the HBA is idle */
1485     + if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
1486     + return 0;
1487     +
1488     + /* save the port PxFBS register for later restore */
1489     + port_fbs = readl(port_mmio + PORT_FBS);
1490     +
1491     + /* setting HBA to idle */
1492     + tmp &= ~PORT_CMD_START;
1493     + writel(tmp, port_mmio + PORT_CMD);
1494     +
1495     + /*
1496     + * bit #15 PxCMD signal doesn't clear PxFBS,
1497     + * restore the PxFBS register right after clearing the PxCMD ST,
1498     + * no need to wait for the PxCMD bit #15.
1499     + */
1500     + writel(port_fbs, port_mmio + PORT_FBS);
1501     +
1502     + /* wait for engine to stop. This could be as long as 500 msec */
1503     + tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
1504     + PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1505     + if (tmp & PORT_CMD_LIST_ON)
1506     + return -EIO;
1507     +
1508     + return 0;
1509     +}
1510     +
1511     #ifdef CONFIG_PM_SLEEP
1512     static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
1513     {
1514     @@ -112,6 +166,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
1515     if (rc)
1516     return rc;
1517    
1518     + hpriv->stop_engine = ahci_mvebu_stop_engine;
1519     +
1520     if (of_device_is_compatible(pdev->dev.of_node,
1521     "marvell,armada-380-ahci")) {
1522     dram = mv_mbus_dram_info();
1523     diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
1524     index b6b0bf76dfc7..ab5ac103bfb8 100644
1525     --- a/drivers/ata/ahci_qoriq.c
1526     +++ b/drivers/ata/ahci_qoriq.c
1527     @@ -94,7 +94,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
1528    
1529     DPRINTK("ENTER\n");
1530    
1531     - ahci_stop_engine(ap);
1532     + hpriv->stop_engine(ap);
1533    
1534     /*
1535     * There is a errata on ls1021a Rev1.0 and Rev2.0 which is:
1536     diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
1537     index c2b5941d9184..ad58da7c9aff 100644
1538     --- a/drivers/ata/ahci_xgene.c
1539     +++ b/drivers/ata/ahci_xgene.c
1540     @@ -165,7 +165,7 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
1541     PORT_CMD_ISSUE, 0x0, 1, 100))
1542     return -EBUSY;
1543    
1544     - ahci_stop_engine(ap);
1545     + hpriv->stop_engine(ap);
1546     ahci_start_fis_rx(ap);
1547    
1548     /*
1549     @@ -421,7 +421,7 @@ static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
1550     portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
1551     portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
1552    
1553     - ahci_stop_engine(ap);
1554     + hpriv->stop_engine(ap);
1555    
1556     rc = xgene_ahci_do_hardreset(link, deadline, &online);
1557    
1558     diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
1559     index 3e286d86ab42..5ae268b8514e 100644
1560     --- a/drivers/ata/libahci.c
1561     +++ b/drivers/ata/libahci.c
1562     @@ -560,6 +560,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
1563     if (!hpriv->start_engine)
1564     hpriv->start_engine = ahci_start_engine;
1565    
1566     + if (!hpriv->stop_engine)
1567     + hpriv->stop_engine = ahci_stop_engine;
1568     +
1569     if (!hpriv->irq_handler)
1570     hpriv->irq_handler = ahci_single_level_irq_intr;
1571     }
1572     @@ -887,9 +890,10 @@ static void ahci_start_port(struct ata_port *ap)
1573     static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1574     {
1575     int rc;
1576     + struct ahci_host_priv *hpriv = ap->host->private_data;
1577    
1578     /* disable DMA */
1579     - rc = ahci_stop_engine(ap);
1580     + rc = hpriv->stop_engine(ap);
1581     if (rc) {
1582     *emsg = "failed to stop engine";
1583     return rc;
1584     @@ -1299,7 +1303,7 @@ int ahci_kick_engine(struct ata_port *ap)
1585     int busy, rc;
1586    
1587     /* stop engine */
1588     - rc = ahci_stop_engine(ap);
1589     + rc = hpriv->stop_engine(ap);
1590     if (rc)
1591     goto out_restart;
1592    
1593     @@ -1538,7 +1542,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
1594    
1595     DPRINTK("ENTER\n");
1596    
1597     - ahci_stop_engine(ap);
1598     + hpriv->stop_engine(ap);
1599    
1600     /* clear D2H reception area to properly wait for D2H FIS */
1601     ata_tf_init(link->device, &tf);
1602     @@ -2064,14 +2068,14 @@ void ahci_error_handler(struct ata_port *ap)
1603    
1604     if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1605     /* restart engine */
1606     - ahci_stop_engine(ap);
1607     + hpriv->stop_engine(ap);
1608     hpriv->start_engine(ap);
1609     }
1610    
1611     sata_pmp_error_handler(ap);
1612    
1613     if (!ata_dev_enabled(ap->link.device))
1614     - ahci_stop_engine(ap);
1615     + hpriv->stop_engine(ap);
1616     }
1617     EXPORT_SYMBOL_GPL(ahci_error_handler);
1618    
1619     @@ -2118,7 +2122,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
1620     return;
1621    
1622     /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
1623     - rc = ahci_stop_engine(ap);
1624     + rc = hpriv->stop_engine(ap);
1625     if (rc)
1626     return;
1627    
1628     @@ -2178,7 +2182,7 @@ static void ahci_enable_fbs(struct ata_port *ap)
1629     return;
1630     }
1631    
1632     - rc = ahci_stop_engine(ap);
1633     + rc = hpriv->stop_engine(ap);
1634     if (rc)
1635     return;
1636    
1637     @@ -2211,7 +2215,7 @@ static void ahci_disable_fbs(struct ata_port *ap)
1638     return;
1639     }
1640    
1641     - rc = ahci_stop_engine(ap);
1642     + rc = hpriv->stop_engine(ap);
1643     if (rc)
1644     return;
1645    
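The libahci changes above convert every direct ahci_stop_engine() call into a call through hpriv->stop_engine, which ahci_save_initial_config() fills with the library default when a platform driver has not supplied its own (as ahci_mvebu does for its errata workaround). A small stand-alone C sketch of this default-able callback pattern, with invented names rather than the real libahci structures:

#include <stdio.h>

struct host {
        /* Optional override; filled with a default before first use. */
        int (*stop_engine)(struct host *h);
        const char *name;
};

static int generic_stop_engine(struct host *h)
{
        printf("%s: generic stop\n", h->name);
        return 0;
}

static int quirky_stop_engine(struct host *h)
{
        printf("%s: save/restore extra state, then stop\n", h->name);
        return 0;
}

/* Mirrors the idea of ahci_save_initial_config(): only fill hooks left unset. */
static void host_apply_defaults(struct host *h)
{
        if (!h->stop_engine)
                h->stop_engine = generic_stop_engine;
}

int main(void)
{
        struct host plain = { .name = "plain" };
        struct host quirky = { .name = "quirky", .stop_engine = quirky_stop_engine };

        host_apply_defaults(&plain);
        host_apply_defaults(&quirky);

        plain.stop_engine(&plain);      /* library default path */
        quirky.stop_engine(&quirky);    /* platform-specific path */
        return 0;
}

Because every call site goes through the pointer, a platform workaround takes effect everywhere the engine is stopped without editing each caller.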
1646     diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
1647     index ea20e0eb4d5a..711dd91b5e2c 100644
1648     --- a/drivers/ata/libata-eh.c
1649     +++ b/drivers/ata/libata-eh.c
1650     @@ -175,8 +175,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
1651     { }
1652     #endif /* CONFIG_PM */
1653    
1654     -static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
1655     - va_list args)
1656     +static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
1657     + const char *fmt, va_list args)
1658     {
1659     ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
1660     ATA_EH_DESC_LEN - ehi->desc_len,
1661     diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
1662     index aafb8cc03523..e67815b896fc 100644
1663     --- a/drivers/ata/sata_highbank.c
1664     +++ b/drivers/ata/sata_highbank.c
1665     @@ -410,7 +410,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
1666     int rc;
1667     int retry = 100;
1668    
1669     - ahci_stop_engine(ap);
1670     + hpriv->stop_engine(ap);
1671    
1672     /* clear D2H reception area to properly wait for D2H FIS */
1673     ata_tf_init(link->device, &tf);
1674     diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
1675     index c381c8e396fc..79d8c84693a1 100644
1676     --- a/drivers/char/agp/uninorth-agp.c
1677     +++ b/drivers/char/agp/uninorth-agp.c
1678     @@ -195,7 +195,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
1679     return 0;
1680     }
1681    
1682     -int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
1683     +static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
1684     {
1685     size_t i;
1686     u32 *gp;
1687     @@ -470,7 +470,7 @@ static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
1688     return 0;
1689     }
1690    
1691     -void null_cache_flush(void)
1692     +static void null_cache_flush(void)
1693     {
1694     mb();
1695     }
1696     diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
1697     index 16a3d5717f4e..a062f79bc509 100644
1698     --- a/drivers/clk/clk-mux.c
1699     +++ b/drivers/clk/clk-mux.c
1700     @@ -101,10 +101,18 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
1701     return 0;
1702     }
1703    
1704     +static int clk_mux_determine_rate(struct clk_hw *hw,
1705     + struct clk_rate_request *req)
1706     +{
1707     + struct clk_mux *mux = to_clk_mux(hw);
1708     +
1709     + return clk_mux_determine_rate_flags(hw, req, mux->flags);
1710     +}
1711     +
1712     const struct clk_ops clk_mux_ops = {
1713     .get_parent = clk_mux_get_parent,
1714     .set_parent = clk_mux_set_parent,
1715     - .determine_rate = __clk_mux_determine_rate,
1716     + .determine_rate = clk_mux_determine_rate,
1717     };
1718     EXPORT_SYMBOL_GPL(clk_mux_ops);
1719    
1720     diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
1721     index 4e21f5bcd954..6f4c98ca6e50 100644
1722     --- a/drivers/clk/clk.c
1723     +++ b/drivers/clk/clk.c
1724     @@ -351,9 +351,9 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
1725     return now <= rate && now > best;
1726     }
1727    
1728     -static int
1729     -clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
1730     - unsigned long flags)
1731     +int clk_mux_determine_rate_flags(struct clk_hw *hw,
1732     + struct clk_rate_request *req,
1733     + unsigned long flags)
1734     {
1735     struct clk_core *core = hw->core, *parent, *best_parent = NULL;
1736     int i, num_parents, ret;
1737     @@ -413,6 +413,7 @@ clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
1738    
1739     return 0;
1740     }
1741     +EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
1742    
1743     struct clk *__clk_lookup(const char *name)
1744     {
1745     diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
1746     index 5e8c18afce9a..41c08fc892b9 100644
1747     --- a/drivers/clk/imx/clk-imx6ul.c
1748     +++ b/drivers/clk/imx/clk-imx6ul.c
1749     @@ -461,7 +461,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
1750     clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000);
1751    
1752     /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */
1753     - clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
1754     + clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]);
1755     clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]);
1756     clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]);
1757     clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]);
1758     diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
1759     index 557ed25b42e3..d175b9545581 100644
1760     --- a/drivers/clocksource/timer-imx-tpm.c
1761     +++ b/drivers/clocksource/timer-imx-tpm.c
1762     @@ -20,6 +20,7 @@
1763     #define TPM_SC 0x10
1764     #define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3)
1765     #define TPM_SC_CMOD_DIV_DEFAULT 0x3
1766     +#define TPM_SC_TOF_MASK (0x1 << 7)
1767     #define TPM_CNT 0x14
1768     #define TPM_MOD 0x18
1769     #define TPM_STATUS 0x1c
1770     @@ -29,6 +30,7 @@
1771     #define TPM_C0SC_MODE_SHIFT 2
1772     #define TPM_C0SC_MODE_MASK 0x3c
1773     #define TPM_C0SC_MODE_SW_COMPARE 0x4
1774     +#define TPM_C0SC_CHF_MASK (0x1 << 7)
1775     #define TPM_C0V 0x24
1776    
1777     static void __iomem *timer_base;
1778     @@ -205,9 +207,13 @@ static int __init tpm_timer_init(struct device_node *np)
1779     * 4) Channel0 disabled
1780     * 5) DMA transfers disabled
1781     */
1782     + /* make sure counter is disabled */
1783     writel(0, timer_base + TPM_SC);
1784     + /* TOF is W1C */
1785     + writel(TPM_SC_TOF_MASK, timer_base + TPM_SC);
1786     writel(0, timer_base + TPM_CNT);
1787     - writel(0, timer_base + TPM_C0SC);
1788     + /* CHF is W1C */
1789     + writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC);
1790    
1791     /* increase per cnt, div 8 by default */
1792     writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT,
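The two added writes above rely on TOF and CHF being write-one-to-clear (W1C) status bits: writing 1 to the flag clears it, while the old write of 0 to TPM_C0SC left a pending CHF untouched. A tiny user-space model of W1C behaviour for a single flag bit (simulated register; only the flag semantics are modelled, with the bit position taken from the defines above):

#include <stdint.h>
#include <stdio.h>

#define TOF_MASK (1u << 7)   /* timer overflow flag, W1C */

static uint32_t tpm_sc = TOF_MASK;   /* pretend an overflow is pending */

/* For a W1C field, each 1 written clears that bit; 0 bits are untouched. */
static void w1c_write(uint32_t *reg, uint32_t val)
{
        *reg &= ~val;
}

int main(void)
{
        printf("before:    SC=0x%08x\n", tpm_sc);
        w1c_write(&tpm_sc, 0);          /* old-style write of 0: no effect */
        printf("write 0:   SC=0x%08x\n", tpm_sc);
        w1c_write(&tpm_sc, TOF_MASK);   /* write the mask: flag cleared */
        printf("write TOF: SC=0x%08x\n", tpm_sc);
        return 0;
}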
1793     diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
1794     index b9bd827caa22..1b4d465cc5d9 100644
1795     --- a/drivers/firmware/efi/libstub/arm64-stub.c
1796     +++ b/drivers/firmware/efi/libstub/arm64-stub.c
1797     @@ -97,6 +97,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
1798     u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
1799     (phys_seed >> 32) & mask : TEXT_OFFSET;
1800    
1801     + /*
1802     + * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not
1803     + * be a multiple of EFI_KIMG_ALIGN, and we must ensure that
1804     + * we preserve the misalignment of 'offset' relative to
1805     + * EFI_KIMG_ALIGN so that statically allocated objects whose
1806     + * alignment exceeds PAGE_SIZE appear correctly aligned in
1807     + * memory.
1808     + */
1809     + offset |= TEXT_OFFSET % EFI_KIMG_ALIGN;
1810     +
1811     /*
1812     * If KASLR is enabled, and we have some randomness available,
1813     * locate the kernel at a randomized offset in physical memory.
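The new line above keeps 'offset' congruent to TEXT_OFFSET modulo EFI_KIMG_ALIGN: when the other component of the offset is itself a multiple of the alignment (its low bits are masked off), OR-ing in TEXT_OFFSET % EFI_KIMG_ALIGN is the same as adding that remainder, so offset % EFI_KIMG_ALIGN ends up equal to TEXT_OFFSET % EFI_KIMG_ALIGN. The short C check below exercises that identity with made-up values for the alignment and offsets; it is not the EFI stub code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t align = 0x200000;        /* assumed 2 MiB image alignment */
        const uint64_t text_offset = 0x89000;   /* assumed, not a multiple of align */
        /* Randomized component with its low bits cleared, i.e. a multiple of align. */
        uint64_t offset = 0x7a400000 & ~(align - 1);

        offset |= text_offset % align;

        /* The misalignment relative to align is preserved. */
        printf("offset %% align = 0x%llx, TEXT_OFFSET %% align = 0x%llx\n",
               (unsigned long long)(offset % align),
               (unsigned long long)(text_offset % align));
        return 0;
}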
1814     diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
1815     index 660b3fbade41..8a05efa7edf0 100644
1816     --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
1817     +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
1818     @@ -716,12 +716,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
1819     struct timespec64 time;
1820    
1821     dev = kfd_device_by_id(args->gpu_id);
1822     - if (dev == NULL)
1823     - return -EINVAL;
1824     -
1825     - /* Reading GPU clock counter from KGD */
1826     - args->gpu_clock_counter =
1827     - dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
1828     + if (dev)
1829     + /* Reading GPU clock counter from KGD */
1830     + args->gpu_clock_counter =
1831     + dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
1832     + else
1833     + /* Node without GPU resource */
1834     + args->gpu_clock_counter = 0;
1835    
1836     /* No access to rdtsc. Using raw monotonic time */
1837     getrawmonotonic64(&time);
1838     diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
1839     index 39ac15ce4702..9e2ae02f31e0 100644
1840     --- a/drivers/gpu/drm/drm_dumb_buffers.c
1841     +++ b/drivers/gpu/drm/drm_dumb_buffers.c
1842     @@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
1843     return -EINVAL;
1844    
1845     /* overflow checks for 32bit size calculations */
1846     - /* NOTE: DIV_ROUND_UP() can overflow */
1847     + if (args->bpp > U32_MAX - 8)
1848     + return -EINVAL;
1849     cpp = DIV_ROUND_UP(args->bpp, 8);
1850     - if (!cpp || cpp > 0xffffffffU / args->width)
1851     + if (cpp > U32_MAX / args->width)
1852     return -EINVAL;
1853     stride = cpp * args->width;
1854     - if (args->height > 0xffffffffU / stride)
1855     + if (args->height > U32_MAX / stride)
1856     return -EINVAL;
1857    
1858     /* test for wrap-around */
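The rewritten checks above avoid 32-bit overflow at every step: bpp is bounded before DIV_ROUND_UP (which could itself wrap), and each multiplication is guarded by comparing one operand against U32_MAX divided by the other. A stand-alone C version of the same validation order (hypothetical helper; the zero checks mirror the ones the ioctl already does before this hunk):

#include <stdint.h>
#include <stdio.h>

/* Returns 0 and fills *size on success, -1 if any 32-bit step would overflow. */
static int dumb_size(uint32_t bpp, uint32_t width, uint32_t height, uint32_t *size)
{
        uint32_t cpp, stride;

        if (!bpp || !width || !height)
                return -1;
        if (bpp > UINT32_MAX - 8)               /* DIV_ROUND_UP(bpp, 8) would wrap */
                return -1;
        cpp = (bpp + 7) / 8;                    /* bytes per pixel, rounded up */
        if (cpp > UINT32_MAX / width)           /* cpp * width would wrap */
                return -1;
        stride = cpp * width;
        if (height > UINT32_MAX / stride)       /* stride * height would wrap */
                return -1;
        *size = stride * height;
        return 0;
}

int main(void)
{
        uint32_t size;

        printf("1920x1080x32: %s\n", dumb_size(32, 1920, 1080, &size) ? "reject" : "ok");
        printf("bogus 0xffffffff bpp: %s\n",
               dumb_size(0xffffffffu, 16, 16, &size) ? "reject" : "ok");
        return 0;
}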
1859     diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
1860     index deaf869374ea..a9a0b56f1fbc 100644
1861     --- a/drivers/gpu/drm/msm/dsi/dsi_host.c
1862     +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
1863     @@ -740,7 +740,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
1864     switch (mipi_fmt) {
1865     case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
1866     case MIPI_DSI_FMT_RGB666_PACKED:
1867     - case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
1868     + case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
1869     case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
1870     default: return CMD_DST_FORMAT_RGB888;
1871     }
1872     diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
1873     index c178563fcd4d..456622b46335 100644
1874     --- a/drivers/gpu/drm/msm/msm_fbdev.c
1875     +++ b/drivers/gpu/drm/msm/msm_fbdev.c
1876     @@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
1877    
1878     if (IS_ERR(fb)) {
1879     dev_err(dev->dev, "failed to allocate fb\n");
1880     - ret = PTR_ERR(fb);
1881     - goto fail;
1882     + return PTR_ERR(fb);
1883     }
1884    
1885     bo = msm_framebuffer_bo(fb, 0);
1886     @@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
1887    
1888     fail_unlock:
1889     mutex_unlock(&dev->struct_mutex);
1890     -fail:
1891     -
1892     - if (ret) {
1893     - if (fb)
1894     - drm_framebuffer_remove(fb);
1895     - }
1896     -
1897     + drm_framebuffer_remove(fb);
1898     return ret;
1899     }
1900    
1901     diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
1902     index 6e0fb50d0de4..f2df718af370 100644
1903     --- a/drivers/gpu/drm/msm/msm_gem.c
1904     +++ b/drivers/gpu/drm/msm/msm_gem.c
1905     @@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj)
1906     struct msm_gem_object *msm_obj = to_msm_bo(obj);
1907    
1908     if (msm_obj->pages) {
1909     - /* For non-cached buffers, ensure the new pages are clean
1910     - * because display controller, GPU, etc. are not coherent:
1911     - */
1912     - if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
1913     - dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
1914     - msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
1915     + if (msm_obj->sgt) {
1916     + /* For non-cached buffers, ensure the new
1917     + * pages are clean because display controller,
1918     + * GPU, etc. are not coherent:
1919     + */
1920     + if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
1921     + dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
1922     + msm_obj->sgt->nents,
1923     + DMA_BIDIRECTIONAL);
1924    
1925     - if (msm_obj->sgt)
1926     sg_free_table(msm_obj->sgt);
1927     -
1928     - kfree(msm_obj->sgt);
1929     + kfree(msm_obj->sgt);
1930     + }
1931    
1932     if (use_pages(obj))
1933     drm_gem_put_pages(obj, msm_obj->pages, true, false);
1934     diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
1935     index f169348da377..ef3731d2f2e7 100644
1936     --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
1937     +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
1938     @@ -634,7 +634,7 @@ static int hdmi_audio_config(struct device *dev,
1939     struct omap_dss_audio *dss_audio)
1940     {
1941     struct omap_hdmi *hd = dev_get_drvdata(dev);
1942     - int ret;
1943     + int ret = 0;
1944    
1945     mutex_lock(&hd->lock);
1946    
1947     diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
1948     index c3453f3bd603..1359bf50598f 100644
1949     --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
1950     +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
1951     @@ -926,8 +926,13 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
1952     {
1953     const struct hdmi4_features *features;
1954     struct resource *res;
1955     + const struct soc_device_attribute *soc;
1956    
1957     - features = soc_device_match(hdmi4_soc_devices)->data;
1958     + soc = soc_device_match(hdmi4_soc_devices);
1959     + if (!soc)
1960     + return -ENODEV;
1961     +
1962     + features = soc->data;
1963     core->cts_swmode = features->cts_swmode;
1964     core->audio_use_mclk = features->audio_use_mclk;
1965    
1966     diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
1967     index b3221ca5bcd8..26db0ce7a085 100644
1968     --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
1969     +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
1970     @@ -660,7 +660,7 @@ static int hdmi_audio_config(struct device *dev,
1971     struct omap_dss_audio *dss_audio)
1972     {
1973     struct omap_hdmi *hd = dev_get_drvdata(dev);
1974     - int ret;
1975     + int ret = 0;
1976    
1977     mutex_lock(&hd->lock);
1978    
1979     diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
1980     index aa5ba9ae2191..556335ecb2b7 100644
1981     --- a/drivers/gpu/drm/omapdrm/omap_connector.c
1982     +++ b/drivers/gpu/drm/omapdrm/omap_connector.c
1983     @@ -123,6 +123,9 @@ static int omap_connector_get_modes(struct drm_connector *connector)
1984     if (dssdrv->read_edid) {
1985     void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
1986    
1987     + if (!edid)
1988     + return 0;
1989     +
1990     if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
1991     drm_edid_is_valid(edid)) {
1992     drm_mode_connector_update_edid_property(
1993     @@ -141,6 +144,9 @@ static int omap_connector_get_modes(struct drm_connector *connector)
1994     struct drm_display_mode *mode = drm_mode_create(dev);
1995     struct videomode vm = {0};
1996    
1997     + if (!mode)
1998     + return 0;
1999     +
2000     dssdrv->get_timings(dssdev, &vm);
2001    
2002     drm_display_mode_from_videomode(&vm, mode);
2003     @@ -196,6 +202,10 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
2004     if (!r) {
2005     /* check if vrefresh is still valid */
2006     new_mode = drm_mode_duplicate(dev, mode);
2007     +
2008     + if (!new_mode)
2009     + return MODE_BAD;
2010     +
2011     new_mode->clock = vm.pixelclock / 1000;
2012     new_mode->vrefresh = 0;
2013     if (mode->vrefresh == drm_mode_vrefresh(new_mode))
2014     diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
2015     index fd05f7e9f43f..df05fe53c399 100644
2016     --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
2017     +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
2018     @@ -389,12 +389,16 @@ int tiler_unpin(struct tiler_block *block)
2019     struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
2020     uint16_t h, uint16_t align)
2021     {
2022     - struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
2023     + struct tiler_block *block;
2024     u32 min_align = 128;
2025     int ret;
2026     unsigned long flags;
2027     u32 slot_bytes;
2028    
2029     + block = kzalloc(sizeof(*block), GFP_KERNEL);
2030     + if (!block)
2031     + return ERR_PTR(-ENOMEM);
2032     +
2033     BUG_ON(!validfmt(fmt));
2034    
2035     /* convert width/height to slots */
2036     diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
2037     index c10fdfc0930f..1cd39507b634 100644
2038     --- a/drivers/gpu/drm/omapdrm/tcm-sita.c
2039     +++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
2040     @@ -92,7 +92,7 @@ static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset,
2041     {
2042     int i;
2043     unsigned long index;
2044     - bool area_free;
2045     + bool area_free = false;
2046     unsigned long slots_per_band = PAGE_SIZE / slot_bytes;
2047     unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0;
2048     unsigned long curr_bit = bit_offset;
2049     diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
2050     index 8c7a0ce147a1..eca4c9d97110 100644
2051     --- a/drivers/hid/Kconfig
2052     +++ b/drivers/hid/Kconfig
2053     @@ -436,10 +436,11 @@ config HID_LENOVO
2054     select NEW_LEDS
2055     select LEDS_CLASS
2056     ---help---
2057     - Support for Lenovo devices that are not fully compliant with HID standard.
2058     + Support for IBM/Lenovo devices that are not fully compliant with HID standard.
2059    
2060     - Say Y if you want support for the non-compliant features of the Lenovo
2061     - Thinkpad standalone keyboards, e.g:
2062     + Say Y if you want support for horizontal scrolling of the IBM/Lenovo
2063     + Scrollpoint mice or the non-compliant features of the Lenovo Thinkpad
2064     + standalone keyboards, e.g:
2065     - ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint
2066     configuration)
2067     - ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys)
2068     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2069     index ff539c0b4637..9e478f03e845 100644
2070     --- a/drivers/hid/hid-ids.h
2071     +++ b/drivers/hid/hid-ids.h
2072     @@ -532,6 +532,13 @@
2073     #define USB_VENDOR_ID_HUION 0x256c
2074     #define USB_DEVICE_ID_HUION_TABLET 0x006e
2075    
2076     +#define USB_VENDOR_ID_IBM 0x04b3
2077     +#define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100
2078     +#define USB_DEVICE_ID_IBM_SCROLLPOINT_PRO 0x3103
2079     +#define USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL 0x3105
2080     +#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL 0x3108
2081     +#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO 0x3109
2082     +
2083     #define USB_VENDOR_ID_IDEACOM 0x1cb6
2084     #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
2085     #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651
2086     @@ -664,6 +671,7 @@
2087     #define USB_DEVICE_ID_LENOVO_TPKBD 0x6009
2088     #define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047
2089     #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
2090     +#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049
2091     #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
2092     #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
2093     #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
2094     diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
2095     index 1ac4ff4d57a6..643b6eb54442 100644
2096     --- a/drivers/hid/hid-lenovo.c
2097     +++ b/drivers/hid/hid-lenovo.c
2098     @@ -6,6 +6,17 @@
2099     *
2100     * Copyright (c) 2012 Bernhard Seibold
2101     * Copyright (c) 2014 Jamie Lentin <jm@lentin.co.uk>
2102     + *
2103     + * Linux IBM/Lenovo Scrollpoint mouse driver:
2104     + * - IBM Scrollpoint III
2105     + * - IBM Scrollpoint Pro
2106     + * - IBM Scrollpoint Optical
2107     + * - IBM Scrollpoint Optical 800dpi
2108     + * - IBM Scrollpoint Optical 800dpi Pro
2109     + * - Lenovo Scrollpoint Optical
2110     + *
2111     + * Copyright (c) 2012 Peter De Wachter <pdewacht@gmail.com>
2112     + * Copyright (c) 2018 Peter Ganzhorn <peter.ganzhorn@gmail.com>
2113     */
2114    
2115     /*
2116     @@ -160,6 +171,17 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
2117     return 0;
2118     }
2119    
2120     +static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev,
2121     + struct hid_input *hi, struct hid_field *field,
2122     + struct hid_usage *usage, unsigned long **bit, int *max)
2123     +{
2124     + if (usage->hid == HID_GD_Z) {
2125     + hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL);
2126     + return 1;
2127     + }
2128     + return 0;
2129     +}
2130     +
2131     static int lenovo_input_mapping(struct hid_device *hdev,
2132     struct hid_input *hi, struct hid_field *field,
2133     struct hid_usage *usage, unsigned long **bit, int *max)
2134     @@ -172,6 +194,14 @@ static int lenovo_input_mapping(struct hid_device *hdev,
2135     case USB_DEVICE_ID_LENOVO_CBTKBD:
2136     return lenovo_input_mapping_cptkbd(hdev, hi, field,
2137     usage, bit, max);
2138     + case USB_DEVICE_ID_IBM_SCROLLPOINT_III:
2139     + case USB_DEVICE_ID_IBM_SCROLLPOINT_PRO:
2140     + case USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL:
2141     + case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL:
2142     + case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO:
2143     + case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL:
2144     + return lenovo_input_mapping_scrollpoint(hdev, hi, field,
2145     + usage, bit, max);
2146     default:
2147     return 0;
2148     }
2149     @@ -883,6 +913,12 @@ static const struct hid_device_id lenovo_devices[] = {
2150     { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
2151     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
2152     { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
2153     + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_III) },
2154     + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_PRO) },
2155     + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL) },
2156     + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) },
2157     + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) },
2158     + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) },
2159     { }
2160     };
2161    
2162     diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
2163     index f272cdd9bd55..2623a567ffba 100644
2164     --- a/drivers/hid/intel-ish-hid/ishtp/bus.c
2165     +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
2166     @@ -418,7 +418,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
2167     list_del(&device->device_link);
2168     spin_unlock_irqrestore(&dev->device_list_lock, flags);
2169     dev_err(dev->devc, "Failed to register ISHTP client device\n");
2170     - kfree(device);
2171     + put_device(&device->dev);
2172     return NULL;
2173     }
2174    
2175     diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
2176     index 4c337585479e..69afd7968d9c 100644
2177     --- a/drivers/hid/wacom_sys.c
2178     +++ b/drivers/hid/wacom_sys.c
2179     @@ -1102,8 +1102,10 @@ static int __wacom_devm_sysfs_create_group(struct wacom *wacom,
2180     devres->root = root;
2181    
2182     error = sysfs_create_group(devres->root, group);
2183     - if (error)
2184     + if (error) {
2185     + devres_free(devres);
2186     return error;
2187     + }
2188    
2189     devres_add(&wacom->hdev->dev, devres);
2190    
2191     diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
2192     index 2aa0e83174c5..dae8ac618a52 100644
2193     --- a/drivers/i2c/busses/i2c-pmcmsp.c
2194     +++ b/drivers/i2c/busses/i2c-pmcmsp.c
2195     @@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
2196     * TODO: We could potentially loop and retry in the case
2197     * of MSP_TWI_XFER_TIMEOUT.
2198     */
2199     - return -1;
2200     + return -EIO;
2201     }
2202    
2203     - return 0;
2204     + return num;
2205     }
2206    
2207     static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
2208     diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
2209     index 25fcc3c1e32b..4053259bccb8 100644
2210     --- a/drivers/i2c/busses/i2c-sprd.c
2211     +++ b/drivers/i2c/busses/i2c-sprd.c
2212     @@ -86,6 +86,7 @@ struct sprd_i2c {
2213     u32 count;
2214     int irq;
2215     int err;
2216     + bool is_suspended;
2217     };
2218    
2219     static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count)
2220     @@ -283,6 +284,9 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
2221     struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
2222     int im, ret;
2223    
2224     + if (i2c_dev->is_suspended)
2225     + return -EBUSY;
2226     +
2227     ret = pm_runtime_get_sync(i2c_dev->dev);
2228     if (ret < 0)
2229     return ret;
2230     @@ -364,13 +368,12 @@ static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id)
2231     struct sprd_i2c *i2c_dev = dev_id;
2232     struct i2c_msg *msg = i2c_dev->msg;
2233     bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
2234     - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
2235     u32 i2c_tran;
2236    
2237     if (msg->flags & I2C_M_RD)
2238     i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
2239     else
2240     - i2c_tran = i2c_count;
2241     + i2c_tran = i2c_dev->count;
2242    
2243     /*
2244     * If we got one ACK from slave when writing data, and we did not
2245     @@ -408,14 +411,13 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id)
2246     {
2247     struct sprd_i2c *i2c_dev = dev_id;
2248     struct i2c_msg *msg = i2c_dev->msg;
2249     - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
2250     bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
2251     u32 i2c_tran;
2252    
2253     if (msg->flags & I2C_M_RD)
2254     i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
2255     else
2256     - i2c_tran = i2c_count;
2257     + i2c_tran = i2c_dev->count;
2258    
2259     /*
2260     * If we did not get one ACK from slave when writing data, then we
2261     @@ -586,11 +588,23 @@ static int sprd_i2c_remove(struct platform_device *pdev)
2262    
2263     static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev)
2264     {
2265     + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
2266     +
2267     + i2c_lock_adapter(&i2c_dev->adap);
2268     + i2c_dev->is_suspended = true;
2269     + i2c_unlock_adapter(&i2c_dev->adap);
2270     +
2271     return pm_runtime_force_suspend(pdev);
2272     }
2273    
2274     static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev)
2275     {
2276     + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
2277     +
2278     + i2c_lock_adapter(&i2c_dev->adap);
2279     + i2c_dev->is_suspended = false;
2280     + i2c_unlock_adapter(&i2c_dev->adap);
2281     +
2282     return pm_runtime_force_resume(pdev);
2283     }
2284    
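The sprd suspend fix toggles is_suspended with the adapter lock held, so a transfer already holding the lock finishes before the flag flips, and any later transfer attempt is refused with -EBUSY. A rough POSIX-threads sketch of that guarded-flag idea (invented names; the kernel uses i2c_lock_adapter() and the i2c core's own locking):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct bus {
        pthread_mutex_t lock;
        bool suspended;
};

static int bus_xfer(struct bus *b, const char *what)
{
        int ret = 0;

        pthread_mutex_lock(&b->lock);
        if (b->suspended)
                ret = -EBUSY;           /* refuse new work while suspended */
        else
                printf("xfer: %s\n", what);
        pthread_mutex_unlock(&b->lock);
        return ret;
}

static void bus_suspend(struct bus *b)
{
        /* Taking the same lock guarantees in-flight transfers have finished. */
        pthread_mutex_lock(&b->lock);
        b->suspended = true;
        pthread_mutex_unlock(&b->lock);
}

int main(void)
{
        struct bus b = { .lock = PTHREAD_MUTEX_INITIALIZER, .suspended = false };

        bus_xfer(&b, "before suspend");                             /* succeeds */
        bus_suspend(&b);
        printf("after suspend: %d\n", bus_xfer(&b, "too late"));    /* -EBUSY */
        return 0;
}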
2285     diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
2286     index e4be86b3de9a..7235c7302bb7 100644
2287     --- a/drivers/i2c/busses/i2c-viperboard.c
2288     +++ b/drivers/i2c/busses/i2c-viperboard.c
2289     @@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
2290     }
2291     mutex_unlock(&vb->lock);
2292     }
2293     - return 0;
2294     + return num;
2295     error:
2296     mutex_unlock(&vb->lock);
2297     return error;
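Both i2c return-value fixes in this series (pmcmsp earlier and viperboard here) follow the master_xfer contract: on success return the number of messages processed, on failure return a negative errno rather than 0 or -1. A minimal C sketch of a transfer loop honouring that contract, with an invented bus_send_one() standing in for the hardware transaction:

#include <errno.h>
#include <stdio.h>

struct msg { const char *payload; };

/* Stand-in for one hardware transaction; fails on an empty payload. */
static int bus_send_one(const struct msg *m)
{
        return (m->payload && m->payload[0]) ? 0 : -EIO;
}

/*
 * Contract modelled on i2c master_xfer:
 * return how many messages were processed, or a negative errno.
 */
static int bus_xfer(const struct msg *msgs, int num)
{
        int i, ret;

        for (i = 0; i < num; i++) {
                ret = bus_send_one(&msgs[i]);
                if (ret < 0)
                        return ret;     /* not 0, not -1: a real errno */
        }
        return num;                     /* all messages went through */
}

int main(void)
{
        struct msg ok[]  = { { "ping" }, { "pong" } };
        struct msg bad[] = { { "ping" }, { "" } };

        printf("good batch -> %d\n", bus_xfer(ok, 2));    /* 2 */
        printf("bad batch  -> %d\n", bus_xfer(bad, 2));   /* negative errno */
        return 0;
}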
2298     diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
2299     index 3726205c8704..7507cc641de3 100644
2300     --- a/drivers/infiniband/Kconfig
2301     +++ b/drivers/infiniband/Kconfig
2302     @@ -60,9 +60,12 @@ config INFINIBAND_ON_DEMAND_PAGING
2303     pages on demand instead.
2304    
2305     config INFINIBAND_ADDR_TRANS
2306     - bool
2307     + bool "RDMA/CM"
2308     depends on INFINIBAND
2309     default y
2310     + ---help---
2311     + Support for RDMA communication manager (CM).
2312     + This allows for a generic connection abstraction over RDMA.
2313    
2314     config INFINIBAND_ADDR_TRANS_CONFIGFS
2315     bool
2316     diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
2317     index 6c725c435f5d..79843a3ca9dc 100644
2318     --- a/drivers/infiniband/core/cma.c
2319     +++ b/drivers/infiniband/core/cma.c
2320     @@ -420,6 +420,8 @@ struct cma_hdr {
2321     #define CMA_VERSION 0x00
2322    
2323     struct cma_req_info {
2324     + struct sockaddr_storage listen_addr_storage;
2325     + struct sockaddr_storage src_addr_storage;
2326     struct ib_device *device;
2327     int port;
2328     union ib_gid local_gid;
2329     @@ -898,7 +900,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
2330     {
2331     struct ib_qp_attr qp_attr;
2332     int qp_attr_mask, ret;
2333     - union ib_gid sgid;
2334    
2335     mutex_lock(&id_priv->qp_mutex);
2336     if (!id_priv->id.qp) {
2337     @@ -921,12 +922,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
2338     if (ret)
2339     goto out;
2340    
2341     - ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
2342     - rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index,
2343     - &sgid, NULL);
2344     - if (ret)
2345     - goto out;
2346     -
2347     BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
2348    
2349     if (conn_param)
2350     @@ -1372,11 +1367,11 @@ static bool validate_net_dev(struct net_device *net_dev,
2351     }
2352    
2353     static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
2354     - const struct cma_req_info *req)
2355     + struct cma_req_info *req)
2356     {
2357     - struct sockaddr_storage listen_addr_storage, src_addr_storage;
2358     - struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
2359     - *src_addr = (struct sockaddr *)&src_addr_storage;
2360     + struct sockaddr *listen_addr =
2361     + (struct sockaddr *)&req->listen_addr_storage;
2362     + struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
2363     struct net_device *net_dev;
2364     const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
2365     int err;
2366     @@ -1391,11 +1386,6 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
2367     if (!net_dev)
2368     return ERR_PTR(-ENODEV);
2369    
2370     - if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
2371     - dev_put(net_dev);
2372     - return ERR_PTR(-EHOSTUNREACH);
2373     - }
2374     -
2375     return net_dev;
2376     }
2377    
2378     @@ -1531,15 +1521,51 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
2379     }
2380     }
2381    
2382     + /*
2383     + * Net namespace might be getting deleted while route lookup,
2384     + * cm_id lookup is in progress. Therefore, perform netdevice
2385     + * validation, cm_id lookup under rcu lock.
2386     + * RCU lock along with netdevice state check, synchronizes with
2387     + * netdevice migrating to different net namespace and also avoids
2388     + * case where net namespace doesn't get deleted while lookup is in
2389     + * progress.
2390     + * If the device state is not IFF_UP, its properties such as ifindex
2391     + * and nd_net cannot be trusted to remain valid without rcu lock.
2392     + * net/core/dev.c change_net_namespace() ensures to synchronize with
2393     + * ongoing operations on net device after device is closed using
2394     + * synchronize_net().
2395     + */
2396     + rcu_read_lock();
2397     + if (*net_dev) {
2398     + /*
2399     + * If netdevice is down, it is likely that it is administratively
2400     + * down or it might be migrating to different namespace.
2401     + * In that case avoid further processing, as the net namespace
2402     + * or ifindex may change.
2403     + */
2404     + if (((*net_dev)->flags & IFF_UP) == 0) {
2405     + id_priv = ERR_PTR(-EHOSTUNREACH);
2406     + goto err;
2407     + }
2408     +
2409     + if (!validate_net_dev(*net_dev,
2410     + (struct sockaddr *)&req.listen_addr_storage,
2411     + (struct sockaddr *)&req.src_addr_storage)) {
2412     + id_priv = ERR_PTR(-EHOSTUNREACH);
2413     + goto err;
2414     + }
2415     + }
2416     +
2417     bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
2418     rdma_ps_from_service_id(req.service_id),
2419     cma_port_from_service_id(req.service_id));
2420     id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
2421     +err:
2422     + rcu_read_unlock();
2423     if (IS_ERR(id_priv) && *net_dev) {
2424     dev_put(*net_dev);
2425     *net_dev = NULL;
2426     }
2427     -
2428     return id_priv;
2429     }
2430    
2431     diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
2432     index 81528f64061a..cb0fecc958b5 100644
2433     --- a/drivers/infiniband/core/iwpm_util.c
2434     +++ b/drivers/infiniband/core/iwpm_util.c
2435     @@ -114,7 +114,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
2436     struct sockaddr_storage *mapped_sockaddr,
2437     u8 nl_client)
2438     {
2439     - struct hlist_head *hash_bucket_head;
2440     + struct hlist_head *hash_bucket_head = NULL;
2441     struct iwpm_mapping_info *map_info;
2442     unsigned long flags;
2443     int ret = -EINVAL;
2444     @@ -142,6 +142,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
2445     }
2446     }
2447     spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
2448     +
2449     + if (!hash_bucket_head)
2450     + kfree(map_info);
2451     return ret;
2452     }
2453    
2454     diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
2455     index cb91245e9163..d8efdc191c27 100644
2456     --- a/drivers/infiniband/core/mad.c
2457     +++ b/drivers/infiniband/core/mad.c
2458     @@ -60,7 +60,7 @@ module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
2459     MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
2460    
2461     static struct list_head ib_mad_port_list;
2462     -static u32 ib_mad_client_id = 0;
2463     +static atomic_t ib_mad_client_id = ATOMIC_INIT(0);
2464    
2465     /* Port list lock */
2466     static DEFINE_SPINLOCK(ib_mad_port_list_lock);
2467     @@ -378,7 +378,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
2468     }
2469    
2470     spin_lock_irqsave(&port_priv->reg_lock, flags);
2471     - mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
2472     + mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);
2473    
2474     /*
2475     * Make sure MAD registration (if supplied)
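The hi_tid change above replaces a bare shared counter with an atomic one, so two agents registering at the same time can never be handed the same client id. The same idea in stand-alone C11, using atomic_fetch_add in place of the kernel's atomic_inc_return() (sketch only):

#include <stdatomic.h>
#include <stdio.h>

/* Global ID source shared by all registrations; zero-initialised. */
static atomic_uint client_id;

/* Each caller gets a distinct, monotonically increasing ID. */
static unsigned int register_agent(void)
{
        /* fetch_add returns the old value; +1 gives the post-increment result. */
        return atomic_fetch_add(&client_id, 1) + 1;
}

int main(void)
{
        printf("agent A id %u\n", register_agent());
        printf("agent B id %u\n", register_agent());
        return 0;
}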
2476     diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
2477     index 5e9f72ea4579..5feb8bbeff18 100644
2478     --- a/drivers/infiniband/core/uverbs_ioctl.c
2479     +++ b/drivers/infiniband/core/uverbs_ioctl.c
2480     @@ -191,6 +191,15 @@ static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *met
2481     return -EINVAL;
2482     }
2483    
2484     + for (; i < method_spec->num_buckets; i++) {
2485     + struct uverbs_attr_spec_hash *attr_spec_bucket =
2486     + method_spec->attr_buckets[i];
2487     +
2488     + if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask,
2489     + attr_spec_bucket->num_attrs))
2490     + return -EINVAL;
2491     + }
2492     +
2493     return 0;
2494     }
2495    
2496     diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
2497     index a97055dd4fbd..b5fab55cc275 100644
2498     --- a/drivers/infiniband/hw/hfi1/affinity.c
2499     +++ b/drivers/infiniband/hw/hfi1/affinity.c
2500     @@ -412,7 +412,6 @@ static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
2501     static int get_irq_affinity(struct hfi1_devdata *dd,
2502     struct hfi1_msix_entry *msix)
2503     {
2504     - int ret;
2505     cpumask_var_t diff;
2506     struct hfi1_affinity_node *entry;
2507     struct cpu_mask_set *set = NULL;
2508     @@ -424,10 +423,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
2509     extra[0] = '\0';
2510     cpumask_clear(&msix->mask);
2511    
2512     - ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
2513     - if (!ret)
2514     - return -ENOMEM;
2515     -
2516     entry = node_affinity_lookup(dd->node);
2517    
2518     switch (msix->type) {
2519     @@ -458,6 +453,9 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
2520     * finds its CPU here.
2521     */
2522     if (cpu == -1 && set) {
2523     + if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
2524     + return -ENOMEM;
2525     +
2526     if (cpumask_equal(&set->mask, &set->used)) {
2527     /*
2528     * We've used up all the CPUs, bump up the generation
2529     @@ -469,6 +467,8 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
2530     cpumask_andnot(diff, &set->mask, &set->used);
2531     cpu = cpumask_first(diff);
2532     cpumask_set_cpu(cpu, &set->used);
2533     +
2534     + free_cpumask_var(diff);
2535     }
2536    
2537     cpumask_set_cpu(cpu, &msix->mask);
2538     @@ -482,7 +482,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
2539     hfi1_setup_sdma_notifier(msix);
2540     }
2541    
2542     - free_cpumask_var(diff);
2543     return 0;
2544     }
2545    
2546     diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
2547     index d5c6ff843fc6..918dbd350c71 100644
2548     --- a/drivers/infiniband/hw/hfi1/init.c
2549     +++ b/drivers/infiniband/hw/hfi1/init.c
2550     @@ -88,9 +88,9 @@
2551     * pio buffers per ctxt, etc.) Zero means use one user context per CPU.
2552     */
2553     int num_user_contexts = -1;
2554     -module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
2555     +module_param_named(num_user_contexts, num_user_contexts, int, 0444);
2556     MODULE_PARM_DESC(
2557     - num_user_contexts, "Set max number of user contexts to use");
2558     + num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");
2559    
2560     uint krcvqs[RXE_NUM_DATA_VL];
2561     int krcvqsset;
2562     diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
2563     index 61927c165b59..4cf11063e0b5 100644
2564     --- a/drivers/infiniband/sw/rxe/rxe_opcode.c
2565     +++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
2566     @@ -390,7 +390,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
2567     .name = "IB_OPCODE_RC_SEND_ONLY_INV",
2568     .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
2569     | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
2570     - | RXE_END_MASK,
2571     + | RXE_END_MASK | RXE_START_MASK,
2572     .length = RXE_BTH_BYTES + RXE_IETH_BYTES,
2573     .offset = {
2574     [RXE_BTH] = 0,
2575     diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
2576     index 44b838ec9420..54cc9cb1e3b7 100644
2577     --- a/drivers/infiniband/sw/rxe/rxe_req.c
2578     +++ b/drivers/infiniband/sw/rxe/rxe_req.c
2579     @@ -728,7 +728,6 @@ int rxe_requester(void *arg)
2580     rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
2581    
2582     if (ret == -EAGAIN) {
2583     - kfree_skb(skb);
2584     rxe_run_task(&qp->req.task, 1);
2585     goto exit;
2586     }
2587     diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
2588     index 01f926fd9029..bd43c1c7a42f 100644
2589     --- a/drivers/infiniband/sw/rxe/rxe_resp.c
2590     +++ b/drivers/infiniband/sw/rxe/rxe_resp.c
2591     @@ -742,7 +742,6 @@ static enum resp_states read_reply(struct rxe_qp *qp,
2592     err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
2593     if (err) {
2594     pr_err("Failed sending RDMA reply.\n");
2595     - kfree_skb(skb);
2596     return RESPST_ERR_RNR;
2597     }
2598    
2599     @@ -955,10 +954,8 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
2600     }
2601    
2602     err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
2603     - if (err) {
2604     + if (err)
2605     pr_err_ratelimited("Failed sending ack\n");
2606     - kfree_skb(skb);
2607     - }
2608    
2609     err1:
2610     return err;
2611     @@ -1151,7 +1148,6 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
2612     if (rc) {
2613     pr_err("Failed resending result. This flow is not handled - skb ignored\n");
2614     rxe_drop_ref(qp);
2615     - kfree_skb(skb_copy);
2616     rc = RESPST_CLEANUP;
2617     goto out;
2618     }
2619     diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig
2620     index c74ee9633041..99db8fe5173a 100644
2621     --- a/drivers/infiniband/ulp/srp/Kconfig
2622     +++ b/drivers/infiniband/ulp/srp/Kconfig
2623     @@ -1,6 +1,6 @@
2624     config INFINIBAND_SRP
2625     tristate "InfiniBand SCSI RDMA Protocol"
2626     - depends on SCSI
2627     + depends on SCSI && INFINIBAND_ADDR_TRANS
2628     select SCSI_SRP_ATTRS
2629     ---help---
2630     Support for the SCSI RDMA Protocol over InfiniBand. This
2631     diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
2632     index 31ee83d528d9..fb8b7182f05e 100644
2633     --- a/drivers/infiniband/ulp/srpt/Kconfig
2634     +++ b/drivers/infiniband/ulp/srpt/Kconfig
2635     @@ -1,6 +1,6 @@
2636     config INFINIBAND_SRPT
2637     tristate "InfiniBand SCSI RDMA Protocol target support"
2638     - depends on INFINIBAND && TARGET_CORE
2639     + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
2640     ---help---
2641    
2642     Support for the SCSI RDMA Protocol (SRP) Target driver. The
2643     diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
2644     index d97a85907ed6..d0c3d275bf9f 100644
2645     --- a/drivers/input/rmi4/rmi_spi.c
2646     +++ b/drivers/input/rmi4/rmi_spi.c
2647     @@ -147,8 +147,11 @@ static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
2648     if (len > RMI_SPI_XFER_SIZE_LIMIT)
2649     return -EINVAL;
2650    
2651     - if (rmi_spi->xfer_buf_size < len)
2652     - rmi_spi_manage_pools(rmi_spi, len);
2653     + if (rmi_spi->xfer_buf_size < len) {
2654     + ret = rmi_spi_manage_pools(rmi_spi, len);
2655     + if (ret < 0)
2656     + return ret;
2657     + }
2658    
2659     if (addr == 0)
2660     /*
2661     diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
2662     index 429b694405c7..fc149ea64be7 100644
2663     --- a/drivers/input/touchscreen/atmel_mxt_ts.c
2664     +++ b/drivers/input/touchscreen/atmel_mxt_ts.c
2665     @@ -275,7 +275,8 @@ struct mxt_data {
2666     char phys[64]; /* device physical location */
2667     const struct mxt_platform_data *pdata;
2668     struct mxt_object *object_table;
2669     - struct mxt_info info;
2670     + struct mxt_info *info;
2671     + void *raw_info_block;
2672     unsigned int irq;
2673     unsigned int max_x;
2674     unsigned int max_y;
2675     @@ -450,12 +451,13 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
2676     {
2677     u8 appmode = data->client->addr;
2678     u8 bootloader;
2679     + u8 family_id = data->info ? data->info->family_id : 0;
2680    
2681     switch (appmode) {
2682     case 0x4a:
2683     case 0x4b:
2684     /* Chips after 1664S use different scheme */
2685     - if (retry || data->info.family_id >= 0xa2) {
2686     + if (retry || family_id >= 0xa2) {
2687     bootloader = appmode - 0x24;
2688     break;
2689     }
2690     @@ -682,7 +684,7 @@ mxt_get_object(struct mxt_data *data, u8 type)
2691     struct mxt_object *object;
2692     int i;
2693    
2694     - for (i = 0; i < data->info.object_num; i++) {
2695     + for (i = 0; i < data->info->object_num; i++) {
2696     object = data->object_table + i;
2697     if (object->type == type)
2698     return object;
2699     @@ -1453,12 +1455,12 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
2700     data_pos += offset;
2701     }
2702    
2703     - if (cfg_info.family_id != data->info.family_id) {
2704     + if (cfg_info.family_id != data->info->family_id) {
2705     dev_err(dev, "Family ID mismatch!\n");
2706     return -EINVAL;
2707     }
2708    
2709     - if (cfg_info.variant_id != data->info.variant_id) {
2710     + if (cfg_info.variant_id != data->info->variant_id) {
2711     dev_err(dev, "Variant ID mismatch!\n");
2712     return -EINVAL;
2713     }
2714     @@ -1503,7 +1505,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
2715    
2716     /* Malloc memory to store configuration */
2717     cfg_start_ofs = MXT_OBJECT_START +
2718     - data->info.object_num * sizeof(struct mxt_object) +
2719     + data->info->object_num * sizeof(struct mxt_object) +
2720     MXT_INFO_CHECKSUM_SIZE;
2721     config_mem_size = data->mem_size - cfg_start_ofs;
2722     config_mem = kzalloc(config_mem_size, GFP_KERNEL);
2723     @@ -1554,20 +1556,6 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
2724     return ret;
2725     }
2726    
2727     -static int mxt_get_info(struct mxt_data *data)
2728     -{
2729     - struct i2c_client *client = data->client;
2730     - struct mxt_info *info = &data->info;
2731     - int error;
2732     -
2733     - /* Read 7-byte info block starting at address 0 */
2734     - error = __mxt_read_reg(client, 0, sizeof(*info), info);
2735     - if (error)
2736     - return error;
2737     -
2738     - return 0;
2739     -}
2740     -
2741     static void mxt_free_input_device(struct mxt_data *data)
2742     {
2743     if (data->input_dev) {
2744     @@ -1582,9 +1570,10 @@ static void mxt_free_object_table(struct mxt_data *data)
2745     video_unregister_device(&data->dbg.vdev);
2746     v4l2_device_unregister(&data->dbg.v4l2);
2747     #endif
2748     -
2749     - kfree(data->object_table);
2750     data->object_table = NULL;
2751     + data->info = NULL;
2752     + kfree(data->raw_info_block);
2753     + data->raw_info_block = NULL;
2754     kfree(data->msg_buf);
2755     data->msg_buf = NULL;
2756     data->T5_address = 0;
2757     @@ -1600,34 +1589,18 @@ static void mxt_free_object_table(struct mxt_data *data)
2758     data->max_reportid = 0;
2759     }
2760    
2761     -static int mxt_get_object_table(struct mxt_data *data)
2762     +static int mxt_parse_object_table(struct mxt_data *data,
2763     + struct mxt_object *object_table)
2764     {
2765     struct i2c_client *client = data->client;
2766     - size_t table_size;
2767     - struct mxt_object *object_table;
2768     - int error;
2769     int i;
2770     u8 reportid;
2771     u16 end_address;
2772    
2773     - table_size = data->info.object_num * sizeof(struct mxt_object);
2774     - object_table = kzalloc(table_size, GFP_KERNEL);
2775     - if (!object_table) {
2776     - dev_err(&data->client->dev, "Failed to allocate memory\n");
2777     - return -ENOMEM;
2778     - }
2779     -
2780     - error = __mxt_read_reg(client, MXT_OBJECT_START, table_size,
2781     - object_table);
2782     - if (error) {
2783     - kfree(object_table);
2784     - return error;
2785     - }
2786     -
2787     /* Valid Report IDs start counting from 1 */
2788     reportid = 1;
2789     data->mem_size = 0;
2790     - for (i = 0; i < data->info.object_num; i++) {
2791     + for (i = 0; i < data->info->object_num; i++) {
2792     struct mxt_object *object = object_table + i;
2793     u8 min_id, max_id;
2794    
2795     @@ -1651,8 +1624,8 @@ static int mxt_get_object_table(struct mxt_data *data)
2796    
2797     switch (object->type) {
2798     case MXT_GEN_MESSAGE_T5:
2799     - if (data->info.family_id == 0x80 &&
2800     - data->info.version < 0x20) {
2801     + if (data->info->family_id == 0x80 &&
2802     + data->info->version < 0x20) {
2803     /*
2804     * On mXT224 firmware versions prior to V2.0
2805     * read and discard unused CRC byte otherwise
2806     @@ -1707,24 +1680,102 @@ static int mxt_get_object_table(struct mxt_data *data)
2807     /* If T44 exists, T5 position has to be directly after */
2808     if (data->T44_address && (data->T5_address != data->T44_address + 1)) {
2809     dev_err(&client->dev, "Invalid T44 position\n");
2810     - error = -EINVAL;
2811     - goto free_object_table;
2812     + return -EINVAL;
2813     }
2814    
2815     data->msg_buf = kcalloc(data->max_reportid,
2816     data->T5_msg_size, GFP_KERNEL);
2817     - if (!data->msg_buf) {
2818     - dev_err(&client->dev, "Failed to allocate message buffer\n");
2819     + if (!data->msg_buf)
2820     + return -ENOMEM;
2821     +
2822     + return 0;
2823     +}
2824     +
2825     +static int mxt_read_info_block(struct mxt_data *data)
2826     +{
2827     + struct i2c_client *client = data->client;
2828     + int error;
2829     + size_t size;
2830     + void *id_buf, *buf;
2831     + uint8_t num_objects;
2832     + u32 calculated_crc;
2833     + u8 *crc_ptr;
2834     +
2835     + /* If info block already allocated, free it */
2836     + if (data->raw_info_block)
2837     + mxt_free_object_table(data);
2838     +
2839     + /* Read 7-byte ID information block starting at address 0 */
2840     + size = sizeof(struct mxt_info);
2841     + id_buf = kzalloc(size, GFP_KERNEL);
2842     + if (!id_buf)
2843     + return -ENOMEM;
2844     +
2845     + error = __mxt_read_reg(client, 0, size, id_buf);
2846     + if (error)
2847     + goto err_free_mem;
2848     +
2849     + /* Resize buffer to give space for rest of info block */
2850     + num_objects = ((struct mxt_info *)id_buf)->object_num;
2851     + size += (num_objects * sizeof(struct mxt_object))
2852     + + MXT_INFO_CHECKSUM_SIZE;
2853     +
2854     + buf = krealloc(id_buf, size, GFP_KERNEL);
2855     + if (!buf) {
2856     error = -ENOMEM;
2857     - goto free_object_table;
2858     + goto err_free_mem;
2859     + }
2860     + id_buf = buf;
2861     +
2862     + /* Read rest of info block */
2863     + error = __mxt_read_reg(client, MXT_OBJECT_START,
2864     + size - MXT_OBJECT_START,
2865     + id_buf + MXT_OBJECT_START);
2866     + if (error)
2867     + goto err_free_mem;
2868     +
2869     + /* Extract & calculate checksum */
2870     + crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE;
2871     + data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16);
2872     +
2873     + calculated_crc = mxt_calculate_crc(id_buf, 0,
2874     + size - MXT_INFO_CHECKSUM_SIZE);
2875     +
2876     + /*
2877     + * CRC mismatch can be caused by data corruption due to I2C comms
2878     + * issue or else device is not using Object Based Protocol (eg i2c-hid)
2879     + */
2880     + if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) {
2881     + dev_err(&client->dev,
2882     + "Info Block CRC error calculated=0x%06X read=0x%06X\n",
2883     + calculated_crc, data->info_crc);
2884     + error = -EIO;
2885     + goto err_free_mem;
2886     + }
2887     +
2888     + data->raw_info_block = id_buf;
2889     + data->info = (struct mxt_info *)id_buf;
2890     +
2891     + dev_info(&client->dev,
2892     + "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
2893     + data->info->family_id, data->info->variant_id,
2894     + data->info->version >> 4, data->info->version & 0xf,
2895     + data->info->build, data->info->object_num);
2896     +
2897     + /* Parse object table information */
2898     + error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START);
2899     + if (error) {
2900     + dev_err(&client->dev, "Error %d parsing object table\n", error);
2901     + mxt_free_object_table(data);
2902     + goto err_free_mem;
2903     }
2904    
2905     - data->object_table = object_table;
2906     + data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
2907    
2908     return 0;
2909    
2910     -free_object_table:
2911     - mxt_free_object_table(data);
2912     +err_free_mem:
2913     + kfree(id_buf);
2914     return error;
2915     }
2916    
2917     @@ -2039,7 +2090,7 @@ static int mxt_initialize(struct mxt_data *data)
2918     int error;
2919    
2920     while (1) {
2921     - error = mxt_get_info(data);
2922     + error = mxt_read_info_block(data);
2923     if (!error)
2924     break;
2925    
2926     @@ -2070,16 +2121,9 @@ static int mxt_initialize(struct mxt_data *data)
2927     msleep(MXT_FW_RESET_TIME);
2928     }
2929    
2930     - /* Get object table information */
2931     - error = mxt_get_object_table(data);
2932     - if (error) {
2933     - dev_err(&client->dev, "Error %d reading object table\n", error);
2934     - return error;
2935     - }
2936     -
2937     error = mxt_acquire_irq(data);
2938     if (error)
2939     - goto err_free_object_table;
2940     + return error;
2941    
2942     error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
2943     &client->dev, GFP_KERNEL, data,
2944     @@ -2087,14 +2131,10 @@ static int mxt_initialize(struct mxt_data *data)
2945     if (error) {
2946     dev_err(&client->dev, "Failed to invoke firmware loader: %d\n",
2947     error);
2948     - goto err_free_object_table;
2949     + return error;
2950     }
2951    
2952     return 0;
2953     -
2954     -err_free_object_table:
2955     - mxt_free_object_table(data);
2956     - return error;
2957     }
2958    
2959     static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
2960     @@ -2155,7 +2195,7 @@ static int mxt_init_t7_power_cfg(struct mxt_data *data)
2961     static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x,
2962     unsigned int y)
2963     {
2964     - struct mxt_info *info = &data->info;
2965     + struct mxt_info *info = data->info;
2966     struct mxt_dbg *dbg = &data->dbg;
2967     unsigned int ofs, page;
2968     unsigned int col = 0;
2969     @@ -2483,7 +2523,7 @@ static const struct video_device mxt_video_device = {
2970    
2971     static void mxt_debug_init(struct mxt_data *data)
2972     {
2973     - struct mxt_info *info = &data->info;
2974     + struct mxt_info *info = data->info;
2975     struct mxt_dbg *dbg = &data->dbg;
2976     struct mxt_object *object;
2977     int error;
2978     @@ -2569,7 +2609,6 @@ static int mxt_configure_objects(struct mxt_data *data,
2979     const struct firmware *cfg)
2980     {
2981     struct device *dev = &data->client->dev;
2982     - struct mxt_info *info = &data->info;
2983     int error;
2984    
2985     error = mxt_init_t7_power_cfg(data);
2986     @@ -2594,11 +2633,6 @@ static int mxt_configure_objects(struct mxt_data *data,
2987    
2988     mxt_debug_init(data);
2989    
2990     - dev_info(dev,
2991     - "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
2992     - info->family_id, info->variant_id, info->version >> 4,
2993     - info->version & 0xf, info->build, info->object_num);
2994     -
2995     return 0;
2996     }
2997    
2998     @@ -2607,7 +2641,7 @@ static ssize_t mxt_fw_version_show(struct device *dev,
2999     struct device_attribute *attr, char *buf)
3000     {
3001     struct mxt_data *data = dev_get_drvdata(dev);
3002     - struct mxt_info *info = &data->info;
3003     + struct mxt_info *info = data->info;
3004     return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n",
3005     info->version >> 4, info->version & 0xf, info->build);
3006     }
3007     @@ -2617,7 +2651,7 @@ static ssize_t mxt_hw_version_show(struct device *dev,
3008     struct device_attribute *attr, char *buf)
3009     {
3010     struct mxt_data *data = dev_get_drvdata(dev);
3011     - struct mxt_info *info = &data->info;
3012     + struct mxt_info *info = data->info;
3013     return scnprintf(buf, PAGE_SIZE, "%u.%u\n",
3014     info->family_id, info->variant_id);
3015     }
3016     @@ -2656,7 +2690,7 @@ static ssize_t mxt_object_show(struct device *dev,
3017     return -ENOMEM;
3018    
3019     error = 0;
3020     - for (i = 0; i < data->info.object_num; i++) {
3021     + for (i = 0; i < data->info->object_num; i++) {
3022     object = data->object_table + i;
3023    
3024     if (!mxt_object_readable(object->type))
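
(Editorial aside, not part of the patch.) The atmel_mxt_ts rework above reads one contiguous raw info block: a 7-byte ID header, object_num object entries, and a trailing little-endian 24-bit checksum, and then points data->info and data->object_table into that single allocation. The standalone sketch below restates the size arithmetic and CRC extraction; the per-entry structure sizes are hard-coded here as assumptions mirroring the driver.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MXT_ID_HEADER_SIZE	7	/* assumed: sizeof(struct mxt_info) */
#define MXT_OBJECT_ENTRY_SIZE	6	/* assumed: sizeof(struct mxt_object) */
#define MXT_INFO_CHECKSUM_SIZE	3

/* The stored CRC is the last three bytes of the block, LSB first. */
static uint32_t read_le24(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16);
}

int main(void)
{
	uint8_t num_objects = 38;	/* hypothetical object_num from the ID header */
	uint8_t tail[MXT_INFO_CHECKSUM_SIZE] = { 0x34, 0x12, 0xab };

	size_t total = MXT_ID_HEADER_SIZE +
		       (size_t)num_objects * MXT_OBJECT_ENTRY_SIZE +
		       MXT_INFO_CHECKSUM_SIZE;

	printf("info block size = %zu bytes, stored CRC = 0x%06x\n",
	       total, (unsigned int)read_le24(tail));
	return 0;
}
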
3025     diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
3026     index 57c920c1372d..e3dbb6101b4a 100644
3027     --- a/drivers/iommu/dmar.c
3028     +++ b/drivers/iommu/dmar.c
3029     @@ -1342,7 +1342,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
3030     struct qi_desc desc;
3031    
3032     if (mask) {
3033     - BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
3034     + BUG_ON(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
3035     addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
3036     desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
3037     } else
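
(Editorial aside, not part of the patch.) The dmar.c change above only widens the literal in the alignment check: once VTD_PAGE_SHIFT + mask can reach 32 or more, shifting a plain int 1 that far is undefined, so the BUG_ON mask has to be built in 64-bit arithmetic. A minimal userspace sketch of that mask construction, using hypothetical addr and mask values:

#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT 12		/* as in the kernel headers */

int main(void)
{
	unsigned int mask = 24;			/* hypothetical invalidation mask */
	uint64_t addr = 0x123456789000ULL;	/* hypothetical I/O virtual address */

	/* With a plain int literal the shift could exceed 31 bits and the
	 * check would silently break; 1ULL keeps the math in 64 bits. */
	uint64_t align_mask = (1ULL << (VTD_PAGE_SHIFT + mask)) - 1;

	printf("align mask 0x%llx, misaligned bits 0x%llx\n",
	       (unsigned long long)align_mask,
	       (unsigned long long)(addr & align_mask));
	return 0;
}
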
3038     diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
3039     index 365a8cc62405..b6a681bce400 100644
3040     --- a/drivers/net/can/dev.c
3041     +++ b/drivers/net/can/dev.c
3042     @@ -604,7 +604,7 @@ void can_bus_off(struct net_device *dev)
3043     {
3044     struct can_priv *priv = netdev_priv(dev);
3045    
3046     - netdev_dbg(dev, "bus-off\n");
3047     + netdev_info(dev, "bus-off\n");
3048    
3049     netif_carrier_off(dev);
3050    
3051     diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3052     index c93e5613d4cc..cc658a29cc33 100644
3053     --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3054     +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3055     @@ -310,6 +310,8 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
3056    
3057     self->ndev->hw_features |= aq_hw_caps->hw_features;
3058     self->ndev->features = aq_hw_caps->hw_features;
3059     + self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
3060     + NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
3061     self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
3062     self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
3063     self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
3064     diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
3065     index 3e62692af011..fa5b30f547f6 100644
3066     --- a/drivers/net/ethernet/hisilicon/hns/hnae.h
3067     +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
3068     @@ -87,7 +87,7 @@ do { \
3069    
3070     #define HNAE_AE_REGISTER 0x1
3071    
3072     -#define RCB_RING_NAME_LEN 16
3073     +#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)
3074    
3075     #define HNAE_LOWEST_LATENCY_COAL_PARAM 30
3076     #define HNAE_LOW_LATENCY_COAL_PARAM 80
3077     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
3078     index 8a85217845ae..cf6a245db6d5 100644
3079     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
3080     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
3081     @@ -3413,6 +3413,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
3082     hw->phy.sfp_setup_needed = false;
3083     }
3084    
3085     + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
3086     + return status;
3087     +
3088     /* Reset PHY */
3089     if (!hw->phy.reset_disable && hw->phy.ops.reset)
3090     hw->phy.ops.reset(hw);
3091     diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
3092     index f88ff3f4b661..35d14af235f7 100644
3093     --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
3094     +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
3095     @@ -277,8 +277,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
3096     if ((*reg & mask) == val)
3097     return 0;
3098    
3099     - if (msleep_interruptible(25))
3100     - return -ERESTARTSYS;
3101     + msleep(25);
3102    
3103     if (time_after(start_time, wait_until))
3104     return -ETIMEDOUT;
3105     diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
3106     index 085338990f49..c5452b445c37 100644
3107     --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
3108     +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
3109     @@ -115,8 +115,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn)
3110    
3111     void qed_l2_setup(struct qed_hwfn *p_hwfn)
3112     {
3113     - if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
3114     - p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
3115     + if (!QED_IS_L2_PERSONALITY(p_hwfn))
3116     return;
3117    
3118     mutex_init(&p_hwfn->p_l2_info->lock);
3119     @@ -126,8 +125,7 @@ void qed_l2_free(struct qed_hwfn *p_hwfn)
3120     {
3121     u32 i;
3122    
3123     - if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
3124     - p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
3125     + if (!QED_IS_L2_PERSONALITY(p_hwfn))
3126     return;
3127    
3128     if (!p_hwfn->p_l2_info)
3129     diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
3130     index 50b142fad6b8..1900bf7e67d1 100644
3131     --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
3132     +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
3133     @@ -238,7 +238,7 @@ qede_rdma_get_free_event_node(struct qede_dev *edev)
3134     }
3135    
3136     if (!found) {
3137     - event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
3138     + event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
3139     if (!event_node) {
3140     DP_NOTICE(edev,
3141     "qedr: Could not allocate memory for rdma work\n");
3142     diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
3143     index a3f456b91c99..e9e67c22c8bb 100644
3144     --- a/drivers/net/phy/marvell.c
3145     +++ b/drivers/net/phy/marvell.c
3146     @@ -1409,6 +1409,15 @@ static int m88e1318_set_wol(struct phy_device *phydev,
3147     if (err < 0)
3148     return err;
3149    
3150     + /* If WOL event happened once, the LED[2] interrupt pin
3151     + * will not be cleared unless we reading the interrupt status
3152     + * register. If interrupts are in use, the normal interrupt
3153     + * handling will clear the WOL event. Clear the WOL event
3154     + * before enabling it if !phy_interrupt_is_valid()
3155     + */
3156     + if (!phy_interrupt_is_valid(phydev))
3157     + phy_read(phydev, MII_M1011_IEVENT);
3158     +
3159     /* Enable the WOL interrupt */
3160     temp = phy_read(phydev, MII_88E1318S_PHY_CSIER);
3161     temp |= MII_88E1318S_PHY_CSIER_WOL_EIE;
3162     diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
3163     index 46d6cb1e03bd..8f845de8a8a2 100644
3164     --- a/drivers/nvme/host/Kconfig
3165     +++ b/drivers/nvme/host/Kconfig
3166     @@ -18,7 +18,7 @@ config NVME_FABRICS
3167    
3168     config NVME_RDMA
3169     tristate "NVM Express over Fabrics RDMA host driver"
3170     - depends on INFINIBAND && BLOCK
3171     + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
3172     select NVME_CORE
3173     select NVME_FABRICS
3174     select SG_POOL
3175     diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
3176     index dd956311a85a..38c128f230e7 100644
3177     --- a/drivers/nvme/host/core.c
3178     +++ b/drivers/nvme/host/core.c
3179     @@ -665,6 +665,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
3180     ret = PTR_ERR(meta);
3181     goto out_unmap;
3182     }
3183     + req->cmd_flags |= REQ_INTEGRITY;
3184     }
3185     }
3186    
3187     diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
3188     index 740aae51e1c6..33d060c524e6 100644
3189     --- a/drivers/nvme/host/fabrics.c
3190     +++ b/drivers/nvme/host/fabrics.c
3191     @@ -587,6 +587,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
3192     ret = -ENOMEM;
3193     goto out;
3194     }
3195     + kfree(opts->transport);
3196     opts->transport = p;
3197     break;
3198     case NVMF_OPT_NQN:
3199     @@ -595,6 +596,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
3200     ret = -ENOMEM;
3201     goto out;
3202     }
3203     + kfree(opts->subsysnqn);
3204     opts->subsysnqn = p;
3205     nqnlen = strlen(opts->subsysnqn);
3206     if (nqnlen >= NVMF_NQN_SIZE) {
3207     @@ -617,6 +619,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
3208     ret = -ENOMEM;
3209     goto out;
3210     }
3211     + kfree(opts->traddr);
3212     opts->traddr = p;
3213     break;
3214     case NVMF_OPT_TRSVCID:
3215     @@ -625,6 +628,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
3216     ret = -ENOMEM;
3217     goto out;
3218     }
3219     + kfree(opts->trsvcid);
3220     opts->trsvcid = p;
3221     break;
3222     case NVMF_OPT_QUEUE_SIZE:
3223     @@ -706,6 +710,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
3224     ret = -EINVAL;
3225     goto out;
3226     }
3227     + nvmf_host_put(opts->host);
3228     opts->host = nvmf_host_add(p);
3229     kfree(p);
3230     if (!opts->host) {
3231     @@ -731,6 +736,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
3232     ret = -ENOMEM;
3233     goto out;
3234     }
3235     + kfree(opts->host_traddr);
3236     opts->host_traddr = p;
3237     break;
3238     case NVMF_OPT_HOST_ID:
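
(Editorial aside, not part of the patch.) Each nvmf_parse_options hunk above frees the previously stored string before assigning the new one, so a connect string that repeats an option such as traddr no longer leaks the first copy. A small userspace sketch of that free-then-replace pattern, with made-up option values:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* Hypothetical: the same option appears twice in the input. */
	const char *values[] = { "192.168.0.1", "192.168.0.2" };
	char *traddr = NULL;
	size_t i;

	for (i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
		char *p = strdup(values[i]);

		if (!p) {
			free(traddr);
			return 1;
		}
		free(traddr);	/* drop the earlier copy instead of leaking it */
		traddr = p;
	}

	printf("final traddr = %s\n", traddr);
	free(traddr);
	return 0;
}
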
3239     diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
3240     index 03e4ab65fe77..48d20c2c1256 100644
3241     --- a/drivers/nvme/target/Kconfig
3242     +++ b/drivers/nvme/target/Kconfig
3243     @@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
3244    
3245     config NVME_TARGET_RDMA
3246     tristate "NVMe over Fabrics RDMA target support"
3247     - depends on INFINIBAND
3248     + depends on INFINIBAND && INFINIBAND_ADDR_TRANS
3249     depends on NVME_TARGET
3250     help
3251     This enables the NVMe RDMA target support, which allows exporting NVMe
3252     diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
3253     index dc3033cf3c19..efc317e7669d 100644
3254     --- a/drivers/pci/dwc/pcie-kirin.c
3255     +++ b/drivers/pci/dwc/pcie-kirin.c
3256     @@ -490,7 +490,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
3257     return ret;
3258    
3259     kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
3260     - "reset-gpio", 0);
3261     + "reset-gpios", 0);
3262     if (kirin_pcie->gpio_id_reset < 0)
3263     return -ENODEV;
3264    
3265     diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
3266     index 2d3d5ac92c06..81ec9b6805fc 100644
3267     --- a/drivers/remoteproc/qcom_q6v5_pil.c
3268     +++ b/drivers/remoteproc/qcom_q6v5_pil.c
3269     @@ -915,6 +915,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
3270     dev_err(qproc->dev, "unable to resolve mba region\n");
3271     return ret;
3272     }
3273     + of_node_put(node);
3274    
3275     qproc->mba_phys = r.start;
3276     qproc->mba_size = resource_size(&r);
3277     @@ -932,6 +933,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
3278     dev_err(qproc->dev, "unable to resolve mpss region\n");
3279     return ret;
3280     }
3281     + of_node_put(node);
3282    
3283     qproc->mpss_phys = qproc->mpss_reloc = r.start;
3284     qproc->mpss_size = resource_size(&r);
3285     diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
3286     index e0996fce3963..6a5b5b16145e 100644
3287     --- a/drivers/rpmsg/rpmsg_char.c
3288     +++ b/drivers/rpmsg/rpmsg_char.c
3289     @@ -581,4 +581,6 @@ static void rpmsg_chrdev_exit(void)
3290     unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
3291     }
3292     module_exit(rpmsg_chrdev_exit);
3293     +
3294     +MODULE_ALIAS("rpmsg:rpmsg_chrdev");
3295     MODULE_LICENSE("GPL v2");
3296     diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
3297     index a851d34c642b..04674ce961f1 100644
3298     --- a/drivers/s390/net/smsgiucv.c
3299     +++ b/drivers/s390/net/smsgiucv.c
3300     @@ -189,7 +189,7 @@ static struct device_driver smsg_driver = {
3301    
3302     static void __exit smsg_exit(void)
3303     {
3304     - cpcmd("SET SMSG IUCV", NULL, 0, NULL);
3305     + cpcmd("SET SMSG OFF", NULL, 0, NULL);
3306     device_unregister(smsg_dev);
3307     iucv_unregister(&smsg_handler, 1);
3308     driver_unregister(&smsg_driver);
3309     diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
3310     index ac879745ef80..18a409bb9e0c 100644
3311     --- a/drivers/scsi/isci/port_config.c
3312     +++ b/drivers/scsi/isci/port_config.c
3313     @@ -291,7 +291,7 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
3314     * Note: We have not moved the current phy_index so we will actually
3315     * compare the startting phy with itself.
3316     * This is expected and required to add the phy to the port. */
3317     - while (phy_index < SCI_MAX_PHYS) {
3318     + for (; phy_index < SCI_MAX_PHYS; phy_index++) {
3319     if ((phy_mask & (1 << phy_index)) == 0)
3320     continue;
3321     sci_phy_get_sas_address(&ihost->phys[phy_index],
3322     @@ -311,7 +311,6 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
3323     &ihost->phys[phy_index]);
3324    
3325     assigned_phy_mask |= (1 << phy_index);
3326     - phy_index++;
3327     }
3328    
3329     }
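
(Editorial aside, not part of the patch.) The isci change above matters because the original while loop only advanced phy_index at the bottom of its body; the continue taken for phys outside phy_mask skipped that increment and the loop could spin forever. Rewriting it as a for loop keeps the increment on every path, as this toy example with an invented mask shows:

#include <stdio.h>

#define SCI_MAX_PHYS 4

int main(void)
{
	unsigned int phy_mask = 0x5;	/* hypothetical: phys 0 and 2 belong to the port */
	int phy_index;

	/* The for loop runs its increment even when the body hits continue,
	 * which is exactly what the while-loop version got wrong. */
	for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
		if ((phy_mask & (1 << phy_index)) == 0)
			continue;
		printf("validating phy %d\n", phy_index);
	}
	return 0;
}
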
3330     diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
3331     index 4bf406df051b..72a919179d06 100644
3332     --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
3333     +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
3334     @@ -903,7 +903,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
3335     goto fail_fw_init;
3336     }
3337    
3338     - ret = 0;
3339     + return 0;
3340    
3341     fail_fw_init:
3342     megasas_return_cmd(instance, cmd);
3343     @@ -913,8 +913,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
3344     IOCInitMessage, ioc_init_handle);
3345     fail_get_cmd:
3346     dev_err(&instance->pdev->dev,
3347     - "Init cmd return status %s for SCSI host %d\n",
3348     - ret ? "FAILED" : "SUCCESS", instance->host->host_no);
3349     + "Init cmd return status FAILED for SCSI host %d\n",
3350     + instance->host->host_no);
3351    
3352     return ret;
3353     }
3354     diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
3355     index 7404d26895f5..f6542c159ed6 100644
3356     --- a/drivers/scsi/scsi_transport_iscsi.c
3357     +++ b/drivers/scsi/scsi_transport_iscsi.c
3358     @@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
3359     return nlmsg_multicast(nls, skb, 0, group, gfp);
3360     }
3361    
3362     +static int
3363     +iscsi_unicast_skb(struct sk_buff *skb, u32 portid)
3364     +{
3365     + return nlmsg_unicast(nls, skb, portid);
3366     +}
3367     +
3368     int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
3369     char *data, uint32_t data_size)
3370     {
3371     @@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
3372     EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
3373    
3374     static int
3375     -iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
3376     - void *payload, int size)
3377     +iscsi_if_send_reply(u32 portid, int type, void *payload, int size)
3378     {
3379     struct sk_buff *skb;
3380     struct nlmsghdr *nlh;
3381     int len = nlmsg_total_size(size);
3382     - int flags = multi ? NLM_F_MULTI : 0;
3383     - int t = done ? NLMSG_DONE : type;
3384    
3385     skb = alloc_skb(len, GFP_ATOMIC);
3386     if (!skb) {
3387     @@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
3388     return -ENOMEM;
3389     }
3390    
3391     - nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
3392     - nlh->nlmsg_flags = flags;
3393     + nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0);
3394     memcpy(nlmsg_data(nlh), payload, size);
3395     - return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
3396     + return iscsi_unicast_skb(skb, portid);
3397     }
3398    
3399     static int
3400     @@ -3470,6 +3472,7 @@ static int
3401     iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3402     {
3403     int err = 0;
3404     + u32 portid;
3405     struct iscsi_uevent *ev = nlmsg_data(nlh);
3406     struct iscsi_transport *transport = NULL;
3407     struct iscsi_internal *priv;
3408     @@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3409     if (!try_module_get(transport->owner))
3410     return -EINVAL;
3411    
3412     + portid = NETLINK_CB(skb).portid;
3413     +
3414     switch (nlh->nlmsg_type) {
3415     case ISCSI_UEVENT_CREATE_SESSION:
3416     err = iscsi_if_create_session(priv, ep, ev,
3417     - NETLINK_CB(skb).portid,
3418     + portid,
3419     ev->u.c_session.initial_cmdsn,
3420     ev->u.c_session.cmds_max,
3421     ev->u.c_session.queue_depth);
3422     @@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3423     }
3424    
3425     err = iscsi_if_create_session(priv, ep, ev,
3426     - NETLINK_CB(skb).portid,
3427     + portid,
3428     ev->u.c_bound_session.initial_cmdsn,
3429     ev->u.c_bound_session.cmds_max,
3430     ev->u.c_bound_session.queue_depth);
3431     @@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3432     static void
3433     iscsi_if_rx(struct sk_buff *skb)
3434     {
3435     + u32 portid = NETLINK_CB(skb).portid;
3436     +
3437     mutex_lock(&rx_queue_mutex);
3438     while (skb->len >= NLMSG_HDRLEN) {
3439     int err;
3440     @@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb)
3441     break;
3442     if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
3443     break;
3444     - err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
3445     - nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
3446     + err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
3447     + ev, sizeof(*ev));
3448     } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
3449     skb_pull(skb, rlen);
3450     }
3451     diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
3452     index c44de0b4a995..beb585ddc07d 100644
3453     --- a/drivers/scsi/storvsc_drv.c
3454     +++ b/drivers/scsi/storvsc_drv.c
3455     @@ -1725,11 +1725,14 @@ static int storvsc_probe(struct hv_device *device,
3456     max_targets = STORVSC_MAX_TARGETS;
3457     max_channels = STORVSC_MAX_CHANNELS;
3458     /*
3459     - * On Windows8 and above, we support sub-channels for storage.
3460     + * On Windows8 and above, we support sub-channels for storage
3461     + * on SCSI and FC controllers.
3462     * The number of sub-channels offerred is based on the number of
3463     * VCPUs in the guest.
3464     */
3465     - max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
3466     + if (!dev_is_ide)
3467     + max_sub_channels =
3468     + (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
3469     }
3470    
3471     scsi_driver.can_queue = (max_outstanding_req_per_channel *
3472     diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
3473     index c374e3b5c678..777e5f1e52d1 100644
3474     --- a/drivers/scsi/vmw_pvscsi.c
3475     +++ b/drivers/scsi/vmw_pvscsi.c
3476     @@ -609,7 +609,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
3477     break;
3478    
3479     case BTSTAT_ABORTQUEUE:
3480     - cmd->result = (DID_ABORT << 16);
3481     + cmd->result = (DID_BUS_BUSY << 16);
3482     break;
3483    
3484     case BTSTAT_SCSIPARITY:
3485     diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
3486     index fe96a8b956fb..f7ed1187518b 100644
3487     --- a/drivers/soc/bcm/raspberrypi-power.c
3488     +++ b/drivers/soc/bcm/raspberrypi-power.c
3489     @@ -45,7 +45,7 @@ struct rpi_power_domains {
3490     struct rpi_power_domain_packet {
3491     u32 domain;
3492     u32 on;
3493     -} __packet;
3494     +};
3495    
3496     /*
3497     * Asks the firmware to enable or disable power on a specific power
3498     diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
3499     index 7428091d3f5b..bd00b7cc8b78 100644
3500     --- a/drivers/spi/spi-bcm2835aux.c
3501     +++ b/drivers/spi/spi-bcm2835aux.c
3502     @@ -184,6 +184,11 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
3503     struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
3504     irqreturn_t ret = IRQ_NONE;
3505    
3506     + /* IRQ may be shared, so return if our interrupts are disabled */
3507     + if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
3508     + (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
3509     + return ret;
3510     +
3511     /* check if we have data to read */
3512     while (bs->rx_len &&
3513     (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
3514     diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
3515     index 5c9516ae4942..4a001634023e 100644
3516     --- a/drivers/spi/spi-cadence.c
3517     +++ b/drivers/spi/spi-cadence.c
3518     @@ -313,6 +313,14 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
3519    
3520     while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
3521     (xspi->tx_bytes > 0)) {
3522     +
3523     + /* When xspi in busy condition, bytes may send failed,
3524     + * then spi control did't work thoroughly, add one byte delay
3525     + */
3526     + if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
3527     + CDNS_SPI_IXR_TXFULL)
3528     + usleep_range(10, 20);
3529     +
3530     if (xspi->txbuf)
3531     cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
3532     else
3533     diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
3534     index 2770fbd4ce49..52056535f54e 100644
3535     --- a/drivers/spi/spi-sh-msiof.c
3536     +++ b/drivers/spi/spi-sh-msiof.c
3537     @@ -277,6 +277,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
3538     }
3539    
3540     k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
3541     + brps = min_t(int, brps, 32);
3542    
3543     scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
3544     sh_msiof_write(p, TSCR, scr);
3545     diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
3546     index 0d99b242e82e..6cb933ecc084 100644
3547     --- a/drivers/target/target_core_pscsi.c
3548     +++ b/drivers/target/target_core_pscsi.c
3549     @@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
3550     bytes = min(bytes, data_len);
3551    
3552     if (!bio) {
3553     +new_bio:
3554     nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
3555     nr_pages -= nr_vecs;
3556     /*
3557     @@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
3558     * be allocated with pscsi_get_bio() above.
3559     */
3560     bio = NULL;
3561     + goto new_bio;
3562     }
3563    
3564     data_len -= bytes;
3565     diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
3566     index 58a5009eacc3..a548c3695797 100644
3567     --- a/drivers/tee/tee_core.c
3568     +++ b/drivers/tee/tee_core.c
3569     @@ -181,6 +181,17 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
3570     if (IS_ERR(shm))
3571     return PTR_ERR(shm);
3572    
3573     + /*
3574     + * Ensure offset + size does not overflow offset
3575     + * and does not overflow the size of the referred
3576     + * shared memory object.
3577     + */
3578     + if ((ip.a + ip.b) < ip.a ||
3579     + (ip.a + ip.b) > shm->size) {
3580     + tee_shm_put(shm);
3581     + return -EINVAL;
3582     + }
3583     +
3584     params[n].u.memref.shm_offs = ip.a;
3585     params[n].u.memref.size = ip.b;
3586     params[n].u.memref.shm = shm;
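
(Editorial aside, not part of the patch.) The tee_core.c hunk adds the usual unsigned-overflow-safe bounds check: reject the memref when offset + size wraps around or ends past the shared-memory object. A standalone sketch of the same check with made-up sizes:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Returns true when [offs, offs + size) fits inside an object of
 * shm_size bytes, rejecting unsigned wrap-around first. */
static bool memref_in_bounds(uint64_t offs, uint64_t size, uint64_t shm_size)
{
	if (offs + size < offs)		/* wrapped past UINT64_MAX */
		return false;
	return offs + size <= shm_size;
}

int main(void)
{
	printf("%d %d %d\n",
	       memref_in_bounds(0, 16, 4096),		/* 1: fits */
	       memref_in_bounds(4090, 16, 4096),	/* 0: past the end */
	       memref_in_bounds(UINT64_MAX, 2, 4096));	/* 0: wraps */
	return 0;
}
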
3587     diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
3588     index 8a7f24dd9315..0c19fcd56a0d 100644
3589     --- a/drivers/thermal/int340x_thermal/int3403_thermal.c
3590     +++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
3591     @@ -194,6 +194,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
3592     return -EFAULT;
3593     }
3594    
3595     + priv->priv = obj;
3596     obj->max_state = p->package.count - 1;
3597     obj->cdev =
3598     thermal_cooling_device_register(acpi_device_bid(priv->adev),
3599     @@ -201,8 +202,6 @@ static int int3403_cdev_add(struct int3403_priv *priv)
3600     if (IS_ERR(obj->cdev))
3601     result = PTR_ERR(obj->cdev);
3602    
3603     - priv->priv = obj;
3604     -
3605     kfree(buf.pointer);
3606     /* TODO: add ACPI notification support */
3607    
3608     diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
3609     index 5a6dca01a1d0..802388bb42ba 100644
3610     --- a/drivers/usb/musb/musb_host.c
3611     +++ b/drivers/usb/musb/musb_host.c
3612     @@ -2560,8 +2560,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
3613     {
3614     struct musb *musb = hcd_to_musb(hcd);
3615     u8 devctl;
3616     + int ret;
3617    
3618     - musb_port_suspend(musb, true);
3619     + ret = musb_port_suspend(musb, true);
3620     + if (ret)
3621     + return ret;
3622    
3623     if (!is_host_active(musb))
3624     return 0;
3625     diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
3626     index 7bbf01bf4bb0..54d02ed032df 100644
3627     --- a/drivers/usb/musb/musb_host.h
3628     +++ b/drivers/usb/musb/musb_host.h
3629     @@ -92,7 +92,7 @@ extern void musb_host_rx(struct musb *, u8);
3630     extern void musb_root_disconnect(struct musb *musb);
3631     extern void musb_host_resume_root_hub(struct musb *musb);
3632     extern void musb_host_poke_root_hub(struct musb *musb);
3633     -extern void musb_port_suspend(struct musb *musb, bool do_suspend);
3634     +extern int musb_port_suspend(struct musb *musb, bool do_suspend);
3635     extern void musb_port_reset(struct musb *musb, bool do_reset);
3636     extern void musb_host_finish_resume(struct work_struct *work);
3637     #else
3638     @@ -124,7 +124,10 @@ static inline void musb_root_disconnect(struct musb *musb) {}
3639     static inline void musb_host_resume_root_hub(struct musb *musb) {}
3640     static inline void musb_host_poll_rh_status(struct musb *musb) {}
3641     static inline void musb_host_poke_root_hub(struct musb *musb) {}
3642     -static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {}
3643     +static inline int musb_port_suspend(struct musb *musb, bool do_suspend)
3644     +{
3645     + return 0;
3646     +}
3647     static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
3648     static inline void musb_host_finish_resume(struct work_struct *work) {}
3649     #endif
3650     diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
3651     index 0b4595439d51..5eca5d2d5e00 100644
3652     --- a/drivers/usb/musb/musb_virthub.c
3653     +++ b/drivers/usb/musb/musb_virthub.c
3654     @@ -73,14 +73,14 @@ void musb_host_finish_resume(struct work_struct *work)
3655     spin_unlock_irqrestore(&musb->lock, flags);
3656     }
3657    
3658     -void musb_port_suspend(struct musb *musb, bool do_suspend)
3659     +int musb_port_suspend(struct musb *musb, bool do_suspend)
3660     {
3661     struct usb_otg *otg = musb->xceiv->otg;
3662     u8 power;
3663     void __iomem *mbase = musb->mregs;
3664    
3665     if (!is_host_active(musb))
3666     - return;
3667     + return 0;
3668    
3669     /* NOTE: this doesn't necessarily put PHY into low power mode,
3670     * turning off its clock; that's a function of PHY integration and
3671     @@ -91,16 +91,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
3672     if (do_suspend) {
3673     int retries = 10000;
3674    
3675     - power &= ~MUSB_POWER_RESUME;
3676     - power |= MUSB_POWER_SUSPENDM;
3677     - musb_writeb(mbase, MUSB_POWER, power);
3678     + if (power & MUSB_POWER_RESUME)
3679     + return -EBUSY;
3680    
3681     - /* Needed for OPT A tests */
3682     - power = musb_readb(mbase, MUSB_POWER);
3683     - while (power & MUSB_POWER_SUSPENDM) {
3684     + if (!(power & MUSB_POWER_SUSPENDM)) {
3685     + power |= MUSB_POWER_SUSPENDM;
3686     + musb_writeb(mbase, MUSB_POWER, power);
3687     +
3688     + /* Needed for OPT A tests */
3689     power = musb_readb(mbase, MUSB_POWER);
3690     - if (retries-- < 1)
3691     - break;
3692     + while (power & MUSB_POWER_SUSPENDM) {
3693     + power = musb_readb(mbase, MUSB_POWER);
3694     + if (retries-- < 1)
3695     + break;
3696     + }
3697     }
3698    
3699     musb_dbg(musb, "Root port suspended, power %02x", power);
3700     @@ -136,6 +140,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
3701     schedule_delayed_work(&musb->finish_resume_work,
3702     msecs_to_jiffies(USB_RESUME_TIMEOUT));
3703     }
3704     + return 0;
3705     }
3706    
3707     void musb_port_reset(struct musb *musb, bool do_reset)
3708     diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
3709     index b57891c1fd31..7afbea512207 100644
3710     --- a/drivers/usb/typec/ucsi/Makefile
3711     +++ b/drivers/usb/typec/ucsi/Makefile
3712     @@ -5,6 +5,6 @@ obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o
3713    
3714     typec_ucsi-y := ucsi.o
3715    
3716     -typec_ucsi-$(CONFIG_FTRACE) += trace.o
3717     +typec_ucsi-$(CONFIG_TRACING) += trace.o
3718    
3719     obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
3720     diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
3721     index d2edbc79384a..83243af22d51 100644
3722     --- a/drivers/xen/xenbus/xenbus_dev_frontend.c
3723     +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
3724     @@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
3725     {
3726     struct {
3727     struct xsd_sockmsg hdr;
3728     - const char body[16];
3729     + char body[16];
3730     } msg;
3731     int rc;
3732    
3733     @@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
3734     msg.hdr.len = strlen(reply) + 1;
3735     if (msg.hdr.len > sizeof(msg.body))
3736     return -E2BIG;
3737     + memcpy(&msg.body, reply, msg.hdr.len);
3738    
3739     mutex_lock(&u->reply_mutex);
3740     rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
3741     diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
3742     index 9f715c3edcf9..ccc9c708a860 100644
3743     --- a/fs/afs/rxrpc.c
3744     +++ b/fs/afs/rxrpc.c
3745     @@ -55,6 +55,7 @@ int afs_open_socket(void)
3746     {
3747     struct sockaddr_rxrpc srx;
3748     struct socket *socket;
3749     + unsigned int min_level;
3750     int ret;
3751    
3752     _enter("");
3753     @@ -80,6 +81,12 @@ int afs_open_socket(void)
3754     memset(&srx.transport.sin.sin_addr, 0,
3755     sizeof(srx.transport.sin.sin_addr));
3756    
3757     + min_level = RXRPC_SECURITY_ENCRYPT;
3758     + ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
3759     + (void *)&min_level, sizeof(min_level));
3760     + if (ret < 0)
3761     + goto error_2;
3762     +
3763     ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
3764     if (ret < 0)
3765     goto error_2;
3766     diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
3767     index e3f6c49e5c4d..24613b4e224c 100644
3768     --- a/fs/btrfs/scrub.c
3769     +++ b/fs/btrfs/scrub.c
3770     @@ -301,6 +301,11 @@ static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
3771     static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
3772     static void scrub_put_ctx(struct scrub_ctx *sctx);
3773    
3774     +static inline int scrub_is_page_on_raid56(struct scrub_page *page)
3775     +{
3776     + return page->recover &&
3777     + (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
3778     +}
3779    
3780     static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
3781     {
3782     @@ -1323,15 +1328,34 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
3783     * could happen otherwise that a correct page would be
3784     * overwritten by a bad one).
3785     */
3786     - for (mirror_index = 0;
3787     - mirror_index < BTRFS_MAX_MIRRORS &&
3788     - sblocks_for_recheck[mirror_index].page_count > 0;
3789     - mirror_index++) {
3790     + for (mirror_index = 0; ;mirror_index++) {
3791     struct scrub_block *sblock_other;
3792    
3793     if (mirror_index == failed_mirror_index)
3794     continue;
3795     - sblock_other = sblocks_for_recheck + mirror_index;
3796     +
3797     + /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
3798     + if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
3799     + if (mirror_index >= BTRFS_MAX_MIRRORS)
3800     + break;
3801     + if (!sblocks_for_recheck[mirror_index].page_count)
3802     + break;
3803     +
3804     + sblock_other = sblocks_for_recheck + mirror_index;
3805     + } else {
3806     + struct scrub_recover *r = sblock_bad->pagev[0]->recover;
3807     + int max_allowed = r->bbio->num_stripes -
3808     + r->bbio->num_tgtdevs;
3809     +
3810     + if (mirror_index >= max_allowed)
3811     + break;
3812     + if (!sblocks_for_recheck[1].page_count)
3813     + break;
3814     +
3815     + ASSERT(failed_mirror_index == 0);
3816     + sblock_other = sblocks_for_recheck + 1;
3817     + sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
3818     + }
3819    
3820     /* build and submit the bios, check checksums */
3821     scrub_recheck_block(fs_info, sblock_other, 0);
3822     @@ -1679,18 +1703,13 @@ static void scrub_bio_wait_endio(struct bio *bio)
3823     complete(&ret->event);
3824     }
3825    
3826     -static inline int scrub_is_page_on_raid56(struct scrub_page *page)
3827     -{
3828     - return page->recover &&
3829     - (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
3830     -}
3831     -
3832     static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
3833     struct bio *bio,
3834     struct scrub_page *page)
3835     {
3836     struct scrub_bio_ret done;
3837     int ret;
3838     + int mirror_num;
3839    
3840     init_completion(&done.event);
3841     done.status = 0;
3842     @@ -1698,9 +1717,10 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
3843     bio->bi_private = &done;
3844     bio->bi_end_io = scrub_bio_wait_endio;
3845    
3846     + mirror_num = page->sblock->pagev[0]->mirror_num;
3847     ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
3848     page->recover->map_length,
3849     - page->mirror_num, 0);
3850     + mirror_num, 0);
3851     if (ret)
3852     return ret;
3853    
3854     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3855     index eef875da7c0b..839327f75e3d 100644
3856     --- a/fs/cifs/smb2ops.c
3857     +++ b/fs/cifs/smb2ops.c
3858     @@ -570,9 +570,15 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
3859    
3860     SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3861    
3862     + /*
3863     + * If ea_name is NULL (listxattr) and there are no EAs, return 0 as it's
3864     + * not an error. Otherwise, the specified ea_name was not found.
3865     + */
3866     if (!rc)
3867     rc = move_smb2_ea_to_cifs(ea_data, buf_size, smb2_data,
3868     SMB2_MAX_EA_BUF, ea_name);
3869     + else if (!ea_name && rc == -ENODATA)
3870     + rc = 0;
3871    
3872     kfree(smb2_data);
3873     return rc;
3874     diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
3875     index e5e29f8c920b..9d1823efff34 100644
3876     --- a/fs/ecryptfs/crypto.c
3877     +++ b/fs/ecryptfs/crypto.c
3878     @@ -2026,6 +2026,16 @@ int ecryptfs_encrypt_and_encode_filename(
3879     return rc;
3880     }
3881    
3882     +static bool is_dot_dotdot(const char *name, size_t name_size)
3883     +{
3884     + if (name_size == 1 && name[0] == '.')
3885     + return true;
3886     + else if (name_size == 2 && name[0] == '.' && name[1] == '.')
3887     + return true;
3888     +
3889     + return false;
3890     +}
3891     +
3892     /**
3893     * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext
3894     * @plaintext_name: The plaintext name
3895     @@ -2050,13 +2060,21 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name,
3896     size_t packet_size;
3897     int rc = 0;
3898    
3899     - if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
3900     - && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
3901     - && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)
3902     - && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
3903     - ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) {
3904     - const char *orig_name = name;
3905     - size_t orig_name_size = name_size;
3906     + if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) &&
3907     + !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)) {
3908     + if (is_dot_dotdot(name, name_size)) {
3909     + rc = ecryptfs_copy_filename(plaintext_name,
3910     + plaintext_name_size,
3911     + name, name_size);
3912     + goto out;
3913     + }
3914     +
3915     + if (name_size <= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE ||
3916     + strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
3917     + ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)) {
3918     + rc = -EINVAL;
3919     + goto out;
3920     + }
3921    
3922     name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
3923     name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
3924     @@ -2079,12 +2097,9 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name,
3925     decoded_name,
3926     decoded_name_size);
3927     if (rc) {
3928     - printk(KERN_INFO "%s: Could not parse tag 70 packet "
3929     - "from filename; copying through filename "
3930     - "as-is\n", __func__);
3931     - rc = ecryptfs_copy_filename(plaintext_name,
3932     - plaintext_name_size,
3933     - orig_name, orig_name_size);
3934     + ecryptfs_printk(KERN_DEBUG,
3935     + "%s: Could not parse tag 70 packet from filename\n",
3936     + __func__);
3937     goto out_free;
3938     }
3939     } else {
3940     diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
3941     index c74ed3ca3372..b76a9853325e 100644
3942     --- a/fs/ecryptfs/file.c
3943     +++ b/fs/ecryptfs/file.c
3944     @@ -82,17 +82,28 @@ ecryptfs_filldir(struct dir_context *ctx, const char *lower_name,
3945     buf->sb, lower_name,
3946     lower_namelen);
3947     if (rc) {
3948     - printk(KERN_ERR "%s: Error attempting to decode and decrypt "
3949     - "filename [%s]; rc = [%d]\n", __func__, lower_name,
3950     - rc);
3951     - goto out;
3952     + if (rc != -EINVAL) {
3953     + ecryptfs_printk(KERN_DEBUG,
3954     + "%s: Error attempting to decode and decrypt filename [%s]; rc = [%d]\n",
3955     + __func__, lower_name, rc);
3956     + return rc;
3957     + }
3958     +
3959     + /* Mask -EINVAL errors as these are most likely due a plaintext
3960     + * filename present in the lower filesystem despite filename
3961     + * encryption being enabled. One unavoidable example would be
3962     + * the "lost+found" dentry in the root directory of an Ext4
3963     + * filesystem.
3964     + */
3965     + return 0;
3966     }
3967     +
3968     buf->caller->pos = buf->ctx.pos;
3969     rc = !dir_emit(buf->caller, name, name_size, ino, d_type);
3970     kfree(name);
3971     if (!rc)
3972     buf->entries_written++;
3973     -out:
3974     +
3975     return rc;
3976     }
3977    
3978     diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
3979     index 447a24d77b89..ed4edcd2bc56 100644
3980     --- a/fs/isofs/inode.c
3981     +++ b/fs/isofs/inode.c
3982     @@ -394,7 +394,10 @@ static int parse_options(char *options, struct iso9660_options *popt)
3983     break;
3984     #ifdef CONFIG_JOLIET
3985     case Opt_iocharset:
3986     + kfree(popt->iocharset);
3987     popt->iocharset = match_strdup(&args[0]);
3988     + if (!popt->iocharset)
3989     + return 0;
3990     break;
3991     #endif
3992     case Opt_map_a:
3993     diff --git a/fs/namespace.c b/fs/namespace.c
3994     index 62b17aff1908..1eb3bfd8be5a 100644
3995     --- a/fs/namespace.c
3996     +++ b/fs/namespace.c
3997     @@ -2810,7 +2810,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
3998     mnt_flags |= MNT_NODIRATIME;
3999     if (flags & MS_STRICTATIME)
4000     mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
4001     - if (flags & SB_RDONLY)
4002     + if (flags & MS_RDONLY)
4003     mnt_flags |= MNT_READONLY;
4004    
4005     /* The default atime for remount is preservation */
4006     diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
4007     index 074716293829..d76c81323dc1 100644
4008     --- a/fs/notify/fsnotify.c
4009     +++ b/fs/notify/fsnotify.c
4010     @@ -192,8 +192,9 @@ static int send_to_group(struct inode *to_tell,
4011     struct fsnotify_iter_info *iter_info)
4012     {
4013     struct fsnotify_group *group = NULL;
4014     - __u32 inode_test_mask = 0;
4015     - __u32 vfsmount_test_mask = 0;
4016     + __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
4017     + __u32 marks_mask = 0;
4018     + __u32 marks_ignored_mask = 0;
4019    
4020     if (unlikely(!inode_mark && !vfsmount_mark)) {
4021     BUG();
4022     @@ -213,29 +214,25 @@ static int send_to_group(struct inode *to_tell,
4023     /* does the inode mark tell us to do something? */
4024     if (inode_mark) {
4025     group = inode_mark->group;
4026     - inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
4027     - inode_test_mask &= inode_mark->mask;
4028     - inode_test_mask &= ~inode_mark->ignored_mask;
4029     + marks_mask |= inode_mark->mask;
4030     + marks_ignored_mask |= inode_mark->ignored_mask;
4031     }
4032    
4033     /* does the vfsmount_mark tell us to do something? */
4034     if (vfsmount_mark) {
4035     - vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
4036     group = vfsmount_mark->group;
4037     - vfsmount_test_mask &= vfsmount_mark->mask;
4038     - vfsmount_test_mask &= ~vfsmount_mark->ignored_mask;
4039     - if (inode_mark)
4040     - vfsmount_test_mask &= ~inode_mark->ignored_mask;
4041     + marks_mask |= vfsmount_mark->mask;
4042     + marks_ignored_mask |= vfsmount_mark->ignored_mask;
4043     }
4044    
4045     pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"
4046     - " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
4047     + " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x"
4048     " data=%p data_is=%d cookie=%d\n",
4049     - __func__, group, to_tell, mask, inode_mark,
4050     - inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
4051     + __func__, group, to_tell, mask, inode_mark, vfsmount_mark,
4052     + marks_mask, marks_ignored_mask, data,
4053     data_is, cookie);
4054    
4055     - if (!inode_test_mask && !vfsmount_test_mask)
4056     + if (!(test_mask & marks_mask & ~marks_ignored_mask))
4057     return 0;
4058    
4059     return group->ops->handle_event(group, to_tell, inode_mark,
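
(Editorial aside, not part of the patch.) The fsnotify rework above folds the two per-mark tests into one expression: OR the marks' interest masks, OR their ignored masks, and deliver only if test_mask & marks_mask & ~marks_ignored_mask is non-zero. The sketch below walks that bit algebra with invented mask values; the FS_EVENT_ON_CHILD constant is copied here on the assumption that it matches the kernel's definition.

#include <stdio.h>
#include <stdint.h>

#define FS_EVENT_ON_CHILD 0x08000000u	/* assumed to match the kernel value */

int main(void)
{
	uint32_t mask = 0x00000002u | FS_EVENT_ON_CHILD;	/* e.g. FS_MODIFY on a child */
	uint32_t inode_mark_mask = 0x00000002u, inode_ignored = 0x0u;
	uint32_t mnt_mark_mask   = 0x00000001u, mnt_ignored   = 0x00000002u;

	uint32_t test_mask = mask & ~FS_EVENT_ON_CHILD;
	uint32_t marks_mask = inode_mark_mask | mnt_mark_mask;
	uint32_t marks_ignored_mask = inode_ignored | mnt_ignored;

	if (test_mask & marks_mask & ~marks_ignored_mask)
		printf("deliver event 0x%x to the group\n", test_mask);
	else
		printf("event 0x%x filtered by the combined ignored mask\n", test_mask);
	return 0;
}
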
4060     diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
4061     index ab156e35ec00..1b1283f07941 100644
4062     --- a/fs/ocfs2/refcounttree.c
4063     +++ b/fs/ocfs2/refcounttree.c
4064     @@ -4250,10 +4250,11 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
4065     static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
4066     struct dentry *new_dentry, bool preserve)
4067     {
4068     - int error;
4069     + int error, had_lock;
4070     struct inode *inode = d_inode(old_dentry);
4071     struct buffer_head *old_bh = NULL;
4072     struct inode *new_orphan_inode = NULL;
4073     + struct ocfs2_lock_holder oh;
4074    
4075     if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
4076     return -EOPNOTSUPP;
4077     @@ -4295,6 +4296,14 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
4078     goto out;
4079     }
4080    
4081     + had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1,
4082     + &oh);
4083     + if (had_lock < 0) {
4084     + error = had_lock;
4085     + mlog_errno(error);
4086     + goto out;
4087     + }
4088     +
4089     /* If the security isn't preserved, we need to re-initialize them. */
4090     if (!preserve) {
4091     error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
4092     @@ -4302,14 +4311,15 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
4093     if (error)
4094     mlog_errno(error);
4095     }
4096     -out:
4097     if (!error) {
4098     error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
4099     new_dentry);
4100     if (error)
4101     mlog_errno(error);
4102     }
4103     + ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock);
4104    
4105     +out:
4106     if (new_orphan_inode) {
4107     /*
4108     * We need to open_unlock the inode no matter whether we
4109     diff --git a/fs/proc/base.c b/fs/proc/base.c
4110     index dd9d4d3a2e39..c5c42f3e33d1 100644
4111     --- a/fs/proc/base.c
4112     +++ b/fs/proc/base.c
4113     @@ -1694,6 +1694,12 @@ void task_dump_owner(struct task_struct *task, mode_t mode,
4114     kuid_t uid;
4115     kgid_t gid;
4116    
4117     + if (unlikely(task->flags & PF_KTHREAD)) {
4118     + *ruid = GLOBAL_ROOT_UID;
4119     + *rgid = GLOBAL_ROOT_GID;
4120     + return;
4121     + }
4122     +
4123     /* Default to the tasks effective ownership */
4124     rcu_read_lock();
4125     cred = __task_cred(task);
4126     diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
4127     index d1e82761de81..e64ecb9f2720 100644
4128     --- a/fs/proc/kcore.c
4129     +++ b/fs/proc/kcore.c
4130     @@ -209,25 +209,34 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
4131     {
4132     struct list_head *head = (struct list_head *)arg;
4133     struct kcore_list *ent;
4134     + struct page *p;
4135     +
4136     + if (!pfn_valid(pfn))
4137     + return 1;
4138     +
4139     + p = pfn_to_page(pfn);
4140     + if (!memmap_valid_within(pfn, p, page_zone(p)))
4141     + return 1;
4142    
4143     ent = kmalloc(sizeof(*ent), GFP_KERNEL);
4144     if (!ent)
4145     return -ENOMEM;
4146     - ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
4147     + ent->addr = (unsigned long)page_to_virt(p);
4148     ent->size = nr_pages << PAGE_SHIFT;
4149    
4150     - /* Sanity check: Can happen in 32bit arch...maybe */
4151     - if (ent->addr < (unsigned long) __va(0))
4152     + if (!virt_addr_valid(ent->addr))
4153     goto free_out;
4154    
4155     /* cut not-mapped area. ....from ppc-32 code. */
4156     if (ULONG_MAX - ent->addr < ent->size)
4157     ent->size = ULONG_MAX - ent->addr;
4158    
4159     - /* cut when vmalloc() area is higher than direct-map area */
4160     - if (VMALLOC_START > (unsigned long)__va(0)) {
4161     - if (ent->addr > VMALLOC_START)
4162     - goto free_out;
4163     + /*
4164     + * We've already checked virt_addr_valid so we know this address
4165     + * is a valid pointer, therefore we can check against it to determine
4166     + * if we need to trim
4167     + */
4168     + if (VMALLOC_START > ent->addr) {
4169     if (VMALLOC_START - ent->addr < ent->size)
4170     ent->size = VMALLOC_START - ent->addr;
4171     }
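The kcore hunk above first validates the pfn and then trims the region so it neither wraps the address space nor crosses into the vmalloc area. The same clamping logic, lifted into a standalone sketch with illustrative names (clamp_region is not kernel API):

/* Standalone sketch of the trimming above: clamp [addr, addr + size) so it
 * neither wraps past ULONG_MAX nor crosses a boundary such as VMALLOC_START.
 * Names and the boundary value are illustrative only. */
#include <limits.h>
#include <stdio.h>

static unsigned long clamp_region(unsigned long addr, unsigned long size,
				  unsigned long boundary)
{
	/* cut not-mapped area / avoid wraparound */
	if (ULONG_MAX - addr < size)
		size = ULONG_MAX - addr;

	/* cut the part that would cross the boundary from below */
	if (boundary > addr && boundary - addr < size)
		size = boundary - addr;

	return size;
}

int main(void)
{
	unsigned long boundary = 0xc0000000UL;		/* illustrative */
	unsigned long addr = boundary - 0x1000;

	/* a 0x4000-byte region starting 0x1000 below the boundary */
	printf("clamped size = 0x%lx\n", clamp_region(addr, 0x4000, boundary));
	return 0;
}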
4172     diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
4173     index 6744bd706ecf..4cd8328e4039 100644
4174     --- a/fs/proc/task_mmu.c
4175     +++ b/fs/proc/task_mmu.c
4176     @@ -1327,9 +1327,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
4177     #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
4178     else if (is_swap_pmd(pmd)) {
4179     swp_entry_t entry = pmd_to_swp_entry(pmd);
4180     + unsigned long offset = swp_offset(entry);
4181    
4182     + offset += (addr & ~PMD_MASK) >> PAGE_SHIFT;
4183     frame = swp_type(entry) |
4184     - (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
4185     + (offset << MAX_SWAPFILES_SHIFT);
4186     flags |= PM_SWAP;
4187     if (pmd_swp_soft_dirty(pmd))
4188     flags |= PM_SOFT_DIRTY;
4189     @@ -1349,6 +1351,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
4190     break;
4191     if (pm->show_pfn && (flags & PM_PRESENT))
4192     frame++;
4193     + else if (flags & PM_SWAP)
4194     + frame += (1 << MAX_SWAPFILES_SHIFT);
4195     }
4196     spin_unlock(ptl);
4197     return err;
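The pagemap hunk above folds the intra-PMD page index into the swap offset so every virtual page of a swapped-out THP reports a distinct entry. As a consumer-side sketch, here is a small reader of /proc/self/pagemap; the bit layout (bit 63 present, bit 62 swapped, swap type in bits 0-4, swap offset in bits 5-54) is quoted from memory of Documentation/vm/pagemap.txt, so treat it as an assumption, and note the PFN may read back as 0 without privilege.

/* Sketch: decode one /proc/self/pagemap entry for a mapped page.
 * Assumed layout: bit 63 = present, bit 62 = swapped; if present,
 * bits 0-54 = PFN; if swapped, bits 0-4 = type, bits 5-54 = offset. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	void *buf = malloc(pagesize);
	uint64_t ent;
	int fd;

	if (!buf)
		return 1;
	*(volatile char *)buf = 1;	/* fault the page in */

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		perror("open pagemap");
		return 1;
	}
	if (pread(fd, &ent, sizeof(ent),
		  ((uintptr_t)buf / pagesize) * sizeof(ent)) != sizeof(ent)) {
		perror("pread");
		return 1;
	}

	if (ent & (1ULL << 63))		/* PFN may be 0 without privilege */
		printf("present, pfn 0x%llx\n",
		       (unsigned long long)(ent & ((1ULL << 55) - 1)));
	else if (ent & (1ULL << 62))
		printf("swapped, type %llu offset 0x%llx\n",
		       (unsigned long long)(ent & 0x1f),
		       (unsigned long long)((ent >> 5) & ((1ULL << 50) - 1)));
	else
		printf("not present\n");

	close(fd);
	return 0;
}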
4198     diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
4199     index 86eb33f67618..2f4e79fe7b86 100644
4200     --- a/include/linux/clk-provider.h
4201     +++ b/include/linux/clk-provider.h
4202     @@ -752,6 +752,9 @@ int __clk_mux_determine_rate(struct clk_hw *hw,
4203     int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
4204     int __clk_mux_determine_rate_closest(struct clk_hw *hw,
4205     struct clk_rate_request *req);
4206     +int clk_mux_determine_rate_flags(struct clk_hw *hw,
4207     + struct clk_rate_request *req,
4208     + unsigned long flags);
4209     void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
4210     void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
4211     unsigned long max_rate);
4212     diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
4213     index 0c0146e7e274..59fbe005f204 100644
4214     --- a/include/linux/ethtool.h
4215     +++ b/include/linux/ethtool.h
4216     @@ -300,6 +300,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
4217     * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
4218     * instead of the latter), any change to them will be overwritten
4219     * by kernel. Returns a negative error code or zero.
4220     + * @get_fecparam: Get the network device Forward Error Correction parameters.
4221     + * @set_fecparam: Set the network device Forward Error Correction parameters.
4222     *
4223     * All operations are optional (i.e. the function pointer may be set
4224     * to %NULL) and callers must take this into account. Callers must
4225     diff --git a/include/linux/genhd.h b/include/linux/genhd.h
4226     index 5ade8f2a6987..550fa358893a 100644
4227     --- a/include/linux/genhd.h
4228     +++ b/include/linux/genhd.h
4229     @@ -365,7 +365,9 @@ static inline void free_part_stats(struct hd_struct *part)
4230     part_stat_add(cpu, gendiskp, field, -subnd)
4231    
4232     void part_in_flight(struct request_queue *q, struct hd_struct *part,
4233     - unsigned int inflight[2]);
4234     + unsigned int inflight[2]);
4235     +void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
4236     + unsigned int inflight[2]);
4237     void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
4238     int rw);
4239     void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
4240     diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
4241     index 39f0489eb137..b81d458ad4fb 100644
4242     --- a/include/linux/kvm_host.h
4243     +++ b/include/linux/kvm_host.h
4244     @@ -1044,13 +1044,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
4245    
4246     #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4247    
4248     -#ifdef CONFIG_S390
4249     -#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
4250     -#elif defined(CONFIG_ARM64)
4251     -#define KVM_MAX_IRQ_ROUTES 4096
4252     -#else
4253     -#define KVM_MAX_IRQ_ROUTES 1024
4254     -#endif
4255     +#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
4256    
4257     bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
4258     int kvm_set_irq_routing(struct kvm *kvm,
4259     diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
4260     index b5b43f94f311..01b990e4b228 100644
4261     --- a/include/linux/mtd/map.h
4262     +++ b/include/linux/mtd/map.h
4263     @@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd);
4264     ({ \
4265     int i, ret = 1; \
4266     for (i = 0; i < map_words(map); i++) { \
4267     - if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \
4268     + if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
4269     ret = 0; \
4270     break; \
4271     } \
4272     diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
4273     index b1f37a89e368..79b99d653e03 100644
4274     --- a/include/linux/percpu-rwsem.h
4275     +++ b/include/linux/percpu-rwsem.h
4276     @@ -133,7 +133,7 @@ static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
4277     lock_release(&sem->rw_sem.dep_map, 1, ip);
4278     #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
4279     if (!read)
4280     - sem->rw_sem.owner = NULL;
4281     + sem->rw_sem.owner = RWSEM_OWNER_UNKNOWN;
4282     #endif
4283     }
4284    
4285     @@ -141,6 +141,10 @@ static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
4286     bool read, unsigned long ip)
4287     {
4288     lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
4289     +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
4290     + if (!read)
4291     + sem->rw_sem.owner = current;
4292     +#endif
4293     }
4294    
4295     #endif
4296     diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
4297     index dfa34d803439..c427ffaa4904 100644
4298     --- a/include/linux/rwsem.h
4299     +++ b/include/linux/rwsem.h
4300     @@ -44,6 +44,12 @@ struct rw_semaphore {
4301     #endif
4302     };
4303    
4304     +/*
4305     + * Setting bit 0 of the owner field with other non-zero bits will indicate
4306     + * that the rwsem is writer-owned with an unknown owner.
4307     + */
4308     +#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L)
4309     +
4310     extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
4311     extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
4312     extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
4313     diff --git a/include/linux/sched.h b/include/linux/sched.h
4314     index 2c9790b43881..e04919aa8201 100644
4315     --- a/include/linux/sched.h
4316     +++ b/include/linux/sched.h
4317     @@ -113,17 +113,36 @@ struct task_group;
4318    
4319     #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
4320    
4321     +/*
4322     + * Special states are those that do not use the normal wait-loop pattern. See
4323     + * the comment with set_special_state().
4324     + */
4325     +#define is_special_task_state(state) \
4326     + ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
4327     +
4328     #define __set_current_state(state_value) \
4329     do { \
4330     + WARN_ON_ONCE(is_special_task_state(state_value));\
4331     current->task_state_change = _THIS_IP_; \
4332     current->state = (state_value); \
4333     } while (0)
4334     +
4335     #define set_current_state(state_value) \
4336     do { \
4337     + WARN_ON_ONCE(is_special_task_state(state_value));\
4338     current->task_state_change = _THIS_IP_; \
4339     smp_store_mb(current->state, (state_value)); \
4340     } while (0)
4341    
4342     +#define set_special_state(state_value) \
4343     + do { \
4344     + unsigned long flags; /* may shadow */ \
4345     + WARN_ON_ONCE(!is_special_task_state(state_value)); \
4346     + raw_spin_lock_irqsave(&current->pi_lock, flags); \
4347     + current->task_state_change = _THIS_IP_; \
4348     + current->state = (state_value); \
4349     + raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
4350     + } while (0)
4351     #else
4352     /*
4353     * set_current_state() includes a barrier so that the write of current->state
4354     @@ -145,8 +164,8 @@ struct task_group;
4355     *
4356     * The above is typically ordered against the wakeup, which does:
4357     *
4358     - * need_sleep = false;
4359     - * wake_up_state(p, TASK_UNINTERRUPTIBLE);
4360     + * need_sleep = false;
4361     + * wake_up_state(p, TASK_UNINTERRUPTIBLE);
4362     *
4363     * Where wake_up_state() (and all other wakeup primitives) imply enough
4364     * barriers to order the store of the variable against wakeup.
4365     @@ -155,12 +174,33 @@ struct task_group;
4366     * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
4367     * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
4368     *
4369     - * This is obviously fine, since they both store the exact same value.
4370     + * However, with slightly different timing the wakeup TASK_RUNNING store can
4371     + * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not
4372     + * a problem either because that will result in one extra go around the loop
4373     + * and our @cond test will save the day.
4374     *
4375     * Also see the comments of try_to_wake_up().
4376     */
4377     -#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
4378     -#define set_current_state(state_value) smp_store_mb(current->state, (state_value))
4379     +#define __set_current_state(state_value) \
4380     + current->state = (state_value)
4381     +
4382     +#define set_current_state(state_value) \
4383     + smp_store_mb(current->state, (state_value))
4384     +
4385     +/*
4386     + * set_special_state() should be used for those states when the blocking task
4387     + * can not use the regular condition based wait-loop. In that case we must
4388     + * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
4389     + * will not collide with our state change.
4390     + */
4391     +#define set_special_state(state_value) \
4392     + do { \
4393     + unsigned long flags; /* may shadow */ \
4394     + raw_spin_lock_irqsave(&current->pi_lock, flags); \
4395     + current->state = (state_value); \
4396     + raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
4397     + } while (0)
4398     +
4399     #endif
4400    
4401     /* Task command name length: */
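The sched.h hunk above rests on the observation that the normal wait pattern tolerates a racing TASK_RUNNING store because the loop re-checks its condition, while special states must serialize through pi_lock. A userspace analogue of that first half, using pthreads, where a stray or extra wakeup only costs one more pass around the loop:

/* Userspace analogue of the condition-based wait loop the comment relies
 * on: the waiter re-checks the condition after every wakeup, so an extra
 * wakeup is harmless. Illustrative only, not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int cond;			/* the "@cond" being waited for */

static void *waker(void *arg)
{
	pthread_mutex_lock(&lock);
	cond = 1;			/* need_sleep = false;       */
	pthread_cond_signal(&cv);	/* wake_up_state(p, ...);    */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);

	pthread_mutex_lock(&lock);
	for (;;) {			/* for (;;) { set_current_state(...); */
		if (cond)		/*	if (cond) break;              */
			break;
		pthread_cond_wait(&cv, &lock);	/* schedule(); */
	}
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("condition observed\n");
	return 0;
}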
4402     diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
4403     index 0aa4548fb492..fbf86ecd149d 100644
4404     --- a/include/linux/sched/signal.h
4405     +++ b/include/linux/sched/signal.h
4406     @@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void)
4407     {
4408     spin_lock_irq(&current->sighand->siglock);
4409     if (current->jobctl & JOBCTL_STOP_DEQUEUED)
4410     - __set_current_state(TASK_STOPPED);
4411     + set_special_state(TASK_STOPPED);
4412     spin_unlock_irq(&current->sighand->siglock);
4413    
4414     schedule();
4415     diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
4416     index e8f0f852968f..c0c5c5b73dc0 100644
4417     --- a/include/linux/stringhash.h
4418     +++ b/include/linux/stringhash.h
4419     @@ -50,9 +50,9 @@ partial_name_hash(unsigned long c, unsigned long prevhash)
4420     * losing bits). This also has the property (wanted by the dcache)
4421     * that the msbits make a good hash table index.
4422     */
4423     -static inline unsigned long end_name_hash(unsigned long hash)
4424     +static inline unsigned int end_name_hash(unsigned long hash)
4425     {
4426     - return __hash_32((unsigned int)hash);
4427     + return hash_long(hash, 32);
4428     }
4429    
4430     /*
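The stringhash hunk above folds the full unsigned long hash with hash_long() instead of truncating to 32 bits first, so on 64-bit systems the upper half of the accumulated hash still contributes to the index. The underlying primitive is multiplicative (Fibonacci) hashing; a standalone sketch follows. The golden-ratio constant is quoted from memory of include/linux/hash.h and should be treated as an assumption.

/* Sketch of a multiplicative hash fold in the style of hash_long()/hash_64():
 * multiply by a golden-ratio constant and keep the top bits. The constant
 * is my recollection of GOLDEN_RATIO_64; verify against include/linux/hash.h. */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t hash_64_bits(uint64_t val, unsigned int bits)
{
	return (uint32_t)((val * 0x61C8864680B583EBULL) >> (64 - bits));
}

int main(void)
{
	uint64_t h = 0x1234567890abcdefULL;	/* an accumulated name hash */

	printf("folded to 32 bits: 0x%08x\n", hash_64_bits(h, 32));
	return 0;
}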
4431     diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
4432     index cb979ad90401..b86c4c367004 100644
4433     --- a/include/soc/bcm2835/raspberrypi-firmware.h
4434     +++ b/include/soc/bcm2835/raspberrypi-firmware.h
4435     @@ -125,13 +125,13 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
4436     static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
4437     void *data, size_t len)
4438     {
4439     - return 0;
4440     + return -ENOSYS;
4441     }
4442    
4443     static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
4444     void *data, size_t tag_size)
4445     {
4446     - return 0;
4447     + return -ENOSYS;
4448     }
4449    
4450     static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
4451     diff --git a/init/main.c b/init/main.c
4452     index 2d355a61dfc5..0d88f37febcb 100644
4453     --- a/init/main.c
4454     +++ b/init/main.c
4455     @@ -974,6 +974,13 @@ __setup("rodata=", set_debug_rodata);
4456     static void mark_readonly(void)
4457     {
4458     if (rodata_enabled) {
4459     + /*
4460     + * load_module() results in W+X mappings, which are cleaned up
4461     + * with call_rcu_sched(). Let's make sure that queued work is
4462     + * flushed so that we don't hit false positives looking for
4463     + * insecure pages which are W+X.
4464     + */
4465     + rcu_barrier_sched();
4466     mark_rodata_ro();
4467     rodata_test();
4468     } else
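The init/main.c hunk above waits for queued deferred work to finish before scanning for W+X mappings, so stale module-load mappings cannot produce false positives. The general shape, queue deferred cleanups and run a barrier that drains them before validating, in a small pthread sketch; every name here is made up for illustration, none of it is kernel API.

/* Illustrative "barrier before validate" pattern: cleanups are queued
 * asynchronously (loosely like call_rcu_sched()) and the checker waits for
 * all of them (loosely like rcu_barrier_sched()) before asserting state. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int pending;			/* queued-but-not-finished cleanups */

static void *deferred_cleanup(void *arg)
{
	usleep(1000);			/* the cleanup runs "later" */
	pthread_mutex_lock(&lock);
	if (--pending == 0)
		pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void cleanup_barrier(void)
{
	pthread_mutex_lock(&lock);
	while (pending)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);
	pending++;			/* queue one deferred cleanup */
	pthread_mutex_unlock(&lock);
	pthread_create(&t, NULL, deferred_cleanup, NULL);

	cleanup_barrier();		/* flush before checking the invariant */
	printf("no pending cleanups, safe to validate\n");

	pthread_join(t, NULL);
	return 0;
}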
4469     diff --git a/kernel/kthread.c b/kernel/kthread.c
4470     index 1c19edf82427..1ef8f3a5b072 100644
4471     --- a/kernel/kthread.c
4472     +++ b/kernel/kthread.c
4473     @@ -169,12 +169,13 @@ void *kthread_probe_data(struct task_struct *task)
4474    
4475     static void __kthread_parkme(struct kthread *self)
4476     {
4477     - __set_current_state(TASK_PARKED);
4478     - while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
4479     + for (;;) {
4480     + set_current_state(TASK_PARKED);
4481     + if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
4482     + break;
4483     if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
4484     complete(&self->parked);
4485     schedule();
4486     - __set_current_state(TASK_PARKED);
4487     }
4488     clear_bit(KTHREAD_IS_PARKED, &self->flags);
4489     __set_current_state(TASK_RUNNING);
4490     diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
4491     index e795908f3607..a90336779375 100644
4492     --- a/kernel/locking/rwsem-xadd.c
4493     +++ b/kernel/locking/rwsem-xadd.c
4494     @@ -352,16 +352,15 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
4495     struct task_struct *owner;
4496     bool ret = true;
4497    
4498     + BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));
4499     +
4500     if (need_resched())
4501     return false;
4502    
4503     rcu_read_lock();
4504     owner = READ_ONCE(sem->owner);
4505     - if (!rwsem_owner_is_writer(owner)) {
4506     - /*
4507     - * Don't spin if the rwsem is readers owned.
4508     - */
4509     - ret = !rwsem_owner_is_reader(owner);
4510     + if (!owner || !is_rwsem_owner_spinnable(owner)) {
4511     + ret = !owner; /* !owner is spinnable */
4512     goto done;
4513     }
4514    
4515     @@ -382,11 +381,11 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
4516     {
4517     struct task_struct *owner = READ_ONCE(sem->owner);
4518    
4519     - if (!rwsem_owner_is_writer(owner))
4520     - goto out;
4521     + if (!is_rwsem_owner_spinnable(owner))
4522     + return false;
4523    
4524     rcu_read_lock();
4525     - while (sem->owner == owner) {
4526     + while (owner && (READ_ONCE(sem->owner) == owner)) {
4527     /*
4528     * Ensure we emit the owner->on_cpu, dereference _after_
4529     * checking sem->owner still matches owner, if that fails,
4530     @@ -408,12 +407,12 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
4531     cpu_relax();
4532     }
4533     rcu_read_unlock();
4534     -out:
4535     +
4536     /*
4537     * If there is a new owner or the owner is not set, we continue
4538     * spinning.
4539     */
4540     - return !rwsem_owner_is_reader(READ_ONCE(sem->owner));
4541     + return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
4542     }
4543    
4544     static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
4545     diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
4546     index a6c76a4832b4..22bd01a7dcaa 100644
4547     --- a/kernel/locking/rwsem.c
4548     +++ b/kernel/locking/rwsem.c
4549     @@ -201,5 +201,3 @@ void up_read_non_owner(struct rw_semaphore *sem)
4550     EXPORT_SYMBOL(up_read_non_owner);
4551    
4552     #endif
4553     -
4554     -
4555     diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
4556     index a883b8f1fdc6..410ee7b9ac2c 100644
4557     --- a/kernel/locking/rwsem.h
4558     +++ b/kernel/locking/rwsem.h
4559     @@ -1,20 +1,24 @@
4560     /* SPDX-License-Identifier: GPL-2.0 */
4561     /*
4562     * The owner field of the rw_semaphore structure will be set to
4563     - * RWSEM_READ_OWNED when a reader grabs the lock. A writer will clear
4564     + * RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear
4565     * the owner field when it unlocks. A reader, on the other hand, will
4566     * not touch the owner field when it unlocks.
4567     *
4568     - * In essence, the owner field now has the following 3 states:
4569     + * In essence, the owner field now has the following 4 states:
4570     * 1) 0
4571     * - lock is free or the owner hasn't set the field yet
4572     * 2) RWSEM_READER_OWNED
4573     * - lock is currently or previously owned by readers (lock is free
4574     * or not set by owner yet)
4575     - * 3) Other non-zero value
4576     - * - a writer owns the lock
4577     + * 3) RWSEM_ANONYMOUSLY_OWNED bit set with some other bits set as well
4578     + * - lock is owned by an anonymous writer, so spinning on the lock
4579     + * owner should be disabled.
4580     + * 4) Other non-zero value
4581     + * - a writer owns the lock and other writers can spin on the lock owner.
4582     */
4583     -#define RWSEM_READER_OWNED ((struct task_struct *)1UL)
4584     +#define RWSEM_ANONYMOUSLY_OWNED (1UL << 0)
4585     +#define RWSEM_READER_OWNED ((struct task_struct *)RWSEM_ANONYMOUSLY_OWNED)
4586    
4587     #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
4588     /*
4589     @@ -45,14 +49,22 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
4590     WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
4591     }
4592    
4593     -static inline bool rwsem_owner_is_writer(struct task_struct *owner)
4594     +/*
4595     + * Return true if the a rwsem waiter can spin on the rwsem's owner
4596     + * and steal the lock, i.e. the lock is not anonymously owned.
4597     + * N.B. !owner is considered spinnable.
4598     + */
4599     +static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
4600     {
4601     - return owner && owner != RWSEM_READER_OWNED;
4602     + return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
4603     }
4604    
4605     -static inline bool rwsem_owner_is_reader(struct task_struct *owner)
4606     +/*
4607     + * Return true if rwsem is owned by an anonymous writer or readers.
4608     + */
4609     +static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
4610     {
4611     - return owner == RWSEM_READER_OWNED;
4612     + return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
4613     }
4614     #else
4615     static inline void rwsem_set_owner(struct rw_semaphore *sem)
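The rwsem.h hunk above encodes "anonymously owned" in bit 0 of the owner pointer, so a single bit test covers both the reader-owned case and the writer-owned-with-unknown-owner case. The pointer-tagging trick in isolation, with illustrative names:

/* Standalone sketch of the low-bit tagging used above: task pointers are at
 * least word-aligned, so bit 0 is free to mean "anonymously owned".
 * READER_OWNED is the bare tag; OWNER_UNKNOWN has all other bits set too. */
#include <stdio.h>

struct task { int dummy; };

#define ANON_BIT	1UL
#define READER_OWNED	((struct task *)ANON_BIT)
#define OWNER_UNKNOWN	((struct task *)-1L)

static int owner_is_spinnable(struct task *owner)
{
	/* NULL and plain writer pointers are spinnable; tagged values are not */
	return !((unsigned long)owner & ANON_BIT);
}

int main(void)
{
	struct task writer;

	printf("NULL          -> %d\n", owner_is_spinnable(NULL));
	printf("writer        -> %d\n", owner_is_spinnable(&writer));
	printf("READER_OWNED  -> %d\n", owner_is_spinnable(READER_OWNED));
	printf("OWNER_UNKNOWN -> %d\n", owner_is_spinnable(OWNER_UNKNOWN));
	return 0;
}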
4616     diff --git a/kernel/module.c b/kernel/module.c
4617     index 690c0651c40f..321b0b1f87e7 100644
4618     --- a/kernel/module.c
4619     +++ b/kernel/module.c
4620     @@ -3506,6 +3506,11 @@ static noinline int do_init_module(struct module *mod)
4621     * walking this with preempt disabled. In all the failure paths, we
4622     * call synchronize_sched(), but we don't want to slow down the success
4623     * path, so use actual RCU here.
4624     + * Note that module_alloc() on most architectures creates W+X page
4625     + * mappings which won't be cleaned up until do_free_init() runs. Any
4626     + * code such as mark_rodata_ro() which depends on those mappings to
4627     + * be cleaned up needs to sync with the queued work - ie
4628     + * rcu_barrier_sched()
4629     */
4630     call_rcu_sched(&freeinit->rcu, do_free_init);
4631     mutex_unlock(&module_mutex);
4632     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
4633     index 8cf36b30a006..f287dcbe8cb2 100644
4634     --- a/kernel/sched/core.c
4635     +++ b/kernel/sched/core.c
4636     @@ -3374,23 +3374,8 @@ static void __sched notrace __schedule(bool preempt)
4637    
4638     void __noreturn do_task_dead(void)
4639     {
4640     - /*
4641     - * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
4642     - * when the following two conditions become true.
4643     - * - There is race condition of mmap_sem (It is acquired by
4644     - * exit_mm()), and
4645     - * - SMI occurs before setting TASK_RUNINNG.
4646     - * (or hypervisor of virtual machine switches to other guest)
4647     - * As a result, we may become TASK_RUNNING after becoming TASK_DEAD
4648     - *
4649     - * To avoid it, we have to wait for releasing tsk->pi_lock which
4650     - * is held by try_to_wake_up()
4651     - */
4652     - raw_spin_lock_irq(&current->pi_lock);
4653     - raw_spin_unlock_irq(&current->pi_lock);
4654     -
4655     /* Causes final put_task_struct in finish_task_switch(): */
4656     - __set_current_state(TASK_DEAD);
4657     + set_special_state(TASK_DEAD);
4658    
4659     /* Tell freezer to ignore us: */
4660     current->flags |= PF_NOFREEZE;
4661     diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
4662     index 4ae5c1ea90e2..501f17c642ab 100644
4663     --- a/kernel/sched/deadline.c
4664     +++ b/kernel/sched/deadline.c
4665     @@ -1084,7 +1084,7 @@ extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
4666     * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
4667     * So, overflow is not an issue here.
4668     */
4669     -u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
4670     +static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
4671     {
4672     u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
4673     u64 u_act;
4674     @@ -2655,8 +2655,6 @@ bool dl_cpu_busy(unsigned int cpu)
4675     #endif
4676    
4677     #ifdef CONFIG_SCHED_DEBUG
4678     -extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
4679     -
4680     void print_dl_stats(struct seq_file *m, int cpu)
4681     {
4682     print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
4683     diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
4684     index 113eaeb6c0f8..bba2217652ff 100644
4685     --- a/kernel/sched/rt.c
4686     +++ b/kernel/sched/rt.c
4687     @@ -2689,8 +2689,6 @@ int sched_rr_handler(struct ctl_table *table, int write,
4688     }
4689    
4690     #ifdef CONFIG_SCHED_DEBUG
4691     -extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
4692     -
4693     void print_rt_stats(struct seq_file *m, int cpu)
4694     {
4695     rt_rq_iter_t iter;
4696     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
4697     index 307c35d33660..b29376169f3f 100644
4698     --- a/kernel/sched/sched.h
4699     +++ b/kernel/sched/sched.h
4700     @@ -1969,8 +1969,9 @@ extern bool sched_debug_enabled;
4701     extern void print_cfs_stats(struct seq_file *m, int cpu);
4702     extern void print_rt_stats(struct seq_file *m, int cpu);
4703     extern void print_dl_stats(struct seq_file *m, int cpu);
4704     -extern void
4705     -print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
4706     +extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
4707     +extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
4708     +extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
4709     #ifdef CONFIG_NUMA_BALANCING
4710     extern void
4711     show_numa_stats(struct task_struct *p, struct seq_file *m);
4712     diff --git a/kernel/signal.c b/kernel/signal.c
4713     index 6895f6bb98a7..4439ba9dc5d9 100644
4714     --- a/kernel/signal.c
4715     +++ b/kernel/signal.c
4716     @@ -1828,14 +1828,27 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
4717     return;
4718     }
4719    
4720     + set_special_state(TASK_TRACED);
4721     +
4722     /*
4723     * We're committing to trapping. TRACED should be visible before
4724     * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
4725     * Also, transition to TRACED and updates to ->jobctl should be
4726     * atomic with respect to siglock and should be done after the arch
4727     * hook as siglock is released and regrabbed across it.
4728     + *
4729     + * TRACER TRACEE
4730     + *
4731     + * ptrace_attach()
4732     + * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
4733     + * do_wait()
4734     + * set_current_state() smp_wmb();
4735     + * ptrace_do_wait()
4736     + * wait_task_stopped()
4737     + * task_stopped_code()
4738     + * [L] task_is_traced() [S] task_clear_jobctl_trapping();
4739     */
4740     - set_current_state(TASK_TRACED);
4741     + smp_wmb();
4742    
4743     current->last_siginfo = info;
4744     current->exit_code = exit_code;
4745     @@ -2043,7 +2056,7 @@ static bool do_signal_stop(int signr)
4746     if (task_participate_group_stop(current))
4747     notify = CLD_STOPPED;
4748    
4749     - __set_current_state(TASK_STOPPED);
4750     + set_special_state(TASK_STOPPED);
4751     spin_unlock_irq(&current->sighand->siglock);
4752    
4753     /*
4754     diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
4755     index b7591261652d..64c0291b579c 100644
4756     --- a/kernel/stop_machine.c
4757     +++ b/kernel/stop_machine.c
4758     @@ -21,6 +21,7 @@
4759     #include <linux/smpboot.h>
4760     #include <linux/atomic.h>
4761     #include <linux/nmi.h>
4762     +#include <linux/sched/wake_q.h>
4763    
4764     /*
4765     * Structure to determine completion condition and record errors. May
4766     @@ -65,27 +66,31 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
4767     }
4768    
4769     static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
4770     - struct cpu_stop_work *work)
4771     + struct cpu_stop_work *work,
4772     + struct wake_q_head *wakeq)
4773     {
4774     list_add_tail(&work->list, &stopper->works);
4775     - wake_up_process(stopper->thread);
4776     + wake_q_add(wakeq, stopper->thread);
4777     }
4778    
4779     /* queue @work to @stopper. if offline, @work is completed immediately */
4780     static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
4781     {
4782     struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
4783     + DEFINE_WAKE_Q(wakeq);
4784     unsigned long flags;
4785     bool enabled;
4786    
4787     spin_lock_irqsave(&stopper->lock, flags);
4788     enabled = stopper->enabled;
4789     if (enabled)
4790     - __cpu_stop_queue_work(stopper, work);
4791     + __cpu_stop_queue_work(stopper, work, &wakeq);
4792     else if (work->done)
4793     cpu_stop_signal_done(work->done);
4794     spin_unlock_irqrestore(&stopper->lock, flags);
4795    
4796     + wake_up_q(&wakeq);
4797     +
4798     return enabled;
4799     }
4800    
4801     @@ -229,6 +234,7 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
4802     {
4803     struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
4804     struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
4805     + DEFINE_WAKE_Q(wakeq);
4806     int err;
4807     retry:
4808     spin_lock_irq(&stopper1->lock);
4809     @@ -252,8 +258,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
4810     goto unlock;
4811    
4812     err = 0;
4813     - __cpu_stop_queue_work(stopper1, work1);
4814     - __cpu_stop_queue_work(stopper2, work2);
4815     + __cpu_stop_queue_work(stopper1, work1, &wakeq);
4816     + __cpu_stop_queue_work(stopper2, work2, &wakeq);
4817     unlock:
4818     spin_unlock(&stopper2->lock);
4819     spin_unlock_irq(&stopper1->lock);
4820     @@ -263,6 +269,9 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
4821     cpu_relax();
4822     goto retry;
4823     }
4824     +
4825     + wake_up_q(&wakeq);
4826     +
4827     return err;
4828     }
4829     /**
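The stop_machine hunk above stops calling wake_up_process() while the stopper lock is held and instead collects the wakeups on a wake_q, issuing them only after the lock is dropped, so the woken thread is not sent straight into contention on a lock we still hold. The same "record under the lock, wake after unlock" shape as a pthread sketch:

/* Pthread sketch of deferring the wakeup until after the lock is released,
 * in the spirit of wake_q_add()/wake_up_q(). Illustrative only. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int work_queued;

static void *stopper_thread(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!work_queued)
		pthread_cond_wait(&cv, &lock);
	work_queued = 0;
	pthread_mutex_unlock(&lock);
	printf("stopper: ran queued work\n");
	return NULL;
}

int main(void)
{
	pthread_t t;
	int need_wake = 0;

	pthread_create(&t, NULL, stopper_thread, NULL);

	pthread_mutex_lock(&lock);
	work_queued = 1;	/* __cpu_stop_queue_work(): queue under lock */
	need_wake = 1;		/* wake_q_add(): remember whom to wake       */
	pthread_mutex_unlock(&lock);

	if (need_wake)		/* wake_up_q(): wake after dropping the lock */
		pthread_cond_signal(&cv);

	pthread_join(t, NULL);
	return 0;
}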
4830     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4831     index 546cd481a2ca..942d9342b63b 100644
4832     --- a/mm/memcontrol.c
4833     +++ b/mm/memcontrol.c
4834     @@ -2205,7 +2205,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
4835     {
4836     struct memcg_kmem_cache_create_work *cw;
4837    
4838     - cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
4839     + cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
4840     if (!cw)
4841     return;
4842    
4843     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
4844     index 125b49c166a4..f0caff3139ed 100644
4845     --- a/net/ipv4/tcp_input.c
4846     +++ b/net/ipv4/tcp_input.c
4847     @@ -647,7 +647,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
4848     sk->sk_rcvbuf = rcvbuf;
4849    
4850     /* Make the window clamp follow along. */
4851     - tp->window_clamp = rcvwin;
4852     + tp->window_clamp = tcp_win_from_space(rcvbuf);
4853     }
4854     }
4855     tp->rcvq_space.space = copied;
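The tcp_input.c hunk above clamps the window to the usable share of the new receive buffer rather than to the raw rcvwin estimate. As I recall the 4.14 helper, tcp_win_from_space() reserves 1/2^tcp_adv_win_scale of the buffer for overhead when the scale is positive; a sketch of that computation, treating the exact formula as an assumption to verify against the kernel source:

/* Sketch of tcp_win_from_space() as I recall it from 4.14: a positive
 * tcp_adv_win_scale reserves space/2^scale for overhead, a non-positive
 * one keeps only space/2^-scale. Treat the formula as an assumption. */
#include <stdio.h>

static int win_from_space(int space, int adv_win_scale)
{
	return adv_win_scale <= 0 ?
		space >> -adv_win_scale :
		space - (space >> adv_win_scale);
}

int main(void)
{
	int rcvbuf = 1 << 20;	/* 1 MiB receive buffer */

	/* with tcp_adv_win_scale == 1, half the buffer is usable window */
	printf("window clamp = %d\n", win_from_space(rcvbuf, 1));
	return 0;
}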
4856     diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
4857     index 6acb2eecd986..c764c2a77d94 100644
4858     --- a/net/ipv6/netfilter/Kconfig
4859     +++ b/net/ipv6/netfilter/Kconfig
4860     @@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6
4861     fields such as the source, destination, flowlabel, hop-limit and
4862     the packet mark.
4863    
4864     +if NF_NAT_IPV6
4865     +
4866     +config NFT_CHAIN_NAT_IPV6
4867     + tristate "IPv6 nf_tables nat chain support"
4868     + help
4869     + This option enables the "nat" chain for IPv6 in nf_tables. This
4870     + chain type is used to perform Network Address Translation (NAT)
4871     + packet transformations such as the source, destination address and
4872     + source and destination ports.
4873     +
4874     +config NFT_MASQ_IPV6
4875     + tristate "IPv6 masquerade support for nf_tables"
4876     + depends on NFT_MASQ
4877     + select NF_NAT_MASQUERADE_IPV6
4878     + help
4879     + This is the expression that provides IPv4 masquerading support for
4880     + nf_tables.
4881     +
4882     +config NFT_REDIR_IPV6
4883     + tristate "IPv6 redirect support for nf_tables"
4884     + depends on NFT_REDIR
4885     + select NF_NAT_REDIRECT
4886     + help
4887     + This is the expression that provides IPv4 redirect support for
4888     + nf_tables.
4889     +
4890     +endif # NF_NAT_IPV6
4891     +
4892     config NFT_REJECT_IPV6
4893     select NF_REJECT_IPV6
4894     default NFT_REJECT
4895     @@ -99,39 +127,12 @@ config NF_NAT_IPV6
4896    
4897     if NF_NAT_IPV6
4898    
4899     -config NFT_CHAIN_NAT_IPV6
4900     - depends on NF_TABLES_IPV6
4901     - tristate "IPv6 nf_tables nat chain support"
4902     - help
4903     - This option enables the "nat" chain for IPv6 in nf_tables. This
4904     - chain type is used to perform Network Address Translation (NAT)
4905     - packet transformations such as the source, destination address and
4906     - source and destination ports.
4907     -
4908     config NF_NAT_MASQUERADE_IPV6
4909     tristate "IPv6 masquerade support"
4910     help
4911     This is the kernel functionality to provide NAT in the masquerade
4912     flavour (automatic source address selection) for IPv6.
4913    
4914     -config NFT_MASQ_IPV6
4915     - tristate "IPv6 masquerade support for nf_tables"
4916     - depends on NF_TABLES_IPV6
4917     - depends on NFT_MASQ
4918     - select NF_NAT_MASQUERADE_IPV6
4919     - help
4920     - This is the expression that provides IPv4 masquerading support for
4921     - nf_tables.
4922     -
4923     -config NFT_REDIR_IPV6
4924     - tristate "IPv6 redirect support for nf_tables"
4925     - depends on NF_TABLES_IPV6
4926     - depends on NFT_REDIR
4927     - select NF_NAT_REDIRECT
4928     - help
4929     - This is the expression that provides IPv4 redirect support for
4930     - nf_tables.
4931     -
4932     endif # NF_NAT_IPV6
4933    
4934     config IP6_NF_IPTABLES
4935     diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
4936     index bef516ec47f9..197947a07f83 100644
4937     --- a/net/mac80211/agg-tx.c
4938     +++ b/net/mac80211/agg-tx.c
4939     @@ -8,6 +8,7 @@
4940     * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
4941     * Copyright 2007-2010, Intel Corporation
4942     * Copyright(c) 2015-2017 Intel Deutschland GmbH
4943     + * Copyright (C) 2018 Intel Corporation
4944     *
4945     * This program is free software; you can redistribute it and/or modify
4946     * it under the terms of the GNU General Public License version 2 as
4947     @@ -987,6 +988,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
4948    
4949     sta->ampdu_mlme.addba_req_num[tid] = 0;
4950    
4951     + tid_tx->timeout =
4952     + le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);
4953     +
4954     if (tid_tx->timeout) {
4955     mod_timer(&tid_tx->session_timer,
4956     TU_TO_EXP_TIME(tid_tx->timeout));
4957     diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
4958     index 9115cc52ce83..052dbd4fa366 100644
4959     --- a/net/mac80211/mlme.c
4960     +++ b/net/mac80211/mlme.c
4961     @@ -35,6 +35,7 @@
4962     #define IEEE80211_AUTH_TIMEOUT (HZ / 5)
4963     #define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2)
4964     #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
4965     +#define IEEE80211_AUTH_TIMEOUT_SAE (HZ * 2)
4966     #define IEEE80211_AUTH_MAX_TRIES 3
4967     #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
4968     #define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
4969     @@ -3798,16 +3799,19 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
4970     tx_flags);
4971    
4972     if (tx_flags == 0) {
4973     - auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
4974     - auth_data->timeout_started = true;
4975     - run_again(sdata, auth_data->timeout);
4976     + if (auth_data->algorithm == WLAN_AUTH_SAE)
4977     + auth_data->timeout = jiffies +
4978     + IEEE80211_AUTH_TIMEOUT_SAE;
4979     + else
4980     + auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
4981     } else {
4982     auth_data->timeout =
4983     round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
4984     - auth_data->timeout_started = true;
4985     - run_again(sdata, auth_data->timeout);
4986     }
4987    
4988     + auth_data->timeout_started = true;
4989     + run_again(sdata, auth_data->timeout);
4990     +
4991     return 0;
4992     }
4993    
4994     @@ -3878,8 +3882,15 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
4995     ifmgd->status_received = false;
4996     if (ifmgd->auth_data && ieee80211_is_auth(fc)) {
4997     if (status_acked) {
4998     - ifmgd->auth_data->timeout =
4999     - jiffies + IEEE80211_AUTH_TIMEOUT_SHORT;
5000     + if (ifmgd->auth_data->algorithm ==
5001     + WLAN_AUTH_SAE)
5002     + ifmgd->auth_data->timeout =
5003     + jiffies +
5004     + IEEE80211_AUTH_TIMEOUT_SAE;
5005     + else
5006     + ifmgd->auth_data->timeout =
5007     + jiffies +
5008     + IEEE80211_AUTH_TIMEOUT_SHORT;
5009     run_again(sdata, ifmgd->auth_data->timeout);
5010     } else {
5011     ifmgd->auth_data->timeout = jiffies - 1;
5012     diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5013     index 73429841f115..ccb65f18df5d 100644
5014     --- a/net/mac80211/tx.c
5015     +++ b/net/mac80211/tx.c
5016     @@ -4,6 +4,7 @@
5017     * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5018     * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
5019     * Copyright 2013-2014 Intel Mobile Communications GmbH
5020     + * Copyright (C) 2018 Intel Corporation
5021     *
5022     * This program is free software; you can redistribute it and/or modify
5023     * it under the terms of the GNU General Public License version 2 as
5024     @@ -1138,7 +1139,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
5025     }
5026    
5027     /* reset session timer */
5028     - if (reset_agg_timer && tid_tx->timeout)
5029     + if (reset_agg_timer)
5030     tid_tx->last_tx = jiffies;
5031    
5032     return queued;
5033     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5034     index 689e9c0570ba..cf30c440f7a7 100644
5035     --- a/net/netfilter/nf_tables_api.c
5036     +++ b/net/netfilter/nf_tables_api.c
5037     @@ -4977,7 +4977,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
5038     struct nft_base_chain *basechain;
5039    
5040     if (nft_trans_chain_name(trans))
5041     - strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));
5042     + swap(trans->ctx.chain->name, nft_trans_chain_name(trans));
5043    
5044     if (!nft_is_base_chain(trans->ctx.chain))
5045     return;
5046     diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
5047     index 80fb6f63e768..6e721c449c4b 100644
5048     --- a/net/rds/ib_cm.c
5049     +++ b/net/rds/ib_cm.c
5050     @@ -546,7 +546,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
5051     rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
5052     ic->i_send_cq, ic->i_recv_cq);
5053    
5054     - return ret;
5055     + goto out;
5056    
5057     sends_out:
5058     vfree(ic->i_sends);
5059     @@ -571,6 +571,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
5060     ic->i_send_cq = NULL;
5061     rds_ibdev_out:
5062     rds_ib_remove_conn(rds_ibdev, conn);
5063     +out:
5064     rds_ib_dev_put(rds_ibdev);
5065    
5066     return ret;
5067     diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
5068     index 7c1cb08874d5..2a32f60652d8 100644
5069     --- a/net/rxrpc/af_rxrpc.c
5070     +++ b/net/rxrpc/af_rxrpc.c
5071     @@ -302,7 +302,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
5072     memset(&cp, 0, sizeof(cp));
5073     cp.local = rx->local;
5074     cp.key = key;
5075     - cp.security_level = 0;
5076     + cp.security_level = rx->min_sec_level;
5077     cp.exclusive = false;
5078     cp.service_id = srx->srx_service;
5079     call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
5080     diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
5081     index 38b99db30e54..2af42c7d5b82 100644
5082     --- a/net/rxrpc/local_object.c
5083     +++ b/net/rxrpc/local_object.c
5084     @@ -133,22 +133,49 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
5085     }
5086     }
5087    
5088     - /* we want to receive ICMP errors */
5089     - opt = 1;
5090     - ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
5091     - (char *) &opt, sizeof(opt));
5092     - if (ret < 0) {
5093     - _debug("setsockopt failed");
5094     - goto error;
5095     - }
5096     + switch (local->srx.transport.family) {
5097     + case AF_INET:
5098     + /* we want to receive ICMP errors */
5099     + opt = 1;
5100     + ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
5101     + (char *) &opt, sizeof(opt));
5102     + if (ret < 0) {
5103     + _debug("setsockopt failed");
5104     + goto error;
5105     + }
5106    
5107     - /* we want to set the don't fragment bit */
5108     - opt = IP_PMTUDISC_DO;
5109     - ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
5110     - (char *) &opt, sizeof(opt));
5111     - if (ret < 0) {
5112     - _debug("setsockopt failed");
5113     - goto error;
5114     + /* we want to set the don't fragment bit */
5115     + opt = IP_PMTUDISC_DO;
5116     + ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
5117     + (char *) &opt, sizeof(opt));
5118     + if (ret < 0) {
5119     + _debug("setsockopt failed");
5120     + goto error;
5121     + }
5122     + break;
5123     +
5124     + case AF_INET6:
5125     + /* we want to receive ICMP errors */
5126     + opt = 1;
5127     + ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
5128     + (char *) &opt, sizeof(opt));
5129     + if (ret < 0) {
5130     + _debug("setsockopt failed");
5131     + goto error;
5132     + }
5133     +
5134     + /* we want to set the don't fragment bit */
5135     + opt = IPV6_PMTUDISC_DO;
5136     + ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
5137     + (char *) &opt, sizeof(opt));
5138     + if (ret < 0) {
5139     + _debug("setsockopt failed");
5140     + goto error;
5141     + }
5142     + break;
5143     +
5144     + default:
5145     + BUG();
5146     }
5147    
5148     /* set the socket up */
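The rxrpc hunk above splits the socket setup by address family: IPv4 sockets get IP_RECVERR and IP_MTU_DISCOVER, IPv6 sockets the corresponding SOL_IPV6 options, instead of unconditionally using SOL_IP. A userspace sketch of the same per-family branching on a UDP socket; the option macros are the standard Linux ones exposed through <netinet/in.h>, though availability can vary by libc.

/* Userspace sketch of the per-family setup above: enable ICMP error
 * queueing and path-MTU "don't fragment" on a UDP socket, choosing the
 * IPv4 or IPv6 variant of each option. */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static int setup_udp_errqueue(int family)
{
	int fd = socket(family, SOCK_DGRAM, 0);
	int on = 1, pmtu;

	if (fd < 0) {
		perror("socket");
		return -1;
	}

	if (family == AF_INET) {
		pmtu = IP_PMTUDISC_DO;
		if (setsockopt(fd, SOL_IP, IP_RECVERR, &on, sizeof(on)) < 0 ||
		    setsockopt(fd, SOL_IP, IP_MTU_DISCOVER,
			       &pmtu, sizeof(pmtu)) < 0)
			goto err;
	} else {
		pmtu = IPV6_PMTUDISC_DO;
		if (setsockopt(fd, SOL_IPV6, IPV6_RECVERR, &on, sizeof(on)) < 0 ||
		    setsockopt(fd, SOL_IPV6, IPV6_MTU_DISCOVER,
			       &pmtu, sizeof(pmtu)) < 0)
			goto err;
	}
	return fd;

err:
	perror("setsockopt");
	close(fd);
	return -1;
}

int main(void)
{
	int fd4 = setup_udp_errqueue(AF_INET);
	int fd6 = setup_udp_errqueue(AF_INET6);

	printf("ipv4 fd=%d ipv6 fd=%d\n", fd4, fd6);
	if (fd4 >= 0)
		close(fd4);
	if (fd6 >= 0)
		close(fd6);
	return 0;
}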
5149     diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
5150     index 59949d61f20d..6e749497009e 100644
5151     --- a/net/sched/act_skbedit.c
5152     +++ b/net/sched/act_skbedit.c
5153     @@ -121,7 +121,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
5154     return 0;
5155    
5156     if (!flags) {
5157     - tcf_idr_release(*a, bind);
5158     + if (exists)
5159     + tcf_idr_release(*a, bind);
5160     return -EINVAL;
5161     }
5162    
5163     diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
5164     index f9c289e05707..654a81238406 100644
5165     --- a/net/smc/af_smc.c
5166     +++ b/net/smc/af_smc.c
5167     @@ -1264,8 +1264,11 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
5168    
5169     smc = smc_sk(sk);
5170     lock_sock(sk);
5171     - if (sk->sk_state != SMC_ACTIVE)
5172     + if (sk->sk_state != SMC_ACTIVE) {
5173     + release_sock(sk);
5174     goto out;
5175     + }
5176     + release_sock(sk);
5177     if (smc->use_fallback)
5178     rc = kernel_sendpage(smc->clcsock, page, offset,
5179     size, flags);
5180     @@ -1273,7 +1276,6 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
5181     rc = sock_no_sendpage(sock, page, offset, size, flags);
5182    
5183     out:
5184     - release_sock(sk);
5185     return rc;
5186     }
5187    
5188     diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
5189     index 0fcfb3916dcf..254ddc2c3914 100644
5190     --- a/net/tipc/monitor.c
5191     +++ b/net/tipc/monitor.c
5192     @@ -768,7 +768,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
5193    
5194     ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
5195     if (ret || !mon)
5196     - return -EINVAL;
5197     + return 0;
5198    
5199     hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
5200     NLM_F_MULTI, TIPC_NL_MON_GET);
5201     diff --git a/net/tipc/node.c b/net/tipc/node.c
5202     index f6c5743c170e..42e9bdcc4bb6 100644
5203     --- a/net/tipc/node.c
5204     +++ b/net/tipc/node.c
5205     @@ -1831,6 +1831,7 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
5206     int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
5207     {
5208     struct net *net = genl_info_net(info);
5209     + struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
5210     struct tipc_nl_msg msg;
5211     char *name;
5212     int err;
5213     @@ -1838,9 +1839,19 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
5214     msg.portid = info->snd_portid;
5215     msg.seq = info->snd_seq;
5216    
5217     - if (!info->attrs[TIPC_NLA_LINK_NAME])
5218     + if (!info->attrs[TIPC_NLA_LINK])
5219     return -EINVAL;
5220     - name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
5221     +
5222     + err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
5223     + info->attrs[TIPC_NLA_LINK],
5224     + tipc_nl_link_policy, info->extack);
5225     + if (err)
5226     + return err;
5227     +
5228     + if (!attrs[TIPC_NLA_LINK_NAME])
5229     + return -EINVAL;
5230     +
5231     + name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
5232    
5233     msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
5234     if (!msg.skb)
5235     @@ -2113,8 +2124,8 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
5236     struct net *net = sock_net(skb->sk);
5237     u32 prev_bearer = cb->args[0];
5238     struct tipc_nl_msg msg;
5239     + int bearer_id;
5240     int err;
5241     - int i;
5242    
5243     if (prev_bearer == MAX_BEARERS)
5244     return 0;
5245     @@ -2124,16 +2135,13 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
5246     msg.seq = cb->nlh->nlmsg_seq;
5247    
5248     rtnl_lock();
5249     - for (i = prev_bearer; i < MAX_BEARERS; i++) {
5250     - prev_bearer = i;
5251     - err = __tipc_nl_add_monitor(net, &msg, prev_bearer);
5252     + for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
5253     + err = __tipc_nl_add_monitor(net, &msg, bearer_id);
5254     if (err)
5255     - goto out;
5256     + break;
5257     }
5258     -
5259     -out:
5260     rtnl_unlock();
5261     - cb->args[0] = prev_bearer;
5262     + cb->args[0] = bearer_id;
5263    
5264     return skb->len;
5265     }
5266     diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
5267     index 8c7063e1aa46..0b9b014b4bb6 100644
5268     --- a/sound/soc/codecs/msm8916-wcd-analog.c
5269     +++ b/sound/soc/codecs/msm8916-wcd-analog.c
5270     @@ -1184,7 +1184,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
5271     return irq;
5272     }
5273    
5274     - ret = devm_request_irq(dev, irq, pm8916_mbhc_switch_irq_handler,
5275     + ret = devm_request_threaded_irq(dev, irq, NULL,
5276     + pm8916_mbhc_switch_irq_handler,
5277     IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
5278     IRQF_ONESHOT,
5279     "mbhc switch irq", priv);
5280     @@ -1198,7 +1199,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
5281     return irq;
5282     }
5283    
5284     - ret = devm_request_irq(dev, irq, mbhc_btn_press_irq_handler,
5285     + ret = devm_request_threaded_irq(dev, irq, NULL,
5286     + mbhc_btn_press_irq_handler,
5287     IRQF_TRIGGER_RISING |
5288     IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
5289     "mbhc btn press irq", priv);
5290     @@ -1211,7 +1213,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
5291     return irq;
5292     }
5293    
5294     - ret = devm_request_irq(dev, irq, mbhc_btn_release_irq_handler,
5295     + ret = devm_request_threaded_irq(dev, irq, NULL,
5296     + mbhc_btn_release_irq_handler,
5297     IRQF_TRIGGER_RISING |
5298     IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
5299     "mbhc btn release irq", priv);
5300     diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
5301     index d7956ababd11..e52e68b56238 100644
5302     --- a/sound/soc/codecs/rt5514.c
5303     +++ b/sound/soc/codecs/rt5514.c
5304     @@ -89,6 +89,7 @@ static const struct reg_default rt5514_reg[] = {
5305     {RT5514_PLL3_CALIB_CTRL5, 0x40220012},
5306     {RT5514_DELAY_BUF_CTRL1, 0x7fff006a},
5307     {RT5514_DELAY_BUF_CTRL3, 0x00000000},
5308     + {RT5514_ASRC_IN_CTRL1, 0x00000003},
5309     {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f},
5310     {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f},
5311     {RT5514_DOWNFILTER0_CTRL3, 0x10000362},
5312     @@ -181,6 +182,7 @@ static bool rt5514_readable_register(struct device *dev, unsigned int reg)
5313     case RT5514_PLL3_CALIB_CTRL5:
5314     case RT5514_DELAY_BUF_CTRL1:
5315     case RT5514_DELAY_BUF_CTRL3:
5316     + case RT5514_ASRC_IN_CTRL1:
5317     case RT5514_DOWNFILTER0_CTRL1:
5318     case RT5514_DOWNFILTER0_CTRL2:
5319     case RT5514_DOWNFILTER0_CTRL3:
5320     @@ -238,6 +240,7 @@ static bool rt5514_i2c_readable_register(struct device *dev,
5321     case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5:
5322     case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1:
5323     case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3:
5324     + case RT5514_DSP_MAPPING | RT5514_ASRC_IN_CTRL1:
5325     case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1:
5326     case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2:
5327     case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3:
5328     diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
5329     index e5049fbfc4f1..30cdad2eab7f 100644
5330     --- a/sound/soc/soc-topology.c
5331     +++ b/sound/soc/soc-topology.c
5332     @@ -510,7 +510,7 @@ static void remove_widget(struct snd_soc_component *comp,
5333     */
5334     if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) {
5335     /* enumerated widget mixer */
5336     - for (i = 0; i < w->num_kcontrols; i++) {
5337     + for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) {
5338     struct snd_kcontrol *kcontrol = w->kcontrols[i];
5339     struct soc_enum *se =
5340     (struct soc_enum *)kcontrol->private_value;
5341     @@ -528,7 +528,7 @@ static void remove_widget(struct snd_soc_component *comp,
5342     kfree(w->kcontrol_news);
5343     } else {
5344     /* volume mixer or bytes controls */
5345     - for (i = 0; i < w->num_kcontrols; i++) {
5346     + for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) {
5347     struct snd_kcontrol *kcontrol = w->kcontrols[i];
5348    
5349     if (dobj->widget.kcontrol_type
5350     @@ -2571,7 +2571,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
5351    
5352     /* match index */
5353     if (dobj->index != index &&
5354     - dobj->index != SND_SOC_TPLG_INDEX_ALL)
5355     + index != SND_SOC_TPLG_INDEX_ALL)
5356     continue;
5357    
5358     switch (dobj->type) {
5359     diff --git a/tools/net/bpf_dbg.c b/tools/net/bpf_dbg.c
5360     index 4f254bcc4423..61b9aa5d6415 100644
5361     --- a/tools/net/bpf_dbg.c
5362     +++ b/tools/net/bpf_dbg.c
5363     @@ -1063,7 +1063,7 @@ static int cmd_load_pcap(char *file)
5364    
5365     static int cmd_load(char *arg)
5366     {
5367     - char *subcmd, *cont, *tmp = strdup(arg);
5368     + char *subcmd, *cont = NULL, *tmp = strdup(arg);
5369     int ret = CMD_OK;
5370    
5371     subcmd = strtok_r(tmp, " ", &cont);
5372     @@ -1073,7 +1073,10 @@ static int cmd_load(char *arg)
5373     bpf_reset();
5374     bpf_reset_breakpoints();
5375    
5376     - ret = cmd_load_bpf(cont);
5377     + if (!cont)
5378     + ret = CMD_ERR;
5379     + else
5380     + ret = cmd_load_bpf(cont);
5381     } else if (matches(subcmd, "pcap") == 0) {
5382     ret = cmd_load_pcap(cont);
5383     } else {
5384     diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
5385     index b3e32b010ab1..c2c01f84df75 100644
5386     --- a/tools/objtool/arch/x86/include/asm/insn.h
5387     +++ b/tools/objtool/arch/x86/include/asm/insn.h
5388     @@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
5389     return insn_offset_displacement(insn) + insn->displacement.nbytes;
5390     }
5391    
5392     +#define POP_SS_OPCODE 0x1f
5393     +#define MOV_SREG_OPCODE 0x8e
5394     +
5395     +/*
5396     + * Intel SDM Vol.3A 6.8.3 states;
5397     + * "Any single-step trap that would be delivered following the MOV to SS
5398     + * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
5399     + * suppressed."
5400     + * This function returns true if @insn is MOV SS or POP SS. On these
5401     + * instructions, single stepping is suppressed.
5402     + */
5403     +static inline int insn_masking_exception(struct insn *insn)
5404     +{
5405     + return insn->opcode.bytes[0] == POP_SS_OPCODE ||
5406     + (insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
5407     + X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
5408     +}
5409     +
5410     #endif /* _ASM_X86_INSN_H */
5411     diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
5412     index 4e8dd5fd45fd..ec40e47aa198 100644
5413     --- a/tools/perf/util/symbol.c
5414     +++ b/tools/perf/util/symbol.c
5415     @@ -2093,16 +2093,14 @@ static bool symbol__read_kptr_restrict(void)
5416    
5417     int symbol__annotation_init(void)
5418     {
5419     + if (symbol_conf.init_annotation)
5420     + return 0;
5421     +
5422     if (symbol_conf.initialized) {
5423     pr_err("Annotation needs to be init before symbol__init()\n");
5424     return -1;
5425     }
5426    
5427     - if (symbol_conf.init_annotation) {
5428     - pr_warning("Annotation being initialized multiple times\n");
5429     - return 0;
5430     - }
5431     -
5432     symbol_conf.priv_size += sizeof(struct annotation);
5433     symbol_conf.init_annotation = true;
5434     return 0;
5435     diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
5436     new file mode 100644
5437     index 000000000000..c193dce611a2
5438     --- /dev/null
5439     +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
5440     @@ -0,0 +1,44 @@
5441     +#!/bin/sh
5442     +# description: event trigger - test multiple actions on hist trigger
5443     +
5444     +
5445     +do_reset() {
5446     + reset_trigger
5447     + echo > set_event
5448     + clear_trace
5449     +}
5450     +
5451     +fail() { #msg
5452     + do_reset
5453     + echo $1
5454     + exit_fail
5455     +}
5456     +
5457     +if [ ! -f set_event ]; then
5458     + echo "event tracing is not supported"
5459     + exit_unsupported
5460     +fi
5461     +
5462     +if [ ! -f synthetic_events ]; then
5463     + echo "synthetic event is not supported"
5464     + exit_unsupported
5465     +fi
5466     +
5467     +clear_synthetic_events
5468     +reset_tracer
5469     +do_reset
5470     +
5471     +echo "Test multiple actions on hist trigger"
5472     +echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
5473     +TRIGGER1=events/sched/sched_wakeup/trigger
5474     +TRIGGER2=events/sched/sched_switch/trigger
5475     +
5476     +echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="cyclictest"' > $TRIGGER1
5477     +echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0 if next_comm=="cyclictest"' >> $TRIGGER2
5478     +echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,next_pid) if next_comm=="cyclictest"' >> $TRIGGER2
5479     +echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,prev_pid) if next_comm=="cyclictest"' >> $TRIGGER2
5480     +echo 'hist:keys=next_pid if next_comm=="cyclictest"' >> $TRIGGER2
5481     +
5482     +do_reset
5483     +
5484     +exit 0
5485     diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
5486     index aa6e2d7f6a1f..903980921d9e 100644
5487     --- a/tools/testing/selftests/x86/Makefile
5488     +++ b/tools/testing/selftests/x86/Makefile
5489     @@ -11,7 +11,7 @@ CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
5490    
5491     TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
5492     check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \
5493     - protection_keys test_vdso test_vsyscall
5494     + protection_keys test_vdso test_vsyscall mov_ss_trap
5495     TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
5496     test_FCMOV test_FCOMI test_FISTTP \
5497     vdso_restorer
5498     diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c
5499     new file mode 100644
5500     index 000000000000..3c3a022654f3
5501     --- /dev/null
5502     +++ b/tools/testing/selftests/x86/mov_ss_trap.c
5503     @@ -0,0 +1,285 @@
5504     +/* SPDX-License-Identifier: GPL-2.0 */
5505     +/*
5506     + * mov_ss_trap.c: Exercise the bizarre side effects of a watchpoint on MOV SS
5507     + *
5508     + * This does MOV SS from a watchpointed address followed by various
5509     + * types of kernel entries. A MOV SS that hits a watchpoint will queue
5510     + * up a #DB trap but will not actually deliver that trap. The trap
5511     + * will be delivered after the next instruction instead. The CPU's logic
5512     + * seems to be:
5513     + *
5514     + * - Any fault: drop the pending #DB trap.
5515     + * - INT $N, INT3, INTO, SYSCALL, SYSENTER: enter the kernel and then
5516     + * deliver #DB.
5517     + * - ICEBP: enter the kernel but do not deliver the watchpoint trap
5518     + * - breakpoint: only one #DB is delivered (phew!)
5519     + *
5520     + * There are plenty of ways for a kernel to handle this incorrectly. This
5521     + * test tries to exercise all the cases.
5522     + *
5523     + * This should mostly cover CVE-2018-1087 and CVE-2018-8897.
5524     + */
5525     +#define _GNU_SOURCE
5526     +
5527     +#include <stdlib.h>
5528     +#include <sys/ptrace.h>
5529     +#include <sys/types.h>
5530     +#include <sys/wait.h>
5531     +#include <sys/user.h>
5532     +#include <sys/syscall.h>
5533     +#include <unistd.h>
5534     +#include <errno.h>
5535     +#include <stddef.h>
5536     +#include <stdio.h>
5537     +#include <err.h>
5538     +#include <string.h>
5539     +#include <setjmp.h>
5540     +#include <sys/prctl.h>
5541     +
5542     +#define X86_EFLAGS_RF (1UL << 16)
5543     +
5544     +#if __x86_64__
5545     +# define REG_IP REG_RIP
5546     +#else
5547     +# define REG_IP REG_EIP
5548     +#endif
5549     +
5550     +unsigned short ss;
5551     +extern unsigned char breakpoint_insn[];
5552     +sigjmp_buf jmpbuf;
5553     +static unsigned char altstack_data[SIGSTKSZ];
5554     +
5555     +static void enable_watchpoint(void)
5556     +{
5557     + pid_t parent = getpid();
5558     + int status;
5559     +
5560     + pid_t child = fork();
5561     + if (child < 0)
5562     + err(1, "fork");
5563     +
5564     + if (child) {
5565     + if (waitpid(child, &status, 0) != child)
5566     + err(1, "waitpid for child");
5567     + } else {
5568     + unsigned long dr0, dr1, dr7;
5569     +
5570     + dr0 = (unsigned long)&ss;
5571     + dr1 = (unsigned long)breakpoint_insn;
5572     + dr7 = ((1UL << 1) | /* G0 */
5573     + (3UL << 16) | /* RW0 = read or write */
5574     + (1UL << 18) | /* LEN0 = 2 bytes */
5575     + (1UL << 3)); /* G1, RW1 = insn */
5576     +
5577     + if (ptrace(PTRACE_ATTACH, parent, NULL, NULL) != 0)
5578     + err(1, "PTRACE_ATTACH");
5579     +
5580     + if (waitpid(parent, &status, 0) != parent)
5581     + err(1, "waitpid for parent");
5582     +
5583     + if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[0]), dr0) != 0)
5584     + err(1, "PTRACE_POKEUSER DR0");
5585     +
5586     + if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[1]), dr1) != 0)
5587     + err(1, "PTRACE_POKEUSER DR1");
5588     +
5589     + if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[7]), dr7) != 0)
5590     + err(1, "PTRACE_POKEUSER DR7");
5591     +
5592     + printf("\tDR0 = %lx, DR1 = %lx, DR7 = %lx\n", dr0, dr1, dr7);
5593     +
5594     + if (ptrace(PTRACE_DETACH, parent, NULL, NULL) != 0)
5595     + err(1, "PTRACE_DETACH");
5596     +
5597     + exit(0);
5598     + }
5599     +}
5600     +
5601     +static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
5602     + int flags)
5603     +{
5604     + struct sigaction sa;
5605     + memset(&sa, 0, sizeof(sa));
5606     + sa.sa_sigaction = handler;
5607     + sa.sa_flags = SA_SIGINFO | flags;
5608     + sigemptyset(&sa.sa_mask);
5609     + if (sigaction(sig, &sa, 0))
5610     + err(1, "sigaction");
5611     +}
5612     +
5613     +static char const * const signames[] = {
5614     + [SIGSEGV] = "SIGSEGV",
5615     + [SIGBUS] = "SIGBUS",
5616     + [SIGTRAP] = "SIGTRAP",
5617     + [SIGILL] = "SIGILL",
5618     +};
5619     +
5620     +static void sigtrap(int sig, siginfo_t *si, void *ctx_void)
5621     +{
5622     + ucontext_t *ctx = ctx_void;
5623     +
5624     + printf("\tGot SIGTRAP with RIP=%lx, EFLAGS.RF=%d\n",
5625     + (unsigned long)ctx->uc_mcontext.gregs[REG_IP],
5626     + !!(ctx->uc_mcontext.gregs[REG_EFL] & X86_EFLAGS_RF));
5627     +}
5628     +
5629     +static void handle_and_return(int sig, siginfo_t *si, void *ctx_void)
5630     +{
5631     + ucontext_t *ctx = ctx_void;
5632     +
5633     + printf("\tGot %s with RIP=%lx\n", signames[sig],
5634     + (unsigned long)ctx->uc_mcontext.gregs[REG_IP]);
5635     +}
5636     +
5637     +static void handle_and_longjmp(int sig, siginfo_t *si, void *ctx_void)
5638     +{
5639     + ucontext_t *ctx = ctx_void;
5640     +
5641     + printf("\tGot %s with RIP=%lx\n", signames[sig],
5642     + (unsigned long)ctx->uc_mcontext.gregs[REG_IP]);
5643     +
5644     + siglongjmp(jmpbuf, 1);
5645     +}
5646     +
5647     +int main()
5648     +{
5649     + unsigned long nr;
5650     +
5651     + asm volatile ("mov %%ss, %[ss]" : [ss] "=m" (ss));
5652     + printf("\tSS = 0x%hx, &SS = 0x%p\n", ss, &ss);
5653     +
5654     + if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0) == 0)
5655     + printf("\tPR_SET_PTRACER_ANY succeeded\n");
5656     +
5657     + printf("\tSet up a watchpoint\n");
5658     + sethandler(SIGTRAP, sigtrap, 0);
5659     + enable_watchpoint();
5660     +
5661     + printf("[RUN]\tRead from watched memory (should get SIGTRAP)\n");
5662     + asm volatile ("mov %[ss], %[tmp]" : [tmp] "=r" (nr) : [ss] "m" (ss));
5663     +
5664     + printf("[RUN]\tMOV SS; INT3\n");
5665     + asm volatile ("mov %[ss], %%ss; int3" :: [ss] "m" (ss));
5666     +
5667     + printf("[RUN]\tMOV SS; INT 3\n");
5668     + asm volatile ("mov %[ss], %%ss; .byte 0xcd, 0x3" :: [ss] "m" (ss));
5669     +
5670     + printf("[RUN]\tMOV SS; CS CS INT3\n");
5671     + asm volatile ("mov %[ss], %%ss; .byte 0x2e, 0x2e; int3" :: [ss] "m" (ss));
5672     +
5673     + printf("[RUN]\tMOV SS; CSx14 INT3\n");
5674     + asm volatile ("mov %[ss], %%ss; .fill 14,1,0x2e; int3" :: [ss] "m" (ss));
5675     +
5676     + printf("[RUN]\tMOV SS; INT 4\n");
5677     + sethandler(SIGSEGV, handle_and_return, SA_RESETHAND);
5678     + asm volatile ("mov %[ss], %%ss; int $4" :: [ss] "m" (ss));
5679     +
5680     +#ifdef __i386__
5681     + printf("[RUN]\tMOV SS; INTO\n");
5682     + sethandler(SIGSEGV, handle_and_return, SA_RESETHAND);
5683     + nr = -1;
5684     + asm volatile ("add $1, %[tmp]; mov %[ss], %%ss; into"
5685     + : [tmp] "+r" (nr) : [ss] "m" (ss));
5686     +#endif
5687     +
5688     + if (sigsetjmp(jmpbuf, 1) == 0) {
5689     + printf("[RUN]\tMOV SS; ICEBP\n");
5690     +
5691     + /* Some emulators (e.g. QEMU TCG) don't emulate ICEBP. */
5692     + sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND);
5693     +
5694     + asm volatile ("mov %[ss], %%ss; .byte 0xf1" :: [ss] "m" (ss));
5695     + }
5696     +
5697     + if (sigsetjmp(jmpbuf, 1) == 0) {
5698     + printf("[RUN]\tMOV SS; CLI\n");
5699     + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
5700     + asm volatile ("mov %[ss], %%ss; cli" :: [ss] "m" (ss));
5701     + }
5702     +
5703     + if (sigsetjmp(jmpbuf, 1) == 0) {
5704     + printf("[RUN]\tMOV SS; #PF\n");
5705     + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
5706     + asm volatile ("mov %[ss], %%ss; mov (-1), %[tmp]"
5707     + : [tmp] "=r" (nr) : [ss] "m" (ss));
5708     + }
5709     +
5710     + /*
5711     + * INT $1: if #DB has DPL=3 and there isn't special handling,
5712     + * then the kernel will die.
5713     + */
5714     + if (sigsetjmp(jmpbuf, 1) == 0) {
5715     + printf("[RUN]\tMOV SS; INT 1\n");
5716     + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
5717     + asm volatile ("mov %[ss], %%ss; int $1" :: [ss] "m" (ss));
5718     + }
5719     +
5720     +#ifdef __x86_64__
5721     + /*
5722     + * In principle, we should test 32-bit SYSCALL as well, but
5723     + * the calling convention is so unpredictable that it's
5724     + * not obviously worth the effort.
5725     + */
5726     + if (sigsetjmp(jmpbuf, 1) == 0) {
5727     + printf("[RUN]\tMOV SS; SYSCALL\n");
5728     + sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND);
5729     + nr = SYS_getpid;
5730     + /*
5731     + * Toggle the high bit of RSP to make it noncanonical to
5732     + * strengthen this test on non-SMAP systems.
5733     + */
5734     + asm volatile ("btc $63, %%rsp\n\t"
5735     + "mov %[ss], %%ss; syscall\n\t"
5736     + "btc $63, %%rsp"
5737     + : "+a" (nr) : [ss] "m" (ss)
5738     + : "rcx"
5739     +#ifdef __x86_64__
5740     + , "r11"
5741     +#endif
5742     + );
5743     + }
5744     +#endif
5745     +
5746     + printf("[RUN]\tMOV SS; breakpointed NOP\n");
5747     + asm volatile ("mov %[ss], %%ss; breakpoint_insn: nop" :: [ss] "m" (ss));
5748     +
5749     + /*
5750     + * Invoking SYSENTER directly breaks all the rules. Just handle
5751     + * the SIGSEGV.
5752     + */
5753     + if (sigsetjmp(jmpbuf, 1) == 0) {
5754     + printf("[RUN]\tMOV SS; SYSENTER\n");
5755     + stack_t stack = {
5756     + .ss_sp = altstack_data,
5757     + .ss_size = SIGSTKSZ,
5758     + };
5759     + if (sigaltstack(&stack, NULL) != 0)
5760     + err(1, "sigaltstack");
5761     + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK);
5762     + nr = SYS_getpid;
5763     + asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr)
5764     + : [ss] "m" (ss) : "flags", "rcx"
5765     +#ifdef __x86_64__
5766     + , "r11"
5767     +#endif
5768     + );
5769     +
5770     + /* We're unreachable here. SYSENTER forgets RIP. */
5771     + }
5772     +
5773     + if (sigsetjmp(jmpbuf, 1) == 0) {
5774     + printf("[RUN]\tMOV SS; INT $0x80\n");
5775     + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
5776     + nr = 20; /* compat getpid */
5777     + asm volatile ("mov %[ss], %%ss; int $0x80"
5778     + : "+a" (nr) : [ss] "m" (ss)
5779     + : "flags"
5780     +#ifdef __x86_64__
5781     + , "r8", "r9", "r10", "r11"
5782     +#endif
5783     + );
5784     + }
5785     +
5786     + printf("[OK]\tI aten't dead\n");
5787     + return 0;
5788     +}
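
For reference, the DR7 value that enable_watchpoint() above pokes into the parent packs two debug slots: a 2-byte read/write watchpoint on &ss (slot 0) and an instruction breakpoint on breakpoint_insn (slot 1). A standalone sketch of that encoding, following the debug-register layout documented in the Intel SDM (not part of the patch; the helper name dr7_global is made up for illustration):

/* dr7_sketch.c -- illustrative only, not part of the patch. */
#include <stdio.h>

/*
 * Global-enable bit for slot n is bit (2n + 1); the R/W and LEN fields
 * for slot n sit at bits 16 + 4n and 18 + 4n respectively.
 */
static unsigned long dr7_global(int slot, unsigned long rw, unsigned long len)
{
	return (1UL << (2 * slot + 1)) |
	       (rw  << (16 + 4 * slot)) |
	       (len << (18 + 4 * slot));
}

int main(void)
{
	/*
	 * Slot 0: R/W = 3 (read or write), LEN = 1 (2 bytes) -> data
	 * watchpoint on &ss. Slot 1: R/W = LEN = 0 -> instruction
	 * breakpoint on breakpoint_insn.
	 */
	unsigned long dr7 = dr7_global(0, 3, 1) | dr7_global(1, 0, 0);

	printf("DR7 = 0x%lx\n", dr7);	/* 0x7000a, as the test prints */
	return 0;
}

This is the same value as the (1UL << 1) | (3UL << 16) | (1UL << 18) | (1UL << 3) literal built in the ptracing child above.
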
5789     diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
5790     index 9c0325e1ea68..50f7e9272481 100644
5791     --- a/tools/testing/selftests/x86/mpx-mini-test.c
5792     +++ b/tools/testing/selftests/x86/mpx-mini-test.c
5793     @@ -368,6 +368,11 @@ static int expected_bnd_index = -1;
5794     uint64_t shadow_plb[NR_MPX_BOUNDS_REGISTERS][2]; /* shadow MPX bound registers */
5795     unsigned long shadow_map[NR_MPX_BOUNDS_REGISTERS];
5796    
5797     +/* Failed address bound checks: */
5798     +#ifndef SEGV_BNDERR
5799     +# define SEGV_BNDERR 3
5800     +#endif
5801     +
5802     /*
5803     * The kernel is supposed to provide some information about the bounds
5804     * exception in the siginfo. It should match what we have in the bounds
5805     @@ -419,8 +424,6 @@ void handler(int signum, siginfo_t *si, void *vucontext)
5806     br_count++;
5807     dprintf1("#BR 0x%jx (total seen: %d)\n", status, br_count);
5808    
5809     -#define SEGV_BNDERR 3 /* failed address bound checks */
5810     -
5811     dprintf2("Saw a #BR! status 0x%jx at %016lx br_reason: %jx\n",
5812     status, ip, br_reason);
5813     dprintf2("si_signo: %d\n", si->si_signo);
5814     diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
5815     index f15aa5a76fe3..460b4bdf4c1e 100644
5816     --- a/tools/testing/selftests/x86/protection_keys.c
5817     +++ b/tools/testing/selftests/x86/protection_keys.c
5818     @@ -72,10 +72,9 @@ extern void abort_hooks(void);
5819     test_nr, iteration_nr); \
5820     dprintf0("errno at assert: %d", errno); \
5821     abort_hooks(); \
5822     - assert(condition); \
5823     + exit(__LINE__); \
5824     } \
5825     } while (0)
5826     -#define raw_assert(cond) assert(cond)
5827    
5828     void cat_into_file(char *str, char *file)
5829     {
5830     @@ -87,12 +86,17 @@ void cat_into_file(char *str, char *file)
5831     * these need to be raw because they are called under
5832     * pkey_assert()
5833     */
5834     - raw_assert(fd >= 0);
5835     + if (fd < 0) {
5836     + fprintf(stderr, "error opening '%s'\n", str);
5837     + perror("error: ");
5838     + exit(__LINE__);
5839     + }
5840     +
5841     ret = write(fd, str, strlen(str));
5842     if (ret != strlen(str)) {
5843     perror("write to file failed");
5844     fprintf(stderr, "filename: '%s' str: '%s'\n", file, str);
5845     - raw_assert(0);
5846     + exit(__LINE__);
5847     }
5848     close(fd);
5849     }
5850     @@ -191,26 +195,30 @@ void lots_o_noops_around_write(int *write_to_me)
5851     #ifdef __i386__
5852    
5853     #ifndef SYS_mprotect_key
5854     -# define SYS_mprotect_key 380
5855     +# define SYS_mprotect_key 380
5856     #endif
5857     +
5858     #ifndef SYS_pkey_alloc
5859     -# define SYS_pkey_alloc 381
5860     -# define SYS_pkey_free 382
5861     +# define SYS_pkey_alloc 381
5862     +# define SYS_pkey_free 382
5863     #endif
5864     -#define REG_IP_IDX REG_EIP
5865     -#define si_pkey_offset 0x14
5866     +
5867     +#define REG_IP_IDX REG_EIP
5868     +#define si_pkey_offset 0x14
5869    
5870     #else
5871    
5872     #ifndef SYS_mprotect_key
5873     -# define SYS_mprotect_key 329
5874     +# define SYS_mprotect_key 329
5875     #endif
5876     +
5877     #ifndef SYS_pkey_alloc
5878     -# define SYS_pkey_alloc 330
5879     -# define SYS_pkey_free 331
5880     +# define SYS_pkey_alloc 330
5881     +# define SYS_pkey_free 331
5882     #endif
5883     -#define REG_IP_IDX REG_RIP
5884     -#define si_pkey_offset 0x20
5885     +
5886     +#define REG_IP_IDX REG_RIP
5887     +#define si_pkey_offset 0x20
5888    
5889     #endif
5890    
5891     @@ -225,8 +233,14 @@ void dump_mem(void *dumpme, int len_bytes)
5892     }
5893     }
5894    
5895     -#define SEGV_BNDERR 3 /* failed address bound checks */
5896     -#define SEGV_PKUERR 4
5897     +/* Failed address bound checks: */
5898     +#ifndef SEGV_BNDERR
5899     +# define SEGV_BNDERR 3
5900     +#endif
5901     +
5902     +#ifndef SEGV_PKUERR
5903     +# define SEGV_PKUERR 4
5904     +#endif
5905    
5906     static char *si_code_str(int si_code)
5907     {
5908     @@ -289,13 +303,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
5909     dump_mem(pkru_ptr - 128, 256);
5910     pkey_assert(*pkru_ptr);
5911    
5912     - si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
5913     - dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
5914     - dump_mem(si_pkey_ptr - 8, 24);
5915     - siginfo_pkey = *si_pkey_ptr;
5916     - pkey_assert(siginfo_pkey < NR_PKEYS);
5917     - last_si_pkey = siginfo_pkey;
5918     -
5919     if ((si->si_code == SEGV_MAPERR) ||
5920     (si->si_code == SEGV_ACCERR) ||
5921     (si->si_code == SEGV_BNDERR)) {
5922     @@ -303,6 +310,13 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
5923     exit(4);
5924     }
5925    
5926     + si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
5927     + dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
5928     + dump_mem((u8 *)si_pkey_ptr - 8, 24);
5929     + siginfo_pkey = *si_pkey_ptr;
5930     + pkey_assert(siginfo_pkey < NR_PKEYS);
5931     + last_si_pkey = siginfo_pkey;
5932     +
5933     dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr);
5934     /* need __rdpkru() version so we do not do shadow_pkru checking */
5935     dprintf1("signal pkru from pkru: %08x\n", __rdpkru());
5936     @@ -311,22 +325,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
5937     dprintf1("WARNING: set PKRU=0 to allow faulting instruction to continue\n");
5938     pkru_faults++;
5939     dprintf1("<<<<==================================================\n");
5940     - return;
5941     - if (trapno == 14) {
5942     - fprintf(stderr,
5943     - "ERROR: In signal handler, page fault, trapno = %d, ip = %016lx\n",
5944     - trapno, ip);
5945     - fprintf(stderr, "si_addr %p\n", si->si_addr);
5946     - fprintf(stderr, "REG_ERR: %lx\n",
5947     - (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
5948     - exit(1);
5949     - } else {
5950     - fprintf(stderr, "unexpected trap %d! at 0x%lx\n", trapno, ip);
5951     - fprintf(stderr, "si_addr %p\n", si->si_addr);
5952     - fprintf(stderr, "REG_ERR: %lx\n",
5953     - (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
5954     - exit(2);
5955     - }
5956     dprint_in_signal = 0;
5957     }
5958    
5959     @@ -393,10 +391,15 @@ pid_t fork_lazy_child(void)
5960     return forkret;
5961     }
5962    
5963     -#define PKEY_DISABLE_ACCESS 0x1
5964     -#define PKEY_DISABLE_WRITE 0x2
5965     +#ifndef PKEY_DISABLE_ACCESS
5966     +# define PKEY_DISABLE_ACCESS 0x1
5967     +#endif
5968     +
5969     +#ifndef PKEY_DISABLE_WRITE
5970     +# define PKEY_DISABLE_WRITE 0x2
5971     +#endif
5972    
5973     -u32 pkey_get(int pkey, unsigned long flags)
5974     +static u32 hw_pkey_get(int pkey, unsigned long flags)
5975     {
5976     u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
5977     u32 pkru = __rdpkru();
5978     @@ -418,7 +421,7 @@ u32 pkey_get(int pkey, unsigned long flags)
5979     return masked_pkru;
5980     }
5981    
5982     -int pkey_set(int pkey, unsigned long rights, unsigned long flags)
5983     +static int hw_pkey_set(int pkey, unsigned long rights, unsigned long flags)
5984     {
5985     u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
5986     u32 old_pkru = __rdpkru();
5987     @@ -452,15 +455,15 @@ void pkey_disable_set(int pkey, int flags)
5988     pkey, flags);
5989     pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
5990    
5991     - pkey_rights = pkey_get(pkey, syscall_flags);
5992     + pkey_rights = hw_pkey_get(pkey, syscall_flags);
5993    
5994     - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
5995     + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
5996     pkey, pkey, pkey_rights);
5997     pkey_assert(pkey_rights >= 0);
5998    
5999     pkey_rights |= flags;
6000    
6001     - ret = pkey_set(pkey, pkey_rights, syscall_flags);
6002     + ret = hw_pkey_set(pkey, pkey_rights, syscall_flags);
6003     assert(!ret);
6004     /*pkru and flags have the same format */
6005     shadow_pkru |= flags << (pkey * 2);
6006     @@ -468,8 +471,8 @@ void pkey_disable_set(int pkey, int flags)
6007    
6008     pkey_assert(ret >= 0);
6009    
6010     - pkey_rights = pkey_get(pkey, syscall_flags);
6011     - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
6012     + pkey_rights = hw_pkey_get(pkey, syscall_flags);
6013     + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
6014     pkey, pkey, pkey_rights);
6015    
6016     dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru());
6017     @@ -483,24 +486,24 @@ void pkey_disable_clear(int pkey, int flags)
6018     {
6019     unsigned long syscall_flags = 0;
6020     int ret;
6021     - int pkey_rights = pkey_get(pkey, syscall_flags);
6022     + int pkey_rights = hw_pkey_get(pkey, syscall_flags);
6023     u32 orig_pkru = rdpkru();
6024    
6025     pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
6026    
6027     - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
6028     + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
6029     pkey, pkey, pkey_rights);
6030     pkey_assert(pkey_rights >= 0);
6031    
6032     pkey_rights |= flags;
6033    
6034     - ret = pkey_set(pkey, pkey_rights, 0);
6035     + ret = hw_pkey_set(pkey, pkey_rights, 0);
6036     /* pkru and flags have the same format */
6037     shadow_pkru &= ~(flags << (pkey * 2));
6038     pkey_assert(ret >= 0);
6039    
6040     - pkey_rights = pkey_get(pkey, syscall_flags);
6041     - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
6042     + pkey_rights = hw_pkey_get(pkey, syscall_flags);
6043     + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
6044     pkey, pkey, pkey_rights);
6045    
6046     dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru());
6047     @@ -674,10 +677,12 @@ int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
6048     struct pkey_malloc_record {
6049     void *ptr;
6050     long size;
6051     + int prot;
6052     };
6053     struct pkey_malloc_record *pkey_malloc_records;
6054     +struct pkey_malloc_record *pkey_last_malloc_record;
6055     long nr_pkey_malloc_records;
6056     -void record_pkey_malloc(void *ptr, long size)
6057     +void record_pkey_malloc(void *ptr, long size, int prot)
6058     {
6059     long i;
6060     struct pkey_malloc_record *rec = NULL;
6061     @@ -709,6 +714,8 @@ void record_pkey_malloc(void *ptr, long size)
6062     (int)(rec - pkey_malloc_records), rec, ptr, size);
6063     rec->ptr = ptr;
6064     rec->size = size;
6065     + rec->prot = prot;
6066     + pkey_last_malloc_record = rec;
6067     nr_pkey_malloc_records++;
6068     }
6069    
6070     @@ -753,7 +760,7 @@ void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey)
6071     pkey_assert(ptr != (void *)-1);
6072     ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey);
6073     pkey_assert(!ret);
6074     - record_pkey_malloc(ptr, size);
6075     + record_pkey_malloc(ptr, size, prot);
6076     rdpkru();
6077    
6078     dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr);
6079     @@ -774,7 +781,7 @@ void *malloc_pkey_anon_huge(long size, int prot, u16 pkey)
6080     size = ALIGN_UP(size, HPAGE_SIZE * 2);
6081     ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6082     pkey_assert(ptr != (void *)-1);
6083     - record_pkey_malloc(ptr, size);
6084     + record_pkey_malloc(ptr, size, prot);
6085     mprotect_pkey(ptr, size, prot, pkey);
6086    
6087     dprintf1("unaligned ptr: %p\n", ptr);
6088     @@ -847,7 +854,7 @@ void *malloc_pkey_hugetlb(long size, int prot, u16 pkey)
6089     pkey_assert(ptr != (void *)-1);
6090     mprotect_pkey(ptr, size, prot, pkey);
6091    
6092     - record_pkey_malloc(ptr, size);
6093     + record_pkey_malloc(ptr, size, prot);
6094    
6095     dprintf1("mmap()'d hugetlbfs for pkey %d @ %p\n", pkey, ptr);
6096     return ptr;
6097     @@ -869,7 +876,7 @@ void *malloc_pkey_mmap_dax(long size, int prot, u16 pkey)
6098    
6099     mprotect_pkey(ptr, size, prot, pkey);
6100    
6101     - record_pkey_malloc(ptr, size);
6102     + record_pkey_malloc(ptr, size, prot);
6103    
6104     dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr);
6105     close(fd);
6106     @@ -918,13 +925,21 @@ void *malloc_pkey(long size, int prot, u16 pkey)
6107     }
6108    
6109     int last_pkru_faults;
6110     +#define UNKNOWN_PKEY -2
6111     void expected_pk_fault(int pkey)
6112     {
6113     dprintf2("%s(): last_pkru_faults: %d pkru_faults: %d\n",
6114     __func__, last_pkru_faults, pkru_faults);
6115     dprintf2("%s(%d): last_si_pkey: %d\n", __func__, pkey, last_si_pkey);
6116     pkey_assert(last_pkru_faults + 1 == pkru_faults);
6117     - pkey_assert(last_si_pkey == pkey);
6118     +
6119     + /*
6120     + * For exec-only memory, we do not know the pkey in
6121     + * advance, so skip this check.
6122     + */
6123     + if (pkey != UNKNOWN_PKEY)
6124     + pkey_assert(last_si_pkey == pkey);
6125     +
6126     /*
6127     * The signal handler should have cleared out PKRU to let the
6128     * test program continue. We now have to restore it.
6129     @@ -939,10 +954,11 @@ void expected_pk_fault(int pkey)
6130     last_si_pkey = -1;
6131     }
6132    
6133     -void do_not_expect_pk_fault(void)
6134     -{
6135     - pkey_assert(last_pkru_faults == pkru_faults);
6136     -}
6137     +#define do_not_expect_pk_fault(msg) do { \
6138     + if (last_pkru_faults != pkru_faults) \
6139     + dprintf0("unexpected PK fault: %s\n", msg); \
6140     + pkey_assert(last_pkru_faults == pkru_faults); \
6141     +} while (0)
6142    
6143     int test_fds[10] = { -1 };
6144     int nr_test_fds;
6145     @@ -1151,12 +1167,15 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
6146     pkey_assert(i < NR_PKEYS*2);
6147    
6148     /*
6149     - * There are 16 pkeys supported in hardware. One is taken
6150     - * up for the default (0) and another can be taken up by
6151     - * an execute-only mapping. Ensure that we can allocate
6152     - * at least 14 (16-2).
6153     + * There are 16 pkeys supported in hardware. Three are
6154     + * allocated by the time we get here:
6155     + * 1. The default key (0)
6156     + * 2. One possibly consumed by an execute-only mapping.
6157     + * 3. One allocated by the test code and passed in via
6158     + * 'pkey' to this function.
6159     + * Ensure that we can allocate at least another 13 (16-3).
6160     */
6161     - pkey_assert(i >= NR_PKEYS-2);
6162     + pkey_assert(i >= NR_PKEYS-3);
6163    
6164     for (i = 0; i < nr_allocated_pkeys; i++) {
6165     err = sys_pkey_free(allocated_pkeys[i]);
6166     @@ -1165,6 +1184,35 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
6167     }
6168     }
6169    
6170     +/*
6171     + * pkey 0 is special. It is allocated by default, so you do not
6172     + * have to call pkey_alloc() to use it first. Make sure that it
6173     + * is usable.
6174     + */
6175     +void test_mprotect_with_pkey_0(int *ptr, u16 pkey)
6176     +{
6177     + long size;
6178     + int prot;
6179     +
6180     + assert(pkey_last_malloc_record);
6181     + size = pkey_last_malloc_record->size;
6182     + /*
6183     + * This is a bit of a hack. But mprotect() requires
6184     + * huge-page-aligned sizes when operating on hugetlbfs.
6185     + * So, make sure that we use something that's a multiple
6186     + * of a huge page when we can.
6187     + */
6188     + if (size >= HPAGE_SIZE)
6189     + size = HPAGE_SIZE;
6190     + prot = pkey_last_malloc_record->prot;
6191     +
6192     + /* Use pkey 0 */
6193     + mprotect_pkey(ptr, size, prot, 0);
6194     +
6195     + /* Make sure that we can set it back to the original pkey. */
6196     + mprotect_pkey(ptr, size, prot, pkey);
6197     +}
6198     +
6199     void test_ptrace_of_child(int *ptr, u16 pkey)
6200     {
6201     __attribute__((__unused__)) int peek_result;
6202     @@ -1228,7 +1276,7 @@ void test_ptrace_of_child(int *ptr, u16 pkey)
6203     pkey_assert(ret != -1);
6204     /* Now access from the current task, and expect NO exception: */
6205     peek_result = read_ptr(plain_ptr);
6206     - do_not_expect_pk_fault();
6207     + do_not_expect_pk_fault("read plain pointer after ptrace");
6208    
6209     ret = ptrace(PTRACE_DETACH, child_pid, ignored, 0);
6210     pkey_assert(ret != -1);
6211     @@ -1241,12 +1289,9 @@ void test_ptrace_of_child(int *ptr, u16 pkey)
6212     free(plain_ptr_unaligned);
6213     }
6214    
6215     -void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
6216     +void *get_pointer_to_instructions(void)
6217     {
6218     void *p1;
6219     - int scratch;
6220     - int ptr_contents;
6221     - int ret;
6222    
6223     p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE);
6224     dprintf3("&lots_o_noops: %p\n", &lots_o_noops_around_write);
6225     @@ -1256,7 +1301,23 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
6226     /* Point 'p1' at the *second* page of the function: */
6227     p1 += PAGE_SIZE;
6228    
6229     + /*
6230     + * Try to ensure we fault this in on next touch to ensure
6231     + * we get an instruction fault as opposed to a data one
6232     + */
6233     madvise(p1, PAGE_SIZE, MADV_DONTNEED);
6234     +
6235     + return p1;
6236     +}
6237     +
6238     +void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
6239     +{
6240     + void *p1;
6241     + int scratch;
6242     + int ptr_contents;
6243     + int ret;
6244     +
6245     + p1 = get_pointer_to_instructions();
6246     lots_o_noops_around_write(&scratch);
6247     ptr_contents = read_ptr(p1);
6248     dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
6249     @@ -1272,12 +1333,55 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
6250     */
6251     madvise(p1, PAGE_SIZE, MADV_DONTNEED);
6252     lots_o_noops_around_write(&scratch);
6253     - do_not_expect_pk_fault();
6254     + do_not_expect_pk_fault("executing on PROT_EXEC memory");
6255     ptr_contents = read_ptr(p1);
6256     dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
6257     expected_pk_fault(pkey);
6258     }
6259    
6260     +void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
6261     +{
6262     + void *p1;
6263     + int scratch;
6264     + int ptr_contents;
6265     + int ret;
6266     +
6267     + dprintf1("%s() start\n", __func__);
6268     +
6269     + p1 = get_pointer_to_instructions();
6270     + lots_o_noops_around_write(&scratch);
6271     + ptr_contents = read_ptr(p1);
6272     + dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
6273     +
6274     + /* Use a *normal* mprotect(), not mprotect_pkey(): */
6275     + ret = mprotect(p1, PAGE_SIZE, PROT_EXEC);
6276     + pkey_assert(!ret);
6277     +
6278     + dprintf2("pkru: %x\n", rdpkru());
6279     +
6280     + /* Make sure this is an *instruction* fault */
6281     + madvise(p1, PAGE_SIZE, MADV_DONTNEED);
6282     + lots_o_noops_around_write(&scratch);
6283     + do_not_expect_pk_fault("executing on PROT_EXEC memory");
6284     + ptr_contents = read_ptr(p1);
6285     + dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
6286     + expected_pk_fault(UNKNOWN_PKEY);
6287     +
6288     + /*
6289     + * Put the memory back to non-PROT_EXEC. Should clear the
6290     + * exec-only pkey off the VMA and allow it to be readable
6291     + * again. Go to PROT_NONE first to check for a kernel bug
6292     + * that did not clear the pkey when doing PROT_NONE.
6293     + */
6294     + ret = mprotect(p1, PAGE_SIZE, PROT_NONE);
6295     + pkey_assert(!ret);
6296     +
6297     + ret = mprotect(p1, PAGE_SIZE, PROT_READ|PROT_EXEC);
6298     + pkey_assert(!ret);
6299     + ptr_contents = read_ptr(p1);
6300     + do_not_expect_pk_fault("plain read on recently PROT_EXEC area");
6301     +}
6302     +
6303     void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey)
6304     {
6305     int size = PAGE_SIZE;
6306     @@ -1302,6 +1406,8 @@ void (*pkey_tests[])(int *ptr, u16 pkey) = {
6307     test_kernel_gup_of_access_disabled_region,
6308     test_kernel_gup_write_to_write_disabled_region,
6309     test_executing_on_unreadable_memory,
6310     + test_implicit_mprotect_exec_only_memory,
6311     + test_mprotect_with_pkey_0,
6312     test_ptrace_of_child,
6313     test_pkey_syscalls_on_non_allocated_pkey,
6314     test_pkey_syscalls_bad_args,
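
The syscall numbers that protection_keys.c defines above are enough to drive memory protection keys without any libc wrapper. A minimal user-space sketch (not part of the patch; the file name, error handling and the PKEY_DISABLE_WRITE_BIT name are illustrative, the guarded numbers are the x86_64 ones used above) that allocates a write-disabled key and attaches it to a mapping:

/* pkey_sketch.c -- illustrative only, not part of the patch. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

/* x86_64 syscall numbers, as in the test above; newer headers define them. */
#ifndef SYS_pkey_mprotect
# define SYS_pkey_mprotect 329
#endif
#ifndef SYS_pkey_alloc
# define SYS_pkey_alloc    330
# define SYS_pkey_free     331
#endif

#define PKEY_DISABLE_WRITE_BIT 0x2	/* same value as PKEY_DISABLE_WRITE */

int main(void)
{
	long pkey;
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* Allocate a key whose initial rights disable writes... */
	pkey = syscall(SYS_pkey_alloc, 0, PKEY_DISABLE_WRITE_BIT);
	if (pkey < 0) {
		perror("pkey_alloc");	/* no pkeys on this CPU/kernel */
		return 1;
	}

	/* ...and tag the mapping with it: reads still work, writes now fault. */
	if (syscall(SYS_pkey_mprotect, p, 4096, PROT_READ | PROT_WRITE, pkey))
		perror("pkey_mprotect");

	printf("pkey %ld attached to %p\n", pkey, p);

	syscall(SYS_pkey_free, pkey);
	return 0;
}

The test's hw_pkey_get()/hw_pkey_set() helpers then flip the same PKEY_DISABLE_* bits directly in PKRU with RDPKRU/WRPKRU rather than going through a syscall, which is why the rename away from the pkey_get()/pkey_set() names matters.
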
6315     diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
6316     index b3d4a10f09a1..af003268bf3e 100644
6317     --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
6318     +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
6319     @@ -14,6 +14,8 @@
6320     #include <linux/irqchip/arm-gic.h>
6321     #include <linux/kvm.h>
6322     #include <linux/kvm_host.h>
6323     +#include <linux/nospec.h>
6324     +
6325     #include <kvm/iodev.h>
6326     #include <kvm/arm_vgic.h>
6327    
6328     @@ -320,6 +322,9 @@ static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
6329    
6330     if (n > vgic_v3_max_apr_idx(vcpu))
6331     return 0;
6332     +
6333     + n = array_index_nospec(n, 4);
6334     +
6335     /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
6336     return vgicv3->vgic_ap1r[n];
6337     }
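
The vgic hunk above is Spectre-v1 hardening: even though n was just bounds-checked, the CPU can still speculate past the check, so array_index_nospec() from <linux/nospec.h> clamps the index with a branch-free mask before it is used to index vgic_ap1r[]. A rough user-space sketch of the same masking idea (illustrative only, not the kernel code; the real macro adds per-arch implementations and compiler barriers, and this relies on arithmetic right shift as the generic fallback does):

/* nospec_sketch.c -- illustrative only, not part of the patch. */
#include <stdio.h>

/*
 * Branch-free mask: ~0UL when idx < size, 0 otherwise. Mirrors the
 * generic array_index_mask_nospec() fallback in include/linux/nospec.h.
 */
static unsigned long index_mask(unsigned long idx, unsigned long size)
{
	return ~(long)(idx | (size - 1 - idx)) >> (sizeof(long) * 8 - 1);
}

static unsigned long ap1r[4] = { 0x11, 0x22, 0x33, 0x44 };

static unsigned long read_apr(unsigned long n)
{
	if (n > 3)
		return 0;
	n &= index_mask(n, 4);	/* clamp n even if the check is bypassed speculatively */
	return ap1r[n];
}

int main(void)
{
	printf("%lx %lx\n", read_apr(2), read_apr(7));	/* prints: 33 0 */
	return 0;
}
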