Magellan Linux

Annotation of /trunk/kernel-magellan/patches-5.0/0107-5.0.8-all-fixes.patch

Revision 3334
Fri Apr 26 12:20:37 2019 UTC by niro
File size: 171281 bytes
-linux-5.0.8
1 niro 3334 diff --git a/Makefile b/Makefile
2     index af99c77c7066..f7666051de66 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 0
9     -SUBLEVEL = 7
10     +SUBLEVEL = 8
11     EXTRAVERSION =
12     NAME = Shy Crocodile
13    
14     @@ -510,7 +510,7 @@ endif
15     ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
16     ifneq ($(CROSS_COMPILE),)
17     CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
18     -GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
19     +GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
20     CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
21     GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
22     endif
23     diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
24     index dce5be5df97b..edcff79879e7 100644
25     --- a/arch/arm/boot/dts/am335x-evm.dts
26     +++ b/arch/arm/boot/dts/am335x-evm.dts
27     @@ -57,6 +57,24 @@
28     enable-active-high;
29     };
30    
31     + /* TPS79501 */
32     + v1_8d_reg: fixedregulator-v1_8d {
33     + compatible = "regulator-fixed";
34     + regulator-name = "v1_8d";
35     + vin-supply = <&vbat>;
36     + regulator-min-microvolt = <1800000>;
37     + regulator-max-microvolt = <1800000>;
38     + };
39     +
40     + /* TPS79501 */
41     + v3_3d_reg: fixedregulator-v3_3d {
42     + compatible = "regulator-fixed";
43     + regulator-name = "v3_3d";
44     + vin-supply = <&vbat>;
45     + regulator-min-microvolt = <3300000>;
46     + regulator-max-microvolt = <3300000>;
47     + };
48     +
49     matrix_keypad: matrix_keypad0 {
50     compatible = "gpio-matrix-keypad";
51     debounce-delay-ms = <5>;
52     @@ -499,10 +517,10 @@
53     status = "okay";
54    
55     /* Regulators */
56     - AVDD-supply = <&vaux2_reg>;
57     - IOVDD-supply = <&vaux2_reg>;
58     - DRVDD-supply = <&vaux2_reg>;
59     - DVDD-supply = <&vbat>;
60     + AVDD-supply = <&v3_3d_reg>;
61     + IOVDD-supply = <&v3_3d_reg>;
62     + DRVDD-supply = <&v3_3d_reg>;
63     + DVDD-supply = <&v1_8d_reg>;
64     };
65     };
66    
67     diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
68     index b128998097ce..2c2d8b5b8cf5 100644
69     --- a/arch/arm/boot/dts/am335x-evmsk.dts
70     +++ b/arch/arm/boot/dts/am335x-evmsk.dts
71     @@ -73,6 +73,24 @@
72     enable-active-high;
73     };
74    
75     + /* TPS79518 */
76     + v1_8d_reg: fixedregulator-v1_8d {
77     + compatible = "regulator-fixed";
78     + regulator-name = "v1_8d";
79     + vin-supply = <&vbat>;
80     + regulator-min-microvolt = <1800000>;
81     + regulator-max-microvolt = <1800000>;
82     + };
83     +
84     + /* TPS78633 */
85     + v3_3d_reg: fixedregulator-v3_3d {
86     + compatible = "regulator-fixed";
87     + regulator-name = "v3_3d";
88     + vin-supply = <&vbat>;
89     + regulator-min-microvolt = <3300000>;
90     + regulator-max-microvolt = <3300000>;
91     + };
92     +
93     leds {
94     pinctrl-names = "default";
95     pinctrl-0 = <&user_leds_s0>;
96     @@ -501,10 +519,10 @@
97     status = "okay";
98    
99     /* Regulators */
100     - AVDD-supply = <&vaux2_reg>;
101     - IOVDD-supply = <&vaux2_reg>;
102     - DRVDD-supply = <&vaux2_reg>;
103     - DVDD-supply = <&vbat>;
104     + AVDD-supply = <&v3_3d_reg>;
105     + IOVDD-supply = <&v3_3d_reg>;
106     + DRVDD-supply = <&v3_3d_reg>;
107     + DVDD-supply = <&v1_8d_reg>;
108     };
109     };
110    
111     diff --git a/arch/arm/boot/dts/rk3288-tinker.dtsi b/arch/arm/boot/dts/rk3288-tinker.dtsi
112     index aa107ee41b8b..ef653c3209bc 100644
113     --- a/arch/arm/boot/dts/rk3288-tinker.dtsi
114     +++ b/arch/arm/boot/dts/rk3288-tinker.dtsi
115     @@ -254,6 +254,7 @@
116     };
117    
118     vccio_sd: LDO_REG5 {
119     + regulator-boot-on;
120     regulator-min-microvolt = <1800000>;
121     regulator-max-microvolt = <3300000>;
122     regulator-name = "vccio_sd";
123     @@ -430,7 +431,7 @@
124     bus-width = <4>;
125     cap-mmc-highspeed;
126     cap-sd-highspeed;
127     - card-detect-delay = <200>;
128     + broken-cd;
129     disable-wp; /* wp not hooked up */
130     pinctrl-names = "default";
131     pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
132     diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
133     index ca7d52daa8fb..09868dcee34b 100644
134     --- a/arch/arm/boot/dts/rk3288.dtsi
135     +++ b/arch/arm/boot/dts/rk3288.dtsi
136     @@ -70,7 +70,7 @@
137     compatible = "arm,cortex-a12";
138     reg = <0x501>;
139     resets = <&cru SRST_CORE1>;
140     - operating-points = <&cpu_opp_table>;
141     + operating-points-v2 = <&cpu_opp_table>;
142     #cooling-cells = <2>; /* min followed by max */
143     clock-latency = <40000>;
144     clocks = <&cru ARMCLK>;
145     @@ -80,7 +80,7 @@
146     compatible = "arm,cortex-a12";
147     reg = <0x502>;
148     resets = <&cru SRST_CORE2>;
149     - operating-points = <&cpu_opp_table>;
150     + operating-points-v2 = <&cpu_opp_table>;
151     #cooling-cells = <2>; /* min followed by max */
152     clock-latency = <40000>;
153     clocks = <&cru ARMCLK>;
154     @@ -90,7 +90,7 @@
155     compatible = "arm,cortex-a12";
156     reg = <0x503>;
157     resets = <&cru SRST_CORE3>;
158     - operating-points = <&cpu_opp_table>;
159     + operating-points-v2 = <&cpu_opp_table>;
160     #cooling-cells = <2>; /* min followed by max */
161     clock-latency = <40000>;
162     clocks = <&cru ARMCLK>;
163     diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
164     index 1c01a6f843d8..28a2e45752fe 100644
165     --- a/arch/arm/boot/dts/sama5d2-pinfunc.h
166     +++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
167     @@ -518,7 +518,7 @@
168     #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
169     #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3)
170     #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1)
171     -#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1)
172     +#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1)
173     #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2)
174     #define PIN_PC10 74
175     #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)
176     diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
177     index c4c0a8ea11e4..ee410ae7369e 100644
178     --- a/arch/arm/mach-omap1/board-ams-delta.c
179     +++ b/arch/arm/mach-omap1/board-ams-delta.c
180     @@ -182,6 +182,7 @@ static struct resource latch1_resources[] = {
181    
182     static struct bgpio_pdata latch1_pdata = {
183     .label = LATCH1_LABEL,
184     + .base = -1,
185     .ngpio = LATCH1_NGPIO,
186     };
187    
188     @@ -219,6 +220,7 @@ static struct resource latch2_resources[] = {
189    
190     static struct bgpio_pdata latch2_pdata = {
191     .label = LATCH2_LABEL,
192     + .base = -1,
193     .ngpio = LATCH2_NGPIO,
194     };
195    
196     diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
197     index 040b36ef0dd2..520ed8e474be 100644
198     --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
199     +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
200     @@ -46,8 +46,7 @@
201    
202     vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
203     compatible = "regulator-fixed";
204     - enable-active-high;
205     - gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
206     + gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
207     pinctrl-names = "default";
208     pinctrl-0 = <&usb20_host_drv>;
209     regulator-name = "vcc_host1_5v";
210     diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
211     index ecd7f19c3542..97aa65455b4a 100644
212     --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
213     +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
214     @@ -1431,11 +1431,11 @@
215    
216     sdmmc0 {
217     sdmmc0_clk: sdmmc0-clk {
218     - rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
219     + rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
220     };
221    
222     sdmmc0_cmd: sdmmc0-cmd {
223     - rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
224     + rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
225     };
226    
227     sdmmc0_dectn: sdmmc0-dectn {
228     @@ -1447,14 +1447,14 @@
229     };
230    
231     sdmmc0_bus1: sdmmc0-bus1 {
232     - rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
233     + rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
234     };
235    
236     sdmmc0_bus4: sdmmc0-bus4 {
237     - rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
238     - <1 RK_PA1 1 &pcfg_pull_up_4ma>,
239     - <1 RK_PA2 1 &pcfg_pull_up_4ma>,
240     - <1 RK_PA3 1 &pcfg_pull_up_4ma>;
241     + rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
242     + <1 RK_PA1 1 &pcfg_pull_up_8ma>,
243     + <1 RK_PA2 1 &pcfg_pull_up_8ma>,
244     + <1 RK_PA3 1 &pcfg_pull_up_8ma>;
245     };
246    
247     sdmmc0_gpio: sdmmc0-gpio {
248     @@ -1628,50 +1628,50 @@
249     rgmiim1_pins: rgmiim1-pins {
250     rockchip,pins =
251     /* mac_txclk */
252     - <1 RK_PB4 2 &pcfg_pull_none_12ma>,
253     + <1 RK_PB4 2 &pcfg_pull_none_8ma>,
254     /* mac_rxclk */
255     - <1 RK_PB5 2 &pcfg_pull_none_2ma>,
256     + <1 RK_PB5 2 &pcfg_pull_none_4ma>,
257     /* mac_mdio */
258     - <1 RK_PC3 2 &pcfg_pull_none_2ma>,
259     + <1 RK_PC3 2 &pcfg_pull_none_4ma>,
260     /* mac_txen */
261     - <1 RK_PD1 2 &pcfg_pull_none_12ma>,
262     + <1 RK_PD1 2 &pcfg_pull_none_8ma>,
263     /* mac_clk */
264     - <1 RK_PC5 2 &pcfg_pull_none_2ma>,
265     + <1 RK_PC5 2 &pcfg_pull_none_4ma>,
266     /* mac_rxdv */
267     - <1 RK_PC6 2 &pcfg_pull_none_2ma>,
268     + <1 RK_PC6 2 &pcfg_pull_none_4ma>,
269     /* mac_mdc */
270     - <1 RK_PC7 2 &pcfg_pull_none_2ma>,
271     + <1 RK_PC7 2 &pcfg_pull_none_4ma>,
272     /* mac_rxd1 */
273     - <1 RK_PB2 2 &pcfg_pull_none_2ma>,
274     + <1 RK_PB2 2 &pcfg_pull_none_4ma>,
275     /* mac_rxd0 */
276     - <1 RK_PB3 2 &pcfg_pull_none_2ma>,
277     + <1 RK_PB3 2 &pcfg_pull_none_4ma>,
278     /* mac_txd1 */
279     - <1 RK_PB0 2 &pcfg_pull_none_12ma>,
280     + <1 RK_PB0 2 &pcfg_pull_none_8ma>,
281     /* mac_txd0 */
282     - <1 RK_PB1 2 &pcfg_pull_none_12ma>,
283     + <1 RK_PB1 2 &pcfg_pull_none_8ma>,
284     /* mac_rxd3 */
285     - <1 RK_PB6 2 &pcfg_pull_none_2ma>,
286     + <1 RK_PB6 2 &pcfg_pull_none_4ma>,
287     /* mac_rxd2 */
288     - <1 RK_PB7 2 &pcfg_pull_none_2ma>,
289     + <1 RK_PB7 2 &pcfg_pull_none_4ma>,
290     /* mac_txd3 */
291     - <1 RK_PC0 2 &pcfg_pull_none_12ma>,
292     + <1 RK_PC0 2 &pcfg_pull_none_8ma>,
293     /* mac_txd2 */
294     - <1 RK_PC1 2 &pcfg_pull_none_12ma>,
295     + <1 RK_PC1 2 &pcfg_pull_none_8ma>,
296    
297     /* mac_txclk */
298     - <0 RK_PB0 1 &pcfg_pull_none>,
299     + <0 RK_PB0 1 &pcfg_pull_none_8ma>,
300     /* mac_txen */
301     - <0 RK_PB4 1 &pcfg_pull_none>,
302     + <0 RK_PB4 1 &pcfg_pull_none_8ma>,
303     /* mac_clk */
304     - <0 RK_PD0 1 &pcfg_pull_none>,
305     + <0 RK_PD0 1 &pcfg_pull_none_4ma>,
306     /* mac_txd1 */
307     - <0 RK_PC0 1 &pcfg_pull_none>,
308     + <0 RK_PC0 1 &pcfg_pull_none_8ma>,
309     /* mac_txd0 */
310     - <0 RK_PC1 1 &pcfg_pull_none>,
311     + <0 RK_PC1 1 &pcfg_pull_none_8ma>,
312     /* mac_txd3 */
313     - <0 RK_PC7 1 &pcfg_pull_none>,
314     + <0 RK_PC7 1 &pcfg_pull_none_8ma>,
315     /* mac_txd2 */
316     - <0 RK_PC6 1 &pcfg_pull_none>;
317     + <0 RK_PC6 1 &pcfg_pull_none_8ma>;
318     };
319    
320     rmiim1_pins: rmiim1-pins {
321     diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
322     index cccb83ad7fa8..e1d95f08f8e1 100644
323     --- a/arch/arm64/include/asm/futex.h
324     +++ b/arch/arm64/include/asm/futex.h
325     @@ -30,8 +30,8 @@ do { \
326     " prfm pstl1strm, %2\n" \
327     "1: ldxr %w1, %2\n" \
328     insn "\n" \
329     -"2: stlxr %w3, %w0, %2\n" \
330     -" cbnz %w3, 1b\n" \
331     +"2: stlxr %w0, %w3, %2\n" \
332     +" cbnz %w0, 1b\n" \
333     " dmb ish\n" \
334     "3:\n" \
335     " .pushsection .fixup,\"ax\"\n" \
336     @@ -50,30 +50,30 @@ do { \
337     static inline int
338     arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
339     {
340     - int oldval = 0, ret, tmp;
341     + int oldval, ret, tmp;
342     u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
343    
344     pagefault_disable();
345    
346     switch (op) {
347     case FUTEX_OP_SET:
348     - __futex_atomic_op("mov %w0, %w4",
349     + __futex_atomic_op("mov %w3, %w4",
350     ret, oldval, uaddr, tmp, oparg);
351     break;
352     case FUTEX_OP_ADD:
353     - __futex_atomic_op("add %w0, %w1, %w4",
354     + __futex_atomic_op("add %w3, %w1, %w4",
355     ret, oldval, uaddr, tmp, oparg);
356     break;
357     case FUTEX_OP_OR:
358     - __futex_atomic_op("orr %w0, %w1, %w4",
359     + __futex_atomic_op("orr %w3, %w1, %w4",
360     ret, oldval, uaddr, tmp, oparg);
361     break;
362     case FUTEX_OP_ANDN:
363     - __futex_atomic_op("and %w0, %w1, %w4",
364     + __futex_atomic_op("and %w3, %w1, %w4",
365     ret, oldval, uaddr, tmp, ~oparg);
366     break;
367     case FUTEX_OP_XOR:
368     - __futex_atomic_op("eor %w0, %w1, %w4",
369     + __futex_atomic_op("eor %w3, %w1, %w4",
370     ret, oldval, uaddr, tmp, oparg);
371     break;
372     default:
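
The futex hunks above fix the operand allocation in the arm64 atomics: the old template used %w0 (the ret variable) as the destination of the arithmetic instruction, so a successful FUTEX_OP "returned" the computed value instead of 0; the new template computes into the dedicated scratch %w3 (tmp) and keeps %w0 for the stlxr status, which is 0 on success. A portable C analogy of the bug, with hypothetical names (a sketch, not the kernel code):

    #include <stdio.h>

    /* futex_add_sketch: analogy only. "broken" mirrors the old asm,
     * where the error-return variable doubled as the scratch register
     * holding oldval + oparg; "fixed" uses a dedicated temporary. */
    static int futex_add_sketch(int *uaddr, int oparg, int *oldval,
                                int broken)
    {
            int ret = 0, tmp;

            *oldval = *uaddr;
            if (broken) {
                    ret = *oldval + oparg;  /* clobbers the return code */
                    *uaddr = ret;
            } else {
                    tmp = *oldval + oparg;  /* scratch, like %w3 above */
                    *uaddr = tmp;
            }
            return ret;                     /* must be 0 on success */
    }

    int main(void)
    {
            int word = 5, old;

            printf("broken: %d\n", futex_add_sketch(&word, 3, &old, 1)); /* 8 */
            word = 5;
            printf("fixed:  %d\n", futex_add_sketch(&word, 3, &old, 0)); /* 0 */
            return 0;
    }
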
373     diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
374     index 905e1bb0e7bd..cd9f4e9d04d3 100644
375     --- a/arch/arm64/include/asm/module.h
376     +++ b/arch/arm64/include/asm/module.h
377     @@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
378     struct plt_entry get_plt_entry(u64 dst, void *pc);
379     bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
380    
381     +static inline bool plt_entry_is_initialized(const struct plt_entry *e)
382     +{
383     + return e->adrp || e->add || e->br;
384     +}
385     +
386     #endif /* __ASM_MODULE_H */
387     diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
388     index 8e4431a8821f..07b298120182 100644
389     --- a/arch/arm64/kernel/ftrace.c
390     +++ b/arch/arm64/kernel/ftrace.c
391     @@ -107,8 +107,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
392     trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
393     if (!plt_entries_equal(mod->arch.ftrace_trampoline,
394     &trampoline)) {
395     - if (!plt_entries_equal(mod->arch.ftrace_trampoline,
396     - &(struct plt_entry){})) {
397     + if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
398     pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
399     return -EINVAL;
400     }
401     diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
402     index 4e2fb877f8d5..92bfeb3e8d7c 100644
403     --- a/arch/arm64/kernel/traps.c
404     +++ b/arch/arm64/kernel/traps.c
405     @@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
406     void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
407     {
408     struct stackframe frame;
409     - int skip;
410     + int skip = 0;
411    
412     pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
413    
414     + if (regs) {
415     + if (user_mode(regs))
416     + return;
417     + skip = 1;
418     + }
419     +
420     if (!tsk)
421     tsk = current;
422    
423     @@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
424     frame.graph = 0;
425     #endif
426    
427     - skip = !!regs;
428     printk("Call trace:\n");
429     do {
430     /* skip until specified stack frame */
431     @@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
432     return ret;
433    
434     print_modules();
435     - __show_regs(regs);
436     pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
437     TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
438     end_of_stack(tsk));
439     + show_regs(regs);
440    
441     - if (!user_mode(regs)) {
442     - dump_backtrace(regs, tsk);
443     + if (!user_mode(regs))
444     dump_instr(KERN_EMERG, regs);
445     - }
446    
447     return ret;
448     }
449     diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
450     index d637445737b7..9a9cd81e66c1 100644
451     --- a/arch/csky/include/asm/syscall.h
452     +++ b/arch/csky/include/asm/syscall.h
453     @@ -49,10 +49,11 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
454     if (i == 0) {
455     args[0] = regs->orig_a0;
456     args++;
457     - i++;
458     n--;
459     + } else {
460     + i--;
461     }
462     - memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
463     + memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
464     }
465    
466     static inline void
467     @@ -63,10 +64,11 @@ syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
468     if (i == 0) {
469     regs->orig_a0 = args[0];
470     args++;
471     - i++;
472     n--;
473     + } else {
474     + i--;
475     }
476     - memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
477     + memcpy(&regs->a1 + i, args, n * sizeof(regs->a1));
478     }
479    
480     static inline int
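
The csky hunks above (and the identical riscv hunks further down) fix double-scaled pointer arithmetic: &regs->a1 is an unsigned long *, so adding i already advances i elements, and the extra "* sizeof(regs->a1)" overshot by a factor of eight on 64-bit. A minimal demonstration with a hypothetical register layout:

    #include <stdio.h>

    struct regs_sketch { unsigned long a0, a1, a2, a3; };

    int main(void)
    {
            struct regs_sketch regs = { 0, 11, 22, 33 };
            int i = 1;

            /* pointer arithmetic scales by the element size already */
            printf("correct: %lu\n", *(&regs.a1 + i));      /* 22 (a2) */

            /* the old code computed &regs.a1 + i * sizeof(regs.a1),
             * i.e. 8 elements past a1 on 64-bit - out of bounds */
            return 0;
    }
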
481     diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
482     index 2a27b275ab09..9ff033d261ab 100644
483     --- a/arch/parisc/include/asm/ptrace.h
484     +++ b/arch/parisc/include/asm/ptrace.h
485     @@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
486    
487     static inline unsigned long regs_return_value(struct pt_regs *regs)
488     {
489     - return regs->gr[20];
490     + return regs->gr[28];
491     }
492    
493     static inline void instruction_pointer_set(struct pt_regs *regs,
494     unsigned long val)
495     {
496     - regs->iaoq[0] = val;
497     + regs->iaoq[0] = val;
498     + regs->iaoq[1] = val + 4;
499     }
500    
501     /* Query offset/name of register from its name/offset */
502     diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
503     index eb39e7e380d7..841db71958cd 100644
504     --- a/arch/parisc/kernel/process.c
505     +++ b/arch/parisc/kernel/process.c
506     @@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
507    
508     static int __init parisc_idle_init(void)
509     {
510     - const char *marker;
511     -
512     - /* check QEMU/SeaBIOS marker in PAGE0 */
513     - marker = (char *) &PAGE0->pad0;
514     - running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
515     -
516     if (!running_on_qemu)
517     cpu_idle_poll_ctrl(1);
518    
519     diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
520     index f2cf86ac279b..25946624ce6a 100644
521     --- a/arch/parisc/kernel/setup.c
522     +++ b/arch/parisc/kernel/setup.c
523     @@ -396,6 +396,9 @@ void __init start_parisc(void)
524     int ret, cpunum;
525     struct pdc_coproc_cfg coproc_cfg;
526    
527     + /* check QEMU/SeaBIOS marker in PAGE0 */
528     + running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
529     +
530     cpunum = smp_processor_id();
531    
532     init_cpu_topology();
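
The two parisc hunks above move the QEMU/SeaBIOS detection from a late initcall into start_parisc(), so running_on_qemu is valid before any early code consults it. A tiny sketch of the ordering idea (hypothetical names):

    #include <stdio.h>
    #include <string.h>

    static int running_on_qemu;

    static void start_kernel_sketch(const char *page0_pad)
    {
            /* detect the firmware marker first ... */
            running_on_qemu = memcmp(page0_pad, "SeaBIOS", 8) == 0;
            /* ... so every later user, e.g. idle setup, sees it set */
    }

    int main(void)
    {
            start_kernel_sketch("SeaBIOS");
            printf("running_on_qemu = %d\n", running_on_qemu);
            return 0;
    }
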
533     diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
534     index 9e253ce27e08..4fee6c9887db 100644
535     --- a/arch/powerpc/kernel/exceptions-64s.S
536     +++ b/arch/powerpc/kernel/exceptions-64s.S
537     @@ -612,11 +612,17 @@ EXC_COMMON_BEGIN(data_access_slb_common)
538     ld r4,PACA_EXSLB+EX_DAR(r13)
539     std r4,_DAR(r1)
540     addi r3,r1,STACK_FRAME_OVERHEAD
541     +BEGIN_MMU_FTR_SECTION
542     + /* HPT case, do SLB fault */
543     bl do_slb_fault
544     cmpdi r3,0
545     bne- 1f
546     b fast_exception_return
547     1: /* Error case */
548     +MMU_FTR_SECTION_ELSE
549     + /* Radix case, access is outside page table range */
550     + li r3,-EFAULT
551     +ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
552     std r3,RESULT(r1)
553     bl save_nvgprs
554     RECONCILE_IRQ_STATE(r10, r11)
555     @@ -661,11 +667,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
556     EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
557     ld r4,_NIP(r1)
558     addi r3,r1,STACK_FRAME_OVERHEAD
559     +BEGIN_MMU_FTR_SECTION
560     + /* HPT case, do SLB fault */
561     bl do_slb_fault
562     cmpdi r3,0
563     bne- 1f
564     b fast_exception_return
565     1: /* Error case */
566     +MMU_FTR_SECTION_ELSE
567     + /* Radix case, access is outside page table range */
568     + li r3,-EFAULT
569     +ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
570     std r3,RESULT(r1)
571     bl save_nvgprs
572     RECONCILE_IRQ_STATE(r10, r11)
573     diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
574     index bba3da6ef157..6ea9e1804233 100644
575     --- a/arch/riscv/include/asm/syscall.h
576     +++ b/arch/riscv/include/asm/syscall.h
577     @@ -79,10 +79,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
578     if (i == 0) {
579     args[0] = regs->orig_a0;
580     args++;
581     - i++;
582     n--;
583     + } else {
584     + i--;
585     }
586     - memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
587     + memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
588     }
589    
590     static inline void syscall_set_arguments(struct task_struct *task,
591     @@ -94,10 +95,11 @@ static inline void syscall_set_arguments(struct task_struct *task,
592     if (i == 0) {
593     regs->orig_a0 = args[0];
594     args++;
595     - i++;
596     n--;
597     - }
598     - memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
599     + } else {
600     + i--;
601     + }
602     + memcpy(&regs->a1 + i, args, n * sizeof(regs->a1));
603     }
604    
605     static inline int syscall_get_arch(void)
606     diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
607     index 7d2d7c801dba..0ecfac84ba91 100644
608     --- a/arch/x86/events/amd/core.c
609     +++ b/arch/x86/events/amd/core.c
610     @@ -3,10 +3,14 @@
611     #include <linux/types.h>
612     #include <linux/init.h>
613     #include <linux/slab.h>
614     +#include <linux/delay.h>
615     #include <asm/apicdef.h>
616     +#include <asm/nmi.h>
617    
618     #include "../perf_event.h"
619    
620     +static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
621     +
622     static __initconst const u64 amd_hw_cache_event_ids
623     [PERF_COUNT_HW_CACHE_MAX]
624     [PERF_COUNT_HW_CACHE_OP_MAX]
625     @@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu)
626     }
627     }
628    
629     +/*
630     + * When a PMC counter overflows, an NMI is used to process the event and
631     + * reset the counter. NMI latency can result in the counter being updated
632     + * before the NMI can run, which can result in what appear to be spurious
633     + * NMIs. This function is intended to wait for the NMI to run and reset
634     + * the counter to avoid possible unhandled NMI messages.
635     + */
636     +#define OVERFLOW_WAIT_COUNT 50
637     +
638     +static void amd_pmu_wait_on_overflow(int idx)
639     +{
640     + unsigned int i;
641     + u64 counter;
642     +
643     + /*
644     + * Wait for the counter to be reset if it has overflowed. This loop
645     + * should exit very, very quickly, but just in case, don't wait
646     + * forever...
647     + */
648     + for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
649     + rdmsrl(x86_pmu_event_addr(idx), counter);
650     + if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
651     + break;
652     +
653     + /* Might be in IRQ context, so can't sleep */
654     + udelay(1);
655     + }
656     +}
657     +
658     +static void amd_pmu_disable_all(void)
659     +{
660     + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
661     + int idx;
662     +
663     + x86_pmu_disable_all();
664     +
665     + /*
666     + * This shouldn't be called from NMI context, but add a safeguard here
667     + * to return, since if we're in NMI context we can't wait for an NMI
668     + * to reset an overflowed counter value.
669     + */
670     + if (in_nmi())
671     + return;
672     +
673     + /*
674     + * Check each counter for overflow and wait for it to be reset by the
675     + * NMI if it has overflowed. This relies on the fact that all active
676     + * counters are always enabled when this function is called and
677     + * ARCH_PERFMON_EVENTSEL_INT is always set.
678     + */
679     + for (idx = 0; idx < x86_pmu.num_counters; idx++) {
680     + if (!test_bit(idx, cpuc->active_mask))
681     + continue;
682     +
683     + amd_pmu_wait_on_overflow(idx);
684     + }
685     +}
686     +
687     +static void amd_pmu_disable_event(struct perf_event *event)
688     +{
689     + x86_pmu_disable_event(event);
690     +
691     + /*
692     + * This can be called from NMI context (via x86_pmu_stop). The counter
693     + * may have overflowed, but either way, we'll never see it get reset
694     + * by the NMI if we're already in the NMI. And the NMI latency support
695     + * below will take care of any pending NMI that might have been
696     + * generated by the overflow.
697     + */
698     + if (in_nmi())
699     + return;
700     +
701     + amd_pmu_wait_on_overflow(event->hw.idx);
702     +}
703     +
704     +/*
705     + * Because of NMI latency, if multiple PMC counters are active or other sources
706     + * of NMIs are received, the perf NMI handler can handle one or more overflowed
707     + * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
708     + * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
709     + * back-to-back NMI support won't be active. This PMC handler needs to take into
710     + * account that this can occur, otherwise this could result in unknown NMI
711     + * messages being issued. Examples of this are PMC overflow while in the NMI
712     + * handler when multiple PMCs are active or PMC overflow while handling some
713     + * other source of an NMI.
714     + *
715     + * Attempt to mitigate this by using the number of active PMCs to determine
716     + * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
717     + * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
718     + * number of active PMCs or 2. The value of 2 is used in case an NMI does not
719     + * arrive at the LAPIC in time to be collapsed into an already pending NMI.
720     + */
721     +static int amd_pmu_handle_irq(struct pt_regs *regs)
722     +{
723     + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
724     + int active, handled;
725     +
726     + /*
727     + * Obtain the active count before calling x86_pmu_handle_irq() since
728     + * it is possible that x86_pmu_handle_irq() may make a counter
729     + * inactive (through x86_pmu_stop).
730     + */
731     + active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
732     +
733     + /* Process any counter overflows */
734     + handled = x86_pmu_handle_irq(regs);
735     +
736     + /*
737     + * If a counter was handled, record the number of possible remaining
738     + * NMIs that can occur.
739     + */
740     + if (handled) {
741     + this_cpu_write(perf_nmi_counter,
742     + min_t(unsigned int, 2, active));
743     +
744     + return handled;
745     + }
746     +
747     + if (!this_cpu_read(perf_nmi_counter))
748     + return NMI_DONE;
749     +
750     + this_cpu_dec(perf_nmi_counter);
751     +
752     + return NMI_HANDLED;
753     +}
754     +
755     static struct event_constraint *
756     amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
757     struct perf_event *event)
758     @@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
759    
760     static __initconst const struct x86_pmu amd_pmu = {
761     .name = "AMD",
762     - .handle_irq = x86_pmu_handle_irq,
763     - .disable_all = x86_pmu_disable_all,
764     + .handle_irq = amd_pmu_handle_irq,
765     + .disable_all = amd_pmu_disable_all,
766     .enable_all = x86_pmu_enable_all,
767     .enable = x86_pmu_enable_event,
768     - .disable = x86_pmu_disable_event,
769     + .disable = amd_pmu_disable_event,
770     .hw_config = amd_pmu_hw_config,
771     .schedule_events = x86_schedule_events,
772     .eventsel = MSR_K7_EVNTSEL0,
773     @@ -732,7 +862,7 @@ void amd_pmu_enable_virt(void)
774     cpuc->perf_ctr_virt_mask = 0;
775    
776     /* Reload all events */
777     - x86_pmu_disable_all();
778     + amd_pmu_disable_all();
779     x86_pmu_enable_all(0);
780     }
781     EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
782     @@ -750,7 +880,7 @@ void amd_pmu_disable_virt(void)
783     cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
784    
785     /* Reload all events */
786     - x86_pmu_disable_all();
787     + amd_pmu_disable_all();
788     x86_pmu_enable_all(0);
789     }
790     EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
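
The comments in the hunk above explain the mitigation: after the handler resets at least one counter, up to min(active, 2) subsequent NMIs that find nothing to do are still claimed instead of being reported as unknown NMIs. A userspace sketch of that accounting (hypothetical, simplified; no per-CPU state):

    #include <stdio.h>

    enum { NMI_DONE, NMI_HANDLED };

    static unsigned int perf_nmi_counter;

    static int handle_nmi_sketch(unsigned int active, int handled)
    {
            if (handled) {
                    /* arm absorption for possible late PMC NMIs */
                    perf_nmi_counter = active < 2 ? active : 2;
                    return NMI_HANDLED;
            }
            if (!perf_nmi_counter)
                    return NMI_DONE;        /* genuinely unknown NMI */
            perf_nmi_counter--;
            return NMI_HANDLED;             /* absorb a late PMC NMI */
    }

    int main(void)
    {
            printf("%d\n", handle_nmi_sketch(4, 1)); /* 1: handled */
            printf("%d\n", handle_nmi_sketch(4, 0)); /* 1: absorbed */
            printf("%d\n", handle_nmi_sketch(4, 0)); /* 1: absorbed */
            printf("%d\n", handle_nmi_sketch(4, 0)); /* 0: unknown */
            return 0;
    }
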
791     diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
792     index e2b1447192a8..81911e11a15d 100644
793     --- a/arch/x86/events/core.c
794     +++ b/arch/x86/events/core.c
795     @@ -1349,8 +1349,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
796     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
797     struct hw_perf_event *hwc = &event->hw;
798    
799     - if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
800     + if (test_bit(hwc->idx, cpuc->active_mask)) {
801     x86_pmu.disable(event);
802     + __clear_bit(hwc->idx, cpuc->active_mask);
803     cpuc->events[hwc->idx] = NULL;
804     WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
805     hwc->state |= PERF_HES_STOPPED;
806     @@ -1447,16 +1448,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
807     apic_write(APIC_LVTPC, APIC_DM_NMI);
808    
809     for (idx = 0; idx < x86_pmu.num_counters; idx++) {
810     - if (!test_bit(idx, cpuc->active_mask)) {
811     - /*
812     - * Though we deactivated the counter some cpus
813     - * might still deliver spurious interrupts still
814     - * in flight. Catch them:
815     - */
816     - if (__test_and_clear_bit(idx, cpuc->running))
817     - handled++;
818     + if (!test_bit(idx, cpuc->active_mask))
819     continue;
820     - }
821    
822     event = cpuc->events[idx];
823    
824     diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
825     index ad7b210aa3f6..8e790ec219a5 100644
826     --- a/arch/x86/include/asm/bitops.h
827     +++ b/arch/x86/include/asm/bitops.h
828     @@ -36,22 +36,17 @@
829     * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
830     */
831    
832     -#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
833     -/* Technically wrong, but this avoids compilation errors on some gcc
834     - versions. */
835     -#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
836     -#else
837     -#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
838     -#endif
839     +#define RLONG_ADDR(x) "m" (*(volatile long *) (x))
840     +#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
841    
842     -#define ADDR BITOP_ADDR(addr)
843     +#define ADDR RLONG_ADDR(addr)
844    
845     /*
846     * We do the locked ops that don't return the old value as
847     * a mask operation on a byte.
848     */
849     #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
850     -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
851     +#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
852     #define CONST_MASK(nr) (1 << ((nr) & 7))
853    
854     /**
855     @@ -79,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
856     : "memory");
857     } else {
858     asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
859     - : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
860     + : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
861     }
862     }
863    
864     @@ -94,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
865     */
866     static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
867     {
868     - asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
869     + asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
870     }
871    
872     /**
873     @@ -116,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
874     : "iq" ((u8)~CONST_MASK(nr)));
875     } else {
876     asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
877     - : BITOP_ADDR(addr)
878     - : "Ir" (nr));
879     + : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
880     }
881     }
882    
883     @@ -137,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
884    
885     static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
886     {
887     - asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
888     + asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
889     }
890    
891     static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
892     @@ -145,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
893     bool negative;
894     asm volatile(LOCK_PREFIX "andb %2,%1"
895     CC_SET(s)
896     - : CC_OUT(s) (negative), ADDR
897     + : CC_OUT(s) (negative), WBYTE_ADDR(addr)
898     : "ir" ((char) ~(1 << nr)) : "memory");
899     return negative;
900     }
901     @@ -161,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
902     * __clear_bit() is non-atomic and implies release semantics before the memory
903     * operation. It can be used for an unlock if no other CPUs can concurrently
904     * modify other bits in the word.
905     - *
906     - * No memory barrier is required here, because x86 cannot reorder stores past
907     - * older loads. Same principle as spin_unlock.
908     */
909     static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
910     {
911     - barrier();
912     __clear_bit(nr, addr);
913     }
914    
915     @@ -182,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
916     */
917     static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
918     {
919     - asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
920     + asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
921     }
922    
923     /**
924     @@ -202,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
925     : "iq" ((u8)CONST_MASK(nr)));
926     } else {
927     asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
928     - : BITOP_ADDR(addr)
929     - : "Ir" (nr));
930     + : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
931     }
932     }
933    
934     @@ -248,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
935    
936     asm(__ASM_SIZE(bts) " %2,%1"
937     CC_SET(c)
938     - : CC_OUT(c) (oldbit), ADDR
939     - : "Ir" (nr));
940     + : CC_OUT(c) (oldbit)
941     + : ADDR, "Ir" (nr) : "memory");
942     return oldbit;
943     }
944    
945     @@ -288,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
946    
947     asm volatile(__ASM_SIZE(btr) " %2,%1"
948     CC_SET(c)
949     - : CC_OUT(c) (oldbit), ADDR
950     - : "Ir" (nr));
951     + : CC_OUT(c) (oldbit)
952     + : ADDR, "Ir" (nr) : "memory");
953     return oldbit;
954     }
955    
956     @@ -300,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
957    
958     asm volatile(__ASM_SIZE(btc) " %2,%1"
959     CC_SET(c)
960     - : CC_OUT(c) (oldbit), ADDR
961     - : "Ir" (nr) : "memory");
962     + : CC_OUT(c) (oldbit)
963     + : ADDR, "Ir" (nr) : "memory");
964    
965     return oldbit;
966     }
967     @@ -332,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
968     asm volatile(__ASM_SIZE(bt) " %2,%1"
969     CC_SET(c)
970     : CC_OUT(c) (oldbit)
971     - : "m" (*(unsigned long *)addr), "Ir" (nr));
972     + : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
973    
974     return oldbit;
975     }
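
The bitops rework above stops describing the whole bit string as a "+m" output operand (the old BITOP_ADDR "=m" fallback for ancient gcc was even commented as technically wrong) and instead passes the address as a plain "m" input plus a "memory" clobber. A minimal x86-64 GCC/Clang sketch of the resulting pattern (hypothetical name, not the kernel macro):

    #include <stdio.h>

    static inline void sketch_set_bit(long nr, volatile unsigned long *addr)
    {
            /* address is an input; the "memory" clobber tells the
             * compiler the asm may read and write memory, as above */
            asm volatile("btsq %1,%0"
                         : : "m" (*(volatile long *)addr), "Ir" (nr)
                         : "memory");
    }

    int main(void)
    {
            unsigned long word = 0;

            sketch_set_bit(3, &word);
            printf("%#lx\n", word);         /* 0x8 */
            return 0;
    }
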
976     diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
977     index 55d392c6bd29..2fd165f1cffa 100644
978     --- a/arch/x86/include/asm/string_32.h
979     +++ b/arch/x86/include/asm/string_32.h
980     @@ -179,14 +179,7 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
981     * No 3D Now!
982     */
983    
984     -#if (__GNUC__ >= 4)
985     #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
986     -#else
987     -#define memcpy(t, f, n) \
988     - (__builtin_constant_p((n)) \
989     - ? __constant_memcpy((t), (f), (n)) \
990     - : __memcpy((t), (f), (n)))
991     -#endif
992    
993     #endif
994     #endif /* !CONFIG_FORTIFY_SOURCE */
995     @@ -282,12 +275,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
996    
997     {
998     int d0, d1;
999     -#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
1000     - /* Workaround for broken gcc 4.0 */
1001     - register unsigned long eax asm("%eax") = pattern;
1002     -#else
1003     unsigned long eax = pattern;
1004     -#endif
1005    
1006     switch (count % 4) {
1007     case 0:
1008     @@ -321,15 +309,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
1009     #define __HAVE_ARCH_MEMSET
1010     extern void *memset(void *, int, size_t);
1011     #ifndef CONFIG_FORTIFY_SOURCE
1012     -#if (__GNUC__ >= 4)
1013     #define memset(s, c, count) __builtin_memset(s, c, count)
1014     -#else
1015     -#define memset(s, c, count) \
1016     - (__builtin_constant_p(c) \
1017     - ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
1018     - (count)) \
1019     - : __memset((s), (c), (count)))
1020     -#endif
1021     #endif /* !CONFIG_FORTIFY_SOURCE */
1022    
1023     #define __HAVE_ARCH_MEMSET16
1024     diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
1025     index 4e4194e21a09..75314c3dbe47 100644
1026     --- a/arch/x86/include/asm/string_64.h
1027     +++ b/arch/x86/include/asm/string_64.h
1028     @@ -14,21 +14,6 @@
1029     extern void *memcpy(void *to, const void *from, size_t len);
1030     extern void *__memcpy(void *to, const void *from, size_t len);
1031    
1032     -#ifndef CONFIG_FORTIFY_SOURCE
1033     -#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
1034     -#define memcpy(dst, src, len) \
1035     -({ \
1036     - size_t __len = (len); \
1037     - void *__ret; \
1038     - if (__builtin_constant_p(len) && __len >= 64) \
1039     - __ret = __memcpy((dst), (src), __len); \
1040     - else \
1041     - __ret = __builtin_memcpy((dst), (src), __len); \
1042     - __ret; \
1043     -})
1044     -#endif
1045     -#endif /* !CONFIG_FORTIFY_SOURCE */
1046     -
1047     #define __HAVE_ARCH_MEMSET
1048     void *memset(void *s, int c, size_t n);
1049     void *__memset(void *s, int c, size_t n);
1050     diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
1051     index ef05bea7010d..6b5c710846f5 100644
1052     --- a/arch/x86/include/asm/xen/hypercall.h
1053     +++ b/arch/x86/include/asm/xen/hypercall.h
1054     @@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
1055     __HYPERCALL_DECLS;
1056     __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
1057    
1058     + if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
1059     + return -EINVAL;
1060     +
1061     asm volatile(CALL_NOSPEC
1062     : __HYPERCALL_5PARAM
1063     : [thunk_target] "a" (&hypercall_page[call])
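
The added check above rejects hypercall numbers that would index past the single hypercall page before the indirect CALL_NOSPEC through &hypercall_page[call]. A sketch of the bound (assuming, as in the kernel, 32-byte stub slots; names hypothetical):

    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define STUB_SIZE 32    /* sizeof(hypercall_page[0]) */

    static long hypercall_sketch(unsigned int call)
    {
            if (call >= PAGE_SIZE / STUB_SIZE)
                    return -22;     /* -EINVAL */
            /* ... indirect call through &hypercall_page[call] ... */
            return 0;
    }

    int main(void)
    {
            printf("%ld\n", hypercall_sketch(1));    /* 0 */
            printf("%ld\n", hypercall_sketch(200));  /* -22 */
            return 0;
    }
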
1064     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1065     index f13a3a24d360..a9b8e38d78ad 100644
1066     --- a/arch/x86/kvm/svm.c
1067     +++ b/arch/x86/kvm/svm.c
1068     @@ -6422,11 +6422,11 @@ e_free:
1069     return ret;
1070     }
1071    
1072     -static int get_num_contig_pages(int idx, struct page **inpages,
1073     - unsigned long npages)
1074     +static unsigned long get_num_contig_pages(unsigned long idx,
1075     + struct page **inpages, unsigned long npages)
1076     {
1077     unsigned long paddr, next_paddr;
1078     - int i = idx + 1, pages = 1;
1079     + unsigned long i = idx + 1, pages = 1;
1080    
1081     /* find the number of contiguous pages starting from idx */
1082     paddr = __sme_page_pa(inpages[idx]);
1083     @@ -6445,12 +6445,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,
1084    
1085     static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1086     {
1087     - unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
1088     + unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
1089     struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1090     struct kvm_sev_launch_update_data params;
1091     struct sev_data_launch_update_data *data;
1092     struct page **inpages;
1093     - int i, ret, pages;
1094     + int ret;
1095    
1096     if (!sev_guest(kvm))
1097     return -ENOTTY;
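
The SEV hunks above widen the page-count bookkeeping from int to unsigned long: npages derives from a user-supplied region size, and truncating it into a 32-bit signed counter can wrap. A one-liner showing the truncation on LP64 (hypothetical values):

    #include <stdio.h>

    int main(void)
    {
            unsigned long npages = 0x100000000UL;   /* 2^32 pages */
            int pages = (int)npages;                /* truncates to 0 */

            printf("unsigned long %lu -> int %d\n", npages, pages);
            return 0;
    }
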
1098     diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1099     index f014e1aeee96..f90b3a948291 100644
1100     --- a/arch/x86/kvm/vmx/nested.c
1101     +++ b/arch/x86/kvm/vmx/nested.c
1102     @@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
1103     }
1104     }
1105    
1106     +static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
1107     + int msr;
1108     +
1109     + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
1110     + unsigned word = msr / BITS_PER_LONG;
1111     +
1112     + msr_bitmap[word] = ~0;
1113     + msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
1114     + }
1115     +}
1116     +
1117     /*
1118     * Merge L0's and L1's MSR bitmap, return false to indicate that
1119     * we do not use the hardware.
1120     @@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
1121     return false;
1122    
1123     msr_bitmap_l1 = (unsigned long *)kmap(page);
1124     - if (nested_cpu_has_apic_reg_virt(vmcs12)) {
1125     - /*
1126     - * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
1127     - * just lets the processor take the value from the virtual-APIC page;
1128     - * take those 256 bits directly from the L1 bitmap.
1129     - */
1130     - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
1131     - unsigned word = msr / BITS_PER_LONG;
1132     - msr_bitmap_l0[word] = msr_bitmap_l1[word];
1133     - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
1134     - }
1135     - } else {
1136     - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
1137     - unsigned word = msr / BITS_PER_LONG;
1138     - msr_bitmap_l0[word] = ~0;
1139     - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
1140     - }
1141     - }
1142    
1143     - nested_vmx_disable_intercept_for_msr(
1144     - msr_bitmap_l1, msr_bitmap_l0,
1145     - X2APIC_MSR(APIC_TASKPRI),
1146     - MSR_TYPE_W);
1147     + /*
1148     + * To keep the control flow simple, pay eight 8-byte writes (sixteen
1149     + * 4-byte writes on 32-bit systems) up front to enable intercepts for
1150     + * the x2APIC MSR range and selectively disable them below.
1151     + */
1152     + enable_x2apic_msr_intercepts(msr_bitmap_l0);
1153     +
1154     + if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
1155     + if (nested_cpu_has_apic_reg_virt(vmcs12)) {
1156     + /*
1157     + * L0 need not intercept reads for MSRs between 0x800
1158     + * and 0x8ff, it just lets the processor take the value
1159     + * from the virtual-APIC page; take those 256 bits
1160     + * directly from the L1 bitmap.
1161     + */
1162     + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
1163     + unsigned word = msr / BITS_PER_LONG;
1164     +
1165     + msr_bitmap_l0[word] = msr_bitmap_l1[word];
1166     + }
1167     + }
1168    
1169     - if (nested_cpu_has_vid(vmcs12)) {
1170     - nested_vmx_disable_intercept_for_msr(
1171     - msr_bitmap_l1, msr_bitmap_l0,
1172     - X2APIC_MSR(APIC_EOI),
1173     - MSR_TYPE_W);
1174     nested_vmx_disable_intercept_for_msr(
1175     msr_bitmap_l1, msr_bitmap_l0,
1176     - X2APIC_MSR(APIC_SELF_IPI),
1177     - MSR_TYPE_W);
1178     + X2APIC_MSR(APIC_TASKPRI),
1179     + MSR_TYPE_R | MSR_TYPE_W);
1180     +
1181     + if (nested_cpu_has_vid(vmcs12)) {
1182     + nested_vmx_disable_intercept_for_msr(
1183     + msr_bitmap_l1, msr_bitmap_l0,
1184     + X2APIC_MSR(APIC_EOI),
1185     + MSR_TYPE_W);
1186     + nested_vmx_disable_intercept_for_msr(
1187     + msr_bitmap_l1, msr_bitmap_l0,
1188     + X2APIC_MSR(APIC_SELF_IPI),
1189     + MSR_TYPE_W);
1190     + }
1191     }
1192    
1193     if (spec_ctrl)
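
The rewritten bitmap merge above first enables every intercept for the x2APIC MSR range (0x800-0x8ff) and only then punches selective holes, keying all of it on virtualize-x2APIC mode. The layout arithmetic it relies on: read intercepts for low MSRs occupy the first 1 KiB of the VMX MSR bitmap, and the matching write intercepts sit 0x800 bytes further in. A small demonstration of the word computation (hedged, simplified):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(long))

    int main(void)
    {
            unsigned int msr = 0x808;       /* an x2APIC MSR */
            unsigned int word = msr / BITS_PER_LONG;
            unsigned int bit = msr % BITS_PER_LONG;

            printf("read  intercept: word %u, bit %u\n", word, bit);
            printf("write intercept: word %lu, bit %u\n",
                   word + 0x800 / sizeof(long), bit);
            return 0;
    }
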
1194     diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
1195     index 174c11f13bba..b9f82510c650 100644
1196     --- a/arch/xtensa/kernel/stacktrace.c
1197     +++ b/arch/xtensa/kernel/stacktrace.c
1198     @@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
1199     return 1;
1200     }
1201    
1202     +/*
1203     + * level == 0 is for the return address from the caller of this function,
1204     + * not from this function itself.
1205     + */
1206     unsigned long return_address(unsigned level)
1207     {
1208     struct return_addr_data r = {
1209     - .skip = level + 1,
1210     + .skip = level,
1211     };
1212     walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
1213     return r.addr;
1214     diff --git a/block/bio.c b/block/bio.c
1215     index 4db1008309ed..a06f58bd4c72 100644
1216     --- a/block/bio.c
1217     +++ b/block/bio.c
1218     @@ -1238,8 +1238,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
1219     }
1220     }
1221    
1222     - if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1223     + if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
1224     + if (!map_data)
1225     + __free_page(page);
1226     break;
1227     + }
1228    
1229     len -= bytes;
1230     offset = 0;
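
The bio_copy_user_iov() fix above plugs a page leak: when bio_add_pc_page() cannot take the freshly allocated page, it was previously dropped on the floor. The page is only freed when map_data is NULL, since with map_data the pages belong to the caller. A generic sketch of the error-path rule (hypothetical helper):

    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for bio_add_pc_page(): fails when the container is full */
    static int add_page_sketch(char **slot, char *page)
    {
            if (*slot)
                    return -1;
            *slot = page;
            return 0;
    }

    int main(void)
    {
            char *slot = NULL;

            for (int i = 0; i < 2; i++) {
                    char *page = malloc(4096);

                    if (add_page_sketch(&slot, page) < 0) {
                            free(page);     /* the fix: release on failure */
                            break;
                    }
            }
            free(slot);
            return 0;
    }
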
1231     diff --git a/block/blk-core.c b/block/blk-core.c
1232     index 6b78ec56a4f2..5bde73a49399 100644
1233     --- a/block/blk-core.c
1234     +++ b/block/blk-core.c
1235     @@ -1246,8 +1246,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
1236     */
1237     blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1238     {
1239     - blk_qc_t unused;
1240     -
1241     if (blk_cloned_rq_check_limits(q, rq))
1242     return BLK_STS_IOERR;
1243    
1244     @@ -1263,7 +1261,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
1245     * bypass a potential scheduler on the bottom device for
1246     * insert.
1247     */
1248     - return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
1249     + return blk_mq_request_issue_directly(rq, true);
1250     }
1251     EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1252    
1253     diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
1254     index 140933e4a7d1..0c98b6c1ca49 100644
1255     --- a/block/blk-mq-sched.c
1256     +++ b/block/blk-mq-sched.c
1257     @@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
1258     * busy in case of 'none' scheduler, and this way may save
1259     * us one extra enqueue & dequeue to sw queue.
1260     */
1261     - if (!hctx->dispatch_busy && !e && !run_queue_async)
1262     + if (!hctx->dispatch_busy && !e && !run_queue_async) {
1263     blk_mq_try_issue_list_directly(hctx, list);
1264     - else
1265     - blk_mq_insert_requests(hctx, ctx, list);
1266     + if (list_empty(list))
1267     + return;
1268     + }
1269     + blk_mq_insert_requests(hctx, ctx, list);
1270     }
1271    
1272     blk_mq_run_hw_queue(hctx, run_queue_async);
1273     diff --git a/block/blk-mq.c b/block/blk-mq.c
1274     index b9283b63d116..16f9675c57e6 100644
1275     --- a/block/blk-mq.c
1276     +++ b/block/blk-mq.c
1277     @@ -1805,74 +1805,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1278     return ret;
1279     }
1280    
1281     -blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1282     +static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1283     struct request *rq,
1284     blk_qc_t *cookie,
1285     - bool bypass, bool last)
1286     + bool bypass_insert, bool last)
1287     {
1288     struct request_queue *q = rq->q;
1289     bool run_queue = true;
1290     - blk_status_t ret = BLK_STS_RESOURCE;
1291     - int srcu_idx;
1292     - bool force = false;
1293    
1294     - hctx_lock(hctx, &srcu_idx);
1295     /*
1296     - * hctx_lock is needed before checking quiesced flag.
1297     + * RCU or SRCU read lock is needed before checking quiesced flag.
1298     *
1299     - * When queue is stopped or quiesced, ignore 'bypass', insert
1300     - * and return BLK_STS_OK to caller, and avoid driver to try to
1301     - * dispatch again.
1302     + * When queue is stopped or quiesced, ignore 'bypass_insert' from
1303     + * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
1304     + * and avoid driver to try to dispatch again.
1305     */
1306     - if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
1307     + if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1308     run_queue = false;
1309     - bypass = false;
1310     - goto out_unlock;
1311     + bypass_insert = false;
1312     + goto insert;
1313     }
1314    
1315     - if (unlikely(q->elevator && !bypass))
1316     - goto out_unlock;
1317     + if (q->elevator && !bypass_insert)
1318     + goto insert;
1319    
1320     if (!blk_mq_get_dispatch_budget(hctx))
1321     - goto out_unlock;
1322     + goto insert;
1323    
1324     if (!blk_mq_get_driver_tag(rq)) {
1325     blk_mq_put_dispatch_budget(hctx);
1326     - goto out_unlock;
1327     + goto insert;
1328     }
1329    
1330     - /*
1331     - * Always add a request that has been through
1332     - *.queue_rq() to the hardware dispatch list.
1333     - */
1334     - force = true;
1335     - ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
1336     -out_unlock:
1337     + return __blk_mq_issue_directly(hctx, rq, cookie, last);
1338     +insert:
1339     + if (bypass_insert)
1340     + return BLK_STS_RESOURCE;
1341     +
1342     + blk_mq_request_bypass_insert(rq, run_queue);
1343     + return BLK_STS_OK;
1344     +}
1345     +
1346     +static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1347     + struct request *rq, blk_qc_t *cookie)
1348     +{
1349     + blk_status_t ret;
1350     + int srcu_idx;
1351     +
1352     + might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1353     +
1354     + hctx_lock(hctx, &srcu_idx);
1355     +
1356     + ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
1357     + if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1358     + blk_mq_request_bypass_insert(rq, true);
1359     + else if (ret != BLK_STS_OK)
1360     + blk_mq_end_request(rq, ret);
1361     +
1362     + hctx_unlock(hctx, srcu_idx);
1363     +}
1364     +
1365     +blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
1366     +{
1367     + blk_status_t ret;
1368     + int srcu_idx;
1369     + blk_qc_t unused_cookie;
1370     + struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1371     +
1372     + hctx_lock(hctx, &srcu_idx);
1373     + ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
1374     hctx_unlock(hctx, srcu_idx);
1375     - switch (ret) {
1376     - case BLK_STS_OK:
1377     - break;
1378     - case BLK_STS_DEV_RESOURCE:
1379     - case BLK_STS_RESOURCE:
1380     - if (force) {
1381     - blk_mq_request_bypass_insert(rq, run_queue);
1382     - /*
1383     - * We have to return BLK_STS_OK for the DM
1384     - * to avoid livelock. Otherwise, we return
1385     - * the real result to indicate whether the
1386     - * request is direct-issued successfully.
1387     - */
1388     - ret = bypass ? BLK_STS_OK : ret;
1389     - } else if (!bypass) {
1390     - blk_mq_sched_insert_request(rq, false,
1391     - run_queue, false);
1392     - }
1393     - break;
1394     - default:
1395     - if (!bypass)
1396     - blk_mq_end_request(rq, ret);
1397     - break;
1398     - }
1399    
1400     return ret;
1401     }
1402     @@ -1880,20 +1882,22 @@ out_unlock:
1403     void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
1404     struct list_head *list)
1405     {
1406     - blk_qc_t unused;
1407     - blk_status_t ret = BLK_STS_OK;
1408     -
1409     while (!list_empty(list)) {
1410     + blk_status_t ret;
1411     struct request *rq = list_first_entry(list, struct request,
1412     queuelist);
1413    
1414     list_del_init(&rq->queuelist);
1415     - if (ret == BLK_STS_OK)
1416     - ret = blk_mq_try_issue_directly(hctx, rq, &unused,
1417     - false,
1418     + ret = blk_mq_request_issue_directly(rq, list_empty(list));
1419     + if (ret != BLK_STS_OK) {
1420     + if (ret == BLK_STS_RESOURCE ||
1421     + ret == BLK_STS_DEV_RESOURCE) {
1422     + blk_mq_request_bypass_insert(rq,
1423     list_empty(list));
1424     - else
1425     - blk_mq_sched_insert_request(rq, false, true, false);
1426     + break;
1427     + }
1428     + blk_mq_end_request(rq, ret);
1429     + }
1430     }
1431    
1432     /*
1433     @@ -1901,7 +1905,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
1434     * the driver there was more coming, but that turned out to
1435     * be a lie.
1436     */
1437     - if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
1438     + if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
1439     hctx->queue->mq_ops->commit_rqs(hctx);
1440     }
1441    
1442     @@ -2014,13 +2018,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1443     if (same_queue_rq) {
1444     data.hctx = same_queue_rq->mq_hctx;
1445     blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1446     - &cookie, false, true);
1447     + &cookie);
1448     }
1449     } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
1450     !data.hctx->dispatch_busy)) {
1451     blk_mq_put_ctx(data.ctx);
1452     blk_mq_bio_to_request(rq, bio);
1453     - blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
1454     + blk_mq_try_issue_directly(data.hctx, rq, &cookie);
1455     } else {
1456     blk_mq_put_ctx(data.ctx);
1457     blk_mq_bio_to_request(rq, bio);
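
The blk-mq rework above splits the old multi-purpose blk_mq_try_issue_directly() into an internal helper plus two callers, and changes the failure policy: a request that hits BLK_STS_RESOURCE/BLK_STS_DEV_RESOURCE is bypass-inserted onto the hctx dispatch list (and list issuing stops) instead of being re-fed to the scheduler, preserving dispatch order. A toy model of the list path's policy (hypothetical, simplified):

    #include <stdio.h>

    enum sts { STS_OK, STS_RESOURCE, STS_IOERR };

    /* stand-in for blk_mq_request_issue_directly() */
    static enum sts issue_directly(int budget_left)
    {
            return budget_left > 0 ? STS_OK : STS_RESOURCE;
    }

    int main(void)
    {
            int budget = 2;

            for (int rq = 0; rq < 4; rq++) {
                    enum sts ret = issue_directly(budget--);

                    if (ret == STS_RESOURCE) {
                            /* park on the dispatch list and stop, so
                             * later requests keep their order */
                            printf("rq %d: bypass-insert, stop\n", rq);
                            break;
                    }
                    if (ret != STS_OK) {
                            printf("rq %d: fail the request\n", rq);
                            continue;
                    }
                    printf("rq %d: issued\n", rq);
            }
            return 0;
    }
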
1458     diff --git a/block/blk-mq.h b/block/blk-mq.h
1459     index d0b3dd54ef8d..a3a684a8c633 100644
1460     --- a/block/blk-mq.h
1461     +++ b/block/blk-mq.h
1462     @@ -67,10 +67,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
1463     void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1464     struct list_head *list);
1465    
1466     -blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1467     - struct request *rq,
1468     - blk_qc_t *cookie,
1469     - bool bypass, bool last);
1470     +/* Used by blk_insert_cloned_request() to issue request directly */
1471     +blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
1472     void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
1473     struct list_head *list);
1474    
1475     diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
1476     index e10fec99a182..4424997ecf30 100644
1477     --- a/drivers/acpi/acpica/evgpe.c
1478     +++ b/drivers/acpi/acpica/evgpe.c
1479     @@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
1480    
1481     ACPI_FUNCTION_TRACE(ev_enable_gpe);
1482    
1483     - /* Enable the requested GPE */
1484     + /* Clear the GPE status */
1485     + status = acpi_hw_clear_gpe(gpe_event_info);
1486     + if (ACPI_FAILURE(status))
1487     + return_ACPI_STATUS(status);
1488    
1489     + /* Enable the requested GPE */
1490     status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
1491     return_ACPI_STATUS(status);
1492     }
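
The ordering change above matters because GPE status bits are latched: an event that arrived while the GPE was disabled would otherwise fire the moment the enable bit is set. A toy model of why clear-then-enable is the safe order (hypothetical names, not the ACPICA API):

#include <stdbool.h>
#include <stdio.h>

/* Toy latched-event model: status remembers an edge that arrived
 * while the event was disabled. */
struct gpe {
	bool status;
	bool enabled;
};

static void enable_gpe(struct gpe *g, bool clear_first)
{
	if (clear_first)
		g->status = false;	/* acpi_hw_clear_gpe() analogue */
	g->enabled = true;		/* acpi_hw_low_set_gpe(ACPI_GPE_ENABLE) */
	printf(g->status ? "stale event fires immediately\n"
			 : "enabled cleanly\n");
}

int main(void)
{
	struct gpe g = { .status = true, .enabled = false };

	enable_gpe(&g, false);			/* pre-patch ordering */
	g = (struct gpe){ .status = true };
	enable_gpe(&g, true);			/* patched: clear, then enable */
	return 0;
}
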
1493     diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
1494     index 8638f43cfc3d..79d86da1c892 100644
1495     --- a/drivers/acpi/acpica/nsobject.c
1496     +++ b/drivers/acpi/acpica/nsobject.c
1497     @@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
1498     }
1499     }
1500    
1501     + if (obj_desc->common.type == ACPI_TYPE_REGION) {
1502     + acpi_ut_remove_address_range(obj_desc->region.space_id, node);
1503     + }
1504     +
1505     /* Clear the Node entry in all cases */
1506    
1507     node->object = NULL;
1508     diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
1509     index 2e2ffe7010aa..51c77f0e47b2 100644
1510     --- a/drivers/char/Kconfig
1511     +++ b/drivers/char/Kconfig
1512     @@ -351,7 +351,7 @@ config XILINX_HWICAP
1513    
1514     config R3964
1515     tristate "Siemens R3964 line discipline"
1516     - depends on TTY
1517     + depends on TTY && BROKEN
1518     ---help---
1519     This driver allows synchronous communication with devices using the
1520     Siemens R3964 packet protocol. Unless you are dealing with special
1521     diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
1522     index 258c8d259ea1..f965845917e3 100644
1523     --- a/drivers/clk/meson/meson-aoclk.c
1524     +++ b/drivers/clk/meson/meson-aoclk.c
1525     @@ -65,20 +65,15 @@ int meson_aoclkc_probe(struct platform_device *pdev)
1526     return ret;
1527     }
1528    
1529     - /* Populate regmap */
1530     - for (clkid = 0; clkid < data->num_clks; clkid++)
1531     + /*
1532     + * Populate regmap and register all clks
1533     + */
1534     + for (clkid = 0; clkid < data->num_clks; clkid++) {
1535     data->clks[clkid]->map = regmap;
1536    
1537     - /* Register all clks */
1538     - for (clkid = 0; clkid < data->hw_data->num; clkid++) {
1539     - if (!data->hw_data->hws[clkid])
1540     - continue;
1541     -
1542     ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
1543     - if (ret) {
1544     - dev_err(dev, "Clock registration failed\n");
1545     + if (ret)
1546     return ret;
1547     - }
1548     }
1549    
1550     return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
1551     diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
1552     index c7103dd2d8d5..563ab8590061 100644
1553     --- a/drivers/gpu/drm/i915/gvt/gtt.c
1554     +++ b/drivers/gpu/drm/i915/gvt/gtt.c
1555     @@ -1942,7 +1942,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
1556     */
1557     void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
1558     {
1559     - atomic_dec(&mm->pincount);
1560     + atomic_dec_if_positive(&mm->pincount);
1561     }
1562    
1563     /**
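
atomic_dec_if_positive() only performs the decrement when the result would stay non-negative, so an unpin call without a matching pin can no longer push the pincount below zero. A non-atomic userspace illustration of its semantics:

#include <stdio.h>

/* Non-atomic illustration of atomic_dec_if_positive(): the value is
 * only written when the result stays >= 0, so an unbalanced extra
 * unpin cannot drive the counter negative. */
static int dec_if_positive(int *v)
{
	int new = *v - 1;

	if (new >= 0)
		*v = new;
	return new;
}

int main(void)
{
	int pincount = 1;

	dec_if_positive(&pincount);		/* 1 -> 0 */
	dec_if_positive(&pincount);		/* extra unpin: no underflow */
	printf("pincount = %d\n", pincount);	/* prints 0, not -1 */
	return 0;
}
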
1564     diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
1565     index 55bb7885e228..8fff49affc11 100644
1566     --- a/drivers/gpu/drm/i915/gvt/scheduler.c
1567     +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
1568     @@ -1475,8 +1475,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1569     intel_runtime_pm_put(dev_priv);
1570     }
1571    
1572     - if (ret && (vgpu_is_vm_unhealthy(ret))) {
1573     - enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1574     + if (ret) {
1575     + if (vgpu_is_vm_unhealthy(ret))
1576     + enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1577     intel_vgpu_destroy_workload(workload);
1578     return ERR_PTR(ret);
1579     }
1580     diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1581     index 22a74608c6e4..dcd1df5322e8 100644
1582     --- a/drivers/gpu/drm/i915/intel_dp.c
1583     +++ b/drivers/gpu/drm/i915/intel_dp.c
1584     @@ -1845,42 +1845,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1585     return false;
1586     }
1587    
1588     -/* Optimize link config in order: max bpp, min lanes, min clock */
1589     -static bool
1590     -intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
1591     - struct intel_crtc_state *pipe_config,
1592     - const struct link_config_limits *limits)
1593     -{
1594     - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1595     - int bpp, clock, lane_count;
1596     - int mode_rate, link_clock, link_avail;
1597     -
1598     - for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1599     - mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1600     - bpp);
1601     -
1602     - for (lane_count = limits->min_lane_count;
1603     - lane_count <= limits->max_lane_count;
1604     - lane_count <<= 1) {
1605     - for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1606     - link_clock = intel_dp->common_rates[clock];
1607     - link_avail = intel_dp_max_data_rate(link_clock,
1608     - lane_count);
1609     -
1610     - if (mode_rate <= link_avail) {
1611     - pipe_config->lane_count = lane_count;
1612     - pipe_config->pipe_bpp = bpp;
1613     - pipe_config->port_clock = link_clock;
1614     -
1615     - return true;
1616     - }
1617     - }
1618     - }
1619     - }
1620     -
1621     - return false;
1622     -}
1623     -
1624     static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1625     {
1626     int i, num_bpc;
1627     @@ -2013,15 +1977,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
1628     limits.min_bpp = 6 * 3;
1629     limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
1630    
1631     - if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
1632     + if (intel_dp_is_edp(intel_dp)) {
1633     /*
1634     * Use the maximum clock and number of lanes the eDP panel
1635     - * advertizes being capable of. The eDP 1.3 and earlier panels
1636     - * are generally designed to support only a single clock and
1637     - * lane configuration, and typically these values correspond to
1638     - * the native resolution of the panel. With eDP 1.4 rate select
1639     - * and DSC, this is decreasingly the case, and we need to be
1640     - * able to select less than maximum link config.
1641     + * advertizes being capable of. The panels are generally
1642     + * designed to support only a single clock and lane
1643     + * configuration, and typically these values correspond to the
1644     + * native resolution of the panel.
1645     */
1646     limits.min_lane_count = limits.max_lane_count;
1647     limits.min_clock = limits.max_clock;
1648     @@ -2035,22 +1997,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
1649     intel_dp->common_rates[limits.max_clock],
1650     limits.max_bpp, adjusted_mode->crtc_clock);
1651    
1652     - if (intel_dp_is_edp(intel_dp))
1653     - /*
1654     - * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
1655     - * section A.1: "It is recommended that the minimum number of
1656     - * lanes be used, using the minimum link rate allowed for that
1657     - * lane configuration."
1658     - *
1659     - * Note that we use the max clock and lane count for eDP 1.3 and
1660     - * earlier, and fast vs. wide is irrelevant.
1661     - */
1662     - ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
1663     - &limits);
1664     - else
1665     - /* Optimize for slow and wide. */
1666     - ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
1667     - &limits);
1668     + /*
1669     + * Optimize for slow and wide. This is the place to add alternative
1670     + * optimization policy.
1671     + */
1672     + ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
1673    
1674     /* enable compression if the mode doesn't fit available BW */
1675     if (!ret) {
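
With the fast-and-narrow path removed, every DP link is tuned with the retained "slow and wide" policy: highest bpp first, then lowest link clock, then fewest lanes, which favours more lanes at a lower clock over fewer lanes at a higher one. A toy search in that order follows; the rate arithmetic is deliberately simplified and the numbers are invented, so this is not i915's actual bookkeeping.

#include <stdio.h>

int main(void)
{
	const int clocks[] = { 162000, 270000, 540000 };	/* kHz */
	const int lanes[] = { 1, 2, 4 };
	const int pixel_clock = 148500;				/* kHz, 1080p60 */
	int bpp, c, l;

	/* Order: max bpp, then min clock, then min lane count. */
	for (bpp = 30; bpp >= 18; bpp -= 6)
		for (c = 0; c < 3; c++)
			for (l = 0; l < 3; l++) {
				long need = (long)pixel_clock * bpp / 8;
				long avail = (long)clocks[c] * lanes[l];

				if (avail >= need) {
					printf("bpp=%d clock=%d lanes=%d\n",
					       bpp, clocks[c], lanes[l]);
					return 0;
				}
			}
	return 1;	/* no configuration carries the mode */
}

The demo settles on the lowest clock with four lanes rather than the highest clock with one lane, which is exactly the "wide" preference the retained helper encodes.
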
1676     diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
1677     index dc47720c99ba..39d8509d96a0 100644
1678     --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
1679     +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
1680     @@ -48,8 +48,13 @@ static enum drm_mode_status
1681     sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
1682     const struct drm_display_mode *mode)
1683     {
1684     - /* This is max for HDMI 2.0b (4K@60Hz) */
1685     - if (mode->clock > 594000)
1686     + /*
1687     + * The controller supports a maximum of 594 MHz, which corresponds
1688     + * to 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
1689     + * 340 MHz, scrambling has to be enabled. Because scrambling is
1690     + * not yet implemented, just limit to 340 MHz for now.
1691     + */
1692     + if (mode->clock > 340000)
1693     return MODE_CLOCK_HIGH;
1694    
1695     return MODE_OK;
1696     diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
1697     index a63e3011e971..bd4f0b88bbd7 100644
1698     --- a/drivers/gpu/drm/udl/udl_drv.c
1699     +++ b/drivers/gpu/drm/udl/udl_drv.c
1700     @@ -51,6 +51,7 @@ static struct drm_driver driver = {
1701     .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
1702     .load = udl_driver_load,
1703     .unload = udl_driver_unload,
1704     + .release = udl_driver_release,
1705    
1706     /* gem hooks */
1707     .gem_free_object_unlocked = udl_gem_free_object,
1708     diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
1709     index e9e9b1ff678e..4ae67d882eae 100644
1710     --- a/drivers/gpu/drm/udl/udl_drv.h
1711     +++ b/drivers/gpu/drm/udl/udl_drv.h
1712     @@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
1713    
1714     int udl_driver_load(struct drm_device *dev, unsigned long flags);
1715     void udl_driver_unload(struct drm_device *dev);
1716     +void udl_driver_release(struct drm_device *dev);
1717    
1718     int udl_fbdev_init(struct drm_device *dev);
1719     void udl_fbdev_cleanup(struct drm_device *dev);
1720     diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
1721     index 1b014d92855b..19055dda3140 100644
1722     --- a/drivers/gpu/drm/udl/udl_main.c
1723     +++ b/drivers/gpu/drm/udl/udl_main.c
1724     @@ -378,6 +378,12 @@ void udl_driver_unload(struct drm_device *dev)
1725     udl_free_urb_list(dev);
1726    
1727     udl_fbdev_cleanup(dev);
1728     - udl_modeset_cleanup(dev);
1729     kfree(udl);
1730     }
1731     +
1732     +void udl_driver_release(struct drm_device *dev)
1733     +{
1734     + udl_modeset_cleanup(dev);
1735     + drm_dev_fini(dev);
1736     + kfree(dev);
1737     +}
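
Moving udl_modeset_cleanup() out of .unload and into the new .release callback ties it to the drm_device's reference count: unload detaches the device, but per-device state must survive until the last reference is dropped. A toy refcount model of that split (invented names, not the DRM core):

#include <stdio.h>
#include <stdlib.h>

struct dev {
	int refs;
	void (*release)(struct dev *);
};

static void dev_put(struct dev *d)
{
	if (--d->refs == 0)
		d->release(d);
}

static void my_release(struct dev *d)
{
	printf("modeset cleanup, then free\n");	/* udl_driver_release analogue */
	free(d);
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->refs = 2;		/* core + one open file descriptor */
	d->release = my_release;
	printf("unload: device detached\n");
	dev_put(d);		/* core drops its reference */
	dev_put(d);		/* last user exits: release runs now */
	return 0;
}
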
1738     diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
1739     index f39a183d59c2..e7e946035027 100644
1740     --- a/drivers/gpu/drm/virtio/virtgpu_object.c
1741     +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
1742     @@ -28,10 +28,21 @@
1743     static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
1744     uint32_t *resid)
1745     {
1746     +#if 0
1747     int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
1748    
1749     if (handle < 0)
1750     return handle;
1751     +#else
1752     + static int handle;
1753     +
1754     + /*
1755     + * FIXME: dirty hack to avoid re-using IDs, virglrenderer
1756     + * can't deal with that. Needs fixing in virglrenderer, also
1757     + * should figure a better way to handle that in the guest.
1758     + */
1759     + handle++;
1760     +#endif
1761    
1762     *resid = handle + 1;
1763     return 0;
1764     @@ -39,7 +50,9 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
1765    
1766     static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
1767     {
1768     +#if 0
1769     ida_free(&vgdev->resource_ida, id - 1);
1770     +#endif
1771     }
1772    
1773     static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
1774     diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
1775     index 15ed6177a7a3..f040c8a7f9a9 100644
1776     --- a/drivers/hid/hid-logitech-hidpp.c
1777     +++ b/drivers/hid/hid-logitech-hidpp.c
1778     @@ -2608,8 +2608,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
1779     input_report_rel(mydata->input, REL_Y, v);
1780    
1781     v = hid_snto32(data[6], 8);
1782     - hidpp_scroll_counter_handle_scroll(
1783     - &hidpp->vertical_wheel_counter, v);
1784     + if (v != 0)
1785     + hidpp_scroll_counter_handle_scroll(
1786     + &hidpp->vertical_wheel_counter, v);
1787    
1788     input_sync(mydata->input);
1789     }
1790     diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
1791     index 6f929bfa9fcd..d0f1dfe2bcbb 100644
1792     --- a/drivers/hwmon/Kconfig
1793     +++ b/drivers/hwmon/Kconfig
1794     @@ -1759,6 +1759,7 @@ config SENSORS_VT8231
1795     config SENSORS_W83773G
1796     tristate "Nuvoton W83773G"
1797     depends on I2C
1798     + select REGMAP_I2C
1799     help
1800     If you say yes here you get support for the Nuvoton W83773G hardware
1801     monitoring chip.
1802     diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
1803     index 391118c8aae8..c888f4aca45c 100644
1804     --- a/drivers/hwmon/occ/common.c
1805     +++ b/drivers/hwmon/occ/common.c
1806     @@ -889,6 +889,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
1807     s++;
1808     }
1809     }
1810     +
1811     + s = (sensors->power.num_sensors * 4) + 1;
1812     } else {
1813     for (i = 0; i < sensors->power.num_sensors; ++i) {
1814     s = i + 1;
1815     @@ -917,11 +919,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
1816     show_power, NULL, 3, i);
1817     attr++;
1818     }
1819     - }
1820    
1821     - if (sensors->caps.num_sensors >= 1) {
1822     s = sensors->power.num_sensors + 1;
1823     + }
1824    
1825     + if (sensors->caps.num_sensors >= 1) {
1826     snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
1827     attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
1828     0, 0);
1829     diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
1830     index 4ee32964e1dd..948eb6e25219 100644
1831     --- a/drivers/infiniband/hw/mlx5/odp.c
1832     +++ b/drivers/infiniband/hw/mlx5/odp.c
1833     @@ -560,7 +560,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
1834     struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
1835     bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
1836     bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
1837     - u64 access_mask = ODP_READ_ALLOWED_BIT;
1838     + u64 access_mask;
1839     u64 start_idx, page_mask;
1840     struct ib_umem_odp *odp;
1841     size_t size;
1842     @@ -582,6 +582,7 @@ next_mr:
1843     page_shift = mr->umem->page_shift;
1844     page_mask = ~(BIT(page_shift) - 1);
1845     start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
1846     + access_mask = ODP_READ_ALLOWED_BIT;
1847    
1848     if (prefetch && !downgrade && !mr->umem->writable) {
1849     /* prefetch with write-access must
1850     diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
1851     index 95c6d86ab5e8..c4ef1fceead6 100644
1852     --- a/drivers/md/dm-core.h
1853     +++ b/drivers/md/dm-core.h
1854     @@ -115,6 +115,7 @@ struct mapped_device {
1855     struct srcu_struct io_barrier;
1856     };
1857    
1858     +void disable_discard(struct mapped_device *md);
1859     void disable_write_same(struct mapped_device *md);
1860     void disable_write_zeroes(struct mapped_device *md);
1861    
1862     diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
1863     index 2e823252d797..f535fd8ac82d 100644
1864     --- a/drivers/md/dm-integrity.c
1865     +++ b/drivers/md/dm-integrity.c
1866     @@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
1867     static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
1868     {
1869     return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
1870     - range2->logical_sector + range2->n_sectors > range2->logical_sector;
1871     + range1->logical_sector + range1->n_sectors > range2->logical_sector;
1872     }
1873    
1874     static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
1875     @@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
1876     struct dm_integrity_range *last_range =
1877     list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
1878     struct task_struct *last_range_task;
1879     - if (!ranges_overlap(range, last_range))
1880     - break;
1881     last_range_task = last_range->task;
1882     list_del(&last_range->wait_entry);
1883     if (!add_new_range(ic, last_range, false)) {
1884     @@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
1885     journal_watermark = val;
1886     else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
1887     sync_msec = val;
1888     - else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
1889     + else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
1890     if (ic->meta_dev) {
1891     dm_put_device(ti, ic->meta_dev);
1892     ic->meta_dev = NULL;
1893     @@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
1894     goto bad;
1895     }
1896     ic->sectors_per_block = val >> SECTOR_SHIFT;
1897     - } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
1898     + } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
1899     r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
1900     "Invalid internal_hash argument");
1901     if (r)
1902     goto bad;
1903     - } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
1904     + } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
1905     r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
1906     "Invalid journal_crypt argument");
1907     if (r)
1908     goto bad;
1909     - } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
1910     + } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
1911     r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
1912     "Invalid journal_mac argument");
1913     if (r)
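
The ranges_overlap() change above is the substantive fix in this file: the old predicate's second clause compared range2 against itself, which degenerates to "range2 is non-empty", so any range starting below range2's end was reported as overlapping. The memcmp() to strncmp() conversions likewise stop the comparison at the option string's terminating NUL instead of reading a fixed byte count past a shorter argument. A standalone check of the corrected half-open interval test (illustrative types, not the dm-integrity structures):

#include <assert.h>
#include <stdbool.h>

struct range {
	unsigned long start;
	unsigned long n;	/* interval is [start, start + n) */
};

/* Two half-open intervals overlap iff each one starts before the
 * other ends. */
static bool ranges_overlap(struct range a, struct range b)
{
	return a.start < b.start + b.n && a.start + a.n > b.start;
}

int main(void)
{
	assert(ranges_overlap((struct range){ 0, 10 }, (struct range){ 5, 10 }));
	assert(!ranges_overlap((struct range){ 0, 10 }, (struct range){ 10, 5 }));
	return 0;
}

The second assertion is the case the old code got wrong: touching intervals do not overlap.
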
1914     diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
1915     index a20531e5f3b4..582265e043a6 100644
1916     --- a/drivers/md/dm-rq.c
1917     +++ b/drivers/md/dm-rq.c
1918     @@ -206,11 +206,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
1919     }
1920    
1921     if (unlikely(error == BLK_STS_TARGET)) {
1922     - if (req_op(clone) == REQ_OP_WRITE_SAME &&
1923     - !clone->q->limits.max_write_same_sectors)
1924     + if (req_op(clone) == REQ_OP_DISCARD &&
1925     + !clone->q->limits.max_discard_sectors)
1926     + disable_discard(tio->md);
1927     + else if (req_op(clone) == REQ_OP_WRITE_SAME &&
1928     + !clone->q->limits.max_write_same_sectors)
1929     disable_write_same(tio->md);
1930     - if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
1931     - !clone->q->limits.max_write_zeroes_sectors)
1932     + else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
1933     + !clone->q->limits.max_write_zeroes_sectors)
1934     disable_write_zeroes(tio->md);
1935     }
1936    
1937     diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1938     index 4b1be754cc41..eb257e4dcb1c 100644
1939     --- a/drivers/md/dm-table.c
1940     +++ b/drivers/md/dm-table.c
1941     @@ -1852,6 +1852,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
1942     return true;
1943     }
1944    
1945     +static int device_requires_stable_pages(struct dm_target *ti,
1946     + struct dm_dev *dev, sector_t start,
1947     + sector_t len, void *data)
1948     +{
1949     + struct request_queue *q = bdev_get_queue(dev->bdev);
1950     +
1951     + return q && bdi_cap_stable_pages_required(q->backing_dev_info);
1952     +}
1953     +
1954     +/*
1955     + * If any underlying device requires stable pages, a table must require
1956     + * them as well. Only targets that support iterate_devices are considered:
1957     + * don't want error, zero, etc to require stable pages.
1958     + */
1959     +static bool dm_table_requires_stable_pages(struct dm_table *t)
1960     +{
1961     + struct dm_target *ti;
1962     + unsigned i;
1963     +
1964     + for (i = 0; i < dm_table_get_num_targets(t); i++) {
1965     + ti = dm_table_get_target(t, i);
1966     +
1967     + if (ti->type->iterate_devices &&
1968     + ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
1969     + return true;
1970     + }
1971     +
1972     + return false;
1973     +}
1974     +
1975     void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1976     struct queue_limits *limits)
1977     {
1978     @@ -1909,6 +1939,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1979    
1980     dm_table_verify_integrity(t);
1981    
1982     + /*
1983     + * Some devices don't use blk_integrity but still want stable pages
1984     + * because they do their own checksumming.
1985     + */
1986     + if (dm_table_requires_stable_pages(t))
1987     + q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
1988     + else
1989     + q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
1990     +
1991     /*
1992     * Determine whether or not this queue's I/O timings contribute
1993     * to the entropy pool, Only request-based targets use this.
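
device_requires_stable_pages() and dm_table_requires_stable_pages() above implement a simple OR-propagation: the stacked queue advertises BDI_CAP_STABLE_WRITES exactly when some underlying queue demands it, for instance because the lower device checksums pages that are still under writeback. The rule in miniature (illustrative types, not the dm structures):

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool stable_pages;
};

/* A stacked table needs stable pages iff any device beneath it does. */
static bool table_requires_stable_pages(const struct dev *devs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (devs[i].stable_pages)
			return true;
	return false;
}

int main(void)
{
	const struct dev devs[] = { { false }, { true } };

	printf("%s\n", table_requires_stable_pages(devs, 2)
		       ? "BDI_CAP_STABLE_WRITES set"
		       : "BDI_CAP_STABLE_WRITES cleared");
	return 0;
}
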
1994     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1995     index 515e6af9bed2..4986eea520b6 100644
1996     --- a/drivers/md/dm.c
1997     +++ b/drivers/md/dm.c
1998     @@ -963,6 +963,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
1999     }
2000     }
2001    
2002     +void disable_discard(struct mapped_device *md)
2003     +{
2004     + struct queue_limits *limits = dm_get_queue_limits(md);
2005     +
2006     + /* device doesn't really support DISCARD, disable it */
2007     + limits->max_discard_sectors = 0;
2008     + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
2009     +}
2010     +
2011     void disable_write_same(struct mapped_device *md)
2012     {
2013     struct queue_limits *limits = dm_get_queue_limits(md);
2014     @@ -988,11 +997,14 @@ static void clone_endio(struct bio *bio)
2015     dm_endio_fn endio = tio->ti->type->end_io;
2016    
2017     if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
2018     - if (bio_op(bio) == REQ_OP_WRITE_SAME &&
2019     - !bio->bi_disk->queue->limits.max_write_same_sectors)
2020     + if (bio_op(bio) == REQ_OP_DISCARD &&
2021     + !bio->bi_disk->queue->limits.max_discard_sectors)
2022     + disable_discard(md);
2023     + else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
2024     + !bio->bi_disk->queue->limits.max_write_same_sectors)
2025     disable_write_same(md);
2026     - if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
2027     - !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
2028     + else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
2029     + !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
2030     disable_write_zeroes(md);
2031     }
2032    
2033     @@ -1060,15 +1072,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
2034     return -EINVAL;
2035     }
2036    
2037     - /*
2038     - * BIO based queue uses its own splitting. When multipage bvecs
2039     - * is switched on, size of the incoming bio may be too big to
2040     - * be handled in some targets, such as crypt.
2041     - *
2042     - * When these targets are ready for the big bio, we can remove
2043     - * the limit.
2044     - */
2045     - ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
2046     + ti->max_io_len = (uint32_t) len;
2047    
2048     return 0;
2049     }
2050     diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
2051     index 82a97866e0cf..7c8f203f9a24 100644
2052     --- a/drivers/mmc/host/alcor.c
2053     +++ b/drivers/mmc/host/alcor.c
2054     @@ -48,7 +48,6 @@ struct alcor_sdmmc_host {
2055     struct mmc_command *cmd;
2056     struct mmc_data *data;
2057     unsigned int dma_on:1;
2058     - unsigned int early_data:1;
2059    
2060     struct mutex cmd_mutex;
2061    
2062     @@ -144,8 +143,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
2063     host->sg_count--;
2064     }
2065    
2066     -static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
2067     - bool early)
2068     +static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
2069     {
2070     struct alcor_pci_priv *priv = host->alcor_pci;
2071     struct mmc_data *data = host->data;
2072     @@ -155,13 +153,6 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
2073     ctrl |= AU6601_DATA_WRITE;
2074    
2075     if (data->host_cookie == COOKIE_MAPPED) {
2076     - if (host->early_data) {
2077     - host->early_data = false;
2078     - return;
2079     - }
2080     -
2081     - host->early_data = early;
2082     -
2083     alcor_data_set_dma(host);
2084     ctrl |= AU6601_DATA_DMA_MODE;
2085     host->dma_on = 1;
2086     @@ -231,6 +222,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
2087     static void alcor_prepare_data(struct alcor_sdmmc_host *host,
2088     struct mmc_command *cmd)
2089     {
2090     + struct alcor_pci_priv *priv = host->alcor_pci;
2091     struct mmc_data *data = cmd->data;
2092    
2093     if (!data)
2094     @@ -248,7 +240,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host,
2095     if (data->host_cookie != COOKIE_MAPPED)
2096     alcor_prepare_sg_miter(host);
2097    
2098     - alcor_trigger_data_transfer(host, true);
2099     + alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
2100     }
2101    
2102     static void alcor_send_cmd(struct alcor_sdmmc_host *host,
2103     @@ -435,7 +427,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
2104     if (!host->data)
2105     return false;
2106    
2107     - alcor_trigger_data_transfer(host, false);
2108     + alcor_trigger_data_transfer(host);
2109     host->cmd = NULL;
2110     return true;
2111     }
2112     @@ -456,7 +448,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
2113     if (!host->data)
2114     alcor_request_complete(host, 1);
2115     else
2116     - alcor_trigger_data_transfer(host, false);
2117     + alcor_trigger_data_transfer(host);
2118     host->cmd = NULL;
2119     }
2120    
2121     @@ -487,15 +479,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
2122     break;
2123     case AU6601_INT_READ_BUF_RDY:
2124     alcor_trf_block_pio(host, true);
2125     - if (!host->blocks)
2126     - break;
2127     - alcor_trigger_data_transfer(host, false);
2128     return 1;
2129     case AU6601_INT_WRITE_BUF_RDY:
2130     alcor_trf_block_pio(host, false);
2131     - if (!host->blocks)
2132     - break;
2133     - alcor_trigger_data_transfer(host, false);
2134     return 1;
2135     case AU6601_INT_DMA_END:
2136     if (!host->sg_count)
2137     @@ -508,8 +494,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
2138     break;
2139     }
2140    
2141     - if (intmask & AU6601_INT_DATA_END)
2142     - return 0;
2143     + if (intmask & AU6601_INT_DATA_END) {
2144     + if (!host->dma_on && host->blocks) {
2145     + alcor_trigger_data_transfer(host);
2146     + return 1;
2147     + } else {
2148     + return 0;
2149     + }
2150     + }
2151    
2152     return 1;
2153     }
2154     diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
2155     index c11c18a9aacb..9ec300ec94ba 100644
2156     --- a/drivers/mmc/host/sdhci-omap.c
2157     +++ b/drivers/mmc/host/sdhci-omap.c
2158     @@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
2159     sdhci_reset(host, mask);
2160     }
2161    
2162     +#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
2163     + SDHCI_INT_TIMEOUT)
2164     +#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE)
2165     +
2166     +static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
2167     +{
2168     + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2169     + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
2170     +
2171     + if (omap_host->is_tuning && host->cmd && !host->data_early &&
2172     + (intmask & CMD_ERR_MASK)) {
2173     +
2174     + /*
2175     + * Since we are not resetting data lines during tuning
2176     + * operation, data error or data complete interrupts
2177     + * might still arrive. Mark this request as a failure
2178     + * but still wait for the data interrupt
2179     + */
2180     + if (intmask & SDHCI_INT_TIMEOUT)
2181     + host->cmd->error = -ETIMEDOUT;
2182     + else
2183     + host->cmd->error = -EILSEQ;
2184     +
2185     + host->cmd = NULL;
2186     +
2187     + /*
2188     + * Sometimes command error interrupts and command complete
2189     + * interrupt will arrive together. Clear all command related
2190     + * interrupts here.
2191     + */
2192     + sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS);
2193     + intmask &= ~CMD_MASK;
2194     + }
2195     +
2196     + return intmask;
2197     +}
2198     +
2199     static struct sdhci_ops sdhci_omap_ops = {
2200     .set_clock = sdhci_omap_set_clock,
2201     .set_power = sdhci_omap_set_power,
2202     @@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = {
2203     .platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
2204     .reset = sdhci_omap_reset,
2205     .set_uhs_signaling = sdhci_omap_set_uhs_signaling,
2206     + .irq = sdhci_omap_irq,
2207     };
2208    
2209     static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
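
sdhci_omap_irq() above filters the interrupt status during tuning: the data lines are deliberately not reset, so a failed tuning command is recorded in host->cmd->error, all command-related status bits are acknowledged and masked out, and the data-end interrupt is still left for the core to handle. The same filtering idea in standalone form; the bit values here are invented, not the real SDHCI register layout.

#include <stdio.h>

#define INT_TIMEOUT	(1u << 0)
#define INT_CRC		(1u << 1)
#define INT_RESPONSE	(1u << 2)
#define INT_DATA_END	(1u << 3)
#define CMD_ERR_MASK	(INT_TIMEOUT | INT_CRC)
#define CMD_MASK	(CMD_ERR_MASK | INT_RESPONSE)

/* Swallow command bits after recording the failure, keep data bits. */
static unsigned int filter_tuning_irq(unsigned int intmask)
{
	if (intmask & CMD_ERR_MASK) {
		printf("record command error\n");
		intmask &= ~CMD_MASK;	/* ack + hide all cmd bits */
	}
	return intmask;
}

int main(void)
{
	unsigned int left = filter_tuning_irq(INT_TIMEOUT | INT_DATA_END);

	printf("remaining bits: %#x\n", left);	/* only INT_DATA_END */
	return 0;
}
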
2210     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2211     index 803f7990d32b..40ca339ec3df 100644
2212     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2213     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2214     @@ -1129,6 +1129,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
2215     tpa_info = &rxr->rx_tpa[agg_id];
2216    
2217     if (unlikely(cons != rxr->rx_next_cons)) {
2218     + netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
2219     + cons, rxr->rx_next_cons);
2220     bnxt_sched_reset(bp, rxr);
2221     return;
2222     }
2223     @@ -1581,15 +1583,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2224     }
2225    
2226     cons = rxcmp->rx_cmp_opaque;
2227     - rx_buf = &rxr->rx_buf_ring[cons];
2228     - data = rx_buf->data;
2229     - data_ptr = rx_buf->data_ptr;
2230     if (unlikely(cons != rxr->rx_next_cons)) {
2231     int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
2232    
2233     + netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2234     + cons, rxr->rx_next_cons);
2235     bnxt_sched_reset(bp, rxr);
2236     return rc1;
2237     }
2238     + rx_buf = &rxr->rx_buf_ring[cons];
2239     + data = rx_buf->data;
2240     + data_ptr = rx_buf->data_ptr;
2241     prefetch(data_ptr);
2242    
2243     misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2244     @@ -1606,11 +1610,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2245    
2246     rx_buf->data = NULL;
2247     if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2248     + u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2249     +
2250     bnxt_reuse_rx_data(rxr, cons, data);
2251     if (agg_bufs)
2252     bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
2253    
2254     rc = -EIO;
2255     + if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2256     + netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
2257     + bnxt_sched_reset(bp, rxr);
2258     + }
2259     goto next_rx;
2260     }
2261    
2262     diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
2263     index 503cfadff4ac..d4ee9f9c8c34 100644
2264     --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
2265     +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
2266     @@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
2267     struct nicvf_cq_poll *cq_poll = NULL;
2268     union nic_mbx mbx = {};
2269    
2270     - cancel_delayed_work_sync(&nic->link_change_work);
2271     -
2272     /* wait till all queued set_rx_mode tasks completes */
2273     - drain_workqueue(nic->nicvf_rx_mode_wq);
2274     + if (nic->nicvf_rx_mode_wq) {
2275     + cancel_delayed_work_sync(&nic->link_change_work);
2276     + drain_workqueue(nic->nicvf_rx_mode_wq);
2277     + }
2278    
2279     mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
2280     nicvf_send_msg_to_pf(nic, &mbx);
2281     @@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev)
2282     struct nicvf_cq_poll *cq_poll = NULL;
2283    
2284     /* wait till all queued set_rx_mode tasks completes if any */
2285     - drain_workqueue(nic->nicvf_rx_mode_wq);
2286     + if (nic->nicvf_rx_mode_wq)
2287     + drain_workqueue(nic->nicvf_rx_mode_wq);
2288    
2289     netif_carrier_off(netdev);
2290    
2291     @@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev)
2292     /* Send VF config done msg to PF */
2293     nicvf_send_cfg_done(nic);
2294    
2295     - INIT_DELAYED_WORK(&nic->link_change_work,
2296     - nicvf_link_status_check_task);
2297     - queue_delayed_work(nic->nicvf_rx_mode_wq,
2298     - &nic->link_change_work, 0);
2299     + if (nic->nicvf_rx_mode_wq) {
2300     + INIT_DELAYED_WORK(&nic->link_change_work,
2301     + nicvf_link_status_check_task);
2302     + queue_delayed_work(nic->nicvf_rx_mode_wq,
2303     + &nic->link_change_work, 0);
2304     + }
2305    
2306     return 0;
2307     cleanup:
2308     diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
2309     index 5ecbb1adcf3b..51cfe95f3e24 100644
2310     --- a/drivers/net/ethernet/ibm/ibmvnic.c
2311     +++ b/drivers/net/ethernet/ibm/ibmvnic.c
2312     @@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
2313     */
2314     adapter->state = VNIC_PROBED;
2315    
2316     + reinit_completion(&adapter->init_done);
2317     rc = init_crq_queue(adapter);
2318     if (rc) {
2319     netdev_err(adapter->netdev,
2320     @@ -4625,7 +4626,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
2321     old_num_rx_queues = adapter->req_rx_queues;
2322     old_num_tx_queues = adapter->req_tx_queues;
2323    
2324     - init_completion(&adapter->init_done);
2325     + reinit_completion(&adapter->init_done);
2326     adapter->init_done_rc = 0;
2327     ibmvnic_send_crq_init(adapter);
2328     if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2329     @@ -4680,7 +4681,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
2330    
2331     adapter->from_passive_init = false;
2332    
2333     - init_completion(&adapter->init_done);
2334     adapter->init_done_rc = 0;
2335     ibmvnic_send_crq_init(adapter);
2336     if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2337     @@ -4759,6 +4759,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
2338     INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
2339     INIT_LIST_HEAD(&adapter->rwi_list);
2340     spin_lock_init(&adapter->rwi_lock);
2341     + init_completion(&adapter->init_done);
2342     adapter->resetting = false;
2343    
2344     adapter->mac_change_pending = false;
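
The ibmvnic hunks move init_completion() to probe time and downgrade the per-reset calls to reinit_completion(): a completion should be constructed once and only re-armed thereafter, since re-running the full initialization while another context may call complete() on it is racy. A userspace analogue of that lifetime rule (the types here are stand-ins, not the kernel's struct completion):

#include <stdio.h>

struct completion {
	int done;
};

static void init_completion(struct completion *c)	{ c->done = 0; }
static void reinit_completion(struct completion *c)	{ c->done = 0; }
static void complete(struct completion *c)		{ c->done = 1; }

int main(void)
{
	struct completion init_done;
	int attempt;

	init_completion(&init_done);		/* once, in probe */
	for (attempt = 0; attempt < 2; attempt++) {
		reinit_completion(&init_done);	/* per reset/init pass */
		complete(&init_done);		/* CRQ response arrives */
		printf("attempt %d: done=%d\n", attempt, init_done.done);
	}
	return 0;
}

In the real kernel the distinction matters because init_completion() also reinitializes the internal wait queue, which a concurrent waiter may still be using.
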
2345     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
2346     index eac245a93f91..4ab0d030b544 100644
2347     --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
2348     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
2349     @@ -122,7 +122,9 @@ out:
2350     return err;
2351     }
2352    
2353     -/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
2354     +/* xoff = ((301 + 2.16 * len [m]) * speed [Gbps] + 2.72 * MTU [B])
2355     + * minimum speed value is 40Gbps
2356     + */
2357     static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
2358     {
2359     u32 speed;
2360     @@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
2361     int err;
2362    
2363     err = mlx5e_port_linkspeed(priv->mdev, &speed);
2364     - if (err) {
2365     - mlx5_core_warn(priv->mdev, "cannot get port speed\n");
2366     - return 0;
2367     - }
2368     + if (err)
2369     + speed = SPEED_40000;
2370     + speed = max_t(u32, speed, SPEED_40000);
2371    
2372     xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
2373    
2374     @@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
2375     }
2376    
2377     static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
2378     - u32 xoff, unsigned int mtu)
2379     + u32 xoff, unsigned int max_mtu)
2380     {
2381     int i;
2382    
2383     @@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
2384     }
2385    
2386     if (port_buffer->buffer[i].size <
2387     - (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
2388     + (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
2389     return -ENOMEM;
2390    
2391     port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
2392     - port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
2393     + port_buffer->buffer[i].xon =
2394     + port_buffer->buffer[i].xoff - max_mtu;
2395     }
2396    
2397     return 0;
2398     @@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
2399    
2400     /**
2401     * update_buffer_lossy()
2402     - * mtu: device's MTU
2403     + * max_mtu: netdev's max_mtu
2404     * pfc_en: <input> current pfc configuration
2405     * buffer: <input> current prio to buffer mapping
2406     * xoff: <input> xoff value
2407     @@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
2408     * Return 0 if no error.
2409     * Set change to true if buffer configuration is modified.
2410     */
2411     -static int update_buffer_lossy(unsigned int mtu,
2412     +static int update_buffer_lossy(unsigned int max_mtu,
2413     u8 pfc_en, u8 *buffer, u32 xoff,
2414     struct mlx5e_port_buffer *port_buffer,
2415     bool *change)
2416     @@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
2417     }
2418    
2419     if (changed) {
2420     - err = update_xoff_threshold(port_buffer, xoff, mtu);
2421     + err = update_xoff_threshold(port_buffer, xoff, max_mtu);
2422     if (err)
2423     return err;
2424    
2425     @@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
2426     return 0;
2427     }
2428    
2429     +#define MINIMUM_MAX_MTU 9216
2430     int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
2431     u32 change, unsigned int mtu,
2432     struct ieee_pfc *pfc,
2433     @@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
2434     bool update_prio2buffer = false;
2435     u8 buffer[MLX5E_MAX_PRIORITY];
2436     bool update_buffer = false;
2437     + unsigned int max_mtu;
2438     u32 total_used = 0;
2439     u8 curr_pfc_en;
2440     int err;
2441     int i;
2442    
2443     mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
2444     + max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
2445    
2446     err = mlx5e_port_query_buffer(priv, &port_buffer);
2447     if (err)
2448     @@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
2449    
2450     if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
2451     update_buffer = true;
2452     - err = update_xoff_threshold(&port_buffer, xoff, mtu);
2453     + err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
2454     if (err)
2455     return err;
2456     }
2457     @@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
2458     if (err)
2459     return err;
2460    
2461     - err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
2462     + err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
2463     &port_buffer, &update_buffer);
2464     if (err)
2465     return err;
2466     @@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
2467     if (err)
2468     return err;
2469    
2470     - err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
2471     - &port_buffer, &update_buffer);
2472     + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
2473     + xoff, &port_buffer, &update_buffer);
2474     if (err)
2475     return err;
2476     }
2477     @@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
2478     return -EINVAL;
2479    
2480     update_buffer = true;
2481     - err = update_xoff_threshold(&port_buffer, xoff, mtu);
2482     + err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
2483     if (err)
2484     return err;
2485     }
2486     @@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
2487     /* Need to update buffer configuration if xoff value is changed */
2488     if (!update_buffer && xoff != priv->dcbx.xoff) {
2489     update_buffer = true;
2490     - err = update_xoff_threshold(&port_buffer, xoff, mtu);
2491     + err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
2492     if (err)
2493     return err;
2494     }
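
calculate_xoff() implements the comment's formula, xoff = (301 + 2.16 * len[m]) * speed[Gbps] + 2.72 * MTU[B], in integer fixed point, and the patch hardens it in two ways: the speed is floored at 40 Gbps (also the fallback when the link-speed query fails), and buffers are now sized against the netdev's max_mtu, itself floored at 9216 bytes, rather than the current MTU. A worked instance of the arithmetic, in a hypothetical helper that mirrors the driver's integer math (speed in Mbps, hence the /1000):

#include <stdio.h>

static unsigned int xoff_bytes(unsigned int cable_len_m,
			       unsigned int speed_mbps, unsigned int max_mtu)
{
	if (speed_mbps < 40000)
		speed_mbps = 40000;	/* SPEED_40000 floor from the fix */
	return (301 + 216 * cable_len_m / 100) * speed_mbps / 1000 +
	       272 * max_mtu / 100;
}

int main(void)
{
	/* 7 m cable, 40 Gbps, 9216-byte max MTU:
	 * (301 + 15) * 40 + 25067 = 37707 bytes */
	printf("xoff = %u bytes\n", xoff_bytes(7, 40000, 9216));
	return 0;
}
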
2495     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
2496     index 3078491cc0d0..1539cf3de5dc 100644
2497     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
2498     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
2499     @@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
2500     if (err)
2501     return err;
2502    
2503     + mutex_lock(&mdev->mlx5e_res.td.list_lock);
2504     list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
2505     + mutex_unlock(&mdev->mlx5e_res.td.list_lock);
2506    
2507     return 0;
2508     }
2509     @@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
2510     void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
2511     struct mlx5e_tir *tir)
2512     {
2513     + mutex_lock(&mdev->mlx5e_res.td.list_lock);
2514     mlx5_core_destroy_tir(mdev, tir->tirn);
2515     list_del(&tir->list);
2516     + mutex_unlock(&mdev->mlx5e_res.td.list_lock);
2517     }
2518    
2519     static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
2520     @@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
2521     }
2522    
2523     INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
2524     + mutex_init(&mdev->mlx5e_res.td.list_lock);
2525    
2526     return 0;
2527    
2528     @@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
2529     {
2530     struct mlx5_core_dev *mdev = priv->mdev;
2531     struct mlx5e_tir *tir;
2532     - int err = -ENOMEM;
2533     + int err = 0;
2534     u32 tirn = 0;
2535     int inlen;
2536     void *in;
2537    
2538     inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
2539     in = kvzalloc(inlen, GFP_KERNEL);
2540     - if (!in)
2541     + if (!in) {
2542     + err = -ENOMEM;
2543     goto out;
2544     + }
2545    
2546     if (enable_uc_lb)
2547     MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
2548     @@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
2549    
2550     MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
2551    
2552     + mutex_lock(&mdev->mlx5e_res.td.list_lock);
2553     list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
2554     tirn = tir->tirn;
2555     err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
2556     @@ -168,6 +176,7 @@ out:
2557     kvfree(in);
2558     if (err)
2559     netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
2560     + mutex_unlock(&mdev->mlx5e_res.td.list_lock);
2561    
2562     return err;
2563     }
2564     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
2565     index 5cf5f2a9d51f..8de64e88c670 100644
2566     --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
2567     +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
2568     @@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
2569     void *cmd;
2570     int ret;
2571    
2572     + rcu_read_lock();
2573     + flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
2574     + rcu_read_unlock();
2575     +
2576     + if (!flow) {
2577     + WARN_ONCE(1, "Received NULL pointer for handle\n");
2578     + return -EINVAL;
2579     + }
2580     +
2581     buf = kzalloc(size, GFP_ATOMIC);
2582     if (!buf)
2583     return -ENOMEM;
2584    
2585     cmd = (buf + 1);
2586    
2587     - rcu_read_lock();
2588     - flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
2589     - rcu_read_unlock();
2590     mlx5_fpga_tls_flow_to_cmd(flow, cmd);
2591    
2592     MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
2593     @@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
2594     buf->complete = mlx_tls_kfree_complete;
2595    
2596     ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
2597     + if (ret < 0)
2598     + kfree(buf);
2599    
2600     return ret;
2601     }
2602     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2603     index be81b319b0dc..694edd899322 100644
2604     --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
2605     +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2606     @@ -163,26 +163,6 @@ static struct mlx5_profile profile[] = {
2607     .size = 8,
2608     .limit = 4
2609     },
2610     - .mr_cache[16] = {
2611     - .size = 8,
2612     - .limit = 4
2613     - },
2614     - .mr_cache[17] = {
2615     - .size = 8,
2616     - .limit = 4
2617     - },
2618     - .mr_cache[18] = {
2619     - .size = 8,
2620     - .limit = 4
2621     - },
2622     - .mr_cache[19] = {
2623     - .size = 4,
2624     - .limit = 2
2625     - },
2626     - .mr_cache[20] = {
2627     - .size = 4,
2628     - .limit = 2
2629     - },
2630     },
2631     };
2632    
2633     diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
2634     index 69d7aebda09b..73db94e55fd0 100644
2635     --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
2636     +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
2637     @@ -196,7 +196,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
2638     ret = dev_queue_xmit(skb);
2639     nfp_repr_inc_tx_stats(netdev, len, ret);
2640    
2641     - return ret;
2642     + return NETDEV_TX_OK;
2643     }
2644    
2645     static int nfp_repr_stop(struct net_device *netdev)
2646     @@ -384,7 +384,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
2647     netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2648     netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
2649    
2650     - netdev->priv_flags |= IFF_NO_QUEUE;
2651     + netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
2652     netdev->features |= NETIF_F_LLTX;
2653    
2654     if (nfp_app_has_tc(app)) {
2655     diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2656     index f55d177ae894..365cddbfc684 100644
2657     --- a/drivers/net/ethernet/realtek/r8169.c
2658     +++ b/drivers/net/ethernet/realtek/r8169.c
2659     @@ -28,6 +28,7 @@
2660     #include <linux/pm_runtime.h>
2661     #include <linux/firmware.h>
2662     #include <linux/prefetch.h>
2663     +#include <linux/pci-aspm.h>
2664     #include <linux/ipv6.h>
2665     #include <net/ip6_checksum.h>
2666    
2667     @@ -5332,7 +5333,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
2668     tp->cp_cmd |= PktCntrDisable | INTT_1;
2669     RTL_W16(tp, CPlusCmd, tp->cp_cmd);
2670    
2671     - RTL_W16(tp, IntrMitigate, 0x5151);
2672     + RTL_W16(tp, IntrMitigate, 0x5100);
2673    
2674     /* Work around for RxFIFO overflow. */
2675     if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
2676     @@ -7224,6 +7225,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2677     return rc;
2678     }
2679    
2680     + /* Disable ASPM completely as it causes random devices to stop
2681     + * working, as well as full system hangs, for some PCIe device users.
2682     + */
2683     + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
2684     +
2685     /* enable device (incl. PCI PM wakeup and hotplug setup) */
2686     rc = pcim_enable_device(pdev);
2687     if (rc < 0) {
2688     diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
2689     index e859ae2e42d5..49f41b64077b 100644
2690     --- a/drivers/net/hyperv/hyperv_net.h
2691     +++ b/drivers/net/hyperv/hyperv_net.h
2692     @@ -987,6 +987,7 @@ struct netvsc_device {
2693    
2694     wait_queue_head_t wait_drain;
2695     bool destroy;
2696     + bool tx_disable; /* if true, do not wake up queue again */
2697    
2698     /* Receive buffer allocated by us but manages by NetVSP */
2699     void *recv_buf;
2700     diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
2701     index 813d195bbd57..e0dce373cdd9 100644
2702     --- a/drivers/net/hyperv/netvsc.c
2703     +++ b/drivers/net/hyperv/netvsc.c
2704     @@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
2705    
2706     init_waitqueue_head(&net_device->wait_drain);
2707     net_device->destroy = false;
2708     + net_device->tx_disable = false;
2709    
2710     net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
2711     net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
2712     @@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
2713     } else {
2714     struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
2715    
2716     - if (netif_tx_queue_stopped(txq) &&
2717     + if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
2718     (hv_get_avail_to_write_percent(&channel->outbound) >
2719     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
2720     netif_tx_wake_queue(txq);
2721     @@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
2722     } else if (ret == -EAGAIN) {
2723     netif_tx_stop_queue(txq);
2724     ndev_ctx->eth_stats.stop_queue++;
2725     - if (atomic_read(&nvchan->queue_sends) < 1) {
2726     + if (atomic_read(&nvchan->queue_sends) < 1 &&
2727     + !net_device->tx_disable) {
2728     netif_tx_wake_queue(txq);
2729     ndev_ctx->eth_stats.wake_queue++;
2730     ret = -ENOSPC;
2731     diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
2732     index cf4897043e83..b20fb0fb595b 100644
2733     --- a/drivers/net/hyperv/netvsc_drv.c
2734     +++ b/drivers/net/hyperv/netvsc_drv.c
2735     @@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
2736     rcu_read_unlock();
2737     }
2738    
2739     +static void netvsc_tx_enable(struct netvsc_device *nvscdev,
2740     + struct net_device *ndev)
2741     +{
2742     + nvscdev->tx_disable = false;
2743     + virt_wmb(); /* ensure queue wake up mechanism is on */
2744     +
2745     + netif_tx_wake_all_queues(ndev);
2746     +}
2747     +
2748     static int netvsc_open(struct net_device *net)
2749     {
2750     struct net_device_context *ndev_ctx = netdev_priv(net);
2751     @@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
2752     rdev = nvdev->extension;
2753     if (!rdev->link_state) {
2754     netif_carrier_on(net);
2755     - netif_tx_wake_all_queues(net);
2756     + netvsc_tx_enable(nvdev, net);
2757     }
2758    
2759     if (vf_netdev) {
2760     @@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
2761     }
2762     }
2763    
2764     +static void netvsc_tx_disable(struct netvsc_device *nvscdev,
2765     + struct net_device *ndev)
2766     +{
2767     + if (nvscdev) {
2768     + nvscdev->tx_disable = true;
2769     + virt_wmb(); /* ensure txq will not wake up after stop */
2770     + }
2771     +
2772     + netif_tx_disable(ndev);
2773     +}
2774     +
2775     static int netvsc_close(struct net_device *net)
2776     {
2777     struct net_device_context *net_device_ctx = netdev_priv(net);
2778     @@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
2779     struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
2780     int ret;
2781    
2782     - netif_tx_disable(net);
2783     + netvsc_tx_disable(nvdev, net);
2784    
2785     /* No need to close rndis filter if it is removed already */
2786     if (!nvdev)
2787     @@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
2788    
2789     /* If device was up (receiving) then shutdown */
2790     if (netif_running(ndev)) {
2791     - netif_tx_disable(ndev);
2792     + netvsc_tx_disable(nvdev, ndev);
2793    
2794     ret = rndis_filter_close(nvdev);
2795     if (ret) {
2796     @@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
2797     if (rdev->link_state) {
2798     rdev->link_state = false;
2799     netif_carrier_on(net);
2800     - netif_tx_wake_all_queues(net);
2801     + netvsc_tx_enable(net_device, net);
2802     } else {
2803     notify = true;
2804     }
2805     @@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
2806     if (!rdev->link_state) {
2807     rdev->link_state = true;
2808     netif_carrier_off(net);
2809     - netif_tx_stop_all_queues(net);
2810     + netvsc_tx_disable(net_device, net);
2811     }
2812     kfree(event);
2813     break;
2814     @@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
2815     if (!rdev->link_state) {
2816     rdev->link_state = true;
2817     netif_carrier_off(net);
2818     - netif_tx_stop_all_queues(net);
2819     + netvsc_tx_disable(net_device, net);
2820     event->event = RNDIS_STATUS_MEDIA_CONNECT;
2821     spin_lock_irqsave(&ndev_ctx->lock, flags);
2822     list_add(&event->list, &ndev_ctx->reconfig_events);
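
The tx_disable flag closes a race between netvsc_close()/netvsc_detach() and the transmit completion path: the flag is published before the queues are stopped (the virt_wmb() above), so a completion that finds a stopped queue also observes tx_disable and leaves the queue stopped. A userspace sketch of that handshake using C11 atomics; this is illustrative, not the driver's code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tx_disable;
static atomic_bool queue_stopped;

static void close_path(void)
{
	atomic_store_explicit(&tx_disable, true, memory_order_release);
	atomic_store_explicit(&queue_stopped, true, memory_order_release);
}

static void tx_complete(void)
{
	if (atomic_load_explicit(&queue_stopped, memory_order_acquire) &&
	    !atomic_load_explicit(&tx_disable, memory_order_acquire))
		printf("wake queue\n");
	else
		printf("leave queue stopped\n");
}

int main(void)
{
	close_path();
	tx_complete();	/* prints "leave queue stopped" */
	return 0;
}
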
2823     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2824     index 74bebbdb4b15..9195f3476b1d 100644
2825     --- a/drivers/net/usb/qmi_wwan.c
2826     +++ b/drivers/net/usb/qmi_wwan.c
2827     @@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
2828     {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
2829     {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
2830     {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
2831     + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
2832     {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
2833     {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
2834     {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
2835     diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
2836     index 6d1a1abbed27..cd15c32b2e43 100644
2837     --- a/drivers/net/vrf.c
2838     +++ b/drivers/net/vrf.c
2839     @@ -1275,8 +1275,12 @@ static void vrf_setup(struct net_device *dev)
2840     dev->priv_flags |= IFF_NO_QUEUE;
2841     dev->priv_flags |= IFF_NO_RX_HANDLER;
2842    
2843     - dev->min_mtu = 0;
2844     - dev->max_mtu = 0;
2845     + /* VRF devices do not care about MTU, but if the MTU is set
2846     + * too low then the ipv4 and ipv6 protocols are disabled
2847     + * which breaks networking.
2848     + */
2849     + dev->min_mtu = IPV6_MIN_MTU;
2850     + dev->max_mtu = ETH_MAX_MTU;
2851     }
2852    
2853     static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
2854     diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
2855     index 3f3df4c29f6e..905282a8ddaa 100644
2856     --- a/drivers/pci/hotplug/pciehp_ctrl.c
2857     +++ b/drivers/pci/hotplug/pciehp_ctrl.c
2858     @@ -115,6 +115,10 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
2859     * removed from the slot/adapter.
2860     */
2861     msleep(1000);
2862     +
2863     + /* Ignore link or presence changes caused by power off */
2864     + atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
2865     + &ctrl->pending_events);
2866     }
2867    
2868     /* turn off Green LED */
2869     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2870     index e2a879e93d86..fba03a7d5c7f 100644
2871     --- a/drivers/pci/quirks.c
2872     +++ b/drivers/pci/quirks.c
2873     @@ -3877,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
2874     /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
2875     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
2876     quirk_dma_func1_alias);
2877     +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
2878     + quirk_dma_func1_alias);
2879     /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
2880     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
2881     quirk_dma_func1_alias);
2882     diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
2883     index 0840d27381ea..e0a04bfc873e 100644
2884     --- a/drivers/tty/Kconfig
2885     +++ b/drivers/tty/Kconfig
2886     @@ -441,4 +441,28 @@ config VCC
2887     depends on SUN_LDOMS
2888     help
2889     Support for Sun logical domain consoles.
2890     +
2891     +config LDISC_AUTOLOAD
2892     + bool "Automatically load TTY Line Disciplines"
2893     + default y
2894     + help
2895     + Historically the kernel has always automatically loaded any
2896     + line discipline that is in a kernel module when a user asks
2897     + for it to be loaded with the TIOCSETD ioctl, or through other
2898     + means. This is not always the best thing to do on systems
2899     + where you know you will not be using some of the more
2900     + "ancient" line disciplines, so prevent the kernel from doing
2901     + this unless the request comes from a process with the
2902     + CAP_SYS_MODULE capability.
2903     +
2904     + Say 'Y' here if you trust your userspace users to do the right
2905     + thing, or if you have only provided the line disciplines that
2906     + you know you will be using, or if you wish to continue to use
2907     + the traditional method of on-demand loading of these modules
2908     + by any user.
2909     +
2910     + This functionality can be changed at runtime with the
2911     + dev.tty.ldisc_autoload sysctl; this configuration option
2912     + only sets its default value.
2913     +
2914     endif # TTY
2915     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2916     index 21ffcce16927..5fa250157025 100644
2917     --- a/drivers/tty/tty_io.c
2918     +++ b/drivers/tty/tty_io.c
2919     @@ -513,6 +513,8 @@ static const struct file_operations hung_up_tty_fops = {
2920     static DEFINE_SPINLOCK(redirect_lock);
2921     static struct file *redirect;
2922    
2923     +extern void tty_sysctl_init(void);
2924     +
2925     /**
2926     * tty_wakeup - request more data
2927     * @tty: terminal
2928     @@ -3483,6 +3485,7 @@ void console_sysfs_notify(void)
2929     */
2930     int __init tty_init(void)
2931     {
2932     + tty_sysctl_init();
2933     cdev_init(&tty_cdev, &tty_fops);
2934     if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
2935     register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
2936     diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
2937     index 45eda69b150c..e38f104db174 100644
2938     --- a/drivers/tty/tty_ldisc.c
2939     +++ b/drivers/tty/tty_ldisc.c
2940     @@ -156,6 +156,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
2941     * takes tty_ldiscs_lock to guard against ldisc races
2942     */
2943    
2944     +#if defined(CONFIG_LDISC_AUTOLOAD)
2945     + #define INITIAL_AUTOLOAD_STATE 1
2946     +#else
2947     + #define INITIAL_AUTOLOAD_STATE 0
2948     +#endif
2949     +static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
2950     +
2951     static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
2952     {
2953     struct tty_ldisc *ld;
2954     @@ -170,6 +177,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
2955     */
2956     ldops = get_ldops(disc);
2957     if (IS_ERR(ldops)) {
2958     + if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
2959     + return ERR_PTR(-EPERM);
2960     request_module("tty-ldisc-%d", disc);
2961     ldops = get_ldops(disc);
2962     if (IS_ERR(ldops))
2963     @@ -845,3 +854,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
2964     tty_ldisc_put(tty->ldisc);
2965     tty->ldisc = NULL;
2966     }
2967     +
2968     +static int zero;
2969     +static int one = 1;
2970     +static struct ctl_table tty_table[] = {
2971     + {
2972     + .procname = "ldisc_autoload",
2973     + .data = &tty_ldisc_autoload,
2974     + .maxlen = sizeof(tty_ldisc_autoload),
2975     + .mode = 0644,
2976     + .proc_handler = proc_dointvec,
2977     + .extra1 = &zero,
2978     + .extra2 = &one,
2979     + },
2980     + { }
2981     +};
2982     +
2983     +static struct ctl_table tty_dir_table[] = {
2984     + {
2985     + .procname = "tty",
2986     + .mode = 0555,
2987     + .child = tty_table,
2988     + },
2989     + { }
2990     +};
2991     +
2992     +static struct ctl_table tty_root_table[] = {
2993     + {
2994     + .procname = "dev",
2995     + .mode = 0555,
2996     + .child = tty_dir_table,
2997     + },
2998     + { }
2999     +};
3000     +
3001     +void tty_sysctl_init(void)
3002     +{
3003     + register_sysctl_table(tty_root_table);
3004     +}
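
Together the three TTY hunks add a build-time default (LDISC_AUTOLOAD), a runtime switch (the dev.tty.ldisc_autoload sysctl), and the capability check itself in tty_ldisc_get(). A small userspace probe; with sysctl dev.tty.ldisc_autoload=0 and no CAP_SYS_MODULE, requesting a discipline whose module is not yet loaded should now fail with EPERM:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            int ldisc = 2;          /* N_MOUSE; any modular ldisc will do */

            if (ioctl(STDIN_FILENO, TIOCSETD, &ldisc) < 0)
                    fprintf(stderr, "TIOCSETD: %s\n", strerror(errno));
            else
                    puts("line discipline set");
            return 0;
    }
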
3005     diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
3006     index a0b07c331255..a38b65b97be0 100644
3007     --- a/drivers/virtio/virtio_ring.c
3008     +++ b/drivers/virtio/virtio_ring.c
3009     @@ -871,6 +871,8 @@ static struct virtqueue *vring_create_virtqueue_split(
3010     GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
3011     if (queue)
3012     break;
3013     + if (!may_reduce_num)
3014     + return NULL;
3015     }
3016    
3017     if (!num)
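
vring_create_virtqueue_split() retries the ring allocation at progressively smaller sizes; the added check makes callers that set may_reduce_num to false fail outright instead of silently continuing the shrink loop. The control flow, modeled in isolation (try_alloc() is a stand-in for the real DMA allocation):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *try_alloc(unsigned int num)    /* pretend only <=4 fits */
    {
            return num > 4 ? NULL : malloc(num * 16);
    }

    static void *alloc_ring(unsigned int num, bool may_reduce_num,
                            unsigned int *out_num)
    {
            void *queue = NULL;

            for (; num; num /= 2) {
                    queue = try_alloc(num);
                    if (queue)
                            break;
                    if (!may_reduce_num)        /* the added check */
                            return NULL;
            }
            *out_num = num;
            return queue;
    }

    int main(void)
    {
            unsigned int got = 0;

            printf("flexible: %p\n", alloc_ring(16, true, &got));
            printf("strict:   %p\n", alloc_ring(16, false, &got));
            return 0;
    }
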
3018     diff --git a/fs/block_dev.c b/fs/block_dev.c
3019     index 58a4c1217fa8..06ef48ad1998 100644
3020     --- a/fs/block_dev.c
3021     +++ b/fs/block_dev.c
3022     @@ -298,10 +298,10 @@ static void blkdev_bio_end_io(struct bio *bio)
3023     struct blkdev_dio *dio = bio->bi_private;
3024     bool should_dirty = dio->should_dirty;
3025    
3026     - if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
3027     - if (bio->bi_status && !dio->bio.bi_status)
3028     - dio->bio.bi_status = bio->bi_status;
3029     - } else {
3030     + if (bio->bi_status && !dio->bio.bi_status)
3031     + dio->bio.bi_status = bio->bi_status;
3032     +
3033     + if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
3034     if (!dio->is_sync) {
3035     struct kiocb *iocb = dio->iocb;
3036     ssize_t ret;
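
In the multi-bio case the old code only captured bi_status from non-final completions, so an error reported by the last bio to complete was dropped. The fixed ordering, record the error first and only then do the dec-and-test, modeled with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    struct dio { atomic_int ref; int status; };

    static void end_io(struct dio *d, int status)
    {
            if (status && !d->status)           /* keep the first error seen */
                    d->status = status;

            if (atomic_fetch_sub(&d->ref, 1) == 1)  /* last completer */
                    printf("complete, status=%d\n", d->status);
    }

    int main(void)
    {
            struct dio d = { .ref = 2, .status = 0 };

            end_io(&d, 0);      /* first bio fine */
            end_io(&d, -5);     /* last bio fails: error now propagates */
            return 0;
    }
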
3037     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
3038     index 6e1119496721..1d64a6b8e413 100644
3039     --- a/fs/btrfs/ioctl.c
3040     +++ b/fs/btrfs/ioctl.c
3041     @@ -501,6 +501,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
3042     if (!capable(CAP_SYS_ADMIN))
3043     return -EPERM;
3044    
3045     + /*
3046     + * If the fs is mounted with nologreplay, which requires it to be
3047     + * mounted in RO mode as well, we can not allow discard on free space
3048     + * inside block groups, because log trees refer to extents that are not
3049     + * pinned in a block group's free space cache (pinning the extents is
3050     + * precisely the first phase of replaying a log tree).
3051     + */
3052     + if (btrfs_test_opt(fs_info, NOLOGREPLAY))
3053     + return -EROFS;
3054     +
3055     rcu_read_lock();
3056     list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
3057     dev_list) {
3058     diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
3059     index dc6140013ae8..61d22a56c0ba 100644
3060     --- a/fs/btrfs/props.c
3061     +++ b/fs/btrfs/props.c
3062     @@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
3063    
3064     static int prop_compression_validate(const char *value, size_t len)
3065     {
3066     - if (!strncmp("lzo", value, len))
3067     + if (!strncmp("lzo", value, 3))
3068     return 0;
3069     - else if (!strncmp("zlib", value, len))
3070     + else if (!strncmp("zlib", value, 4))
3071     return 0;
3072     - else if (!strncmp("zstd", value, len))
3073     + else if (!strncmp("zstd", value, 4))
3074     return 0;
3075    
3076     return -EINVAL;
3077     @@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode,
3078     btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
3079     } else if (!strncmp("zlib", value, 4)) {
3080     type = BTRFS_COMPRESS_ZLIB;
3081     - } else if (!strncmp("zstd", value, len)) {
3082     + } else if (!strncmp("zstd", value, 4)) {
3083     type = BTRFS_COMPRESS_ZSTD;
3084     btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
3085     } else {
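
prop_compression_validate() compared against the caller-supplied length, so any bare prefix of a valid name ("zl", "zs", ...) validated successfully; comparing over the literal's own length closes that. The pitfall in two asserts:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
            const char *value = "zl";       /* not a valid compression type */
            size_t len = strlen(value);

            assert(strncmp("zlib", value, len) == 0);  /* old check: passes! */
            assert(strncmp("zlib", value, 4) != 0);    /* fixed check: rejects */
            return 0;
    }
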
3086     diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
3087     index f2c0d863fb52..07cad54b84f1 100644
3088     --- a/fs/cifs/cifsfs.c
3089     +++ b/fs/cifs/cifsfs.c
3090     @@ -559,6 +559,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
3091     tcon->ses->server->echo_interval / HZ);
3092     if (tcon->snapshot_time)
3093     seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
3094     + if (tcon->handle_timeout)
3095     + seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
3096     /* convert actimeo and display it in seconds */
3097     seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
3098    
3099     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
3100     index 1b25e6e95d45..6c934ab3722b 100644
3101     --- a/fs/cifs/cifsglob.h
3102     +++ b/fs/cifs/cifsglob.h
3103     @@ -59,6 +59,12 @@
3104     */
3105     #define CIFS_MAX_ACTIMEO (1 << 30)
3106    
3107     +/*
3108     + * Max persistent and resilient handle timeout (milliseconds).
3109     + * Windows durable max was 960000 (16 minutes)
3110     + */
3111     +#define SMB3_MAX_HANDLE_TIMEOUT 960000
3112     +
3113     /*
3114     * MAX_REQ is the maximum number of requests that WE will send
3115     * on one socket concurrently.
3116     @@ -572,6 +578,7 @@ struct smb_vol {
3117     struct nls_table *local_nls;
3118     unsigned int echo_interval; /* echo interval in secs */
3119     __u64 snapshot_time; /* needed for timewarp tokens */
3120     + __u32 handle_timeout; /* persistent and durable handle timeout in ms */
3121     unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
3122     };
3123    
3124     @@ -1028,6 +1035,7 @@ struct cifs_tcon {
3125     __u32 vol_serial_number;
3126     __le64 vol_create_time;
3127     __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
3128     + __u32 handle_timeout; /* persistent and durable handle timeout in ms */
3129     __u32 ss_flags; /* sector size flags */
3130     __u32 perf_sector_size; /* best sector size for perf */
3131     __u32 max_chunks;
3132     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3133     index 9d4e60123db4..44e6ec85f832 100644
3134     --- a/fs/cifs/connect.c
3135     +++ b/fs/cifs/connect.c
3136     @@ -103,7 +103,7 @@ enum {
3137     Opt_cruid, Opt_gid, Opt_file_mode,
3138     Opt_dirmode, Opt_port,
3139     Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
3140     - Opt_echo_interval, Opt_max_credits,
3141     + Opt_echo_interval, Opt_max_credits, Opt_handletimeout,
3142     Opt_snapshot,
3143    
3144     /* Mount options which take string value */
3145     @@ -208,6 +208,7 @@ static const match_table_t cifs_mount_option_tokens = {
3146     { Opt_rsize, "rsize=%s" },
3147     { Opt_wsize, "wsize=%s" },
3148     { Opt_actimeo, "actimeo=%s" },
3149     + { Opt_handletimeout, "handletimeout=%s" },
3150     { Opt_echo_interval, "echo_interval=%s" },
3151     { Opt_max_credits, "max_credits=%s" },
3152     { Opt_snapshot, "snapshot=%s" },
3153     @@ -1600,6 +1601,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
3154    
3155     vol->actimeo = CIFS_DEF_ACTIMEO;
3156    
3157     + /* Most clients set timeout to 0, which allows the server to use its default */
3158     + vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
3159     +
3160     /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
3161     vol->ops = &smb30_operations;
3162     vol->vals = &smbdefault_values;
3163     @@ -1998,6 +2002,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
3164     goto cifs_parse_mount_err;
3165     }
3166     break;
3167     + case Opt_handletimeout:
3168     + if (get_option_ul(args, &option)) {
3169     + cifs_dbg(VFS, "%s: Invalid handletimeout value\n",
3170     + __func__);
3171     + goto cifs_parse_mount_err;
3172     + }
3173     + vol->handle_timeout = option;
3174     + if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
3175     + cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n");
3176     + goto cifs_parse_mount_err;
3177     + }
3178     + break;
3179     case Opt_echo_interval:
3180     if (get_option_ul(args, &option)) {
3181     cifs_dbg(VFS, "%s: Invalid echo interval value\n",
3182     @@ -3164,6 +3180,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
3183     return 0;
3184     if (tcon->snapshot_time != volume_info->snapshot_time)
3185     return 0;
3186     + if (tcon->handle_timeout != volume_info->handle_timeout)
3187     + return 0;
3188     return 1;
3189     }
3190    
3191     @@ -3278,6 +3296,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
3192     tcon->snapshot_time = volume_info->snapshot_time;
3193     }
3194    
3195     + if (volume_info->handle_timeout) {
3196     + if (ses->server->vals->protocol_id == 0) {
3197     + cifs_dbg(VFS,
3198     + "Use SMB2.1 or later for handle timeout option\n");
3199     + rc = -EOPNOTSUPP;
3200     + goto out_fail;
3201     + } else
3202     + tcon->handle_timeout = volume_info->handle_timeout;
3203     + }
3204     +
3205     tcon->ses = ses;
3206     if (volume_info->password) {
3207     tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
3208     diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
3209     index b204e84b87fb..b0e76d27d752 100644
3210     --- a/fs/cifs/smb2file.c
3211     +++ b/fs/cifs/smb2file.c
3212     @@ -68,7 +68,9 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
3213    
3214    
3215     if (oparms->tcon->use_resilient) {
3216     - nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */
3217     + /* default timeout is 0, servers pick default (120 seconds) */
3218     + nr_ioctl_req.Timeout =
3219     + cpu_to_le32(oparms->tcon->handle_timeout);
3220     nr_ioctl_req.Reserved = 0;
3221     rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
3222     fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
3223     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3224     index 53642a237bf9..068febe37fe4 100644
3225     --- a/fs/cifs/smb2pdu.c
3226     +++ b/fs/cifs/smb2pdu.c
3227     @@ -1837,8 +1837,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
3228     }
3229    
3230     static struct create_durable_v2 *
3231     -create_durable_v2_buf(struct cifs_fid *pfid)
3232     +create_durable_v2_buf(struct cifs_open_parms *oparms)
3233     {
3234     + struct cifs_fid *pfid = oparms->fid;
3235     struct create_durable_v2 *buf;
3236    
3237     buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
3238     @@ -1852,7 +1853,14 @@ create_durable_v2_buf(struct cifs_fid *pfid)
3239     (struct create_durable_v2, Name));
3240     buf->ccontext.NameLength = cpu_to_le16(4);
3241    
3242     - buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
3243     + /*
3244     + * NB: Handle timeout defaults to 0, which allows server to choose
3245     + * (most servers default to 120 seconds) and most clients default to 0.
3246     + * This can be overridden at mount ("handletimeout=") if the user wants
3247     + * a different persistent (or resilient) handle timeout for all opens
3248     + * on a particular SMB3 mount.
3249     + */
3250     + buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
3251     buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
3252     generate_random_uuid(buf->dcontext.CreateGuid);
3253     memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
3254     @@ -1905,7 +1913,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
3255     struct smb2_create_req *req = iov[0].iov_base;
3256     unsigned int num = *num_iovec;
3257    
3258     - iov[num].iov_base = create_durable_v2_buf(oparms->fid);
3259     + iov[num].iov_base = create_durable_v2_buf(oparms);
3260     if (iov[num].iov_base == NULL)
3261     return -ENOMEM;
3262     iov[num].iov_len = sizeof(struct create_durable_v2);
3263     diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
3264     index 50fb0dee23e8..d35b8ec1c485 100644
3265     --- a/include/linux/bitrev.h
3266     +++ b/include/linux/bitrev.h
3267     @@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
3268    
3269     #define __constant_bitrev32(x) \
3270     ({ \
3271     - u32 __x = x; \
3272     - __x = (__x >> 16) | (__x << 16); \
3273     - __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
3274     - __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
3275     - __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
3276     - __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
3277     - __x; \
3278     + u32 ___x = x; \
3279     + ___x = (___x >> 16) | (___x << 16); \
3280     + ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
3281     + ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
3282     + ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
3283     + ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
3284     + ___x; \
3285     })
3286    
3287     #define __constant_bitrev16(x) \
3288     ({ \
3289     - u16 __x = x; \
3290     - __x = (__x >> 8) | (__x << 8); \
3291     - __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
3292     - __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
3293     - __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
3294     - __x; \
3295     + u16 ___x = x; \
3296     + ___x = (___x >> 8) | (___x << 8); \
3297     + ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
3298     + ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
3299     + ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
3300     + ___x; \
3301     })
3302    
3303     #define __constant_bitrev8x4(x) \
3304     ({ \
3305     - u32 __x = x; \
3306     - __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
3307     - __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
3308     - __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
3309     - __x; \
3310     + u32 ___x = x; \
3311     + ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
3312     + ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
3313     + ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
3314     + ___x; \
3315     })
3316    
3317     #define __constant_bitrev8(x) \
3318     ({ \
3319     - u8 __x = x; \
3320     - __x = (__x >> 4) | (__x << 4); \
3321     - __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
3322     - __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
3323     - __x; \
3324     + u8 ___x = x; \
3325     + ___x = (___x >> 4) | (___x << 4); \
3326     + ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
3327     + ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
3328     + ___x; \
3329     })
3330    
3331     #define bitrev32(x) \
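
The extra underscore matters because these are GNU statement expressions: if a caller passes a variable that happens to be named __x, the macro's temporary shadows it and the initializer reads the uninitialized shadow instead of the argument. A standalone reproduction (the "bad" case is undefined behavior, shown only to make the hazard visible):

    #include <stdio.h>

    #define DOUBLE_BAD(x)  ({ int __x = (x);  __x + __x;  })
    #define DOUBLE_GOOD(x) ({ int ___x = (x); ___x + ___x; })

    int main(void)
    {
            int __x = 21;

            /* expands to: int __x = (__x); -- reads the new, uninitialized
             * __x, not the caller's variable */
            printf("bad:  %d\n", DOUBLE_BAD(__x));
            printf("good: %d\n", DOUBLE_GOOD(__x));   /* 42 */
            return 0;
    }
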
3332     diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
3333     index 83ae11cbd12c..7391f5fe4eda 100644
3334     --- a/include/linux/memcontrol.h
3335     +++ b/include/linux/memcontrol.h
3336     @@ -561,7 +561,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
3337     void __unlock_page_memcg(struct mem_cgroup *memcg);
3338     void unlock_page_memcg(struct page *page);
3339    
3340     -/* idx can be of type enum memcg_stat_item or node_stat_item */
3341     +/*
3342     + * idx can be of type enum memcg_stat_item or node_stat_item.
3343     + * Keep in sync with memcg_exact_page_state().
3344     + */
3345     static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
3346     int idx)
3347     {
3348     diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
3349     index 54299251d40d..4f001619f854 100644
3350     --- a/include/linux/mlx5/driver.h
3351     +++ b/include/linux/mlx5/driver.h
3352     @@ -591,6 +591,8 @@ enum mlx5_pagefault_type_flags {
3353     };
3354    
3355     struct mlx5_td {
3356     + /* protects tirs_list changes during tirs refresh */
3357     + struct mutex list_lock;
3358     struct list_head tirs_list;
3359     u32 tdn;
3360     };
3361     diff --git a/include/linux/string.h b/include/linux/string.h
3362     index 7927b875f80c..6ab0a6fa512e 100644
3363     --- a/include/linux/string.h
3364     +++ b/include/linux/string.h
3365     @@ -150,6 +150,9 @@ extern void * memscan(void *,int,__kernel_size_t);
3366     #ifndef __HAVE_ARCH_MEMCMP
3367     extern int memcmp(const void *,const void *,__kernel_size_t);
3368     #endif
3369     +#ifndef __HAVE_ARCH_BCMP
3370     +extern int bcmp(const void *,const void *,__kernel_size_t);
3371     +#endif
3372     #ifndef __HAVE_ARCH_MEMCHR
3373     extern void * memchr(const void *,int,__kernel_size_t);
3374     #endif
3375     diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
3376     index fab02133a919..3dc70adfe5f5 100644
3377     --- a/include/linux/virtio_ring.h
3378     +++ b/include/linux/virtio_ring.h
3379     @@ -63,7 +63,7 @@ struct virtqueue;
3380     /*
3381     * Creates a virtqueue and allocates the descriptor ring. If
3382     * may_reduce_num is set, then this may allocate a smaller ring than
3383     - * expected. The caller should query virtqueue_get_ring_size to learn
3384     + * expected. The caller should query virtqueue_get_vring_size to learn
3385     * the actual size of the ring.
3386     */
3387     struct virtqueue *vring_create_virtqueue(unsigned int index,
3388     diff --git a/include/net/ip.h b/include/net/ip.h
3389     index be3cad9c2e4c..583526aad1d0 100644
3390     --- a/include/net/ip.h
3391     +++ b/include/net/ip.h
3392     @@ -677,7 +677,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
3393     unsigned char __user *data, int optlen);
3394     void ip_options_undo(struct ip_options *opt);
3395     void ip_forward_options(struct sk_buff *skb);
3396     -int ip_options_rcv_srr(struct sk_buff *skb);
3397     +int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
3398    
3399     /*
3400     * Functions provided by ip_sockglue.c
3401     diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
3402     index 99d4148e0f90..1c3126c14930 100644
3403     --- a/include/net/net_namespace.h
3404     +++ b/include/net/net_namespace.h
3405     @@ -58,6 +58,7 @@ struct net {
3406     */
3407     spinlock_t rules_mod_lock;
3408    
3409     + u32 hash_mix;
3410     atomic64_t cookie_gen;
3411    
3412     struct list_head list; /* list of network namespaces */
3413     diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
3414     index 3d58acf94dd2..0612439909dc 100644
3415     --- a/include/net/netfilter/nf_tables.h
3416     +++ b/include/net/netfilter/nf_tables.h
3417     @@ -691,10 +691,12 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
3418     gcb->elems[gcb->head.cnt++] = elem;
3419     }
3420    
3421     +struct nft_expr_ops;
3422     /**
3423     * struct nft_expr_type - nf_tables expression type
3424     *
3425     * @select_ops: function to select nft_expr_ops
3426     + * @release_ops: release nft_expr_ops
3427     * @ops: default ops, used when no select_ops functions is present
3428     * @list: used internally
3429     * @name: Identifier
3430     @@ -707,6 +709,7 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
3431     struct nft_expr_type {
3432     const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
3433     const struct nlattr * const tb[]);
3434     + void (*release_ops)(const struct nft_expr_ops *ops);
3435     const struct nft_expr_ops *ops;
3436     struct list_head list;
3437     const char *name;
3438     diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
3439     index 16a842456189..d9b665151f3d 100644
3440     --- a/include/net/netns/hash.h
3441     +++ b/include/net/netns/hash.h
3442     @@ -2,16 +2,10 @@
3443     #ifndef __NET_NS_HASH_H__
3444     #define __NET_NS_HASH_H__
3445    
3446     -#include <asm/cache.h>
3447     -
3448     -struct net;
3449     +#include <net/net_namespace.h>
3450    
3451     static inline u32 net_hash_mix(const struct net *net)
3452     {
3453     -#ifdef CONFIG_NET_NS
3454     - return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
3455     -#else
3456     - return 0;
3457     -#endif
3458     + return net->hash_mix;
3459     }
3460     #endif
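
Deriving the hash salt from the struct net pointer mixed kernel address bits into network hash tables, which can be probed remotely through collision behavior; the replacement draws per-namespace random bytes once at namespace creation (the setup_net() hunk further down). The shape of the change, modeled in userspace with rand() standing in for get_random_bytes():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct net_ns { uint32_t hash_mix; };

    static void setup_ns(struct net_ns *ns)
    {
            ns->hash_mix = (uint32_t)rand();   /* kernel: get_random_bytes() */
    }

    static uint32_t net_hash_mix(const struct net_ns *ns)
    {
            return ns->hash_mix;               /* no pointer bits involved */
    }

    int main(void)
    {
            struct net_ns ns;

            setup_ns(&ns);
            printf("salt=%08x\n", (unsigned)net_hash_mix(&ns));
            return 0;
    }
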
3461     diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
3462     index e960c4f46ee0..b07a2acc4eec 100644
3463     --- a/kernel/irq/chip.c
3464     +++ b/kernel/irq/chip.c
3465     @@ -1384,6 +1384,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
3466     int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
3467     {
3468     data = data->parent_data;
3469     +
3470     + if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
3471     + return 0;
3472     +
3473     if (data->chip->irq_set_wake)
3474     return data->chip->irq_set_wake(data, on);
3475    
3476     diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
3477     index 84fa255d0329..e16e022eae09 100644
3478     --- a/kernel/irq/irqdesc.c
3479     +++ b/kernel/irq/irqdesc.c
3480     @@ -558,6 +558,7 @@ int __init early_irq_init(void)
3481     alloc_masks(&desc[i], node);
3482     raw_spin_lock_init(&desc[i].lock);
3483     lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
3484     + mutex_init(&desc[i].request_mutex);
3485     desc_set_defaults(i, &desc[i], node, NULL, NULL);
3486     }
3487     return arch_early_irq_init();
3488     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
3489     index 310d0637fe4b..5e61a1a99e38 100644
3490     --- a/kernel/sched/fair.c
3491     +++ b/kernel/sched/fair.c
3492     @@ -7713,10 +7713,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
3493     if (cfs_rq->last_h_load_update == now)
3494     return;
3495    
3496     - cfs_rq->h_load_next = NULL;
3497     + WRITE_ONCE(cfs_rq->h_load_next, NULL);
3498     for_each_sched_entity(se) {
3499     cfs_rq = cfs_rq_of(se);
3500     - cfs_rq->h_load_next = se;
3501     + WRITE_ONCE(cfs_rq->h_load_next, se);
3502     if (cfs_rq->last_h_load_update == now)
3503     break;
3504     }
3505     @@ -7726,7 +7726,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
3506     cfs_rq->last_h_load_update = now;
3507     }
3508    
3509     - while ((se = cfs_rq->h_load_next) != NULL) {
3510     + while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
3511     load = cfs_rq->h_load;
3512     load = div64_ul(load * se->avg.load_avg,
3513     cfs_rq_load_avg(cfs_rq) + 1);
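
h_load_next is written and read locklessly from different CPUs, so the accesses get WRITE_ONCE()/READ_ONCE() to force exactly one untorn store or load and stop the compiler from refetching. A simplified model of what the macros do; the real kernel versions add size checks and diagnostics on top of the volatile cast:

    #include <stdio.h>

    #define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
    #define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

    static int shared;

    int main(void)
    {
            WRITE_ONCE(shared, 42);             /* one untorn store */
            printf("%d\n", READ_ONCE(shared));  /* one untorn load */
            return 0;
    }
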
3514     diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
3515     index 2c97e8c2d29f..0519a8805aab 100644
3516     --- a/kernel/time/alarmtimer.c
3517     +++ b/kernel/time/alarmtimer.c
3518     @@ -594,7 +594,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
3519     {
3520     struct alarm *alarm = &timr->it.alarm.alarmtimer;
3521    
3522     - return ktime_sub(now, alarm->node.expires);
3523     + return ktime_sub(alarm->node.expires, now);
3524     }
3525    
3526     /**
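
ktime_sub(a, b) computes a - b, so the remaining time must be expires - now; the swapped arguments returned the negation. The one-line bug as a pair of asserts:

    #include <assert.h>
    #include <stdint.h>

    typedef int64_t ktime_t;
    static ktime_t ktime_sub(ktime_t a, ktime_t b) { return a - b; }

    int main(void)
    {
            ktime_t now = 100, expires = 250;

            assert(ktime_sub(now, expires) == -150);  /* old, wrong order */
            assert(ktime_sub(expires, now) ==  150);  /* the fix */
            return 0;
    }
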
3527     diff --git a/lib/string.c b/lib/string.c
3528     index 38e4ca08e757..3ab861c1a857 100644
3529     --- a/lib/string.c
3530     +++ b/lib/string.c
3531     @@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
3532     EXPORT_SYMBOL(memcmp);
3533     #endif
3534    
3535     +#ifndef __HAVE_ARCH_BCMP
3536     +/**
3537     + * bcmp - returns 0 if and only if the buffers have identical contents.
3538     + * @a: pointer to first buffer.
3539     + * @b: pointer to second buffer.
3540     + * @len: size of buffers.
3541     + *
3542     + * The sign or magnitude of a non-zero return value has no particular
3543     + * meaning, and architectures may implement their own more efficient bcmp(). So
3544     + * while this particular implementation is a simple (tail) call to memcmp, do
3545     + * not rely on anything but whether the return value is zero or non-zero.
3546     + */
3547     +#undef bcmp
3548     +int bcmp(const void *a, const void *b, size_t len)
3549     +{
3550     + return memcmp(a, b, len);
3551     +}
3552     +EXPORT_SYMBOL(bcmp);
3553     +#endif
3554     +
3555     #ifndef __HAVE_ARCH_MEMSCAN
3556     /**
3557     * memscan - Find a character in an area of memory.
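
The new bcmp() only promises a zero/non-zero answer (compilers may lower memcmp() equality tests to bcmp(), which is why the kernel needs one at all), so callers must never order by its result. A usage sketch against the userspace bcmp() from <strings.h>:

    #include <assert.h>
    #include <strings.h>

    int main(void)
    {
            assert(bcmp("abc", "abc", 3) == 0);
            assert(bcmp("abc", "abd", 3) != 0);  /* sign/magnitude: unspecified */
            return 0;
    }
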
3558     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
3559     index faf357eaf0ce..8b03c698f86e 100644
3560     --- a/mm/huge_memory.c
3561     +++ b/mm/huge_memory.c
3562     @@ -753,6 +753,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
3563     spinlock_t *ptl;
3564    
3565     ptl = pmd_lock(mm, pmd);
3566     + if (!pmd_none(*pmd)) {
3567     + if (write) {
3568     + if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
3569     + WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
3570     + goto out_unlock;
3571     + }
3572     + entry = pmd_mkyoung(*pmd);
3573     + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3574     + if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
3575     + update_mmu_cache_pmd(vma, addr, pmd);
3576     + }
3577     +
3578     + goto out_unlock;
3579     + }
3580     +
3581     entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
3582     if (pfn_t_devmap(pfn))
3583     entry = pmd_mkdevmap(entry);
3584     @@ -764,11 +779,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
3585     if (pgtable) {
3586     pgtable_trans_huge_deposit(mm, pmd, pgtable);
3587     mm_inc_nr_ptes(mm);
3588     + pgtable = NULL;
3589     }
3590    
3591     set_pmd_at(mm, addr, pmd, entry);
3592     update_mmu_cache_pmd(vma, addr, pmd);
3593     +
3594     +out_unlock:
3595     spin_unlock(ptl);
3596     + if (pgtable)
3597     + pte_free(mm, pgtable);
3598     }
3599    
3600     vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
3601     @@ -819,6 +839,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
3602     spinlock_t *ptl;
3603    
3604     ptl = pud_lock(mm, pud);
3605     + if (!pud_none(*pud)) {
3606     + if (write) {
3607     + if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
3608     + WARN_ON_ONCE(!is_huge_zero_pud(*pud));
3609     + goto out_unlock;
3610     + }
3611     + entry = pud_mkyoung(*pud);
3612     + entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
3613     + if (pudp_set_access_flags(vma, addr, pud, entry, 1))
3614     + update_mmu_cache_pud(vma, addr, pud);
3615     + }
3616     + goto out_unlock;
3617     + }
3618     +
3619     entry = pud_mkhuge(pfn_t_pud(pfn, prot));
3620     if (pfn_t_devmap(pfn))
3621     entry = pud_mkdevmap(entry);
3622     @@ -828,6 +862,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
3623     }
3624     set_pud_at(mm, addr, pud, entry);
3625     update_mmu_cache_pud(vma, addr, pud);
3626     +
3627     +out_unlock:
3628     spin_unlock(ptl);
3629     }
3630    
3631     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3632     index 79a7d2a06bba..5bbf2de02a0f 100644
3633     --- a/mm/memcontrol.c
3634     +++ b/mm/memcontrol.c
3635     @@ -3882,6 +3882,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3636     return &memcg->cgwb_domain;
3637     }
3638    
3639     +/*
3640     + * idx can be of type enum memcg_stat_item or node_stat_item.
3641     + * Keep in sync with memcg_exact_page().
3642     + */
3643     +static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
3644     +{
3645     + long x = atomic_long_read(&memcg->stat[idx]);
3646     + int cpu;
3647     +
3648     + for_each_online_cpu(cpu)
3649     + x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
3650     + if (x < 0)
3651     + x = 0;
3652     + return x;
3653     +}
3654     +
3655     /**
3656     * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3657     * @wb: bdi_writeback in question
3658     @@ -3907,10 +3923,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3659     struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3660     struct mem_cgroup *parent;
3661    
3662     - *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3663     + *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
3664    
3665     /* this should eventually include NR_UNSTABLE_NFS */
3666     - *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3667     + *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
3668     *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3669     (1 << LRU_ACTIVE_FILE));
3670     *pheadroom = PAGE_COUNTER_MAX;
3671     diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
3672     index ac92b2eb32b1..e4777614a8a0 100644
3673     --- a/net/bridge/br_multicast.c
3674     +++ b/net/bridge/br_multicast.c
3675     @@ -599,6 +599,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
3676     if (ipv4_is_local_multicast(group))
3677     return 0;
3678    
3679     + memset(&br_group, 0, sizeof(br_group));
3680     br_group.u.ip4 = group;
3681     br_group.proto = htons(ETH_P_IP);
3682     br_group.vid = vid;
3683     @@ -1489,6 +1490,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
3684    
3685     own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
3686    
3687     + memset(&br_group, 0, sizeof(br_group));
3688     br_group.u.ip4 = group;
3689     br_group.proto = htons(ETH_P_IP);
3690     br_group.vid = vid;
3691     @@ -1512,6 +1514,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
3692    
3693     own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
3694    
3695     + memset(&br_group, 0, sizeof(br_group));
3696     br_group.u.ip6 = *group;
3697     br_group.proto = htons(ETH_P_IPV6);
3698     br_group.vid = vid;
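
struct br_ip is used as a hash/lookup key, and its address field is a union sized for IPv6, so the IPv4 paths left most of the key uninitialized before these memset() calls were added. The general hazard, reproduced with an ordinary padded struct:

    #include <stdio.h>
    #include <string.h>

    struct key {
            unsigned char  proto;   /* padding bytes follow */
            unsigned int   addr;
            unsigned short vid;     /* trailing padding too */
    };

    int main(void)
    {
            struct key a, b;

            memset(&a, 0, sizeof(a));       /* the fix: zero the key */
            memset(&b, 0xff, sizeof(b));    /* simulates dirty stack */
            a.proto = b.proto = 4;
            a.addr  = b.addr  = 0x0a000001;
            a.vid   = b.vid   = 1;

            /* Same logical key, but padding differs: hashed or
             * memcmp'ed lookups go wrong. */
            printf("memcmp: %d\n", memcmp(&a, &b, sizeof(a)));
            return 0;
    }
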
3699     diff --git a/net/core/dev.c b/net/core/dev.c
3700     index 5d03889502eb..12824e007e06 100644
3701     --- a/net/core/dev.c
3702     +++ b/net/core/dev.c
3703     @@ -5014,8 +5014,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
3704     if (pt_prev->list_func != NULL)
3705     pt_prev->list_func(head, pt_prev, orig_dev);
3706     else
3707     - list_for_each_entry_safe(skb, next, head, list)
3708     + list_for_each_entry_safe(skb, next, head, list) {
3709     + skb_list_del_init(skb);
3710     pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3711     + }
3712     }
3713    
3714     static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
3715     diff --git a/net/core/ethtool.c b/net/core/ethtool.c
3716     index 158264f7cfaf..3a7f19a61768 100644
3717     --- a/net/core/ethtool.c
3718     +++ b/net/core/ethtool.c
3719     @@ -1794,11 +1794,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
3720     WARN_ON_ONCE(!ret);
3721    
3722     gstrings.len = ret;
3723     - data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
3724     - if (gstrings.len && !data)
3725     - return -ENOMEM;
3726    
3727     - __ethtool_get_strings(dev, gstrings.string_set, data);
3728     + if (gstrings.len) {
3729     + data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
3730     + if (!data)
3731     + return -ENOMEM;
3732     +
3733     + __ethtool_get_strings(dev, gstrings.string_set, data);
3734     + } else {
3735     + data = NULL;
3736     + }
3737    
3738     ret = -EFAULT;
3739     if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
3740     @@ -1894,11 +1899,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
3741     return -EFAULT;
3742    
3743     stats.n_stats = n_stats;
3744     - data = vzalloc(array_size(n_stats, sizeof(u64)));
3745     - if (n_stats && !data)
3746     - return -ENOMEM;
3747    
3748     - ops->get_ethtool_stats(dev, &stats, data);
3749     + if (n_stats) {
3750     + data = vzalloc(array_size(n_stats, sizeof(u64)));
3751     + if (!data)
3752     + return -ENOMEM;
3753     + ops->get_ethtool_stats(dev, &stats, data);
3754     + } else {
3755     + data = NULL;
3756     + }
3757    
3758     ret = -EFAULT;
3759     if (copy_to_user(useraddr, &stats, sizeof(stats)))
3760     @@ -1938,16 +1947,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
3761     return -EFAULT;
3762    
3763     stats.n_stats = n_stats;
3764     - data = vzalloc(array_size(n_stats, sizeof(u64)));
3765     - if (n_stats && !data)
3766     - return -ENOMEM;
3767    
3768     - if (dev->phydev && !ops->get_ethtool_phy_stats) {
3769     - ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
3770     - if (ret < 0)
3771     - return ret;
3772     + if (n_stats) {
3773     + data = vzalloc(array_size(n_stats, sizeof(u64)));
3774     + if (!data)
3775     + return -ENOMEM;
3776     +
3777     + if (dev->phydev && !ops->get_ethtool_phy_stats) {
3778     + ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
3779     + if (ret < 0)
3780     + goto out;
3781     + } else {
3782     + ops->get_ethtool_phy_stats(dev, &stats, data);
3783     + }
3784     } else {
3785     - ops->get_ethtool_phy_stats(dev, &stats, data);
3786     + data = NULL;
3787     }
3788    
3789     ret = -EFAULT;
3790     diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
3791     index b02fb19df2cc..40c249c574c1 100644
3792     --- a/net/core/net_namespace.c
3793     +++ b/net/core/net_namespace.c
3794     @@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
3795    
3796     refcount_set(&net->count, 1);
3797     refcount_set(&net->passive, 1);
3798     + get_random_bytes(&net->hash_mix, sizeof(u32));
3799     net->dev_base_seq = 1;
3800     net->user_ns = user_ns;
3801     idr_init(&net->netns_ids);
3802     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3803     index 2415d9cb9b89..ef2cd5712098 100644
3804     --- a/net/core/skbuff.c
3805     +++ b/net/core/skbuff.c
3806     @@ -3801,7 +3801,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
3807     unsigned int delta_truesize;
3808     struct sk_buff *lp;
3809    
3810     - if (unlikely(p->len + len >= 65536))
3811     + if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
3812     return -E2BIG;
3813    
3814     lp = NAPI_GRO_CB(p)->last;
3815     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
3816     index 6ae89f2b541b..2d5734079e6b 100644
3817     --- a/net/ipv4/ip_gre.c
3818     +++ b/net/ipv4/ip_gre.c
3819     @@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
3820     struct net *net = dev_net(skb->dev);
3821     struct metadata_dst *tun_dst = NULL;
3822     struct erspan_base_hdr *ershdr;
3823     - struct erspan_metadata *pkt_md;
3824     struct ip_tunnel_net *itn;
3825     struct ip_tunnel *tunnel;
3826     const struct iphdr *iph;
3827     @@ -282,9 +281,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
3828     if (unlikely(!pskb_may_pull(skb, len)))
3829     return PACKET_REJECT;
3830    
3831     - ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
3832     - pkt_md = (struct erspan_metadata *)(ershdr + 1);
3833     -
3834     if (__iptunnel_pull_header(skb,
3835     len,
3836     htons(ETH_P_TEB),
3837     @@ -292,8 +288,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
3838     goto drop;
3839    
3840     if (tunnel->collect_md) {
3841     + struct erspan_metadata *pkt_md, *md;
3842     struct ip_tunnel_info *info;
3843     - struct erspan_metadata *md;
3844     + unsigned char *gh;
3845     __be64 tun_id;
3846     __be16 flags;
3847    
3848     @@ -306,6 +303,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
3849     if (!tun_dst)
3850     return PACKET_REJECT;
3851    
3852     + /* skb can be uncloned in __iptunnel_pull_header, so
3853     + * old pkt_md is no longer valid and we need to reset
3854     + * it
3855     + */
3856     + gh = skb_network_header(skb) +
3857     + skb_network_header_len(skb);
3858     + pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
3859     + sizeof(*ershdr));
3860     md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
3861     md->version = ver;
3862     md2 = &md->u.md2;
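
__iptunnel_pull_header() may unclone the skb, reallocating its head, so a pointer computed into the old buffer dangles; the fix recomputes pkt_md from the network header after the pull (the ip6_fragment hunk below applies the same keep-an-offset idea across skb_checksum_help()). The generic pattern with realloc():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *buf = malloc(16);
            if (!buf)
                    return 1;
            strcpy(buf, "hdr:payload");

            char *payload = buf + 4;     /* pointer into the old buffer */
            size_t off = 4;              /* an offset survives reallocation */

            char *grown = realloc(buf, 1 << 20);  /* may move the block */
            if (!grown) {
                    free(buf);
                    return 1;
            }
            (void)payload;               /* may dangle now; do not use it */
            printf("%s\n", grown + off); /* recomputed pointer is valid */
            free(grown);
            return 0;
    }
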
3863     diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
3864     index 1f4737b77067..ccf0d31b6ce5 100644
3865     --- a/net/ipv4/ip_input.c
3866     +++ b/net/ipv4/ip_input.c
3867     @@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb)
3868     ip_local_deliver_finish);
3869     }
3870    
3871     -static inline bool ip_rcv_options(struct sk_buff *skb)
3872     +static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
3873     {
3874     struct ip_options *opt;
3875     const struct iphdr *iph;
3876     - struct net_device *dev = skb->dev;
3877    
3878     /* It looks as overkill, because not all
3879     IP options require packet mangling.
3880     @@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
3881     }
3882     }
3883    
3884     - if (ip_options_rcv_srr(skb))
3885     + if (ip_options_rcv_srr(skb, dev))
3886     goto drop;
3887     }
3888    
3889     @@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
3890     }
3891     #endif
3892    
3893     - if (iph->ihl > 5 && ip_rcv_options(skb))
3894     + if (iph->ihl > 5 && ip_rcv_options(skb, dev))
3895     goto drop;
3896    
3897     rt = skb_rtable(skb);
3898     diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
3899     index 32a35043c9f5..3db31bb9df50 100644
3900     --- a/net/ipv4/ip_options.c
3901     +++ b/net/ipv4/ip_options.c
3902     @@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
3903     }
3904     }
3905    
3906     -int ip_options_rcv_srr(struct sk_buff *skb)
3907     +int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
3908     {
3909     struct ip_options *opt = &(IPCB(skb)->opt);
3910     int srrspace, srrptr;
3911     @@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
3912    
3913     orefdst = skb->_skb_refdst;
3914     skb_dst_set(skb, NULL);
3915     - err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
3916     + err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
3917     rt2 = skb_rtable(skb);
3918     if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
3919     skb_dst_drop(skb);
3920     diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
3921     index cd4814f7e962..359da68d7c06 100644
3922     --- a/net/ipv4/tcp_dctcp.c
3923     +++ b/net/ipv4/tcp_dctcp.c
3924     @@ -67,11 +67,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
3925     module_param(dctcp_alpha_on_init, uint, 0644);
3926     MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
3927    
3928     -static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
3929     -module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
3930     -MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
3931     - "parameter for clamping alpha on loss");
3932     -
3933     static struct tcp_congestion_ops dctcp_reno;
3934    
3935     static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
3936     @@ -164,21 +159,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
3937     }
3938     }
3939    
3940     -static void dctcp_state(struct sock *sk, u8 new_state)
3941     +static void dctcp_react_to_loss(struct sock *sk)
3942     {
3943     - if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
3944     - struct dctcp *ca = inet_csk_ca(sk);
3945     + struct dctcp *ca = inet_csk_ca(sk);
3946     + struct tcp_sock *tp = tcp_sk(sk);
3947    
3948     - /* If this extension is enabled, we clamp dctcp_alpha to
3949     - * max on packet loss; the motivation is that dctcp_alpha
3950     - * is an indicator to the extend of congestion and packet
3951     - * loss is an indicator of extreme congestion; setting
3952     - * this in practice turned out to be beneficial, and
3953     - * effectively assumes total congestion which reduces the
3954     - * window by half.
3955     - */
3956     - ca->dctcp_alpha = DCTCP_MAX_ALPHA;
3957     - }
3958     + ca->loss_cwnd = tp->snd_cwnd;
3959     + tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
3960     +}
3961     +
3962     +static void dctcp_state(struct sock *sk, u8 new_state)
3963     +{
3964     + if (new_state == TCP_CA_Recovery &&
3965     + new_state != inet_csk(sk)->icsk_ca_state)
3966     + dctcp_react_to_loss(sk);
3967     + /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
3968     + * one loss-adjustment per RTT.
3969     + */
3970     }
3971    
3972     static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
3973     @@ -190,6 +187,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
3974     case CA_EVENT_ECN_NO_CE:
3975     dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
3976     break;
3977     + case CA_EVENT_LOSS:
3978     + dctcp_react_to_loss(sk);
3979     + break;
3980     default:
3981     /* Don't care for the rest. */
3982     break;
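
dctcp_react_to_loss() replaces the old clamp-on-loss module parameter with an unconditional reaction: remember the window and halve ssthresh with a floor of two segments, applied once per RTT via the Recovery transition or the RTO event. The arithmetic in isolation:

    #include <assert.h>

    static unsigned int react_to_loss(unsigned int snd_cwnd,
                                      unsigned int *loss_cwnd)
    {
            unsigned int half = snd_cwnd >> 1;

            *loss_cwnd = snd_cwnd;          /* ca->loss_cwnd = tp->snd_cwnd */
            return half > 2 ? half : 2;     /* max(snd_cwnd >> 1U, 2U) */
    }

    int main(void)
    {
            unsigned int loss_cwnd;

            assert(react_to_loss(10, &loss_cwnd) == 5);
            assert(react_to_loss(3,  &loss_cwnd) == 2);  /* floor applies */
            assert(loss_cwnd == 3);
            return 0;
    }
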
3983     diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
3984     index 1aae9ab57fe9..00852f47a73d 100644
3985     --- a/net/ipv4/tcp_ipv4.c
3986     +++ b/net/ipv4/tcp_ipv4.c
3987     @@ -2578,7 +2578,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
3988     {
3989     int cpu;
3990    
3991     - module_put(net->ipv4.tcp_congestion_control->owner);
3992     + if (net->ipv4.tcp_congestion_control)
3993     + module_put(net->ipv4.tcp_congestion_control->owner);
3994    
3995     for_each_possible_cpu(cpu)
3996     inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
3997     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
3998     index 26f25b6e2833..438f1a5fd19a 100644
3999     --- a/net/ipv6/ip6_gre.c
4000     +++ b/net/ipv6/ip6_gre.c
4001     @@ -524,11 +524,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
4002     return PACKET_REJECT;
4003     }
4004    
4005     -static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
4006     - struct tnl_ptk_info *tpi)
4007     +static int ip6erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
4008     + int gre_hdr_len)
4009     {
4010     struct erspan_base_hdr *ershdr;
4011     - struct erspan_metadata *pkt_md;
4012     const struct ipv6hdr *ipv6h;
4013     struct erspan_md2 *md2;
4014     struct ip6_tnl *tunnel;
4015     @@ -547,18 +546,16 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
4016     if (unlikely(!pskb_may_pull(skb, len)))
4017     return PACKET_REJECT;
4018    
4019     - ershdr = (struct erspan_base_hdr *)skb->data;
4020     - pkt_md = (struct erspan_metadata *)(ershdr + 1);
4021     -
4022     if (__iptunnel_pull_header(skb, len,
4023     htons(ETH_P_TEB),
4024     false, false) < 0)
4025     return PACKET_REJECT;
4026    
4027     if (tunnel->parms.collect_md) {
4028     + struct erspan_metadata *pkt_md, *md;
4029     struct metadata_dst *tun_dst;
4030     struct ip_tunnel_info *info;
4031     - struct erspan_metadata *md;
4032     + unsigned char *gh;
4033     __be64 tun_id;
4034     __be16 flags;
4035    
4036     @@ -571,6 +568,14 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
4037     if (!tun_dst)
4038     return PACKET_REJECT;
4039    
4040     + /* skb can be uncloned in __iptunnel_pull_header, so
4041     + * old pkt_md is no longer valid and we need to reset
4042     + * it
4043     + */
4044     + gh = skb_network_header(skb) +
4045     + skb_network_header_len(skb);
4046     + pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
4047     + sizeof(*ershdr));
4048     info = &tun_dst->u.tun_info;
4049     md = ip_tunnel_info_opts(info);
4050     md->version = ver;
4051     @@ -607,7 +612,7 @@ static int gre_rcv(struct sk_buff *skb)
4052    
4053     if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
4054     tpi.proto == htons(ETH_P_ERSPAN2))) {
4055     - if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
4056     + if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
4057     return 0;
4058     goto out;
4059     }
4060     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
4061     index 5f9fa0302b5a..e71227390bec 100644
4062     --- a/net/ipv6/ip6_output.c
4063     +++ b/net/ipv6/ip6_output.c
4064     @@ -595,7 +595,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
4065     inet6_sk(skb->sk) : NULL;
4066     struct ipv6hdr *tmp_hdr;
4067     struct frag_hdr *fh;
4068     - unsigned int mtu, hlen, left, len;
4069     + unsigned int mtu, hlen, left, len, nexthdr_offset;
4070     int hroom, troom;
4071     __be32 frag_id;
4072     int ptr, offset = 0, err = 0;
4073     @@ -606,6 +606,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
4074     goto fail;
4075     hlen = err;
4076     nexthdr = *prevhdr;
4077     + nexthdr_offset = prevhdr - skb_network_header(skb);
4078    
4079     mtu = ip6_skb_dst_mtu(skb);
4080    
4081     @@ -640,6 +641,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
4082     (err = skb_checksum_help(skb)))
4083     goto fail;
4084    
4085     + prevhdr = skb_network_header(skb) + nexthdr_offset;
4086     hroom = LL_RESERVED_SPACE(rt->dst.dev);
4087     if (skb_has_frag_list(skb)) {
4088     unsigned int first_len = skb_pagelen(skb);
4089     diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
4090     index 0c6403cf8b52..ade1390c6348 100644
4091     --- a/net/ipv6/ip6_tunnel.c
4092     +++ b/net/ipv6/ip6_tunnel.c
4093     @@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
4094     rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
4095     eiph->daddr, eiph->saddr, 0, 0,
4096     IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
4097     - if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
4098     + if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
4099     if (!IS_ERR(rt))
4100     ip_rt_put(rt);
4101     goto out;
4102     @@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
4103     } else {
4104     if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
4105     skb2->dev) ||
4106     - skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
4107     + skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
4108     goto out;
4109     }
4110    
4111     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
4112     index 07e21a82ce4c..b2109b74857d 100644
4113     --- a/net/ipv6/sit.c
4114     +++ b/net/ipv6/sit.c
4115     @@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
4116     !net_eq(tunnel->net, dev_net(tunnel->dev))))
4117     goto out;
4118    
4119     + /* skb can be uncloned in iptunnel_pull_header, so
4120     + * old iph is no longer valid
4121     + */
4122     + iph = (const struct iphdr *)skb_mac_header(skb);
4123     err = IP_ECN_decapsulate(iph, skb);
4124     if (unlikely(err)) {
4125     if (log_ecn_error)
4126     diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
4127     index 571d824e4e24..b919db02c7f9 100644
4128     --- a/net/kcm/kcmsock.c
4129     +++ b/net/kcm/kcmsock.c
4130     @@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
4131     if (err)
4132     goto fail;
4133    
4134     - err = sock_register(&kcm_family_ops);
4135     - if (err)
4136     - goto sock_register_fail;
4137     -
4138     err = register_pernet_device(&kcm_net_ops);
4139     if (err)
4140     goto net_ops_fail;
4141    
4142     + err = sock_register(&kcm_family_ops);
4143     + if (err)
4144     + goto sock_register_fail;
4145     +
4146     err = kcm_proc_init();
4147     if (err)
4148     goto proc_init_fail;
4149     @@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
4150     return 0;
4151    
4152     proc_init_fail:
4153     - unregister_pernet_device(&kcm_net_ops);
4154     -
4155     -net_ops_fail:
4156     sock_unregister(PF_KCM);
4157    
4158     sock_register_fail:
4159     + unregister_pernet_device(&kcm_net_ops);
4160     +
4161     +net_ops_fail:
4162     proto_unregister(&kcm_proto);
4163    
4164     fail:
4165     @@ -2090,8 +2090,8 @@ fail:
4166     static void __exit kcm_exit(void)
4167     {
4168     kcm_proc_exit();
4169     - unregister_pernet_device(&kcm_net_ops);
4170     sock_unregister(PF_KCM);
4171     + unregister_pernet_device(&kcm_net_ops);
4172     proto_unregister(&kcm_proto);
4173     destroy_workqueue(kcm_wq);
4174    
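
The kcm_init() reshuffle enforces the usual rule: sock_register() publishes the socket family, so the per-net state it relies on must already exist, and every error label (and kcm_exit()) must unwind in exactly the reverse order of setup. The goto-ladder skeleton, with an induced failure to exercise the unwind:

    #include <stdio.h>

    static int  register_pernet(void)    { return 0; }
    static int  register_family(void)    { return 0; }   /* publishes family */
    static int  proc_init(void)          { return -1; }  /* force error path */
    static void unregister_family(void)  { puts("unregister family"); }
    static void unregister_pernet(void)  { puts("unregister pernet"); }

    int main(void)
    {
            if (register_pernet())
                    goto fail;
            if (register_family())
                    goto family_fail;
            if (proc_init())
                    goto proc_fail;
            return 0;

    proc_fail:
            unregister_family();    /* family first: no new sockets... */
    family_fail:
            unregister_pernet();    /* ...then the per-net state they used */
    fail:
            return 1;
    }
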
4175     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4176     index e1724f9d8b9d..acb124ce92ec 100644
4177     --- a/net/netfilter/nf_tables_api.c
4178     +++ b/net/netfilter/nf_tables_api.c
4179     @@ -2119,9 +2119,11 @@ err1:
4180     static void nf_tables_expr_destroy(const struct nft_ctx *ctx,
4181     struct nft_expr *expr)
4182     {
4183     + const struct nft_expr_type *type = expr->ops->type;
4184     +
4185     if (expr->ops->destroy)
4186     expr->ops->destroy(ctx, expr);
4187     - module_put(expr->ops->type->owner);
4188     + module_put(type->owner);
4189     }
4190    
4191     struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
4192     @@ -2129,6 +2131,7 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
4193     {
4194     struct nft_expr_info info;
4195     struct nft_expr *expr;
4196     + struct module *owner;
4197     int err;
4198    
4199     err = nf_tables_expr_parse(ctx, nla, &info);
4200     @@ -2148,7 +2151,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
4201     err3:
4202     kfree(expr);
4203     err2:
4204     - module_put(info.ops->type->owner);
4205     + owner = info.ops->type->owner;
4206     + if (info.ops->type->release_ops)
4207     + info.ops->type->release_ops(info.ops);
4208     +
4209     + module_put(owner);
4210     err1:
4211     return ERR_PTR(err);
4212     }
4213     @@ -2746,8 +2753,11 @@ err2:
4214     nf_tables_rule_release(&ctx, rule);
4215     err1:
4216     for (i = 0; i < n; i++) {
4217     - if (info[i].ops != NULL)
4218     + if (info[i].ops) {
4219     module_put(info[i].ops->type->owner);
4220     + if (info[i].ops->type->release_ops)
4221     + info[i].ops->type->release_ops(info[i].ops);
4222     + }
4223     }
4224     kvfree(info);
4225     return err;
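
nf_tables_expr_destroy() may free expr->ops through the destroy/release_ops path that nft_compat now uses, so dereferencing expr->ops->type->owner afterwards would be a use-after-free; the fix caches the type pointer first. The pattern in miniature:

    #include <stdio.h>
    #include <stdlib.h>

    struct type { const char *owner; };
    struct ops  { const struct type *type; };

    static void destroy(struct ops *ops)   /* may free ops, like release_ops */
    {
            free(ops);
    }

    int main(void)
    {
            struct type t = { "module" };
            struct ops *ops = malloc(sizeof(*ops));
            if (!ops)
                    return 1;
            ops->type = &t;

            const struct type *type = ops->type;  /* cache before destroy() */
            destroy(ops);
            printf("put %s\n", type->owner);      /* no touch of freed ops */
            return 0;
    }
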
4226     diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
4227     index 0a4bad55a8aa..469f9da5073b 100644
4228     --- a/net/netfilter/nft_compat.c
4229     +++ b/net/netfilter/nft_compat.c
4230     @@ -22,23 +22,6 @@
4231     #include <linux/netfilter_bridge/ebtables.h>
4232     #include <linux/netfilter_arp/arp_tables.h>
4233     #include <net/netfilter/nf_tables.h>
4234     -#include <net/netns/generic.h>
4235     -
4236     -struct nft_xt {
4237     - struct list_head head;
4238     - struct nft_expr_ops ops;
4239     - refcount_t refcnt;
4240     -
4241     - /* used only when transaction mutex is locked */
4242     - unsigned int listcnt;
4243     -
4244     - /* Unlike other expressions, ops doesn't have static storage duration.
4245     - * nft core assumes they do. We use kfree_rcu so that nft core can
4246     - * can check expr->ops->size even after nft_compat->destroy() frees
4247     - * the nft_xt struct that holds the ops structure.
4248     - */
4249     - struct rcu_head rcu_head;
4250     -};
4251    
4252     /* Used for matches where *info is larger than X byte */
4253     #define NFT_MATCH_LARGE_THRESH 192
4254     @@ -47,46 +30,6 @@ struct nft_xt_match_priv {
4255     void *info;
4256     };
4257    
4258     -struct nft_compat_net {
4259     - struct list_head nft_target_list;
4260     - struct list_head nft_match_list;
4261     -};
4262     -
4263     -static unsigned int nft_compat_net_id __read_mostly;
4264     -static struct nft_expr_type nft_match_type;
4265     -static struct nft_expr_type nft_target_type;
4266     -
4267     -static struct nft_compat_net *nft_compat_pernet(struct net *net)
4268     -{
4269     - return net_generic(net, nft_compat_net_id);
4270     -}
4271     -
4272     -static void nft_xt_get(struct nft_xt *xt)
4273     -{
4274     - /* refcount_inc() warns on 0 -> 1 transition, but we can't
4275     - * init the reference count to 1 in .select_ops -- we can't
4276     - * undo such an increase when another expression inside the same
4277     - * rule fails afterwards.
4278     - */
4279     - if (xt->listcnt == 0)
4280     - refcount_set(&xt->refcnt, 1);
4281     - else
4282     - refcount_inc(&xt->refcnt);
4283     -
4284     - xt->listcnt++;
4285     -}
4286     -
4287     -static bool nft_xt_put(struct nft_xt *xt)
4288     -{
4289     - if (refcount_dec_and_test(&xt->refcnt)) {
4290     - WARN_ON_ONCE(!list_empty(&xt->head));
4291     - kfree_rcu(xt, rcu_head);
4292     - return true;
4293     - }
4294     -
4295     - return false;
4296     -}
4297     -
4298     static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
4299     const char *tablename)
4300     {
4301     @@ -281,7 +224,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
4302     struct xt_target *target = expr->ops->data;
4303     struct xt_tgchk_param par;
4304     size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
4305     - struct nft_xt *nft_xt;
4306     u16 proto = 0;
4307     bool inv = false;
4308     union nft_entry e = {};
4309     @@ -305,8 +247,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
4310     if (!target->target)
4311     return -EINVAL;
4312    
4313     - nft_xt = container_of(expr->ops, struct nft_xt, ops);
4314     - nft_xt_get(nft_xt);
4315     return 0;
4316     }
4317    
4318     @@ -325,8 +265,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
4319     if (par.target->destroy != NULL)
4320     par.target->destroy(&par);
4321    
4322     - if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
4323     - module_put(me);
4324     + module_put(me);
4325     + kfree(expr->ops);
4326     }
4327    
4328     static int nft_extension_dump_info(struct sk_buff *skb, int attr,
4329     @@ -499,7 +439,6 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
4330     struct xt_match *match = expr->ops->data;
4331     struct xt_mtchk_param par;
4332     size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
4333     - struct nft_xt *nft_xt;
4334     u16 proto = 0;
4335     bool inv = false;
4336     union nft_entry e = {};
4337     @@ -515,13 +454,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
4338    
4339     nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
4340    
4341     - ret = xt_check_match(&par, size, proto, inv);
4342     - if (ret < 0)
4343     - return ret;
4344     -
4345     - nft_xt = container_of(expr->ops, struct nft_xt, ops);
4346     - nft_xt_get(nft_xt);
4347     - return 0;
4348     + return xt_check_match(&par, size, proto, inv);
4349     }
4350    
4351     static int
4352     @@ -564,8 +497,8 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
4353     if (par.match->destroy != NULL)
4354     par.match->destroy(&par);
4355    
4356     - if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
4357     - module_put(me);
4358     + module_put(me);
4359     + kfree(expr->ops);
4360     }
4361    
4362     static void
4363     @@ -574,18 +507,6 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
4364     __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
4365     }
4366    
4367     -static void nft_compat_deactivate(const struct nft_ctx *ctx,
4368     - const struct nft_expr *expr,
4369     - enum nft_trans_phase phase)
4370     -{
4371     - struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
4372     -
4373     - if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) {
4374     - if (--xt->listcnt == 0)
4375     - list_del_init(&xt->head);
4376     - }
4377     -}
4378     -
4379     static void
4380     nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
4381     {
4382     @@ -780,19 +701,13 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
4383     .cb = nfnl_nft_compat_cb,
4384     };
4385    
4386     -static bool nft_match_cmp(const struct xt_match *match,
4387     - const char *name, u32 rev, u32 family)
4388     -{
4389     - return strcmp(match->name, name) == 0 && match->revision == rev &&
4390     - (match->family == NFPROTO_UNSPEC || match->family == family);
4391     -}
4392     +static struct nft_expr_type nft_match_type;
4393    
4394     static const struct nft_expr_ops *
4395     nft_match_select_ops(const struct nft_ctx *ctx,
4396     const struct nlattr * const tb[])
4397     {
4398     - struct nft_compat_net *cn;
4399     - struct nft_xt *nft_match;
4400     + struct nft_expr_ops *ops;
4401     struct xt_match *match;
4402     unsigned int matchsize;
4403     char *mt_name;
4404     @@ -808,16 +723,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
4405     rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
4406     family = ctx->family;
4407    
4408     - cn = nft_compat_pernet(ctx->net);
4409     -
4410     - /* Re-use the existing match if it's already loaded. */
4411     - list_for_each_entry(nft_match, &cn->nft_match_list, head) {
4412     - struct xt_match *match = nft_match->ops.data;
4413     -
4414     - if (nft_match_cmp(match, mt_name, rev, family))
4415     - return &nft_match->ops;
4416     - }
4417     -
4418     match = xt_request_find_match(family, mt_name, rev);
4419     if (IS_ERR(match))
4420     return ERR_PTR(-ENOENT);
4421     @@ -827,65 +732,62 @@ nft_match_select_ops(const struct nft_ctx *ctx,
4422     goto err;
4423     }
4424    
4425     - /* This is the first time we use this match, allocate operations */
4426     - nft_match = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
4427     - if (nft_match == NULL) {
4428     + ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
4429     + if (!ops) {
4430     err = -ENOMEM;
4431     goto err;
4432     }
4433    
4434     - refcount_set(&nft_match->refcnt, 0);
4435     - nft_match->ops.type = &nft_match_type;
4436     - nft_match->ops.eval = nft_match_eval;
4437     - nft_match->ops.init = nft_match_init;
4438     - nft_match->ops.destroy = nft_match_destroy;
4439     - nft_match->ops.deactivate = nft_compat_deactivate;
4440     - nft_match->ops.dump = nft_match_dump;
4441     - nft_match->ops.validate = nft_match_validate;
4442     - nft_match->ops.data = match;
4443     + ops->type = &nft_match_type;
4444     + ops->eval = nft_match_eval;
4445     + ops->init = nft_match_init;
4446     + ops->destroy = nft_match_destroy;
4447     + ops->dump = nft_match_dump;
4448     + ops->validate = nft_match_validate;
4449     + ops->data = match;
4450    
4451     matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
4452     if (matchsize > NFT_MATCH_LARGE_THRESH) {
4453     matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv));
4454    
4455     - nft_match->ops.eval = nft_match_large_eval;
4456     - nft_match->ops.init = nft_match_large_init;
4457     - nft_match->ops.destroy = nft_match_large_destroy;
4458     - nft_match->ops.dump = nft_match_large_dump;
4459     + ops->eval = nft_match_large_eval;
4460     + ops->init = nft_match_large_init;
4461     + ops->destroy = nft_match_large_destroy;
4462     + ops->dump = nft_match_large_dump;
4463     }
4464    
4465     - nft_match->ops.size = matchsize;
4466     + ops->size = matchsize;
4467    
4468     - nft_match->listcnt = 0;
4469     - list_add(&nft_match->head, &cn->nft_match_list);
4470     -
4471     - return &nft_match->ops;
4472     + return ops;
4473     err:
4474     module_put(match->me);
4475     return ERR_PTR(err);
4476     }
4477    
4478     +static void nft_match_release_ops(const struct nft_expr_ops *ops)
4479     +{
4480     + struct xt_match *match = ops->data;
4481     +
4482     + module_put(match->me);
4483     + kfree(ops);
4484     +}
4485     +
4486     static struct nft_expr_type nft_match_type __read_mostly = {
4487     .name = "match",
4488     .select_ops = nft_match_select_ops,
4489     + .release_ops = nft_match_release_ops,
4490     .policy = nft_match_policy,
4491     .maxattr = NFTA_MATCH_MAX,
4492     .owner = THIS_MODULE,
4493     };
4494    
4495     -static bool nft_target_cmp(const struct xt_target *tg,
4496     - const char *name, u32 rev, u32 family)
4497     -{
4498     - return strcmp(tg->name, name) == 0 && tg->revision == rev &&
4499     - (tg->family == NFPROTO_UNSPEC || tg->family == family);
4500     -}
4501     +static struct nft_expr_type nft_target_type;
4502    
4503     static const struct nft_expr_ops *
4504     nft_target_select_ops(const struct nft_ctx *ctx,
4505     const struct nlattr * const tb[])
4506     {
4507     - struct nft_compat_net *cn;
4508     - struct nft_xt *nft_target;
4509     + struct nft_expr_ops *ops;
4510     struct xt_target *target;
4511     char *tg_name;
4512     u32 rev, family;
4513     @@ -905,18 +807,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
4514     strcmp(tg_name, "standard") == 0)
4515     return ERR_PTR(-EINVAL);
4516    
4517     - cn = nft_compat_pernet(ctx->net);
4518     - /* Re-use the existing target if it's already loaded. */
4519     - list_for_each_entry(nft_target, &cn->nft_target_list, head) {
4520     - struct xt_target *target = nft_target->ops.data;
4521     -
4522     - if (!target->target)
4523     - continue;
4524     -
4525     - if (nft_target_cmp(target, tg_name, rev, family))
4526     - return &nft_target->ops;
4527     - }
4528     -
4529     target = xt_request_find_target(family, tg_name, rev);
4530     if (IS_ERR(target))
4531     return ERR_PTR(-ENOENT);
4532     @@ -931,113 +821,55 @@ nft_target_select_ops(const struct nft_ctx *ctx,
4533     goto err;
4534     }
4535    
4536     - /* This is the first time we use this target, allocate operations */
4537     - nft_target = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
4538     - if (nft_target == NULL) {
4539     + ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
4540     + if (!ops) {
4541     err = -ENOMEM;
4542     goto err;
4543     }
4544    
4545     - refcount_set(&nft_target->refcnt, 0);
4546     - nft_target->ops.type = &nft_target_type;
4547     - nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
4548     - nft_target->ops.init = nft_target_init;
4549     - nft_target->ops.destroy = nft_target_destroy;
4550     - nft_target->ops.deactivate = nft_compat_deactivate;
4551     - nft_target->ops.dump = nft_target_dump;
4552     - nft_target->ops.validate = nft_target_validate;
4553     - nft_target->ops.data = target;
4554     + ops->type = &nft_target_type;
4555     + ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
4556     + ops->init = nft_target_init;
4557     + ops->destroy = nft_target_destroy;
4558     + ops->dump = nft_target_dump;
4559     + ops->validate = nft_target_validate;
4560     + ops->data = target;
4561    
4562     if (family == NFPROTO_BRIDGE)
4563     - nft_target->ops.eval = nft_target_eval_bridge;
4564     + ops->eval = nft_target_eval_bridge;
4565     else
4566     - nft_target->ops.eval = nft_target_eval_xt;
4567     -
4568     - nft_target->listcnt = 0;
4569     - list_add(&nft_target->head, &cn->nft_target_list);
4570     + ops->eval = nft_target_eval_xt;
4571    
4572     - return &nft_target->ops;
4573     + return ops;
4574     err:
4575     module_put(target->me);
4576     return ERR_PTR(err);
4577     }
4578    
4579     +static void nft_target_release_ops(const struct nft_expr_ops *ops)
4580     +{
4581     + struct xt_target *target = ops->data;
4582     +
4583     + module_put(target->me);
4584     + kfree(ops);
4585     +}
4586     +
4587     static struct nft_expr_type nft_target_type __read_mostly = {
4588     .name = "target",
4589     .select_ops = nft_target_select_ops,
4590     + .release_ops = nft_target_release_ops,
4591     .policy = nft_target_policy,
4592     .maxattr = NFTA_TARGET_MAX,
4593     .owner = THIS_MODULE,
4594     };
4595    
4596     -static int __net_init nft_compat_init_net(struct net *net)
4597     -{
4598     - struct nft_compat_net *cn = nft_compat_pernet(net);
4599     -
4600     - INIT_LIST_HEAD(&cn->nft_target_list);
4601     - INIT_LIST_HEAD(&cn->nft_match_list);
4602     -
4603     - return 0;
4604     -}
4605     -
4606     -static void __net_exit nft_compat_exit_net(struct net *net)
4607     -{
4608     - struct nft_compat_net *cn = nft_compat_pernet(net);
4609     - struct nft_xt *xt, *next;
4610     -
4611     - if (list_empty(&cn->nft_match_list) &&
4612     - list_empty(&cn->nft_target_list))
4613     - return;
4614     -
4615     - /* If there was an error that caused nft_xt expr to not be initialized
4616     - * fully and noone else requested the same expression later, the lists
4617     - * contain 0-refcount entries that still hold module reference.
4618     - *
4619     - * Clean them here.
4620     - */
4621     - mutex_lock(&net->nft.commit_mutex);
4622     - list_for_each_entry_safe(xt, next, &cn->nft_target_list, head) {
4623     - struct xt_target *target = xt->ops.data;
4624     -
4625     - list_del_init(&xt->head);
4626     -
4627     - if (refcount_read(&xt->refcnt))
4628     - continue;
4629     - module_put(target->me);
4630     - kfree(xt);
4631     - }
4632     -
4633     - list_for_each_entry_safe(xt, next, &cn->nft_match_list, head) {
4634     - struct xt_match *match = xt->ops.data;
4635     -
4636     - list_del_init(&xt->head);
4637     -
4638     - if (refcount_read(&xt->refcnt))
4639     - continue;
4640     - module_put(match->me);
4641     - kfree(xt);
4642     - }
4643     - mutex_unlock(&net->nft.commit_mutex);
4644     -}
4645     -
4646     -static struct pernet_operations nft_compat_net_ops = {
4647     - .init = nft_compat_init_net,
4648     - .exit = nft_compat_exit_net,
4649     - .id = &nft_compat_net_id,
4650     - .size = sizeof(struct nft_compat_net),
4651     -};
4652     -
4653     static int __init nft_compat_module_init(void)
4654     {
4655     int ret;
4656    
4657     - ret = register_pernet_subsys(&nft_compat_net_ops);
4658     - if (ret < 0)
4659     - goto err_target;
4660     -
4661     ret = nft_register_expr(&nft_match_type);
4662     if (ret < 0)
4663     - goto err_pernet;
4664     + return ret;
4665    
4666     ret = nft_register_expr(&nft_target_type);
4667     if (ret < 0)
4668     @@ -1054,8 +886,6 @@ err_target:
4669     nft_unregister_expr(&nft_target_type);
4670     err_match:
4671     nft_unregister_expr(&nft_match_type);
4672     -err_pernet:
4673     - unregister_pernet_subsys(&nft_compat_net_ops);
4674     return ret;
4675     }
4676    
4677     @@ -1064,7 +894,6 @@ static void __exit nft_compat_module_exit(void)
4678     nfnetlink_subsys_unregister(&nfnl_compat_subsys);
4679     nft_unregister_expr(&nft_target_type);
4680     nft_unregister_expr(&nft_match_type);
4681     - unregister_pernet_subsys(&nft_compat_net_ops);
4682     }
4683    
4684     MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
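With the pernet match/target caches gone, nft_compat gives every expression its own kzalloc()'d nft_expr_ops, and ownership becomes unambiguous: ->destroy() frees the ops after a successful init, and the new ->release_ops() hook frees them when init never ran. A userspace sketch of that two-path release model, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct expr_ops { void *data; };

static struct expr_ops *select_ops(void)
{
	return calloc(1, sizeof(struct expr_ops));	/* caller owns it */
}

static void release_ops(struct expr_ops *ops)
{
	/* module_put() would sit here in the kernel */
	free(ops);
}

static int expr_init(struct expr_ops *ops)
{
	(void)ops;
	return -1;		/* simulate a failing ->init() */
}

int main(void)
{
	struct expr_ops *ops = select_ops();

	if (!ops)
		return 1;
	if (expr_init(ops)) {
		release_ops(ops);   /* error path: ->destroy() never runs */
		puts("released on error path");
	}
	return 0;
}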
4685     diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
4686     index 691da853bef5..4bdf5e3ac208 100644
4687     --- a/net/openvswitch/flow_netlink.c
4688     +++ b/net/openvswitch/flow_netlink.c
4689     @@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
4690    
4691     struct sw_flow_actions *acts;
4692     int new_acts_size;
4693     - int req_size = NLA_ALIGN(attr_len);
4694     + size_t req_size = NLA_ALIGN(attr_len);
4695     int next_offset = offsetof(struct sw_flow_actions, actions) +
4696     (*sfa)->actions_len;
4697    
4698     if (req_size <= (ksize(*sfa) - next_offset))
4699     goto out;
4700    
4701     - new_acts_size = ksize(*sfa) * 2;
4702     + new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
4703    
4704     if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
4705     if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
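reserve_sfa_size() now grows the actions buffer to max(next_offset + req_size, ksize(*sfa) * 2), so a single oversized request can no longer land in a doubled buffer that is still too small, and req_size becomes size_t so the size comparison cannot wrap. A hedged userspace sketch of that growth rule; MAX_BUFSIZE and the function names are invented:

#include <stdio.h>

#define MAX_BUFSIZE (32 * 1024)

static size_t grow(size_t cur_size, size_t used, size_t req)
{
	size_t need = used + req;
	size_t next = cur_size * 2;

	if (next < need)	/* max(need, cur_size * 2) */
		next = need;
	if (next > MAX_BUFSIZE)
		next = MAX_BUFSIZE;
	return next;
}

int main(void)
{
	/* one large request: plain doubling (512) would still be short */
	printf("%zu\n", grow(256, 100, 700));	/* prints 800 */
	return 0;
}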
4706     diff --git a/net/rds/tcp.c b/net/rds/tcp.c
4707     index c16f0a362c32..a729c47db781 100644
4708     --- a/net/rds/tcp.c
4709     +++ b/net/rds/tcp.c
4710     @@ -600,7 +600,7 @@ static void rds_tcp_kill_sock(struct net *net)
4711     list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
4712     struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
4713    
4714     - if (net != c_net || !tc->t_sock)
4715     + if (net != c_net)
4716     continue;
4717     if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
4718     list_move_tail(&tc->t_tcp_node, &tmp_list);
4719     diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
4720     index 1a0c682fd734..fd62fe6c8e73 100644
4721     --- a/net/sched/act_sample.c
4722     +++ b/net/sched/act_sample.c
4723     @@ -43,8 +43,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
4724     struct tc_action_net *tn = net_generic(net, sample_net_id);
4725     struct nlattr *tb[TCA_SAMPLE_MAX + 1];
4726     struct psample_group *psample_group;
4727     + u32 psample_group_num, rate;
4728     struct tc_sample *parm;
4729     - u32 psample_group_num;
4730     struct tcf_sample *s;
4731     bool exists = false;
4732     int ret, err;
4733     @@ -80,6 +80,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
4734     return -EEXIST;
4735     }
4736    
4737     + rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
4738     + if (!rate) {
4739     + NL_SET_ERR_MSG(extack, "invalid sample rate");
4740     + tcf_idr_release(*a, bind);
4741     + return -EINVAL;
4742     + }
4743     psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
4744     psample_group = psample_group_get(net, psample_group_num);
4745     if (!psample_group) {
4746     @@ -91,7 +97,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
4747    
4748     spin_lock_bh(&s->tcf_lock);
4749     s->tcf_action = parm->action;
4750     - s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
4751     + s->rate = rate;
4752     s->psample_group_num = psample_group_num;
4753     RCU_INIT_POINTER(s->psample_group, psample_group);
4754    
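tcf_sample_init() now reads and validates TCA_SAMPLE_RATE before any action state is committed, since a zero rate would later be used as a divisor on the packet path. A userspace sketch of this validate-then-commit ordering, with invented names and a modulo check standing in for the kernel's sampling decision:

#include <stdio.h>
#include <errno.h>

struct sample_state { unsigned int rate; };

static int sample_init(struct sample_state *s, unsigned int rate)
{
	if (!rate)
		return -EINVAL;	/* would divide by zero in the fast path */
	s->rate = rate;		/* commit only after validation */
	return 0;
}

static int should_sample(const struct sample_state *s, unsigned long pkt)
{
	return (pkt % s->rate) == 0;	/* safe: rate != 0 is guaranteed */
}

int main(void)
{
	struct sample_state s;

	if (sample_init(&s, 0) == -EINVAL)
		puts("zero rate rejected");
	if (!sample_init(&s, 4))
		printf("sample packet 8: %d\n", should_sample(&s, 8));
	return 0;
}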
4755     diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
4756     index 0e408ee9dcec..5ba07cd11e31 100644
4757     --- a/net/sched/cls_matchall.c
4758     +++ b/net/sched/cls_matchall.c
4759     @@ -125,6 +125,11 @@ static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
4760    
4761     static void *mall_get(struct tcf_proto *tp, u32 handle)
4762     {
4763     + struct cls_mall_head *head = rtnl_dereference(tp->root);
4764     +
4765     + if (head && head->handle == handle)
4766     + return head;
4767     +
4768     return NULL;
4769     }
4770    
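mall_get() previously always returned NULL, leaving the installed filter invisible to handle-based lookups. Since matchall keeps a single head, the fix simply returns it when the handle matches. A tiny userspace model of that one-element lookup; the names mirror the patch but the harness around them is invented:

#include <stdio.h>
#include <stddef.h>

struct head { unsigned int handle; };

static struct head *mall_get(struct head *head, unsigned int handle)
{
	if (head && head->handle == handle)
		return head;
	return NULL;		/* unknown handle */
}

int main(void)
{
	struct head h = { .handle = 0x1 };

	printf("lookup 0x1: %s\n", mall_get(&h, 0x1) ? "found" : "missing");
	printf("lookup 0x2: %s\n", mall_get(&h, 0x2) ? "found" : "missing");
	return 0;
}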
4771     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
4772     index 6abc8b274270..951afdeea5e9 100644
4773     --- a/net/sctp/protocol.c
4774     +++ b/net/sctp/protocol.c
4775     @@ -600,6 +600,7 @@ out:
4776     static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
4777     {
4778     /* No address mapping for V4 sockets */
4779     + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
4780     return sizeof(struct sockaddr_in);
4781     }
4782    
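sctp_v4_addr_to_user() hands a sockaddr_in back to userspace, and struct sockaddr_in carries an 8-byte sin_zero pad; leaving it uninitialized leaks stale kernel bytes. The added memset closes that infoleak. A userspace sketch of the same hygiene when populating an address, assuming the standard sockaddr_in layout:

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>

static void fill_addr(struct sockaddr_in *sin)
{
	sin->sin_family = AF_INET;
	sin->sin_port = htons(80);
	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	/* the fix: never hand out sin_zero uninitialized */
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
}

int main(void)
{
	struct sockaddr_in sin;		/* deliberately not zeroed up front */

	fill_addr(&sin);
	printf("sin_zero cleared: %d\n",
	       sin.sin_zero[0] == 0 && sin.sin_zero[7] == 0);
	return 0;
}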
4783     diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
4784     index 21113bfd4eca..a5ae9c036b9c 100644
4785     --- a/net/sunrpc/xprtrdma/verbs.c
4786     +++ b/net/sunrpc/xprtrdma/verbs.c
4787     @@ -90,7 +90,7 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
4788     /* Flush Receives, then wait for deferred Reply work
4789     * to complete.
4790     */
4791     - ib_drain_qp(ia->ri_id->qp);
4792     + ib_drain_rq(ia->ri_id->qp);
4793     drain_workqueue(buf->rb_completion_wq);
4794    
4795     /* Deferred Reply processing might have scheduled
4796     diff --git a/scripts/package/Makefile b/scripts/package/Makefile
4797     index 453fecee62f0..aa39c2b5e46a 100644
4798     --- a/scripts/package/Makefile
4799     +++ b/scripts/package/Makefile
4800     @@ -59,7 +59,7 @@ rpm-pkg: FORCE
4801     # binrpm-pkg
4802     # ---------------------------------------------------------------------------
4803     binrpm-pkg: FORCE
4804     - $(MAKE) KBUILD_SRC=
4805     + $(MAKE) -f $(srctree)/Makefile
4806     $(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec
4807     +rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
4808     $(UTS_MACHINE) -bb $(objtree)/binkernel.spec
4809     @@ -102,7 +102,7 @@ clean-dirs += $(objtree)/snap/
4810     # tarball targets
4811     # ---------------------------------------------------------------------------
4812     tar%pkg: FORCE
4813     - $(MAKE) KBUILD_SRC=
4814     + $(MAKE) -f $(srctree)/Makefile
4815     $(CONFIG_SHELL) $(srctree)/scripts/package/buildtar $@
4816    
4817     clean-dirs += $(objtree)/tar-install/
4818     diff --git a/scripts/package/builddeb b/scripts/package/builddeb
4819     index f43a274f4f1d..8ac25d10a6ad 100755
4820     --- a/scripts/package/builddeb
4821     +++ b/scripts/package/builddeb
4822     @@ -86,12 +86,12 @@ cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
4823     if grep -q "^CONFIG_OF_EARLY_FLATTREE=y" $KCONFIG_CONFIG ; then
4824     # Only some architectures with OF support have this target
4825     if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then
4826     - $MAKE KBUILD_SRC= INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
4827     + $MAKE -f $srctree/Makefile INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
4828     fi
4829     fi
4830    
4831     if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
4832     - INSTALL_MOD_PATH="$tmpdir" $MAKE KBUILD_SRC= modules_install
4833     + INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_install
4834     rm -f "$tmpdir/lib/modules/$version/build"
4835     rm -f "$tmpdir/lib/modules/$version/source"
4836     if [ "$ARCH" = "um" ] ; then
4837     @@ -113,14 +113,14 @@ if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
4838     # resign stripped modules
4839     MODULE_SIG_ALL="$(grep -s '^CONFIG_MODULE_SIG_ALL=y' $KCONFIG_CONFIG || true)"
4840     if [ -n "$MODULE_SIG_ALL" ]; then
4841     - INSTALL_MOD_PATH="$tmpdir" $MAKE KBUILD_SRC= modules_sign
4842     + INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_sign
4843     fi
4844     fi
4845     fi
4846    
4847     if [ "$ARCH" != "um" ]; then
4848     - $MAKE headers_check KBUILD_SRC=
4849     - $MAKE headers_install KBUILD_SRC= INSTALL_HDR_PATH="$libc_headers_dir/usr"
4850     + $MAKE -f $srctree/Makefile headers_check
4851     + $MAKE -f $srctree/Makefile headers_install INSTALL_HDR_PATH="$libc_headers_dir/usr"
4852     fi
4853    
4854     # Install the maintainer scripts
4855     diff --git a/scripts/package/buildtar b/scripts/package/buildtar
4856     index d624a07a4e77..cfd2a4a3fe42 100755
4857     --- a/scripts/package/buildtar
4858     +++ b/scripts/package/buildtar
4859     @@ -57,7 +57,7 @@ dirs=boot
4860     # Try to install modules
4861     #
4862     if grep -q '^CONFIG_MODULES=y' "${KCONFIG_CONFIG}"; then
4863     - make ARCH="${ARCH}" O="${objtree}" KBUILD_SRC= INSTALL_MOD_PATH="${tmpdir}" modules_install
4864     + make ARCH="${ARCH}" -f ${srctree}/Makefile INSTALL_MOD_PATH="${tmpdir}" modules_install
4865     dirs="$dirs lib"
4866     fi
4867    
4868     diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
4869     index edcad61fe3cd..f030961c5165 100755
4870     --- a/scripts/package/mkdebian
4871     +++ b/scripts/package/mkdebian
4872     @@ -205,13 +205,15 @@ EOF
4873     cat <<EOF > debian/rules
4874     #!$(command -v $MAKE) -f
4875    
4876     +srctree ?= .
4877     +
4878     build:
4879     \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
4880     - KBUILD_BUILD_VERSION=${revision} KBUILD_SRC=
4881     + KBUILD_BUILD_VERSION=${revision} -f \$(srctree)/Makefile
4882    
4883     binary-arch:
4884     \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
4885     - KBUILD_BUILD_VERSION=${revision} KBUILD_SRC= intdeb-pkg
4886     + KBUILD_BUILD_VERSION=${revision} -f \$(srctree)/Makefile intdeb-pkg
4887    
4888     clean:
4889     rm -rf debian/*tmp debian/files
4890     diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
4891     index 7d4640d1fe9f..38e7deab6384 100644
4892     --- a/sound/core/seq/seq_clientmgr.c
4893     +++ b/sound/core/seq/seq_clientmgr.c
4894     @@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
4895    
4896     /* fill the info fields */
4897     if (client_info->name[0])
4898     - strlcpy(client->name, client_info->name, sizeof(client->name));
4899     + strscpy(client->name, client_info->name, sizeof(client->name));
4900    
4901     client->filter = client_info->filter;
4902     client->event_lost = client_info->event_lost;
4903     @@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
4904     /* set queue name */
4905     if (!info->name[0])
4906     snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
4907     - strlcpy(q->name, info->name, sizeof(q->name));
4908     + strscpy(q->name, info->name, sizeof(q->name));
4909     snd_use_lock_free(&q->use_lock);
4910    
4911     return 0;
4912     @@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
4913     queuefree(q);
4914     return -EPERM;
4915     }
4916     - strlcpy(q->name, info->name, sizeof(q->name));
4917     + strscpy(q->name, info->name, sizeof(q->name));
4918     queuefree(q);
4919    
4920     return 0;
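The seq ioctls copy client- and queue-name strings supplied from userspace, and strscpy() is preferred over strlcpy() here: it never reads the source past the destination bound (strlcpy() walks the whole source just to compute its return value) and it reports truncation as -E2BIG. A minimal userspace sketch of those semantics; my_strscpy is an illustrative reimplementation, not the kernel's:

#include <stdio.h>
#include <errno.h>
#include <sys/types.h>

static ssize_t my_strscpy(char *dst, const char *src, size_t size)
{
	size_t i;

	if (!size)
		return -E2BIG;
	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];	/* bounded read of the source */
	dst[i] = '\0';			/* always NUL-terminated */
	return src[i] ? -E2BIG : (ssize_t)i;
}

int main(void)
{
	char name[8];

	printf("%zd\n", my_strscpy(name, "Queue-1", sizeof(name)));  /* 7 */
	printf("%zd\n", my_strscpy(name, "Queue-12", sizeof(name))); /* -E2BIG */
	return 0;
}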
4921     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4922     index ece256a3b48f..2ec91085fa3e 100644
4923     --- a/sound/pci/hda/hda_intel.c
4924     +++ b/sound/pci/hda/hda_intel.c
4925     @@ -2142,6 +2142,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
4926     SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
4927     /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
4928     SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
4929     + /* https://bugs.launchpad.net/bugs/1821663 */
4930     + SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0),
4931     /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
4932     SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
4933     /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
4934     @@ -2150,6 +2152,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
4935     SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
4936     /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
4937     SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
4938     + /* https://bugs.launchpad.net/bugs/1821663 */
4939     + SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
4940     {}
4941     };
4942     #endif /* CONFIG_PM */
4943     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4944     index 00c27b3b8c14..84fae0df59e9 100644
4945     --- a/sound/pci/hda/patch_realtek.c
4946     +++ b/sound/pci/hda/patch_realtek.c
4947     @@ -1864,8 +1864,8 @@ enum {
4948     ALC887_FIXUP_BASS_CHMAP,
4949     ALC1220_FIXUP_GB_DUAL_CODECS,
4950     ALC1220_FIXUP_CLEVO_P950,
4951     - ALC1220_FIXUP_SYSTEM76_ORYP5,
4952     - ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
4953     + ALC1220_FIXUP_CLEVO_PB51ED,
4954     + ALC1220_FIXUP_CLEVO_PB51ED_PINS,
4955     };
4956    
4957     static void alc889_fixup_coef(struct hda_codec *codec,
4958     @@ -2070,7 +2070,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
4959     static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
4960     const struct hda_fixup *fix, int action);
4961    
4962     -static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
4963     +static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
4964     const struct hda_fixup *fix,
4965     int action)
4966     {
4967     @@ -2322,18 +2322,18 @@ static const struct hda_fixup alc882_fixups[] = {
4968     .type = HDA_FIXUP_FUNC,
4969     .v.func = alc1220_fixup_clevo_p950,
4970     },
4971     - [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
4972     + [ALC1220_FIXUP_CLEVO_PB51ED] = {
4973     .type = HDA_FIXUP_FUNC,
4974     - .v.func = alc1220_fixup_system76_oryp5,
4975     + .v.func = alc1220_fixup_clevo_pb51ed,
4976     },
4977     - [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
4978     + [ALC1220_FIXUP_CLEVO_PB51ED_PINS] = {
4979     .type = HDA_FIXUP_PINS,
4980     .v.pins = (const struct hda_pintbl[]) {
4981     { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
4982     {}
4983     },
4984     .chained = true,
4985     - .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
4986     + .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
4987     },
4988     };
4989    
4990     @@ -2411,8 +2411,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
4991     SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
4992     SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
4993     SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
4994     - SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
4995     - SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
4996     + SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
4997     + SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
4998     + SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
4999     SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
5000     SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
5001     SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
5002     @@ -5661,6 +5662,7 @@ enum {
5003     ALC233_FIXUP_ASUS_MIC_NO_PRESENCE,
5004     ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
5005     ALC233_FIXUP_LENOVO_MULTI_CODECS,
5006     + ALC233_FIXUP_ACER_HEADSET_MIC,
5007     ALC294_FIXUP_LENOVO_MIC_LOCATION,
5008     ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
5009     ALC700_FIXUP_INTEL_REFERENCE,
5010     @@ -6488,6 +6490,16 @@ static const struct hda_fixup alc269_fixups[] = {
5011     .type = HDA_FIXUP_FUNC,
5012     .v.func = alc233_alc662_fixup_lenovo_dual_codecs,
5013     },
5014     + [ALC233_FIXUP_ACER_HEADSET_MIC] = {
5015     + .type = HDA_FIXUP_VERBS,
5016     + .v.verbs = (const struct hda_verb[]) {
5017     + { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
5018     + { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
5019     + { }
5020     + },
5021     + .chained = true,
5022     + .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE
5023     + },
5024     [ALC294_FIXUP_LENOVO_MIC_LOCATION] = {
5025     .type = HDA_FIXUP_PINS,
5026     .v.pins = (const struct hda_pintbl[]) {
5027     @@ -6735,6 +6747,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5028     SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
5029     SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
5030     SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
5031     + SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
5032     SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
5033     SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
5034     SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
5035     diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
5036     index afe67c865330..3623aa9a6f2e 100644
5037     --- a/sound/soc/fsl/fsl_esai.c
5038     +++ b/sound/soc/fsl/fsl_esai.c
5039     @@ -54,6 +54,8 @@ struct fsl_esai {
5040     u32 fifo_depth;
5041     u32 slot_width;
5042     u32 slots;
5043     + u32 tx_mask;
5044     + u32 rx_mask;
5045     u32 hck_rate[2];
5046     u32 sck_rate[2];
5047     bool hck_dir[2];
5048     @@ -361,21 +363,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
5049     regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
5050     ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
5051    
5052     - regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
5053     - ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
5054     - regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
5055     - ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
5056     -
5057     regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
5058     ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
5059    
5060     - regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
5061     - ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
5062     - regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
5063     - ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
5064     -
5065     esai_priv->slot_width = slot_width;
5066     esai_priv->slots = slots;
5067     + esai_priv->tx_mask = tx_mask;
5068     + esai_priv->rx_mask = rx_mask;
5069    
5070     return 0;
5071     }
5072     @@ -596,6 +590,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
5073     bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
5074     u8 i, channels = substream->runtime->channels;
5075     u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
5076     + u32 mask;
5077    
5078     switch (cmd) {
5079     case SNDRV_PCM_TRIGGER_START:
5080     @@ -608,15 +603,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
5081     for (i = 0; tx && i < channels; i++)
5082     regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
5083    
5084     + /*
5085     + * If TE/RE is set at the end of the enablement flow, channels
5086     + * can end up swapped when multiple data lines are in use.
5087     + * To work around this, we switch the bit enablement
5088     + * sequence to the following:
5089     + * 1) clear xSMB & xSMA: done in probe and in the
5090     + * stop state.
5091     + * 2) set TE/RE
5092     + * 3) set xSMB
5093     + * 4) set xSMA: xSMA is the last step in this flow, and
5094     + * setting it triggers the ESAI to start.
5095     + */
5096     regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
5097     tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
5098     tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
5099     + mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
5100     +
5101     + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
5102     + ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
5103     + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
5104     + ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
5105     +
5106     break;
5107     case SNDRV_PCM_TRIGGER_SUSPEND:
5108     case SNDRV_PCM_TRIGGER_STOP:
5109     case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
5110     regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
5111     tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
5112     + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
5113     + ESAI_xSMA_xS_MASK, 0);
5114     + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
5115     + ESAI_xSMB_xS_MASK, 0);
5116    
5117     /* Disable and reset FIFO */
5118     regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
5119     @@ -906,6 +924,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
5120     return ret;
5121     }
5122    
5123     + esai_priv->tx_mask = 0xFFFFFFFF;
5124     + esai_priv->rx_mask = 0xFFFFFFFF;
5125     +
5126     + /* Clear the TSMA, TSMB, RSMA, RSMB */
5127     + regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
5128     + regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
5129     + regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
5130     + regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
5131     +
5132     ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
5133     &fsl_esai_dai, 1);
5134     if (ret) {
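The fsl_esai change stops writing the TDM slot masks in set_tdm_slot() and instead caches them, clearing xSMA/xSMB at probe and stop and programming them at trigger time in the order TE/RE, then xSMB, then xSMA, since the xSMA write is what actually starts the transfer. A userspace model of that ordering; the "regmap" here is just an array, and the split mirrors the xSMA (slots 0-15) / xSMB (slots 16-31) halves:

#include <stdio.h>

enum { REG_xCR, REG_xSMB, REG_xSMA, NUM_REGS };

struct esai { unsigned int regs[NUM_REGS]; unsigned int tx_mask; };

static void reg_write(struct esai *e, int reg, unsigned int val)
{
	e->regs[reg] = val;
	printf("write reg %d = 0x%x\n", reg, val);
}

static void set_tdm_slot(struct esai *e, unsigned int tx_mask)
{
	e->tx_mask = tx_mask;		/* cache only; no register writes */
}

static void trigger_start(struct esai *e)
{
	reg_write(e, REG_xCR, 1);			/* 1) enable TE/RE */
	reg_write(e, REG_xSMB, e->tx_mask >> 16);	/* 2) upper slots */
	reg_write(e, REG_xSMA, e->tx_mask & 0xffff);	/* 3) lower slots: start */
}

int main(void)
{
	struct esai e = { .regs = {0}, .tx_mask = 0 };

	set_tdm_slot(&e, 0x0000000f);
	trigger_start(&e);
	return 0;
}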
5135     diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
5136     index 91a2436ce952..e9623da911d5 100644
5137     --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
5138     +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
5139     @@ -711,9 +711,17 @@ static int sst_soc_probe(struct snd_soc_component *component)
5140     return sst_dsp_init_v2_dpcm(component);
5141     }
5142    
5143     +static void sst_soc_remove(struct snd_soc_component *component)
5144     +{
5145     + struct sst_data *drv = dev_get_drvdata(component->dev);
5146     +
5147     + drv->soc_card = NULL;
5148     +}
5149     +
5150     static const struct snd_soc_component_driver sst_soc_platform_drv = {
5151     .name = DRV_NAME,
5152     .probe = sst_soc_probe,
5153     + .remove = sst_soc_remove,
5154     .ops = &sst_platform_ops,
5155     .compr_ops = &sst_platform_compr_ops,
5156     .pcm_new = sst_pcm_new,
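sst_soc_remove() exists to clear drv->soc_card when the component goes away, so nothing dereferences a stale card pointer across an unbind/rebind cycle. A userspace sketch of that pointer-lifetime rule, with invented names:

#include <stdio.h>
#include <stddef.h>

struct card { int id; };
struct drv_data { struct card *soc_card; };

static void component_probe(struct drv_data *drv, struct card *card)
{
	drv->soc_card = card;		/* cached across callbacks */
}

static void component_remove(struct drv_data *drv)
{
	drv->soc_card = NULL;		/* the fix: drop the stale pointer */
}

int main(void)
{
	struct card card = { .id = 1 };
	struct drv_data drv = { .soc_card = NULL };

	component_probe(&drv, &card);
	component_remove(&drv);
	printf("soc_card cleared: %d\n", drv.soc_card == NULL);
	return 0;
}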
5157     diff --git a/sound/xen/xen_snd_front_alsa.c b/sound/xen/xen_snd_front_alsa.c
5158     index a7f413cb704d..b14ab512c2ce 100644
5159     --- a/sound/xen/xen_snd_front_alsa.c
5160     +++ b/sound/xen/xen_snd_front_alsa.c
5161     @@ -441,7 +441,7 @@ static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream,
5162     {
5163     int i;
5164    
5165     - stream->buffer = alloc_pages_exact(stream->buffer_sz, GFP_KERNEL);
5166     + stream->buffer = alloc_pages_exact(buffer_sz, GFP_KERNEL);
5167     if (!stream->buffer)
5168     return -ENOMEM;
5169
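The final hunk swaps stream->buffer_sz for the buffer_sz argument in alloc_pages_exact(); the struct field is not yet initialized at this point in the setup path, so the old code allocated with a stale size. A userspace sketch of the ordering bug and its fix, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct stream { void *buffer; size_t buffer_sz; };

static int setup_backstore(struct stream *s, size_t buffer_sz)
{
	/* use the parameter: s->buffer_sz is still 0 here */
	s->buffer = calloc(1, buffer_sz);
	if (!s->buffer)
		return -1;
	s->buffer_sz = buffer_sz;	/* the field becomes valid only now */
	return 0;
}

int main(void)
{
	struct stream s = { NULL, 0 };

	if (setup_backstore(&s, 4096))
		return 1;
	printf("allocated %zu bytes\n", s.buffer_sz);
	free(s.buffer);
	return 0;
}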