Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0134-4.19.35-all-fixes.patch



Revision 3413
Fri Aug 2 11:47:44 2019 UTC by niro
File size: 124685 bytes
-linux-4.19.35
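
This patch carries the upstream 4.19.35 stable update, so it is meant to apply on top of a 4.19.34 tree — the first Makefile hunk below bumps SUBLEVEL from 34 to 35. A minimal sketch of applying it with GNU patch, assuming the kernel tree and this patch file sit in the current directory (paths are illustrative, not part of the patch):

    cd linux-4.19.34                            # assumed unpacked 4.19.34 source tree
    patch -p1 < ../0134-4.19.35-all-fixes.patch # -p1 strips the a/ and b/ prefixes used in the diff
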
1 diff --git a/Makefile b/Makefile
2 index 8fdfe0af5862..f4229975b48c 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 19
9 -SUBLEVEL = 34
10 +SUBLEVEL = 35
11 EXTRAVERSION =
12 NAME = "People's Front"
13
14 @@ -483,7 +483,7 @@ endif
15 ifeq ($(cc-name),clang)
16 ifneq ($(CROSS_COMPILE),)
17 CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
18 -GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
19 +GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
20 CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
21 GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
22 endif
23 diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
24 index c87d01297a01..20bbb899b3b7 100644
25 --- a/arch/arm/boot/dts/am335x-evm.dts
26 +++ b/arch/arm/boot/dts/am335x-evm.dts
27 @@ -57,6 +57,24 @@
28 enable-active-high;
29 };
30
31 + /* TPS79501 */
32 + v1_8d_reg: fixedregulator-v1_8d {
33 + compatible = "regulator-fixed";
34 + regulator-name = "v1_8d";
35 + vin-supply = <&vbat>;
36 + regulator-min-microvolt = <1800000>;
37 + regulator-max-microvolt = <1800000>;
38 + };
39 +
40 + /* TPS79501 */
41 + v3_3d_reg: fixedregulator-v3_3d {
42 + compatible = "regulator-fixed";
43 + regulator-name = "v3_3d";
44 + vin-supply = <&vbat>;
45 + regulator-min-microvolt = <3300000>;
46 + regulator-max-microvolt = <3300000>;
47 + };
48 +
49 matrix_keypad: matrix_keypad0 {
50 compatible = "gpio-matrix-keypad";
51 debounce-delay-ms = <5>;
52 @@ -499,10 +517,10 @@
53 status = "okay";
54
55 /* Regulators */
56 - AVDD-supply = <&vaux2_reg>;
57 - IOVDD-supply = <&vaux2_reg>;
58 - DRVDD-supply = <&vaux2_reg>;
59 - DVDD-supply = <&vbat>;
60 + AVDD-supply = <&v3_3d_reg>;
61 + IOVDD-supply = <&v3_3d_reg>;
62 + DRVDD-supply = <&v3_3d_reg>;
63 + DVDD-supply = <&v1_8d_reg>;
64 };
65 };
66
67 diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
68 index bf1a40e45c97..ba589bc41a57 100644
69 --- a/arch/arm/boot/dts/am335x-evmsk.dts
70 +++ b/arch/arm/boot/dts/am335x-evmsk.dts
71 @@ -73,6 +73,24 @@
72 enable-active-high;
73 };
74
75 + /* TPS79518 */
76 + v1_8d_reg: fixedregulator-v1_8d {
77 + compatible = "regulator-fixed";
78 + regulator-name = "v1_8d";
79 + vin-supply = <&vbat>;
80 + regulator-min-microvolt = <1800000>;
81 + regulator-max-microvolt = <1800000>;
82 + };
83 +
84 + /* TPS78633 */
85 + v3_3d_reg: fixedregulator-v3_3d {
86 + compatible = "regulator-fixed";
87 + regulator-name = "v3_3d";
88 + vin-supply = <&vbat>;
89 + regulator-min-microvolt = <3300000>;
90 + regulator-max-microvolt = <3300000>;
91 + };
92 +
93 leds {
94 pinctrl-names = "default";
95 pinctrl-0 = <&user_leds_s0>;
96 @@ -501,10 +519,10 @@
97 status = "okay";
98
99 /* Regulators */
100 - AVDD-supply = <&vaux2_reg>;
101 - IOVDD-supply = <&vaux2_reg>;
102 - DRVDD-supply = <&vaux2_reg>;
103 - DVDD-supply = <&vbat>;
104 + AVDD-supply = <&v3_3d_reg>;
105 + IOVDD-supply = <&v3_3d_reg>;
106 + DRVDD-supply = <&v3_3d_reg>;
107 + DVDD-supply = <&v1_8d_reg>;
108 };
109 };
110
111 diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
112 index 0840ffb3205c..e6a36a792bae 100644
113 --- a/arch/arm/boot/dts/rk3288.dtsi
114 +++ b/arch/arm/boot/dts/rk3288.dtsi
115 @@ -70,7 +70,7 @@
116 compatible = "arm,cortex-a12";
117 reg = <0x501>;
118 resets = <&cru SRST_CORE1>;
119 - operating-points = <&cpu_opp_table>;
120 + operating-points-v2 = <&cpu_opp_table>;
121 #cooling-cells = <2>; /* min followed by max */
122 clock-latency = <40000>;
123 clocks = <&cru ARMCLK>;
124 @@ -80,7 +80,7 @@
125 compatible = "arm,cortex-a12";
126 reg = <0x502>;
127 resets = <&cru SRST_CORE2>;
128 - operating-points = <&cpu_opp_table>;
129 + operating-points-v2 = <&cpu_opp_table>;
130 #cooling-cells = <2>; /* min followed by max */
131 clock-latency = <40000>;
132 clocks = <&cru ARMCLK>;
133 @@ -90,7 +90,7 @@
134 compatible = "arm,cortex-a12";
135 reg = <0x503>;
136 resets = <&cru SRST_CORE3>;
137 - operating-points = <&cpu_opp_table>;
138 + operating-points-v2 = <&cpu_opp_table>;
139 #cooling-cells = <2>; /* min followed by max */
140 clock-latency = <40000>;
141 clocks = <&cru ARMCLK>;
142 diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
143 index 1c01a6f843d8..28a2e45752fe 100644
144 --- a/arch/arm/boot/dts/sama5d2-pinfunc.h
145 +++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
146 @@ -518,7 +518,7 @@
147 #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
148 #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3)
149 #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1)
150 -#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1)
151 +#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1)
152 #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2)
153 #define PIN_PC10 74
154 #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)
155 diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
156 index 5272e887a434..c142169a58fc 100644
157 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
158 +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
159 @@ -45,8 +45,7 @@
160
161 vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
162 compatible = "regulator-fixed";
163 - enable-active-high;
164 - gpio = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>;
165 + gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
166 pinctrl-names = "default";
167 pinctrl-0 = <&usb20_host_drv>;
168 regulator-name = "vcc_host1_5v";
169 @@ -238,7 +237,7 @@
170
171 usb2 {
172 usb20_host_drv: usb20-host-drv {
173 - rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
174 + rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
175 };
176 };
177
178 diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
179 index 3f5a2944300f..e065394360bb 100644
180 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
181 +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
182 @@ -1356,11 +1356,11 @@
183
184 sdmmc0 {
185 sdmmc0_clk: sdmmc0-clk {
186 - rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
187 + rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
188 };
189
190 sdmmc0_cmd: sdmmc0-cmd {
191 - rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
192 + rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
193 };
194
195 sdmmc0_dectn: sdmmc0-dectn {
196 @@ -1372,14 +1372,14 @@
197 };
198
199 sdmmc0_bus1: sdmmc0-bus1 {
200 - rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
201 + rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
202 };
203
204 sdmmc0_bus4: sdmmc0-bus4 {
205 - rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
206 - <1 RK_PA1 1 &pcfg_pull_up_4ma>,
207 - <1 RK_PA2 1 &pcfg_pull_up_4ma>,
208 - <1 RK_PA3 1 &pcfg_pull_up_4ma>;
209 + rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
210 + <1 RK_PA1 1 &pcfg_pull_up_8ma>,
211 + <1 RK_PA2 1 &pcfg_pull_up_8ma>,
212 + <1 RK_PA3 1 &pcfg_pull_up_8ma>;
213 };
214
215 sdmmc0_gpio: sdmmc0-gpio {
216 @@ -1553,50 +1553,50 @@
217 rgmiim1_pins: rgmiim1-pins {
218 rockchip,pins =
219 /* mac_txclk */
220 - <1 RK_PB4 2 &pcfg_pull_none_12ma>,
221 + <1 RK_PB4 2 &pcfg_pull_none_8ma>,
222 /* mac_rxclk */
223 - <1 RK_PB5 2 &pcfg_pull_none_2ma>,
224 + <1 RK_PB5 2 &pcfg_pull_none_4ma>,
225 /* mac_mdio */
226 - <1 RK_PC3 2 &pcfg_pull_none_2ma>,
227 + <1 RK_PC3 2 &pcfg_pull_none_4ma>,
228 /* mac_txen */
229 - <1 RK_PD1 2 &pcfg_pull_none_12ma>,
230 + <1 RK_PD1 2 &pcfg_pull_none_8ma>,
231 /* mac_clk */
232 - <1 RK_PC5 2 &pcfg_pull_none_2ma>,
233 + <1 RK_PC5 2 &pcfg_pull_none_4ma>,
234 /* mac_rxdv */
235 - <1 RK_PC6 2 &pcfg_pull_none_2ma>,
236 + <1 RK_PC6 2 &pcfg_pull_none_4ma>,
237 /* mac_mdc */
238 - <1 RK_PC7 2 &pcfg_pull_none_2ma>,
239 + <1 RK_PC7 2 &pcfg_pull_none_4ma>,
240 /* mac_rxd1 */
241 - <1 RK_PB2 2 &pcfg_pull_none_2ma>,
242 + <1 RK_PB2 2 &pcfg_pull_none_4ma>,
243 /* mac_rxd0 */
244 - <1 RK_PB3 2 &pcfg_pull_none_2ma>,
245 + <1 RK_PB3 2 &pcfg_pull_none_4ma>,
246 /* mac_txd1 */
247 - <1 RK_PB0 2 &pcfg_pull_none_12ma>,
248 + <1 RK_PB0 2 &pcfg_pull_none_8ma>,
249 /* mac_txd0 */
250 - <1 RK_PB1 2 &pcfg_pull_none_12ma>,
251 + <1 RK_PB1 2 &pcfg_pull_none_8ma>,
252 /* mac_rxd3 */
253 - <1 RK_PB6 2 &pcfg_pull_none_2ma>,
254 + <1 RK_PB6 2 &pcfg_pull_none_4ma>,
255 /* mac_rxd2 */
256 - <1 RK_PB7 2 &pcfg_pull_none_2ma>,
257 + <1 RK_PB7 2 &pcfg_pull_none_4ma>,
258 /* mac_txd3 */
259 - <1 RK_PC0 2 &pcfg_pull_none_12ma>,
260 + <1 RK_PC0 2 &pcfg_pull_none_8ma>,
261 /* mac_txd2 */
262 - <1 RK_PC1 2 &pcfg_pull_none_12ma>,
263 + <1 RK_PC1 2 &pcfg_pull_none_8ma>,
264
265 /* mac_txclk */
266 - <0 RK_PB0 1 &pcfg_pull_none>,
267 + <0 RK_PB0 1 &pcfg_pull_none_8ma>,
268 /* mac_txen */
269 - <0 RK_PB4 1 &pcfg_pull_none>,
270 + <0 RK_PB4 1 &pcfg_pull_none_8ma>,
271 /* mac_clk */
272 - <0 RK_PD0 1 &pcfg_pull_none>,
273 + <0 RK_PD0 1 &pcfg_pull_none_4ma>,
274 /* mac_txd1 */
275 - <0 RK_PC0 1 &pcfg_pull_none>,
276 + <0 RK_PC0 1 &pcfg_pull_none_8ma>,
277 /* mac_txd0 */
278 - <0 RK_PC1 1 &pcfg_pull_none>,
279 + <0 RK_PC1 1 &pcfg_pull_none_8ma>,
280 /* mac_txd3 */
281 - <0 RK_PC7 1 &pcfg_pull_none>,
282 + <0 RK_PC7 1 &pcfg_pull_none_8ma>,
283 /* mac_txd2 */
284 - <0 RK_PC6 1 &pcfg_pull_none>;
285 + <0 RK_PC6 1 &pcfg_pull_none_8ma>;
286 };
287
288 rmiim1_pins: rmiim1-pins {
289 diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
290 index 07fe2479d310..b447b4db423a 100644
291 --- a/arch/arm64/include/asm/futex.h
292 +++ b/arch/arm64/include/asm/futex.h
293 @@ -30,8 +30,8 @@ do { \
294 " prfm pstl1strm, %2\n" \
295 "1: ldxr %w1, %2\n" \
296 insn "\n" \
297 -"2: stlxr %w3, %w0, %2\n" \
298 -" cbnz %w3, 1b\n" \
299 +"2: stlxr %w0, %w3, %2\n" \
300 +" cbnz %w0, 1b\n" \
301 " dmb ish\n" \
302 "3:\n" \
303 " .pushsection .fixup,\"ax\"\n" \
304 @@ -50,30 +50,30 @@ do { \
305 static inline int
306 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
307 {
308 - int oldval = 0, ret, tmp;
309 + int oldval, ret, tmp;
310 u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
311
312 pagefault_disable();
313
314 switch (op) {
315 case FUTEX_OP_SET:
316 - __futex_atomic_op("mov %w0, %w4",
317 + __futex_atomic_op("mov %w3, %w4",
318 ret, oldval, uaddr, tmp, oparg);
319 break;
320 case FUTEX_OP_ADD:
321 - __futex_atomic_op("add %w0, %w1, %w4",
322 + __futex_atomic_op("add %w3, %w1, %w4",
323 ret, oldval, uaddr, tmp, oparg);
324 break;
325 case FUTEX_OP_OR:
326 - __futex_atomic_op("orr %w0, %w1, %w4",
327 + __futex_atomic_op("orr %w3, %w1, %w4",
328 ret, oldval, uaddr, tmp, oparg);
329 break;
330 case FUTEX_OP_ANDN:
331 - __futex_atomic_op("and %w0, %w1, %w4",
332 + __futex_atomic_op("and %w3, %w1, %w4",
333 ret, oldval, uaddr, tmp, ~oparg);
334 break;
335 case FUTEX_OP_XOR:
336 - __futex_atomic_op("eor %w0, %w1, %w4",
337 + __futex_atomic_op("eor %w3, %w1, %w4",
338 ret, oldval, uaddr, tmp, oparg);
339 break;
340 default:
341 diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
342 index b9da093e0341..a0099be4311a 100644
343 --- a/arch/arm64/kernel/traps.c
344 +++ b/arch/arm64/kernel/traps.c
345 @@ -101,10 +101,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
346 void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
347 {
348 struct stackframe frame;
349 - int skip;
350 + int skip = 0;
351
352 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
353
354 + if (regs) {
355 + if (user_mode(regs))
356 + return;
357 + skip = 1;
358 + }
359 +
360 if (!tsk)
361 tsk = current;
362
363 @@ -125,7 +131,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
364 frame.graph = tsk->curr_ret_stack;
365 #endif
366
367 - skip = !!regs;
368 printk("Call trace:\n");
369 do {
370 /* skip until specified stack frame */
371 @@ -175,15 +180,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
372 return ret;
373
374 print_modules();
375 - __show_regs(regs);
376 pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
377 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
378 end_of_stack(tsk));
379 + show_regs(regs);
380
381 - if (!user_mode(regs)) {
382 - dump_backtrace(regs, tsk);
383 + if (!user_mode(regs))
384 dump_instr(KERN_EMERG, regs);
385 - }
386
387 return ret;
388 }
389 diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
390 index 787e27964ab9..774c3e17c798 100644
391 --- a/arch/arm64/mm/init.c
392 +++ b/arch/arm64/mm/init.c
393 @@ -450,7 +450,7 @@ void __init arm64_memblock_init(void)
394 * memory spans, randomize the linear region as well.
395 */
396 if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
397 - range = range / ARM64_MEMSTART_ALIGN + 1;
398 + range /= ARM64_MEMSTART_ALIGN;
399 memstart_addr -= ARM64_MEMSTART_ALIGN *
400 ((range * memstart_offset_seed) >> 16);
401 }
402 diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
403 index 2a27b275ab09..9ff033d261ab 100644
404 --- a/arch/parisc/include/asm/ptrace.h
405 +++ b/arch/parisc/include/asm/ptrace.h
406 @@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
407
408 static inline unsigned long regs_return_value(struct pt_regs *regs)
409 {
410 - return regs->gr[20];
411 + return regs->gr[28];
412 }
413
414 static inline void instruction_pointer_set(struct pt_regs *regs,
415 unsigned long val)
416 {
417 - regs->iaoq[0] = val;
418 + regs->iaoq[0] = val;
419 + regs->iaoq[1] = val + 4;
420 }
421
422 /* Query offset/name of register from its name/offset */
423 diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
424 index eb39e7e380d7..841db71958cd 100644
425 --- a/arch/parisc/kernel/process.c
426 +++ b/arch/parisc/kernel/process.c
427 @@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
428
429 static int __init parisc_idle_init(void)
430 {
431 - const char *marker;
432 -
433 - /* check QEMU/SeaBIOS marker in PAGE0 */
434 - marker = (char *) &PAGE0->pad0;
435 - running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
436 -
437 if (!running_on_qemu)
438 cpu_idle_poll_ctrl(1);
439
440 diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
441 index 4e87c35c22b7..79c8b994e7d1 100644
442 --- a/arch/parisc/kernel/setup.c
443 +++ b/arch/parisc/kernel/setup.c
444 @@ -399,6 +399,9 @@ void __init start_parisc(void)
445 int ret, cpunum;
446 struct pdc_coproc_cfg coproc_cfg;
447
448 + /* check QEMU/SeaBIOS marker in PAGE0 */
449 + running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
450 +
451 cpunum = smp_processor_id();
452
453 init_cpu_topology();
454 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
455 index bbd1c73243d7..14b0f5b6a373 100644
456 --- a/arch/powerpc/kernel/signal_64.c
457 +++ b/arch/powerpc/kernel/signal_64.c
458 @@ -755,12 +755,25 @@ SYSCALL_DEFINE0(rt_sigreturn)
459 if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
460 &uc_transact->uc_mcontext))
461 goto badframe;
462 - }
463 - else
464 - /* Fall through, for non-TM restore */
465 + } else
466 #endif
467 - if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
468 - goto badframe;
469 + {
470 + /*
471 + * Fall through, for non-TM restore
472 + *
473 + * Unset MSR[TS] on the thread regs since MSR from user
474 + * context does not have MSR active, and recheckpoint was
475 + * not called since restore_tm_sigcontexts() was not called
476 + * also.
477 + *
478 + * If not unsetting it, the code can RFID to userspace with
479 + * MSR[TS] set, but without CPU in the proper state,
480 + * causing a TM bad thing.
481 + */
482 + current->thread.regs->msr &= ~MSR_TS_MASK;
483 + if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
484 + goto badframe;
485 + }
486
487 if (restore_altstack(&uc->uc_stack))
488 goto badframe;
489 diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
490 index 8d25f8904c00..1dcde0fda435 100644
491 --- a/arch/riscv/include/asm/syscall.h
492 +++ b/arch/riscv/include/asm/syscall.h
493 @@ -78,10 +78,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
494 if (i == 0) {
495 args[0] = regs->orig_a0;
496 args++;
497 - i++;
498 n--;
499 + } else {
500 + i--;
501 }
502 - memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
503 + memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
504 }
505
506 static inline void syscall_set_arguments(struct task_struct *task,
507 @@ -93,10 +94,11 @@ static inline void syscall_set_arguments(struct task_struct *task,
508 if (i == 0) {
509 regs->orig_a0 = args[0];
510 args++;
511 - i++;
512 n--;
513 - }
514 - memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
515 + } else {
516 + i--;
517 + }
518 + memcpy(&regs->a1 + i, args, n * sizeof(regs->a1));
519 }
520
521 #endif /* _ASM_RISCV_SYSCALL_H */
522 diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
523 index c3d7ccd25381..5bfe2243a08f 100644
524 --- a/arch/x86/entry/vdso/Makefile
525 +++ b/arch/x86/entry/vdso/Makefile
526 @@ -47,7 +47,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
527 CPPFLAGS_vdso.lds += -P -C
528
529 VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
530 - -z max-page-size=4096 -z common-page-size=4096
531 + -z max-page-size=4096
532
533 $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
534 $(call if_changed,vdso)
535 @@ -98,7 +98,7 @@ CFLAGS_REMOVE_vvar.o = -pg
536
537 CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
538 VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
539 - -z max-page-size=4096 -z common-page-size=4096
540 + -z max-page-size=4096
541
542 # x32-rebranded versions
543 vobjx32s-y := $(vobjs-y:.o=-x32.o)
544 diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
545 index c84584bb9402..3e5dd85b019a 100644
546 --- a/arch/x86/events/amd/core.c
547 +++ b/arch/x86/events/amd/core.c
548 @@ -3,10 +3,14 @@
549 #include <linux/types.h>
550 #include <linux/init.h>
551 #include <linux/slab.h>
552 +#include <linux/delay.h>
553 #include <asm/apicdef.h>
554 +#include <asm/nmi.h>
555
556 #include "../perf_event.h"
557
558 +static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
559 +
560 static __initconst const u64 amd_hw_cache_event_ids
561 [PERF_COUNT_HW_CACHE_MAX]
562 [PERF_COUNT_HW_CACHE_OP_MAX]
563 @@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu)
564 }
565 }
566
567 +/*
568 + * When a PMC counter overflows, an NMI is used to process the event and
569 + * reset the counter. NMI latency can result in the counter being updated
570 + * before the NMI can run, which can result in what appear to be spurious
571 + * NMIs. This function is intended to wait for the NMI to run and reset
572 + * the counter to avoid possible unhandled NMI messages.
573 + */
574 +#define OVERFLOW_WAIT_COUNT 50
575 +
576 +static void amd_pmu_wait_on_overflow(int idx)
577 +{
578 + unsigned int i;
579 + u64 counter;
580 +
581 + /*
582 + * Wait for the counter to be reset if it has overflowed. This loop
583 + * should exit very, very quickly, but just in case, don't wait
584 + * forever...
585 + */
586 + for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
587 + rdmsrl(x86_pmu_event_addr(idx), counter);
588 + if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
589 + break;
590 +
591 + /* Might be in IRQ context, so can't sleep */
592 + udelay(1);
593 + }
594 +}
595 +
596 +static void amd_pmu_disable_all(void)
597 +{
598 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
599 + int idx;
600 +
601 + x86_pmu_disable_all();
602 +
603 + /*
604 + * This shouldn't be called from NMI context, but add a safeguard here
605 + * to return, since if we're in NMI context we can't wait for an NMI
606 + * to reset an overflowed counter value.
607 + */
608 + if (in_nmi())
609 + return;
610 +
611 + /*
612 + * Check each counter for overflow and wait for it to be reset by the
613 + * NMI if it has overflowed. This relies on the fact that all active
614 + * counters are always enabled when this function is called and
615 + * ARCH_PERFMON_EVENTSEL_INT is always set.
616 + */
617 + for (idx = 0; idx < x86_pmu.num_counters; idx++) {
618 + if (!test_bit(idx, cpuc->active_mask))
619 + continue;
620 +
621 + amd_pmu_wait_on_overflow(idx);
622 + }
623 +}
624 +
625 +static void amd_pmu_disable_event(struct perf_event *event)
626 +{
627 + x86_pmu_disable_event(event);
628 +
629 + /*
630 + * This can be called from NMI context (via x86_pmu_stop). The counter
631 + * may have overflowed, but either way, we'll never see it get reset
632 + * by the NMI if we're already in the NMI. And the NMI latency support
633 + * below will take care of any pending NMI that might have been
634 + * generated by the overflow.
635 + */
636 + if (in_nmi())
637 + return;
638 +
639 + amd_pmu_wait_on_overflow(event->hw.idx);
640 +}
641 +
642 +/*
643 + * Because of NMI latency, if multiple PMC counters are active or other sources
644 + * of NMIs are received, the perf NMI handler can handle one or more overflowed
645 + * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
646 + * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
647 + * back-to-back NMI support won't be active. This PMC handler needs to take into
648 + * account that this can occur, otherwise this could result in unknown NMI
649 + * messages being issued. Examples of this is PMC overflow while in the NMI
650 + * handler when multiple PMCs are active or PMC overflow while handling some
651 + * other source of an NMI.
652 + *
653 + * Attempt to mitigate this by using the number of active PMCs to determine
654 + * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
655 + * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
656 + * number of active PMCs or 2. The value of 2 is used in case an NMI does not
657 + * arrive at the LAPIC in time to be collapsed into an already pending NMI.
658 + */
659 +static int amd_pmu_handle_irq(struct pt_regs *regs)
660 +{
661 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
662 + int active, handled;
663 +
664 + /*
665 + * Obtain the active count before calling x86_pmu_handle_irq() since
666 + * it is possible that x86_pmu_handle_irq() may make a counter
667 + * inactive (through x86_pmu_stop).
668 + */
669 + active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
670 +
671 + /* Process any counter overflows */
672 + handled = x86_pmu_handle_irq(regs);
673 +
674 + /*
675 + * If a counter was handled, record the number of possible remaining
676 + * NMIs that can occur.
677 + */
678 + if (handled) {
679 + this_cpu_write(perf_nmi_counter,
680 + min_t(unsigned int, 2, active));
681 +
682 + return handled;
683 + }
684 +
685 + if (!this_cpu_read(perf_nmi_counter))
686 + return NMI_DONE;
687 +
688 + this_cpu_dec(perf_nmi_counter);
689 +
690 + return NMI_HANDLED;
691 +}
692 +
693 static struct event_constraint *
694 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
695 struct perf_event *event)
696 @@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
697
698 static __initconst const struct x86_pmu amd_pmu = {
699 .name = "AMD",
700 - .handle_irq = x86_pmu_handle_irq,
701 - .disable_all = x86_pmu_disable_all,
702 + .handle_irq = amd_pmu_handle_irq,
703 + .disable_all = amd_pmu_disable_all,
704 .enable_all = x86_pmu_enable_all,
705 .enable = x86_pmu_enable_event,
706 - .disable = x86_pmu_disable_event,
707 + .disable = amd_pmu_disable_event,
708 .hw_config = amd_pmu_hw_config,
709 .schedule_events = x86_schedule_events,
710 .eventsel = MSR_K7_EVNTSEL0,
711 @@ -728,7 +858,7 @@ void amd_pmu_enable_virt(void)
712 cpuc->perf_ctr_virt_mask = 0;
713
714 /* Reload all events */
715 - x86_pmu_disable_all();
716 + amd_pmu_disable_all();
717 x86_pmu_enable_all(0);
718 }
719 EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
720 @@ -746,7 +876,7 @@ void amd_pmu_disable_virt(void)
721 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
722
723 /* Reload all events */
724 - x86_pmu_disable_all();
725 + amd_pmu_disable_all();
726 x86_pmu_enable_all(0);
727 }
728 EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
729 diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
730 index a41554350893..c9625bff4328 100644
731 --- a/arch/x86/events/core.c
732 +++ b/arch/x86/events/core.c
733 @@ -1328,8 +1328,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
734 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
735 struct hw_perf_event *hwc = &event->hw;
736
737 - if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
738 + if (test_bit(hwc->idx, cpuc->active_mask)) {
739 x86_pmu.disable(event);
740 + __clear_bit(hwc->idx, cpuc->active_mask);
741 cpuc->events[hwc->idx] = NULL;
742 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
743 hwc->state |= PERF_HES_STOPPED;
744 @@ -1426,16 +1427,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
745 apic_write(APIC_LVTPC, APIC_DM_NMI);
746
747 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
748 - if (!test_bit(idx, cpuc->active_mask)) {
749 - /*
750 - * Though we deactivated the counter some cpus
751 - * might still deliver spurious interrupts still
752 - * in flight. Catch them:
753 - */
754 - if (__test_and_clear_bit(idx, cpuc->running))
755 - handled++;
756 + if (!test_bit(idx, cpuc->active_mask))
757 continue;
758 - }
759
760 event = cpuc->events[idx];
761
762 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
763 index 9f645ba57dbb..33611a74bfff 100644
764 --- a/arch/x86/include/asm/bitops.h
765 +++ b/arch/x86/include/asm/bitops.h
766 @@ -36,22 +36,17 @@
767 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
768 */
769
770 -#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
771 -/* Technically wrong, but this avoids compilation errors on some gcc
772 - versions. */
773 -#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
774 -#else
775 -#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
776 -#endif
777 +#define RLONG_ADDR(x) "m" (*(volatile long *) (x))
778 +#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
779
780 -#define ADDR BITOP_ADDR(addr)
781 +#define ADDR RLONG_ADDR(addr)
782
783 /*
784 * We do the locked ops that don't return the old value as
785 * a mask operation on a byte.
786 */
787 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
788 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
789 +#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
790 #define CONST_MASK(nr) (1 << ((nr) & 7))
791
792 /**
793 @@ -79,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
794 : "memory");
795 } else {
796 asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
797 - : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
798 + : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
799 }
800 }
801
802 @@ -94,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
803 */
804 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
805 {
806 - asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
807 + asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
808 }
809
810 /**
811 @@ -116,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
812 : "iq" ((u8)~CONST_MASK(nr)));
813 } else {
814 asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
815 - : BITOP_ADDR(addr)
816 - : "Ir" (nr));
817 + : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
818 }
819 }
820
821 @@ -137,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
822
823 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
824 {
825 - asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
826 + asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
827 }
828
829 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
830 @@ -145,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
831 bool negative;
832 asm volatile(LOCK_PREFIX "andb %2,%1"
833 CC_SET(s)
834 - : CC_OUT(s) (negative), ADDR
835 + : CC_OUT(s) (negative), WBYTE_ADDR(addr)
836 : "ir" ((char) ~(1 << nr)) : "memory");
837 return negative;
838 }
839 @@ -161,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
840 * __clear_bit() is non-atomic and implies release semantics before the memory
841 * operation. It can be used for an unlock if no other CPUs can concurrently
842 * modify other bits in the word.
843 - *
844 - * No memory barrier is required here, because x86 cannot reorder stores past
845 - * older loads. Same principle as spin_unlock.
846 */
847 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
848 {
849 - barrier();
850 __clear_bit(nr, addr);
851 }
852
853 @@ -182,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
854 */
855 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
856 {
857 - asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
858 + asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
859 }
860
861 /**
862 @@ -202,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
863 : "iq" ((u8)CONST_MASK(nr)));
864 } else {
865 asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
866 - : BITOP_ADDR(addr)
867 - : "Ir" (nr));
868 + : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
869 }
870 }
871
872 @@ -249,8 +238,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
873
874 asm(__ASM_SIZE(bts) " %2,%1"
875 CC_SET(c)
876 - : CC_OUT(c) (oldbit), ADDR
877 - : "Ir" (nr));
878 + : CC_OUT(c) (oldbit)
879 + : ADDR, "Ir" (nr) : "memory");
880 return oldbit;
881 }
882
883 @@ -290,8 +279,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
884
885 asm volatile(__ASM_SIZE(btr) " %2,%1"
886 CC_SET(c)
887 - : CC_OUT(c) (oldbit), ADDR
888 - : "Ir" (nr));
889 + : CC_OUT(c) (oldbit)
890 + : ADDR, "Ir" (nr) : "memory");
891 return oldbit;
892 }
893
894 @@ -302,8 +291,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
895
896 asm volatile(__ASM_SIZE(btc) " %2,%1"
897 CC_SET(c)
898 - : CC_OUT(c) (oldbit), ADDR
899 - : "Ir" (nr) : "memory");
900 + : CC_OUT(c) (oldbit)
901 + : ADDR, "Ir" (nr) : "memory");
902
903 return oldbit;
904 }
905 @@ -335,7 +324,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
906 asm volatile(__ASM_SIZE(bt) " %2,%1"
907 CC_SET(c)
908 : CC_OUT(c) (oldbit)
909 - : "m" (*(unsigned long *)addr), "Ir" (nr));
910 + : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
911
912 return oldbit;
913 }
914 diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
915 index 55d392c6bd29..2fd165f1cffa 100644
916 --- a/arch/x86/include/asm/string_32.h
917 +++ b/arch/x86/include/asm/string_32.h
918 @@ -179,14 +179,7 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
919 * No 3D Now!
920 */
921
922 -#if (__GNUC__ >= 4)
923 #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
924 -#else
925 -#define memcpy(t, f, n) \
926 - (__builtin_constant_p((n)) \
927 - ? __constant_memcpy((t), (f), (n)) \
928 - : __memcpy((t), (f), (n)))
929 -#endif
930
931 #endif
932 #endif /* !CONFIG_FORTIFY_SOURCE */
933 @@ -282,12 +275,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
934
935 {
936 int d0, d1;
937 -#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
938 - /* Workaround for broken gcc 4.0 */
939 - register unsigned long eax asm("%eax") = pattern;
940 -#else
941 unsigned long eax = pattern;
942 -#endif
943
944 switch (count % 4) {
945 case 0:
946 @@ -321,15 +309,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
947 #define __HAVE_ARCH_MEMSET
948 extern void *memset(void *, int, size_t);
949 #ifndef CONFIG_FORTIFY_SOURCE
950 -#if (__GNUC__ >= 4)
951 #define memset(s, c, count) __builtin_memset(s, c, count)
952 -#else
953 -#define memset(s, c, count) \
954 - (__builtin_constant_p(c) \
955 - ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
956 - (count)) \
957 - : __memset((s), (c), (count)))
958 -#endif
959 #endif /* !CONFIG_FORTIFY_SOURCE */
960
961 #define __HAVE_ARCH_MEMSET16
962 diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
963 index d33f92b9fa22..052a7a4ac025 100644
964 --- a/arch/x86/include/asm/string_64.h
965 +++ b/arch/x86/include/asm/string_64.h
966 @@ -32,21 +32,6 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
967 extern void *memcpy(void *to, const void *from, size_t len);
968 extern void *__memcpy(void *to, const void *from, size_t len);
969
970 -#ifndef CONFIG_FORTIFY_SOURCE
971 -#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
972 -#define memcpy(dst, src, len) \
973 -({ \
974 - size_t __len = (len); \
975 - void *__ret; \
976 - if (__builtin_constant_p(len) && __len >= 64) \
977 - __ret = __memcpy((dst), (src), __len); \
978 - else \
979 - __ret = __builtin_memcpy((dst), (src), __len); \
980 - __ret; \
981 -})
982 -#endif
983 -#endif /* !CONFIG_FORTIFY_SOURCE */
984 -
985 #define __HAVE_ARCH_MEMSET
986 void *memset(void *s, int c, size_t n);
987 void *__memset(void *s, int c, size_t n);
988 diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
989 index ef05bea7010d..6b5c710846f5 100644
990 --- a/arch/x86/include/asm/xen/hypercall.h
991 +++ b/arch/x86/include/asm/xen/hypercall.h
992 @@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
993 __HYPERCALL_DECLS;
994 __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
995
996 + if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
997 + return -EINVAL;
998 +
999 asm volatile(CALL_NOSPEC
1000 : __HYPERCALL_5PARAM
1001 : [thunk_target] "a" (&hypercall_page[call])
1002 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1003 index b47541962012..6dc72804fe6e 100644
1004 --- a/arch/x86/kvm/svm.c
1005 +++ b/arch/x86/kvm/svm.c
1006 @@ -6398,11 +6398,11 @@ e_free:
1007 return ret;
1008 }
1009
1010 -static int get_num_contig_pages(int idx, struct page **inpages,
1011 - unsigned long npages)
1012 +static unsigned long get_num_contig_pages(unsigned long idx,
1013 + struct page **inpages, unsigned long npages)
1014 {
1015 unsigned long paddr, next_paddr;
1016 - int i = idx + 1, pages = 1;
1017 + unsigned long i = idx + 1, pages = 1;
1018
1019 /* find the number of contiguous pages starting from idx */
1020 paddr = __sme_page_pa(inpages[idx]);
1021 @@ -6421,12 +6421,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,
1022
1023 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1024 {
1025 - unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
1026 + unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
1027 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1028 struct kvm_sev_launch_update_data params;
1029 struct sev_data_launch_update_data *data;
1030 struct page **inpages;
1031 - int i, ret, pages;
1032 + int ret;
1033
1034 if (!sev_guest(kvm))
1035 return -ENOTTY;
1036 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1037 index f99f59625da5..6b6bcafd1d2c 100644
1038 --- a/arch/x86/kvm/vmx.c
1039 +++ b/arch/x86/kvm/vmx.c
1040 @@ -11582,6 +11582,17 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
1041 return 0;
1042 }
1043
1044 +static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
1045 + int msr;
1046 +
1047 + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
1048 + unsigned word = msr / BITS_PER_LONG;
1049 +
1050 + msr_bitmap[word] = ~0;
1051 + msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
1052 + }
1053 +}
1054 +
1055 /*
1056 * Merge L0's and L1's MSR bitmap, return false to indicate that
1057 * we do not use the hardware.
1058 @@ -11623,39 +11634,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
1059 return false;
1060
1061 msr_bitmap_l1 = (unsigned long *)kmap(page);
1062 - if (nested_cpu_has_apic_reg_virt(vmcs12)) {
1063 - /*
1064 - * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
1065 - * just lets the processor take the value from the virtual-APIC page;
1066 - * take those 256 bits directly from the L1 bitmap.
1067 - */
1068 - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
1069 - unsigned word = msr / BITS_PER_LONG;
1070 - msr_bitmap_l0[word] = msr_bitmap_l1[word];
1071 - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
1072 - }
1073 - } else {
1074 - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
1075 - unsigned word = msr / BITS_PER_LONG;
1076 - msr_bitmap_l0[word] = ~0;
1077 - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
1078 - }
1079 - }
1080
1081 - nested_vmx_disable_intercept_for_msr(
1082 - msr_bitmap_l1, msr_bitmap_l0,
1083 - X2APIC_MSR(APIC_TASKPRI),
1084 - MSR_TYPE_W);
1085 + /*
1086 + * To keep the control flow simple, pay eight 8-byte writes (sixteen
1087 + * 4-byte writes on 32-bit systems) up front to enable intercepts for
1088 + * the x2APIC MSR range and selectively disable them below.
1089 + */
1090 + enable_x2apic_msr_intercepts(msr_bitmap_l0);
1091 +
1092 + if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
1093 + if (nested_cpu_has_apic_reg_virt(vmcs12)) {
1094 + /*
1095 + * L0 need not intercept reads for MSRs between 0x800
1096 + * and 0x8ff, it just lets the processor take the value
1097 + * from the virtual-APIC page; take those 256 bits
1098 + * directly from the L1 bitmap.
1099 + */
1100 + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
1101 + unsigned word = msr / BITS_PER_LONG;
1102 +
1103 + msr_bitmap_l0[word] = msr_bitmap_l1[word];
1104 + }
1105 + }
1106
1107 - if (nested_cpu_has_vid(vmcs12)) {
1108 - nested_vmx_disable_intercept_for_msr(
1109 - msr_bitmap_l1, msr_bitmap_l0,
1110 - X2APIC_MSR(APIC_EOI),
1111 - MSR_TYPE_W);
1112 nested_vmx_disable_intercept_for_msr(
1113 msr_bitmap_l1, msr_bitmap_l0,
1114 - X2APIC_MSR(APIC_SELF_IPI),
1115 - MSR_TYPE_W);
1116 + X2APIC_MSR(APIC_TASKPRI),
1117 + MSR_TYPE_R | MSR_TYPE_W);
1118 +
1119 + if (nested_cpu_has_vid(vmcs12)) {
1120 + nested_vmx_disable_intercept_for_msr(
1121 + msr_bitmap_l1, msr_bitmap_l0,
1122 + X2APIC_MSR(APIC_EOI),
1123 + MSR_TYPE_W);
1124 + nested_vmx_disable_intercept_for_msr(
1125 + msr_bitmap_l1, msr_bitmap_l0,
1126 + X2APIC_MSR(APIC_SELF_IPI),
1127 + MSR_TYPE_W);
1128 + }
1129 }
1130
1131 if (spec_ctrl)
1132 @@ -12836,11 +12852,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
1133 nested_cache_shadow_vmcs12(vcpu, vmcs12);
1134
1135 /*
1136 - * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
1137 - * by event injection, halt vcpu.
1138 + * If we're entering a halted L2 vcpu and the L2 vcpu won't be
1139 + * awakened by event injection or by an NMI-window VM-exit or
1140 + * by an interrupt-window VM-exit, halt the vcpu.
1141 */
1142 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
1143 - !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) {
1144 + !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
1145 + !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
1146 + !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
1147 + (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
1148 vmx->nested.nested_run_pending = 0;
1149 return kvm_vcpu_halt(vcpu);
1150 }
1151 diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
1152 index 0df4080fa20f..a94da7dd3eae 100644
1153 --- a/arch/xtensa/kernel/stacktrace.c
1154 +++ b/arch/xtensa/kernel/stacktrace.c
1155 @@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
1156 return 1;
1157 }
1158
1159 +/*
1160 + * level == 0 is for the return address from the caller of this function,
1161 + * not from this function itself.
1162 + */
1163 unsigned long return_address(unsigned level)
1164 {
1165 struct return_addr_data r = {
1166 - .skip = level + 1,
1167 + .skip = level,
1168 };
1169 walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
1170 return r.addr;
1171 diff --git a/block/bio.c b/block/bio.c
1172 index 55a5386fd431..3d757055305f 100644
1173 --- a/block/bio.c
1174 +++ b/block/bio.c
1175 @@ -1240,8 +1240,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
1176 }
1177 }
1178
1179 - if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1180 + if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
1181 + if (!map_data)
1182 + __free_page(page);
1183 break;
1184 + }
1185
1186 len -= bytes;
1187 offset = 0;
1188 diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
1189 index 78f9de260d5f..2f4641e5ecde 100644
1190 --- a/drivers/acpi/acpica/dsopcode.c
1191 +++ b/drivers/acpi/acpica/dsopcode.c
1192 @@ -523,6 +523,10 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
1193 ACPI_FORMAT_UINT64(obj_desc->region.address),
1194 obj_desc->region.length));
1195
1196 + status = acpi_ut_add_address_range(obj_desc->region.space_id,
1197 + obj_desc->region.address,
1198 + obj_desc->region.length, node);
1199 +
1200 /* Now the address and length are valid for this opregion */
1201
1202 obj_desc->region.flags |= AOPOBJ_DATA_VALID;
1203 diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
1204 index e10fec99a182..4424997ecf30 100644
1205 --- a/drivers/acpi/acpica/evgpe.c
1206 +++ b/drivers/acpi/acpica/evgpe.c
1207 @@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
1208
1209 ACPI_FUNCTION_TRACE(ev_enable_gpe);
1210
1211 - /* Enable the requested GPE */
1212 + /* Clear the GPE status */
1213 + status = acpi_hw_clear_gpe(gpe_event_info);
1214 + if (ACPI_FAILURE(status))
1215 + return_ACPI_STATUS(status);
1216
1217 + /* Enable the requested GPE */
1218 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
1219 return_ACPI_STATUS(status);
1220 }
1221 diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
1222 index 8638f43cfc3d..79d86da1c892 100644
1223 --- a/drivers/acpi/acpica/nsobject.c
1224 +++ b/drivers/acpi/acpica/nsobject.c
1225 @@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
1226 }
1227 }
1228
1229 + if (obj_desc->common.type == ACPI_TYPE_REGION) {
1230 + acpi_ut_remove_address_range(obj_desc->region.space_id, node);
1231 + }
1232 +
1233 /* Clear the Node entry in all cases */
1234
1235 node->object = NULL;
1236 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
1237 index 40728491f37b..1df9cb8e659e 100644
1238 --- a/drivers/char/Kconfig
1239 +++ b/drivers/char/Kconfig
1240 @@ -343,7 +343,7 @@ config XILINX_HWICAP
1241
1242 config R3964
1243 tristate "Siemens R3964 line discipline"
1244 - depends on TTY
1245 + depends on TTY && BROKEN
1246 ---help---
1247 This driver allows synchronous communication with devices using the
1248 Siemens R3964 packet protocol. Unless you are dealing with special
1249 diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
1250 index 258c8d259ea1..f965845917e3 100644
1251 --- a/drivers/clk/meson/meson-aoclk.c
1252 +++ b/drivers/clk/meson/meson-aoclk.c
1253 @@ -65,20 +65,15 @@ int meson_aoclkc_probe(struct platform_device *pdev)
1254 return ret;
1255 }
1256
1257 - /* Populate regmap */
1258 - for (clkid = 0; clkid < data->num_clks; clkid++)
1259 + /*
1260 + * Populate regmap and register all clks
1261 + */
1262 + for (clkid = 0; clkid < data->num_clks; clkid++) {
1263 data->clks[clkid]->map = regmap;
1264
1265 - /* Register all clks */
1266 - for (clkid = 0; clkid < data->hw_data->num; clkid++) {
1267 - if (!data->hw_data->hws[clkid])
1268 - continue;
1269 -
1270 ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
1271 - if (ret) {
1272 - dev_err(dev, "Clock registration failed\n");
1273 + if (ret)
1274 return ret;
1275 - }
1276 }
1277
1278 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
1279 diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
1280 index 00aad8164dec..542f31ce108f 100644
1281 --- a/drivers/gpu/drm/i915/gvt/gtt.c
1282 +++ b/drivers/gpu/drm/i915/gvt/gtt.c
1283 @@ -1940,7 +1940,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
1284 */
1285 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
1286 {
1287 - atomic_dec(&mm->pincount);
1288 + atomic_dec_if_positive(&mm->pincount);
1289 }
1290
1291 /**
1292 diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
1293 index 43aa058e29fc..663a7c9ca3d3 100644
1294 --- a/drivers/gpu/drm/i915/gvt/scheduler.c
1295 +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
1296 @@ -1389,8 +1389,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1297 intel_runtime_pm_put(dev_priv);
1298 }
1299
1300 - if (ret && (vgpu_is_vm_unhealthy(ret))) {
1301 - enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1302 + if (ret) {
1303 + if (vgpu_is_vm_unhealthy(ret))
1304 + enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1305 intel_vgpu_destroy_workload(workload);
1306 return ERR_PTR(ret);
1307 }
1308 diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
1309 index 9ef515df724b..54e767bd5ddb 100644
1310 --- a/drivers/gpu/drm/udl/udl_drv.c
1311 +++ b/drivers/gpu/drm/udl/udl_drv.c
1312 @@ -51,6 +51,7 @@ static struct drm_driver driver = {
1313 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
1314 .load = udl_driver_load,
1315 .unload = udl_driver_unload,
1316 + .release = udl_driver_release,
1317
1318 /* gem hooks */
1319 .gem_free_object_unlocked = udl_gem_free_object,
1320 diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
1321 index e9e9b1ff678e..4ae67d882eae 100644
1322 --- a/drivers/gpu/drm/udl/udl_drv.h
1323 +++ b/drivers/gpu/drm/udl/udl_drv.h
1324 @@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
1325
1326 int udl_driver_load(struct drm_device *dev, unsigned long flags);
1327 void udl_driver_unload(struct drm_device *dev);
1328 +void udl_driver_release(struct drm_device *dev);
1329
1330 int udl_fbdev_init(struct drm_device *dev);
1331 void udl_fbdev_cleanup(struct drm_device *dev);
1332 diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
1333 index 1b014d92855b..19055dda3140 100644
1334 --- a/drivers/gpu/drm/udl/udl_main.c
1335 +++ b/drivers/gpu/drm/udl/udl_main.c
1336 @@ -378,6 +378,12 @@ void udl_driver_unload(struct drm_device *dev)
1337 udl_free_urb_list(dev);
1338
1339 udl_fbdev_cleanup(dev);
1340 - udl_modeset_cleanup(dev);
1341 kfree(udl);
1342 }
1343 +
1344 +void udl_driver_release(struct drm_device *dev)
1345 +{
1346 + udl_modeset_cleanup(dev);
1347 + drm_dev_fini(dev);
1348 + kfree(dev);
1349 +}
1350 diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
1351 index 81da17a42dc9..c7adaca2ab01 100644
1352 --- a/drivers/hwmon/Kconfig
1353 +++ b/drivers/hwmon/Kconfig
1354 @@ -1755,6 +1755,7 @@ config SENSORS_VT8231
1355 config SENSORS_W83773G
1356 tristate "Nuvoton W83773G"
1357 depends on I2C
1358 + select REGMAP_I2C
1359 help
1360 If you say yes here you get support for the Nuvoton W83773G hardware
1361 monitoring chip.
1362 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
1363 index 96d5fb3f6199..bc6ef2303f0b 100644
1364 --- a/drivers/md/dm-integrity.c
1365 +++ b/drivers/md/dm-integrity.c
1366 @@ -908,7 +908,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
1367 static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
1368 {
1369 return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
1370 - range2->logical_sector + range2->n_sectors > range2->logical_sector;
1371 + range1->logical_sector + range1->n_sectors > range2->logical_sector;
1372 }
1373
1374 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
1375 @@ -954,8 +954,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
1376 struct dm_integrity_range *last_range =
1377 list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
1378 struct task_struct *last_range_task;
1379 - if (!ranges_overlap(range, last_range))
1380 - break;
1381 last_range_task = last_range->task;
1382 list_del(&last_range->wait_entry);
1383 if (!add_new_range(ic, last_range, false)) {
1384 @@ -3174,7 +3172,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
1385 journal_watermark = val;
1386 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
1387 sync_msec = val;
1388 - else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
1389 + else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
1390 if (ic->meta_dev) {
1391 dm_put_device(ti, ic->meta_dev);
1392 ic->meta_dev = NULL;
1393 @@ -3193,17 +3191,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
1394 goto bad;
1395 }
1396 ic->sectors_per_block = val >> SECTOR_SHIFT;
1397 - } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
1398 + } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
1399 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
1400 "Invalid internal_hash argument");
1401 if (r)
1402 goto bad;
1403 - } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
1404 + } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
1405 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
1406 "Invalid journal_crypt argument");
1407 if (r)
1408 goto bad;
1409 - } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
1410 + } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
1411 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
1412 "Invalid journal_mac argument");
1413 if (r)
1414 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1415 index 3d0e2c198f06..c7fe4789c40e 100644
1416 --- a/drivers/md/dm-table.c
1417 +++ b/drivers/md/dm-table.c
1418 @@ -1872,6 +1872,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
1419 return true;
1420 }
1421
1422 +static int device_requires_stable_pages(struct dm_target *ti,
1423 + struct dm_dev *dev, sector_t start,
1424 + sector_t len, void *data)
1425 +{
1426 + struct request_queue *q = bdev_get_queue(dev->bdev);
1427 +
1428 + return q && bdi_cap_stable_pages_required(q->backing_dev_info);
1429 +}
1430 +
1431 +/*
1432 + * If any underlying device requires stable pages, a table must require
1433 + * them as well. Only targets that support iterate_devices are considered:
1434 + * don't want error, zero, etc to require stable pages.
1435 + */
1436 +static bool dm_table_requires_stable_pages(struct dm_table *t)
1437 +{
1438 + struct dm_target *ti;
1439 + unsigned i;
1440 +
1441 + for (i = 0; i < dm_table_get_num_targets(t); i++) {
1442 + ti = dm_table_get_target(t, i);
1443 +
1444 + if (ti->type->iterate_devices &&
1445 + ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
1446 + return true;
1447 + }
1448 +
1449 + return false;
1450 +}
1451 +
1452 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1453 struct queue_limits *limits)
1454 {
1455 @@ -1929,6 +1959,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1456
1457 dm_table_verify_integrity(t);
1458
1459 + /*
1460 + * Some devices don't use blk_integrity but still want stable pages
1461 + * because they do their own checksumming.
1462 + */
1463 + if (dm_table_requires_stable_pages(t))
1464 + q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
1465 + else
1466 + q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
1467 +
1468 /*
1469 * Determine whether or not this queue's I/O timings contribute
1470 * to the entropy pool, Only request-based targets use this.
1471 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1472 index 07d2949a8746..42768fe92b41 100644
1473 --- a/drivers/md/dm.c
1474 +++ b/drivers/md/dm.c
1475 @@ -1007,15 +1007,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1476 return -EINVAL;
1477 }
1478
1479 - /*
1480 - * BIO based queue uses its own splitting. When multipage bvecs
1481 - * is switched on, size of the incoming bio may be too big to
1482 - * be handled in some targets, such as crypt.
1483 - *
1484 - * When these targets are ready for the big bio, we can remove
1485 - * the limit.
1486 - */
1487 - ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
1488 + ti->max_io_len = (uint32_t) len;
1489
1490 return 0;
1491 }
1492 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1493 index 0bd93bb7d1a2..581ad0a17d0c 100644
1494 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1495 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1496 @@ -1092,6 +1092,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1497 tpa_info = &rxr->rx_tpa[agg_id];
1498
1499 if (unlikely(cons != rxr->rx_next_cons)) {
1500 + netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
1501 + cons, rxr->rx_next_cons);
1502 bnxt_sched_reset(bp, rxr);
1503 return;
1504 }
1505 @@ -1544,15 +1546,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1506 }
1507
1508 cons = rxcmp->rx_cmp_opaque;
1509 - rx_buf = &rxr->rx_buf_ring[cons];
1510 - data = rx_buf->data;
1511 - data_ptr = rx_buf->data_ptr;
1512 if (unlikely(cons != rxr->rx_next_cons)) {
1513 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1514
1515 + netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1516 + cons, rxr->rx_next_cons);
1517 bnxt_sched_reset(bp, rxr);
1518 return rc1;
1519 }
1520 + rx_buf = &rxr->rx_buf_ring[cons];
1521 + data = rx_buf->data;
1522 + data_ptr = rx_buf->data_ptr;
1523 prefetch(data_ptr);
1524
1525 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1526 @@ -1569,11 +1573,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1527
1528 rx_buf->data = NULL;
1529 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1530 + u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1531 +
1532 bnxt_reuse_rx_data(rxr, cons, data);
1533 if (agg_bufs)
1534 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1535
1536 rc = -EIO;
1537 + if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1538 + netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
1539 + bnxt_sched_reset(bp, rxr);
1540 + }
1541 goto next_rx;
1542 }
1543
1544 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
1545 index c8704b1690eb..a475f36ddf8c 100644
1546 --- a/drivers/net/ethernet/ibm/ibmvnic.c
1547 +++ b/drivers/net/ethernet/ibm/ibmvnic.c
1548 @@ -1888,6 +1888,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
1549 */
1550 adapter->state = VNIC_PROBED;
1551
1552 + reinit_completion(&adapter->init_done);
1553 rc = init_crq_queue(adapter);
1554 if (rc) {
1555 netdev_err(adapter->netdev,
1556 @@ -4569,7 +4570,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
1557 old_num_rx_queues = adapter->req_rx_queues;
1558 old_num_tx_queues = adapter->req_tx_queues;
1559
1560 - init_completion(&adapter->init_done);
1561 + reinit_completion(&adapter->init_done);
1562 adapter->init_done_rc = 0;
1563 ibmvnic_send_crq_init(adapter);
1564 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
1565 @@ -4624,7 +4625,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
1566
1567 adapter->from_passive_init = false;
1568
1569 - init_completion(&adapter->init_done);
1570 adapter->init_done_rc = 0;
1571 ibmvnic_send_crq_init(adapter);
1572 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
1573 @@ -4703,6 +4703,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
1574 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
1575 INIT_LIST_HEAD(&adapter->rwi_list);
1576 spin_lock_init(&adapter->rwi_lock);
1577 + init_completion(&adapter->init_done);
1578 adapter->resetting = false;
1579
1580 adapter->mac_change_pending = false;
1581 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1582 index eac245a93f91..4ab0d030b544 100644
1583 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1584 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1585 @@ -122,7 +122,9 @@ out:
1586 return err;
1587 }
1588
1589 -/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
1590 +/* xoff = ((301 + 2.16 * len [m]) * speed [Gbps] + 2.72 * MTU [B])
1591 + * minimum speed value is 40 Gbps
1592 + */
1593 static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
1594 {
1595 u32 speed;
1596 @@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
1597 int err;
1598
1599 err = mlx5e_port_linkspeed(priv->mdev, &speed);
1600 - if (err) {
1601 - mlx5_core_warn(priv->mdev, "cannot get port speed\n");
1602 - return 0;
1603 - }
1604 + if (err)
1605 + speed = SPEED_40000;
1606 + speed = max_t(u32, speed, SPEED_40000);
1607
1608 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
1609
1610 @@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
1611 }
1612
1613 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
1614 - u32 xoff, unsigned int mtu)
1615 + u32 xoff, unsigned int max_mtu)
1616 {
1617 int i;
1618
1619 @@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
1620 }
1621
1622 if (port_buffer->buffer[i].size <
1623 - (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
1624 + (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
1625 return -ENOMEM;
1626
1627 port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
1628 - port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
1629 + port_buffer->buffer[i].xon =
1630 + port_buffer->buffer[i].xoff - max_mtu;
1631 }
1632
1633 return 0;
1634 @@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
1635
1636 /**
1637 * update_buffer_lossy()
1638 - * mtu: device's MTU
1639 + * max_mtu: netdev's max_mtu
1640 * pfc_en: <input> current pfc configuration
1641 * buffer: <input> current prio to buffer mapping
1642 * xoff: <input> xoff value
1643 @@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
1644 * Return 0 if no error.
1645 * Set change to true if buffer configuration is modified.
1646 */
1647 -static int update_buffer_lossy(unsigned int mtu,
1648 +static int update_buffer_lossy(unsigned int max_mtu,
1649 u8 pfc_en, u8 *buffer, u32 xoff,
1650 struct mlx5e_port_buffer *port_buffer,
1651 bool *change)
1652 @@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
1653 }
1654
1655 if (changed) {
1656 - err = update_xoff_threshold(port_buffer, xoff, mtu);
1657 + err = update_xoff_threshold(port_buffer, xoff, max_mtu);
1658 if (err)
1659 return err;
1660
1661 @@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
1662 return 0;
1663 }
1664
1665 +#define MINIMUM_MAX_MTU 9216
1666 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1667 u32 change, unsigned int mtu,
1668 struct ieee_pfc *pfc,
1669 @@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1670 bool update_prio2buffer = false;
1671 u8 buffer[MLX5E_MAX_PRIORITY];
1672 bool update_buffer = false;
1673 + unsigned int max_mtu;
1674 u32 total_used = 0;
1675 u8 curr_pfc_en;
1676 int err;
1677 int i;
1678
1679 mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
1680 + max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
1681
1682 err = mlx5e_port_query_buffer(priv, &port_buffer);
1683 if (err)
1684 @@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1685
1686 if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
1687 update_buffer = true;
1688 - err = update_xoff_threshold(&port_buffer, xoff, mtu);
1689 + err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
1690 if (err)
1691 return err;
1692 }
1693 @@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1694 if (err)
1695 return err;
1696
1697 - err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
1698 + err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
1699 &port_buffer, &update_buffer);
1700 if (err)
1701 return err;
1702 @@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1703 if (err)
1704 return err;
1705
1706 - err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
1707 - &port_buffer, &update_buffer);
1708 + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
1709 + xoff, &port_buffer, &update_buffer);
1710 if (err)
1711 return err;
1712 }
1713 @@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1714 return -EINVAL;
1715
1716 update_buffer = true;
1717 - err = update_xoff_threshold(&port_buffer, xoff, mtu);
1718 + err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
1719 if (err)
1720 return err;
1721 }
1722 @@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1723 /* Need to update buffer configuration if xoff value is changed */
1724 if (!update_buffer && xoff != priv->dcbx.xoff) {
1725 update_buffer = true;
1726 - err = update_xoff_threshold(&port_buffer, xoff, mtu);
1727 + err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
1728 if (err)
1729 return err;
1730 }
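
Working the comment's formula through the integer arithmetic in calculate_xoff() helps check the units: speed is in Mbps, so speed / 1000 yields Gbps, and the /100 divisors encode the 2.16 and 2.72 constants. A standalone sketch with illustrative inputs (7 m cable, 9216-byte MTU, the 40 Gbps floor the patch adds):

/* Standalone sketch of calculate_xoff()'s arithmetic; the input values
 * below are illustrative, not taken from real hardware. */
#include <stdio.h>

static unsigned int xoff_bytes(unsigned int speed_mbps,
			       unsigned int cable_len_m,
			       unsigned int mtu)
{
	if (speed_mbps < 40000)		/* the patch floors speed at 40G */
		speed_mbps = 40000;
	return (301 + 216 * cable_len_m / 100) * speed_mbps / 1000 +
	       272 * mtu / 100;
}

int main(void)
{
	/* (301 + 15) * 40 + 25067 = 37707 bytes */
	printf("%u\n", xoff_bytes(40000, 7, 9216));
	return 0;
}
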
1731 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
1732 index db3278cc052b..124e4567a4ee 100644
1733 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
1734 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
1735 @@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
1736 if (err)
1737 return err;
1738
1739 + mutex_lock(&mdev->mlx5e_res.td.list_lock);
1740 list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
1741 + mutex_unlock(&mdev->mlx5e_res.td.list_lock);
1742
1743 return 0;
1744 }
1745 @@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
1746 void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
1747 struct mlx5e_tir *tir)
1748 {
1749 + mutex_lock(&mdev->mlx5e_res.td.list_lock);
1750 mlx5_core_destroy_tir(mdev, tir->tirn);
1751 list_del(&tir->list);
1752 + mutex_unlock(&mdev->mlx5e_res.td.list_lock);
1753 }
1754
1755 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
1756 @@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
1757 }
1758
1759 INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
1760 + mutex_init(&mdev->mlx5e_res.td.list_lock);
1761
1762 return 0;
1763
1764 @@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
1765 {
1766 struct mlx5_core_dev *mdev = priv->mdev;
1767 struct mlx5e_tir *tir;
1768 - int err = -ENOMEM;
1769 + int err = 0;
1770 u32 tirn = 0;
1771 int inlen;
1772 void *in;
1773
1774 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1775 in = kvzalloc(inlen, GFP_KERNEL);
1776 - if (!in)
1777 + if (!in) {
1778 + err = -ENOMEM;
1779 goto out;
1780 + }
1781
1782 if (enable_uc_lb)
1783 MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
1784 @@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
1785
1786 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
1787
1788 + mutex_lock(&mdev->mlx5e_res.td.list_lock);
1789 list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
1790 tirn = tir->tirn;
1791 err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
1792 @@ -168,6 +176,7 @@ out:
1793 kvfree(in);
1794 if (err)
1795 netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
1796 + mutex_unlock(&mdev->mlx5e_res.td.list_lock);
1797
1798 return err;
1799 }
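
The en_common.c hunks put every tirs_list writer and the refresh-time traversal under the new list_lock, so a TIR cannot be added or destroyed mid-walk. The shape of the locking, reduced to a sketch with hypothetical names:

/* Minimal sketch of the locking the en_common.c hunks introduce:
 * one mutex covers both list mutation and traversal. */
#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(obj_list_lock);
static LIST_HEAD(obj_list);

struct obj { struct list_head list; };

static void obj_add(struct obj *o)
{
	mutex_lock(&obj_list_lock);
	list_add(&o->list, &obj_list);
	mutex_unlock(&obj_list_lock);
}

static void obj_for_each(void (*fn)(struct obj *))
{
	struct obj *o;

	mutex_lock(&obj_list_lock);	/* walker excluded from add/del */
	list_for_each_entry(o, &obj_list, list)
		fn(o);
	mutex_unlock(&obj_list_lock);
}
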
1800 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1801 index 5cf5f2a9d51f..8de64e88c670 100644
1802 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1803 +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1804 @@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
1805 void *cmd;
1806 int ret;
1807
1808 + rcu_read_lock();
1809 + flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
1810 + rcu_read_unlock();
1811 +
1812 + if (!flow) {
1813 + WARN_ONCE(1, "Received NULL pointer for handle\n");
1814 + return -EINVAL;
1815 + }
1816 +
1817 buf = kzalloc(size, GFP_ATOMIC);
1818 if (!buf)
1819 return -ENOMEM;
1820
1821 cmd = (buf + 1);
1822
1823 - rcu_read_lock();
1824 - flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
1825 - rcu_read_unlock();
1826 mlx5_fpga_tls_flow_to_cmd(flow, cmd);
1827
1828 MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
1829 @@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
1830 buf->complete = mlx_tls_kfree_complete;
1831
1832 ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
1833 + if (ret < 0)
1834 + kfree(buf);
1835
1836 return ret;
1837 }
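
The fpga/tls.c hunk makes two related fixes: validate the idr lookup before allocating, so nothing leaks on the -EINVAL path, and free the buffer when the send fails because the callee did not consume it. A rough sketch of that flow, with hypothetical stubs in place of the mlx5 calls:

/* Sketch of the fixed resync flow; lookup_flow() and send_cmd() are
 * hypothetical stand-ins, not the mlx5 API. */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

static void *lookup_flow(u32 handle)		/* stands in for idr_find() */
{
	return NULL;
}

static int send_cmd(void *flow, void *buf)	/* stands in for sendmsg */
{
	return -EIO;
}

static int resync_sketch(u32 handle, size_t size)
{
	void *flow, *buf;
	int ret;

	flow = lookup_flow(handle);	/* check first ... */
	if (!flow)
		return -EINVAL;		/* ... nothing allocated to leak */

	buf = kzalloc(size, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	ret = send_cmd(flow, buf);
	if (ret < 0)
		kfree(buf);	/* ownership not transferred on failure */
	return ret;
}
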
1838 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1839 index 563ce3fedab4..0e820cf92f8a 100644
1840 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
1841 +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1842 @@ -162,26 +162,6 @@ static struct mlx5_profile profile[] = {
1843 .size = 8,
1844 .limit = 4
1845 },
1846 - .mr_cache[16] = {
1847 - .size = 8,
1848 - .limit = 4
1849 - },
1850 - .mr_cache[17] = {
1851 - .size = 8,
1852 - .limit = 4
1853 - },
1854 - .mr_cache[18] = {
1855 - .size = 8,
1856 - .limit = 4
1857 - },
1858 - .mr_cache[19] = {
1859 - .size = 4,
1860 - .limit = 2
1861 - },
1862 - .mr_cache[20] = {
1863 - .size = 4,
1864 - .limit = 2
1865 - },
1866 },
1867 };
1868
1869 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
1870 index 18a09cdcd9c6..aa5869eb2e3f 100644
1871 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
1872 +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
1873 @@ -225,7 +225,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
1874 ret = dev_queue_xmit(skb);
1875 nfp_repr_inc_tx_stats(netdev, len, ret);
1876
1877 - return ret;
1878 + return NETDEV_TX_OK;
1879 }
1880
1881 static int nfp_repr_stop(struct net_device *netdev)
1882 @@ -329,6 +329,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
1883
1884 SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
1885
1886 + netdev->priv_flags |= IFF_DISABLE_NETPOLL;
1887 +
1888 if (nfp_app_has_tc(app)) {
1889 netdev->features |= NETIF_F_HW_TC;
1890 netdev->hw_features |= NETIF_F_HW_TC;
1891 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1892 index 5f45ffeeecb4..7a50b911b180 100644
1893 --- a/drivers/net/ethernet/realtek/r8169.c
1894 +++ b/drivers/net/ethernet/realtek/r8169.c
1895 @@ -28,6 +28,7 @@
1896 #include <linux/pm_runtime.h>
1897 #include <linux/firmware.h>
1898 #include <linux/prefetch.h>
1899 +#include <linux/pci-aspm.h>
1900 #include <linux/ipv6.h>
1901 #include <net/ip6_checksum.h>
1902
1903 @@ -5417,7 +5418,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
1904 tp->cp_cmd |= PktCntrDisable | INTT_1;
1905 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1906
1907 - RTL_W16(tp, IntrMitigate, 0x5151);
1908 + RTL_W16(tp, IntrMitigate, 0x5100);
1909
1910 /* Work around for RxFIFO overflow. */
1911 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
1912 @@ -7324,6 +7325,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1913 return rc;
1914 }
1915
1916 +	/* Disable ASPM completely as it causes random devices to stop
1917 +	 * working, as well as full system hangs, for some PCIe device users.
1918 +	 */
1919 + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
1920 +
1921 /* enable device (incl. PCI PM wakeup and hotplug setup) */
1922 rc = pcim_enable_device(pdev);
1923 if (rc < 0) {
1924 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
1925 index 42d284669b03..31d8d83c25ac 100644
1926 --- a/drivers/net/hyperv/hyperv_net.h
1927 +++ b/drivers/net/hyperv/hyperv_net.h
1928 @@ -970,6 +970,7 @@ struct netvsc_device {
1929
1930 wait_queue_head_t wait_drain;
1931 bool destroy;
1932 + bool tx_disable; /* if true, do not wake up queue again */
1933
1934 /* Receive buffer allocated by us but manages by NetVSP */
1935 void *recv_buf;
1936 diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
1937 index 1a942feab954..fb12b63439c6 100644
1938 --- a/drivers/net/hyperv/netvsc.c
1939 +++ b/drivers/net/hyperv/netvsc.c
1940 @@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
1941
1942 init_waitqueue_head(&net_device->wait_drain);
1943 net_device->destroy = false;
1944 + net_device->tx_disable = false;
1945
1946 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
1947 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
1948 @@ -716,7 +717,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
1949 } else {
1950 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
1951
1952 - if (netif_tx_queue_stopped(txq) &&
1953 + if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
1954 (hv_get_avail_to_write_percent(&channel->outbound) >
1955 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
1956 netif_tx_wake_queue(txq);
1957 @@ -871,7 +872,8 @@ static inline int netvsc_send_pkt(
1958 } else if (ret == -EAGAIN) {
1959 netif_tx_stop_queue(txq);
1960 ndev_ctx->eth_stats.stop_queue++;
1961 - if (atomic_read(&nvchan->queue_sends) < 1) {
1962 + if (atomic_read(&nvchan->queue_sends) < 1 &&
1963 + !net_device->tx_disable) {
1964 netif_tx_wake_queue(txq);
1965 ndev_ctx->eth_stats.wake_queue++;
1966 ret = -ENOSPC;
1967 diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1968 index c8320405c8f1..9d699bd5f715 100644
1969 --- a/drivers/net/hyperv/netvsc_drv.c
1970 +++ b/drivers/net/hyperv/netvsc_drv.c
1971 @@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
1972 rcu_read_unlock();
1973 }
1974
1975 +static void netvsc_tx_enable(struct netvsc_device *nvscdev,
1976 + struct net_device *ndev)
1977 +{
1978 + nvscdev->tx_disable = false;
1979 + virt_wmb(); /* ensure queue wake up mechanism is on */
1980 +
1981 + netif_tx_wake_all_queues(ndev);
1982 +}
1983 +
1984 static int netvsc_open(struct net_device *net)
1985 {
1986 struct net_device_context *ndev_ctx = netdev_priv(net);
1987 @@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
1988 rdev = nvdev->extension;
1989 if (!rdev->link_state) {
1990 netif_carrier_on(net);
1991 - netif_tx_wake_all_queues(net);
1992 + netvsc_tx_enable(nvdev, net);
1993 }
1994
1995 if (vf_netdev) {
1996 @@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
1997 }
1998 }
1999
2000 +static void netvsc_tx_disable(struct netvsc_device *nvscdev,
2001 + struct net_device *ndev)
2002 +{
2003 + if (nvscdev) {
2004 + nvscdev->tx_disable = true;
2005 + virt_wmb(); /* ensure txq will not wake up after stop */
2006 + }
2007 +
2008 + netif_tx_disable(ndev);
2009 +}
2010 +
2011 static int netvsc_close(struct net_device *net)
2012 {
2013 struct net_device_context *net_device_ctx = netdev_priv(net);
2014 @@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
2015 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
2016 int ret;
2017
2018 - netif_tx_disable(net);
2019 + netvsc_tx_disable(nvdev, net);
2020
2021 /* No need to close rndis filter if it is removed already */
2022 if (!nvdev)
2023 @@ -918,7 +938,7 @@ static int netvsc_detach(struct net_device *ndev,
2024
2025 /* If device was up (receiving) then shutdown */
2026 if (netif_running(ndev)) {
2027 - netif_tx_disable(ndev);
2028 + netvsc_tx_disable(nvdev, ndev);
2029
2030 ret = rndis_filter_close(nvdev);
2031 if (ret) {
2032 @@ -1899,7 +1919,7 @@ static void netvsc_link_change(struct work_struct *w)
2033 if (rdev->link_state) {
2034 rdev->link_state = false;
2035 netif_carrier_on(net);
2036 - netif_tx_wake_all_queues(net);
2037 + netvsc_tx_enable(net_device, net);
2038 } else {
2039 notify = true;
2040 }
2041 @@ -1909,7 +1929,7 @@ static void netvsc_link_change(struct work_struct *w)
2042 if (!rdev->link_state) {
2043 rdev->link_state = true;
2044 netif_carrier_off(net);
2045 - netif_tx_stop_all_queues(net);
2046 + netvsc_tx_disable(net_device, net);
2047 }
2048 kfree(event);
2049 break;
2050 @@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
2051 if (!rdev->link_state) {
2052 rdev->link_state = true;
2053 netif_carrier_off(net);
2054 - netif_tx_stop_all_queues(net);
2055 + netvsc_tx_disable(net_device, net);
2056 event->event = RNDIS_STATUS_MEDIA_CONNECT;
2057 spin_lock_irqsave(&ndev_ctx->lock, flags);
2058 list_add(&event->list, &ndev_ctx->reconfig_events);
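
Across the three netvsc files, the new tx_disable flag gates every wake-up: writers set it and issue virt_wmb() before touching the queues, and the completion path re-checks it before netif_tx_wake_queue(), so a queue stopped for close cannot be woken by a late completion. A reduced sketch of the gating (hypothetical dev_priv, not the netvsc structures):

/* Sketch of the stop/wake gating the netvsc hunks add. */
#include <linux/netdevice.h>
#include <asm/barrier.h>

struct dev_priv { bool tx_disable; };

static void sketch_tx_disable(struct dev_priv *p, struct net_device *ndev)
{
	p->tx_disable = true;
	virt_wmb();		/* flag visible before queues are stopped */
	netif_tx_disable(ndev);
}

static void sketch_tx_complete(struct dev_priv *p, struct netdev_queue *txq)
{
	/* completion path: never wake a queue on a closing device */
	if (netif_tx_queue_stopped(txq) && !p->tx_disable)
		netif_tx_wake_queue(txq);
}
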
2059 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2060 index 74bebbdb4b15..9195f3476b1d 100644
2061 --- a/drivers/net/usb/qmi_wwan.c
2062 +++ b/drivers/net/usb/qmi_wwan.c
2063 @@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
2064 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
2065 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
2066 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
2067 + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
2068 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
2069 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
2070 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
2071 diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
2072 index da7c72372ffc..9c397fa8704c 100644
2073 --- a/drivers/pci/hotplug/pciehp_ctrl.c
2074 +++ b/drivers/pci/hotplug/pciehp_ctrl.c
2075 @@ -117,6 +117,10 @@ static void remove_board(struct slot *p_slot)
2076 * removed from the slot/adapter.
2077 */
2078 msleep(1000);
2079 +
2080 + /* Ignore link or presence changes caused by power off */
2081 + atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
2082 + &ctrl->pending_events);
2083 }
2084
2085 /* turn off Green LED */
2086 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2087 index c0673a717239..37d897bc4cf1 100644
2088 --- a/drivers/pci/quirks.c
2089 +++ b/drivers/pci/quirks.c
2090 @@ -3852,6 +3852,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
2091 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
2092 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
2093 quirk_dma_func1_alias);
2094 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
2095 + quirk_dma_func1_alias);
2096 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
2097 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
2098 quirk_dma_func1_alias);
2099 diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
2100 index 0840d27381ea..e0a04bfc873e 100644
2101 --- a/drivers/tty/Kconfig
2102 +++ b/drivers/tty/Kconfig
2103 @@ -441,4 +441,28 @@ config VCC
2104 depends on SUN_LDOMS
2105 help
2106 Support for Sun logical domain consoles.
2107 +
2108 +config LDISC_AUTOLOAD
2109 + bool "Automatically load TTY Line Disciplines"
2110 + default y
2111 + help
2112 + Historically the kernel has always automatically loaded any
2113 + line discipline that is in a kernel module when a user asks
2114 + for it to be loaded with the TIOCSETD ioctl, or through other
2115 + means. This is not always the best thing to do on systems
2116 + where you know you will not be using some of the more
2117 + "ancient" line disciplines, so prevent the kernel from doing
2118 + this unless the request comes from a process with the
2119 + CAP_SYS_MODULE capability.
2120 +
2121 + Say 'Y' here if you trust your userspace users to do the right
2122 + thing, or if you have only provided the line disciplines that
2123 + you know you will be using, or if you wish to continue to use
2124 + the traditional method of on-demand loading of these modules
2125 + by any user.
2126 +
2127 + This functionality can be changed at runtime with the
2128 + dev.tty.ldisc_autoload sysctl; this configuration option
2129 + only sets its default value.
2130 +
2131 endif # TTY
2132 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2133 index e7d192ebecd7..ac8025cd4a1f 100644
2134 --- a/drivers/tty/tty_io.c
2135 +++ b/drivers/tty/tty_io.c
2136 @@ -512,6 +512,8 @@ static const struct file_operations hung_up_tty_fops = {
2137 static DEFINE_SPINLOCK(redirect_lock);
2138 static struct file *redirect;
2139
2140 +extern void tty_sysctl_init(void);
2141 +
2142 /**
2143 * tty_wakeup - request more data
2144 * @tty: terminal
2145 @@ -3340,6 +3342,7 @@ void console_sysfs_notify(void)
2146 */
2147 int __init tty_init(void)
2148 {
2149 + tty_sysctl_init();
2150 cdev_init(&tty_cdev, &tty_fops);
2151 if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
2152 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
2153 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
2154 index fc4c97cae01e..53bb6d4e9e8d 100644
2155 --- a/drivers/tty/tty_ldisc.c
2156 +++ b/drivers/tty/tty_ldisc.c
2157 @@ -156,6 +156,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
2158 * takes tty_ldiscs_lock to guard against ldisc races
2159 */
2160
2161 +#if defined(CONFIG_LDISC_AUTOLOAD)
2162 + #define INITIAL_AUTOLOAD_STATE 1
2163 +#else
2164 + #define INITIAL_AUTOLOAD_STATE 0
2165 +#endif
2166 +static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
2167 +
2168 static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
2169 {
2170 struct tty_ldisc *ld;
2171 @@ -170,6 +177,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
2172 */
2173 ldops = get_ldops(disc);
2174 if (IS_ERR(ldops)) {
2175 + if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
2176 + return ERR_PTR(-EPERM);
2177 request_module("tty-ldisc-%d", disc);
2178 ldops = get_ldops(disc);
2179 if (IS_ERR(ldops))
2180 @@ -829,3 +838,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
2181 tty_ldisc_put(tty->ldisc);
2182 tty->ldisc = NULL;
2183 }
2184 +
2185 +static int zero;
2186 +static int one = 1;
2187 +static struct ctl_table tty_table[] = {
2188 + {
2189 + .procname = "ldisc_autoload",
2190 + .data = &tty_ldisc_autoload,
2191 + .maxlen = sizeof(tty_ldisc_autoload),
2192 + .mode = 0644,
2193 + .proc_handler = proc_dointvec,
2194 + .extra1 = &zero,
2195 + .extra2 = &one,
2196 + },
2197 + { }
2198 +};
2199 +
2200 +static struct ctl_table tty_dir_table[] = {
2201 + {
2202 + .procname = "tty",
2203 + .mode = 0555,
2204 + .child = tty_table,
2205 + },
2206 + { }
2207 +};
2208 +
2209 +static struct ctl_table tty_root_table[] = {
2210 + {
2211 + .procname = "dev",
2212 + .mode = 0555,
2213 + .child = tty_dir_table,
2214 + },
2215 + { }
2216 +};
2217 +
2218 +void tty_sysctl_init(void)
2219 +{
2220 + register_sysctl_table(tty_root_table);
2221 +}
2222 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
2223 index 814b395007b2..9529e28e1822 100644
2224 --- a/drivers/virtio/virtio_ring.c
2225 +++ b/drivers/virtio/virtio_ring.c
2226 @@ -1086,6 +1086,8 @@ struct virtqueue *vring_create_virtqueue(
2227 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
2228 if (queue)
2229 break;
2230 + if (!may_reduce_num)
2231 + return NULL;
2232 }
2233
2234 if (!num)
2235 diff --git a/fs/block_dev.c b/fs/block_dev.c
2236 index cdbb888a8d4a..1c25dae083a8 100644
2237 --- a/fs/block_dev.c
2238 +++ b/fs/block_dev.c
2239 @@ -296,10 +296,10 @@ static void blkdev_bio_end_io(struct bio *bio)
2240 struct blkdev_dio *dio = bio->bi_private;
2241 bool should_dirty = dio->should_dirty;
2242
2243 - if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
2244 - if (bio->bi_status && !dio->bio.bi_status)
2245 - dio->bio.bi_status = bio->bi_status;
2246 - } else {
2247 + if (bio->bi_status && !dio->bio.bi_status)
2248 + dio->bio.bi_status = bio->bi_status;
2249 +
2250 + if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
2251 if (!dio->is_sync) {
2252 struct kiocb *iocb = dio->iocb;
2253 ssize_t ret;
2254 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2255 index 8bf9cce11213..0eb333c62fe4 100644
2256 --- a/fs/btrfs/ioctl.c
2257 +++ b/fs/btrfs/ioctl.c
2258 @@ -496,6 +496,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
2259 if (!capable(CAP_SYS_ADMIN))
2260 return -EPERM;
2261
2262 + /*
2263 + * If the fs is mounted with nologreplay, which requires it to be
2264 + * mounted in RO mode as well, we cannot allow discard on free space
2265 + * inside block groups, because log trees refer to extents that are not
2266 + * pinned in a block group's free space cache (pinning the extents is
2267 + * precisely the first phase of replaying a log tree).
2268 + */
2269 + if (btrfs_test_opt(fs_info, NOLOGREPLAY))
2270 + return -EROFS;
2271 +
2272 rcu_read_lock();
2273 list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
2274 dev_list) {
2275 diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
2276 index dc6140013ae8..61d22a56c0ba 100644
2277 --- a/fs/btrfs/props.c
2278 +++ b/fs/btrfs/props.c
2279 @@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
2280
2281 static int prop_compression_validate(const char *value, size_t len)
2282 {
2283 - if (!strncmp("lzo", value, len))
2284 + if (!strncmp("lzo", value, 3))
2285 return 0;
2286 - else if (!strncmp("zlib", value, len))
2287 + else if (!strncmp("zlib", value, 4))
2288 return 0;
2289 - else if (!strncmp("zstd", value, len))
2290 + else if (!strncmp("zstd", value, 4))
2291 return 0;
2292
2293 return -EINVAL;
2294 @@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode,
2295 btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
2296 } else if (!strncmp("zlib", value, 4)) {
2297 type = BTRFS_COMPRESS_ZLIB;
2298 - } else if (!strncmp("zstd", value, len)) {
2299 + } else if (!strncmp("zstd", value, 4)) {
2300 type = BTRFS_COMPRESS_ZSTD;
2301 btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
2302 } else {
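
The props.c fix matters because strncmp() bounded by the input's length accepts bare prefixes: a value of "z" compares equal to the first byte of both "zlib" and "zstd", so validation passed for garbage input. A userspace demonstration of the buggy and fixed shapes:

/* Demo of the strncmp pitfall fixed in prop_compression_validate(). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *value = "z";	/* not a valid compression name */
	size_t len = strlen(value);

	/* buggy shape: strncmp("zlib", "z", 1) == 0, the prefix passes */
	printf("buggy: %d\n", strncmp("zlib", value, len) == 0);

	/* fixed shape: compare all 4 bytes of the literal */
	printf("fixed: %d\n", strncmp("zlib", value, 4) == 0);
	return 0;
}
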
2303 diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
2304 index 50fb0dee23e8..d35b8ec1c485 100644
2305 --- a/include/linux/bitrev.h
2306 +++ b/include/linux/bitrev.h
2307 @@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
2308
2309 #define __constant_bitrev32(x) \
2310 ({ \
2311 - u32 __x = x; \
2312 - __x = (__x >> 16) | (__x << 16); \
2313 - __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
2314 - __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
2315 - __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
2316 - __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
2317 - __x; \
2318 + u32 ___x = x; \
2319 + ___x = (___x >> 16) | (___x << 16); \
2320 + ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
2321 + ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
2322 + ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
2323 + ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
2324 + ___x; \
2325 })
2326
2327 #define __constant_bitrev16(x) \
2328 ({ \
2329 - u16 __x = x; \
2330 - __x = (__x >> 8) | (__x << 8); \
2331 - __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
2332 - __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
2333 - __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
2334 - __x; \
2335 + u16 ___x = x; \
2336 + ___x = (___x >> 8) | (___x << 8); \
2337 + ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
2338 + ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
2339 + ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
2340 + ___x; \
2341 })
2342
2343 #define __constant_bitrev8x4(x) \
2344 ({ \
2345 - u32 __x = x; \
2346 - __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
2347 - __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
2348 - __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
2349 - __x; \
2350 + u32 ___x = x; \
2351 + ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
2352 + ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
2353 + ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
2354 + ___x; \
2355 })
2356
2357 #define __constant_bitrev8(x) \
2358 ({ \
2359 - u8 __x = x; \
2360 - __x = (__x >> 4) | (__x << 4); \
2361 - __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
2362 - __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
2363 - __x; \
2364 + u8 ___x = x; \
2365 + ___x = (___x >> 4) | (___x << 4); \
2366 + ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
2367 + ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
2368 + ___x; \
2369 })
2370
2371 #define bitrev32(x) \
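
The __x to ___x rename avoids a self-initialization bug: if the macro argument itself contains a variable named __x, the statement expression declares a fresh __x and initializes it from itself, leaving it indeterminate. A userspace demonstration with a simplified nibble-swap macro of the same shape (names chosen deliberately to trigger the collision):

/* Demo of the shadowing hazard the bitrev.h hunk removes. */
#include <stdio.h>

#define SWAP_NIBBLES_BAD(x) \
	({ unsigned char __x = (x); (unsigned char)((__x >> 4) | (__x << 4)); })
#define SWAP_NIBBLES_OK(x) \
	({ unsigned char ___x = (x); (unsigned char)((___x >> 4) | (___x << 4)); })

int main(void)
{
	unsigned char __x = 0x3e;	/* caller happens to use the same name */

	/* expands to "__x = (__x)": the temporary shadows the argument and
	 * is initialized from itself, so the result is indeterminate */
	printf("bad: 0x%02x\n", SWAP_NIBBLES_BAD(__x));
	printf("ok:  0x%02x\n", SWAP_NIBBLES_OK(__x));	/* 0xe3 */
	return 0;
}
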
2372 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
2373 index 652f602167df..cc6b6532eb56 100644
2374 --- a/include/linux/memcontrol.h
2375 +++ b/include/linux/memcontrol.h
2376 @@ -559,7 +559,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
2377 void __unlock_page_memcg(struct mem_cgroup *memcg);
2378 void unlock_page_memcg(struct page *page);
2379
2380 -/* idx can be of type enum memcg_stat_item or node_stat_item */
2381 +/*
2382 + * idx can be of type enum memcg_stat_item or node_stat_item.
2383 + * Keep in sync with memcg_exact_page_state().
2384 + */
2385 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
2386 int idx)
2387 {
2388 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
2389 index bbcfe2e5fd91..e8b92dee5a72 100644
2390 --- a/include/linux/mlx5/driver.h
2391 +++ b/include/linux/mlx5/driver.h
2392 @@ -776,6 +776,8 @@ struct mlx5_pagefault {
2393 };
2394
2395 struct mlx5_td {
2396 + /* protects the tirs list against changes while the tirs are refreshed */
2397 + struct mutex list_lock;
2398 struct list_head tirs_list;
2399 u32 tdn;
2400 };
2401 diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
2402 index b8d95564bd53..14edb795ab43 100644
2403 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h
2404 +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
2405 @@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
2406 struct nf_conntrack_tuple tuple;
2407 };
2408
2409 +enum grep_conntrack {
2410 + GRE_CT_UNREPLIED,
2411 + GRE_CT_REPLIED,
2412 + GRE_CT_MAX
2413 +};
2414 +
2415 +struct netns_proto_gre {
2416 + struct nf_proto_net nf;
2417 + rwlock_t keymap_lock;
2418 + struct list_head keymap_list;
2419 + unsigned int gre_timeouts[GRE_CT_MAX];
2420 +};
2421 +
2422 /* add new tuple->key_reply pair to keymap */
2423 int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
2424 struct nf_conntrack_tuple *t);
2425 diff --git a/include/linux/string.h b/include/linux/string.h
2426 index 4a5a0eb7df51..f58e1ef76572 100644
2427 --- a/include/linux/string.h
2428 +++ b/include/linux/string.h
2429 @@ -143,6 +143,9 @@ extern void * memscan(void *,int,__kernel_size_t);
2430 #ifndef __HAVE_ARCH_MEMCMP
2431 extern int memcmp(const void *,const void *,__kernel_size_t);
2432 #endif
2433 +#ifndef __HAVE_ARCH_BCMP
2434 +extern int bcmp(const void *,const void *,__kernel_size_t);
2435 +#endif
2436 #ifndef __HAVE_ARCH_MEMCHR
2437 extern void * memchr(const void *,int,__kernel_size_t);
2438 #endif
2439 diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
2440 index fab02133a919..3dc70adfe5f5 100644
2441 --- a/include/linux/virtio_ring.h
2442 +++ b/include/linux/virtio_ring.h
2443 @@ -63,7 +63,7 @@ struct virtqueue;
2444 /*
2445 * Creates a virtqueue and allocates the descriptor ring. If
2446 * may_reduce_num is set, then this may allocate a smaller ring than
2447 - * expected. The caller should query virtqueue_get_ring_size to learn
2448 + * expected. The caller should query virtqueue_get_vring_size to learn
2449 * the actual size of the ring.
2450 */
2451 struct virtqueue *vring_create_virtqueue(unsigned int index,
2452 diff --git a/include/net/ip.h b/include/net/ip.h
2453 index 71d31e4d4391..cfc3dd5ff085 100644
2454 --- a/include/net/ip.h
2455 +++ b/include/net/ip.h
2456 @@ -648,7 +648,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
2457 unsigned char __user *data, int optlen);
2458 void ip_options_undo(struct ip_options *opt);
2459 void ip_forward_options(struct sk_buff *skb);
2460 -int ip_options_rcv_srr(struct sk_buff *skb);
2461 +int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
2462
2463 /*
2464 * Functions provided by ip_sockglue.c
2465 diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
2466 index 9b5fdc50519a..3f7b166262d7 100644
2467 --- a/include/net/net_namespace.h
2468 +++ b/include/net/net_namespace.h
2469 @@ -57,6 +57,7 @@ struct net {
2470 */
2471 spinlock_t rules_mod_lock;
2472
2473 + u32 hash_mix;
2474 atomic64_t cookie_gen;
2475
2476 struct list_head list; /* list of network namespaces */
2477 diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
2478 index 16a842456189..d9b665151f3d 100644
2479 --- a/include/net/netns/hash.h
2480 +++ b/include/net/netns/hash.h
2481 @@ -2,16 +2,10 @@
2482 #ifndef __NET_NS_HASH_H__
2483 #define __NET_NS_HASH_H__
2484
2485 -#include <asm/cache.h>
2486 -
2487 -struct net;
2488 +#include <net/net_namespace.h>
2489
2490 static inline u32 net_hash_mix(const struct net *net)
2491 {
2492 -#ifdef CONFIG_NET_NS
2493 - return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
2494 -#else
2495 - return 0;
2496 -#endif
2497 + return net->hash_mix;
2498 }
2499 #endif
2500 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
2501 index 811009ebacd4..379e89c706c9 100644
2502 --- a/kernel/irq/chip.c
2503 +++ b/kernel/irq/chip.c
2504 @@ -1384,6 +1384,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
2505 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
2506 {
2507 data = data->parent_data;
2508 +
2509 + if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
2510 + return 0;
2511 +
2512 if (data->chip->irq_set_wake)
2513 return data->chip->irq_set_wake(data, on);
2514
2515 diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
2516 index ba454cba4069..8e009cee6517 100644
2517 --- a/kernel/irq/irqdesc.c
2518 +++ b/kernel/irq/irqdesc.c
2519 @@ -554,6 +554,7 @@ int __init early_irq_init(void)
2520 alloc_masks(&desc[i], node);
2521 raw_spin_lock_init(&desc[i].lock);
2522 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
2523 + mutex_init(&desc[i].request_mutex);
2524 desc_set_defaults(i, &desc[i], node, NULL, NULL);
2525 }
2526 return arch_early_irq_init();
2527 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2528 index f7c375d1e601..640094391169 100644
2529 --- a/kernel/sched/fair.c
2530 +++ b/kernel/sched/fair.c
2531 @@ -7437,10 +7437,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
2532 if (cfs_rq->last_h_load_update == now)
2533 return;
2534
2535 - cfs_rq->h_load_next = NULL;
2536 + WRITE_ONCE(cfs_rq->h_load_next, NULL);
2537 for_each_sched_entity(se) {
2538 cfs_rq = cfs_rq_of(se);
2539 - cfs_rq->h_load_next = se;
2540 + WRITE_ONCE(cfs_rq->h_load_next, se);
2541 if (cfs_rq->last_h_load_update == now)
2542 break;
2543 }
2544 @@ -7450,7 +7450,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
2545 cfs_rq->last_h_load_update = now;
2546 }
2547
2548 - while ((se = cfs_rq->h_load_next) != NULL) {
2549 + while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
2550 load = cfs_rq->h_load;
2551 load = div64_ul(load * se->avg.load_avg,
2552 cfs_rq_load_avg(cfs_rq) + 1);
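
The fair.c hunk annotates the lockless h_load_next handoff so the compiler must emit single, untorn loads and stores and may not refetch the pointer mid-walk. The annotation pattern, reduced to a sketch with a hypothetical node chain:

/* Sketch of the READ_ONCE/WRITE_ONCE pattern: one CPU publishes a
 * pointer that another walks locklessly. */
#include <linux/compiler.h>

struct node {
	struct node *next_hint;
};

static void publish(struct node *n, struct node *hint)
{
	WRITE_ONCE(n->next_hint, hint);	/* single, untorn store */
}

static void walk(struct node *n, void (*fn)(struct node *))
{
	struct node *p;

	/* exactly one load per iteration; no compiler refetch or tearing */
	while ((p = READ_ONCE(n->next_hint)) != NULL) {
		fn(p);
		n = p;
	}
}
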
2553 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
2554 index fa5de5e8de61..fdeb9bc6affb 100644
2555 --- a/kernel/time/alarmtimer.c
2556 +++ b/kernel/time/alarmtimer.c
2557 @@ -597,7 +597,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
2558 {
2559 struct alarm *alarm = &timr->it.alarm.alarmtimer;
2560
2561 - return ktime_sub(now, alarm->node.expires);
2562 + return ktime_sub(alarm->node.expires, now);
2563 }
2564
2565 /**
2566 diff --git a/lib/string.c b/lib/string.c
2567 index 2c0900a5d51a..72125fd5b4a6 100644
2568 --- a/lib/string.c
2569 +++ b/lib/string.c
2570 @@ -865,6 +865,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
2571 EXPORT_SYMBOL(memcmp);
2572 #endif
2573
2574 +#ifndef __HAVE_ARCH_BCMP
2575 +/**
2576 + * bcmp - returns 0 if and only if the buffers have identical contents.
2577 + * @a: pointer to first buffer.
2578 + * @b: pointer to second buffer.
2579 + * @len: size of buffers.
2580 + *
2581 + * The sign or magnitude of a non-zero return value has no particular
2582 + * meaning, and architectures may implement their own more efficient bcmp(). So
2583 + * while this particular implementation is a simple (tail) call to memcmp, do
2584 + * not rely on anything but whether the return value is zero or non-zero.
2585 + */
2586 +#undef bcmp
2587 +int bcmp(const void *a, const void *b, size_t len)
2588 +{
2589 + return memcmp(a, b, len);
2590 +}
2591 +EXPORT_SYMBOL(bcmp);
2592 +#endif
2593 +
2594 #ifndef __HAVE_ARCH_MEMSCAN
2595 /**
2596 * memscan - Find a character in an area of memory.
2597 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2598 index d2cd70cfaa90..7d08e89361ee 100644
2599 --- a/mm/huge_memory.c
2600 +++ b/mm/huge_memory.c
2601 @@ -734,6 +734,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
2602 spinlock_t *ptl;
2603
2604 ptl = pmd_lock(mm, pmd);
2605 + if (!pmd_none(*pmd)) {
2606 + if (write) {
2607 + if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
2608 + WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
2609 + goto out_unlock;
2610 + }
2611 + entry = pmd_mkyoung(*pmd);
2612 + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2613 + if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
2614 + update_mmu_cache_pmd(vma, addr, pmd);
2615 + }
2616 +
2617 + goto out_unlock;
2618 + }
2619 +
2620 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
2621 if (pfn_t_devmap(pfn))
2622 entry = pmd_mkdevmap(entry);
2623 @@ -745,11 +760,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
2624 if (pgtable) {
2625 pgtable_trans_huge_deposit(mm, pmd, pgtable);
2626 mm_inc_nr_ptes(mm);
2627 + pgtable = NULL;
2628 }
2629
2630 set_pmd_at(mm, addr, pmd, entry);
2631 update_mmu_cache_pmd(vma, addr, pmd);
2632 +
2633 +out_unlock:
2634 spin_unlock(ptl);
2635 + if (pgtable)
2636 + pte_free(mm, pgtable);
2637 }
2638
2639 vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
2640 @@ -800,6 +820,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
2641 spinlock_t *ptl;
2642
2643 ptl = pud_lock(mm, pud);
2644 + if (!pud_none(*pud)) {
2645 + if (write) {
2646 + if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
2647 + WARN_ON_ONCE(!is_huge_zero_pud(*pud));
2648 + goto out_unlock;
2649 + }
2650 + entry = pud_mkyoung(*pud);
2651 + entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
2652 + if (pudp_set_access_flags(vma, addr, pud, entry, 1))
2653 + update_mmu_cache_pud(vma, addr, pud);
2654 + }
2655 + goto out_unlock;
2656 + }
2657 +
2658 entry = pud_mkhuge(pfn_t_pud(pfn, prot));
2659 if (pfn_t_devmap(pfn))
2660 entry = pud_mkdevmap(entry);
2661 @@ -809,6 +843,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
2662 }
2663 set_pud_at(mm, addr, pud, entry);
2664 update_mmu_cache_pud(vma, addr, pud);
2665 +
2666 +out_unlock:
2667 spin_unlock(ptl);
2668 }
2669
2670 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2671 index 7c712c4565e6..7e7cc0cd89fe 100644
2672 --- a/mm/memcontrol.c
2673 +++ b/mm/memcontrol.c
2674 @@ -3897,6 +3897,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
2675 return &memcg->cgwb_domain;
2676 }
2677
2678 +/*
2679 + * idx can be of type enum memcg_stat_item or node_stat_item.
2680 + * Keep in sync with memcg_page_state().
2681 + */
2682 +static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
2683 +{
2684 + long x = atomic_long_read(&memcg->stat[idx]);
2685 + int cpu;
2686 +
2687 + for_each_online_cpu(cpu)
2688 + x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
2689 + if (x < 0)
2690 + x = 0;
2691 + return x;
2692 +}
2693 +
2694 /**
2695 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
2696 * @wb: bdi_writeback in question
2697 @@ -3922,10 +3938,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
2698 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
2699 struct mem_cgroup *parent;
2700
2701 - *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
2702 + *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
2703
2704 /* this should eventually include NR_UNSTABLE_NFS */
2705 - *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
2706 + *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
2707 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
2708 (1 << LRU_ACTIVE_FILE));
2709 *pheadroom = PAGE_COUNTER_MAX;
2710 diff --git a/net/core/dev.c b/net/core/dev.c
2711 index 5c8c0a572ee9..d47554307a6d 100644
2712 --- a/net/core/dev.c
2713 +++ b/net/core/dev.c
2714 @@ -4959,8 +4959,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
2715 if (pt_prev->list_func != NULL)
2716 pt_prev->list_func(head, pt_prev, orig_dev);
2717 else
2718 - list_for_each_entry_safe(skb, next, head, list)
2719 + list_for_each_entry_safe(skb, next, head, list) {
2720 + skb_list_del_init(skb);
2721 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2722 + }
2723 }
2724
2725 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
2726 diff --git a/net/core/ethtool.c b/net/core/ethtool.c
2727 index aeabc4831fca..7cc97f43f138 100644
2728 --- a/net/core/ethtool.c
2729 +++ b/net/core/ethtool.c
2730 @@ -1863,11 +1863,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
2731 WARN_ON_ONCE(!ret);
2732
2733 gstrings.len = ret;
2734 - data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
2735 - if (gstrings.len && !data)
2736 - return -ENOMEM;
2737
2738 - __ethtool_get_strings(dev, gstrings.string_set, data);
2739 + if (gstrings.len) {
2740 + data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
2741 + if (!data)
2742 + return -ENOMEM;
2743 +
2744 + __ethtool_get_strings(dev, gstrings.string_set, data);
2745 + } else {
2746 + data = NULL;
2747 + }
2748
2749 ret = -EFAULT;
2750 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
2751 @@ -1963,11 +1968,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
2752 return -EFAULT;
2753
2754 stats.n_stats = n_stats;
2755 - data = vzalloc(array_size(n_stats, sizeof(u64)));
2756 - if (n_stats && !data)
2757 - return -ENOMEM;
2758
2759 - ops->get_ethtool_stats(dev, &stats, data);
2760 + if (n_stats) {
2761 + data = vzalloc(array_size(n_stats, sizeof(u64)));
2762 + if (!data)
2763 + return -ENOMEM;
2764 + ops->get_ethtool_stats(dev, &stats, data);
2765 + } else {
2766 + data = NULL;
2767 + }
2768
2769 ret = -EFAULT;
2770 if (copy_to_user(useraddr, &stats, sizeof(stats)))
2771 @@ -2007,16 +2016,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
2772 return -EFAULT;
2773
2774 stats.n_stats = n_stats;
2775 - data = vzalloc(array_size(n_stats, sizeof(u64)));
2776 - if (n_stats && !data)
2777 - return -ENOMEM;
2778
2779 - if (dev->phydev && !ops->get_ethtool_phy_stats) {
2780 - ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
2781 - if (ret < 0)
2782 - return ret;
2783 + if (n_stats) {
2784 + data = vzalloc(array_size(n_stats, sizeof(u64)));
2785 + if (!data)
2786 + return -ENOMEM;
2787 +
2788 + if (dev->phydev && !ops->get_ethtool_phy_stats) {
2789 + ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
2790 + if (ret < 0)
2791 + goto out;
2792 + } else {
2793 + ops->get_ethtool_phy_stats(dev, &stats, data);
2794 + }
2795 } else {
2796 - ops->get_ethtool_phy_stats(dev, &stats, data);
2797 + data = NULL;
2798 }
2799
2800 ret = -EFAULT;
2801 diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
2802 index 670c84b1bfc2..7320f0844a50 100644
2803 --- a/net/core/net_namespace.c
2804 +++ b/net/core/net_namespace.c
2805 @@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
2806
2807 refcount_set(&net->count, 1);
2808 refcount_set(&net->passive, 1);
2809 + get_random_bytes(&net->hash_mix, sizeof(u32));
2810 net->dev_base_seq = 1;
2811 net->user_ns = user_ns;
2812 idr_init(&net->netns_ids);
2813 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2814 index 8656b1e20d35..ceee28e184af 100644
2815 --- a/net/core/skbuff.c
2816 +++ b/net/core/skbuff.c
2817 @@ -3832,7 +3832,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
2818 unsigned int delta_truesize;
2819 struct sk_buff *lp;
2820
2821 - if (unlikely(p->len + len >= 65536))
2822 + if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
2823 return -E2BIG;
2824
2825 lp = NAPI_GRO_CB(p)->last;
2826 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
2827 index f199945f6e4a..3c734832bb7c 100644
2828 --- a/net/ipv4/ip_gre.c
2829 +++ b/net/ipv4/ip_gre.c
2830 @@ -260,7 +260,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2831 struct net *net = dev_net(skb->dev);
2832 struct metadata_dst *tun_dst = NULL;
2833 struct erspan_base_hdr *ershdr;
2834 - struct erspan_metadata *pkt_md;
2835 struct ip_tunnel_net *itn;
2836 struct ip_tunnel *tunnel;
2837 const struct iphdr *iph;
2838 @@ -283,9 +282,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2839 if (unlikely(!pskb_may_pull(skb, len)))
2840 return PACKET_REJECT;
2841
2842 - ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
2843 - pkt_md = (struct erspan_metadata *)(ershdr + 1);
2844 -
2845 if (__iptunnel_pull_header(skb,
2846 len,
2847 htons(ETH_P_TEB),
2848 @@ -293,8 +289,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2849 goto drop;
2850
2851 if (tunnel->collect_md) {
2852 + struct erspan_metadata *pkt_md, *md;
2853 struct ip_tunnel_info *info;
2854 - struct erspan_metadata *md;
2855 + unsigned char *gh;
2856 __be64 tun_id;
2857 __be16 flags;
2858
2859 @@ -307,6 +304,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2860 if (!tun_dst)
2861 return PACKET_REJECT;
2862
2863 + /* skb can be uncloned in __iptunnel_pull_header, so
2864 + * old pkt_md is no longer valid and we need to reset
2865 + * it
2866 + */
2867 + gh = skb_network_header(skb) +
2868 + skb_network_header_len(skb);
2869 + pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
2870 + sizeof(*ershdr));
2871 md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
2872 md->version = ver;
2873 md2 = &md->u.md2;
2874 diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
2875 index bd8ef4f87c79..c3a0683e83df 100644
2876 --- a/net/ipv4/ip_input.c
2877 +++ b/net/ipv4/ip_input.c
2878 @@ -258,11 +258,10 @@ int ip_local_deliver(struct sk_buff *skb)
2879 ip_local_deliver_finish);
2880 }
2881
2882 -static inline bool ip_rcv_options(struct sk_buff *skb)
2883 +static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
2884 {
2885 struct ip_options *opt;
2886 const struct iphdr *iph;
2887 - struct net_device *dev = skb->dev;
2888
2889 /* It looks as overkill, because not all
2890 IP options require packet mangling.
2891 @@ -298,7 +297,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
2892 }
2893 }
2894
2895 - if (ip_options_rcv_srr(skb))
2896 + if (ip_options_rcv_srr(skb, dev))
2897 goto drop;
2898 }
2899
2900 @@ -354,7 +353,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
2901 }
2902 #endif
2903
2904 - if (iph->ihl > 5 && ip_rcv_options(skb))
2905 + if (iph->ihl > 5 && ip_rcv_options(skb, dev))
2906 goto drop;
2907
2908 rt = skb_rtable(skb);
2909 diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
2910 index 32a35043c9f5..3db31bb9df50 100644
2911 --- a/net/ipv4/ip_options.c
2912 +++ b/net/ipv4/ip_options.c
2913 @@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
2914 }
2915 }
2916
2917 -int ip_options_rcv_srr(struct sk_buff *skb)
2918 +int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
2919 {
2920 struct ip_options *opt = &(IPCB(skb)->opt);
2921 int srrspace, srrptr;
2922 @@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
2923
2924 orefdst = skb->_skb_refdst;
2925 skb_dst_set(skb, NULL);
2926 - err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
2927 + err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
2928 rt2 = skb_rtable(skb);
2929 if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
2930 skb_dst_drop(skb);
2931 diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
2932 index ca61e2a659e7..5205c5a5d8d5 100644
2933 --- a/net/ipv4/tcp_dctcp.c
2934 +++ b/net/ipv4/tcp_dctcp.c
2935 @@ -66,11 +66,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
2936 module_param(dctcp_alpha_on_init, uint, 0644);
2937 MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
2938
2939 -static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
2940 -module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
2941 -MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
2942 - "parameter for clamping alpha on loss");
2943 -
2944 static struct tcp_congestion_ops dctcp_reno;
2945
2946 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
2947 @@ -211,21 +206,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
2948 }
2949 }
2950
2951 -static void dctcp_state(struct sock *sk, u8 new_state)
2952 +static void dctcp_react_to_loss(struct sock *sk)
2953 {
2954 - if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
2955 - struct dctcp *ca = inet_csk_ca(sk);
2956 + struct dctcp *ca = inet_csk_ca(sk);
2957 + struct tcp_sock *tp = tcp_sk(sk);
2958
2959 - /* If this extension is enabled, we clamp dctcp_alpha to
2960 - * max on packet loss; the motivation is that dctcp_alpha
2961 - * is an indicator to the extend of congestion and packet
2962 - * loss is an indicator of extreme congestion; setting
2963 - * this in practice turned out to be beneficial, and
2964 - * effectively assumes total congestion which reduces the
2965 - * window by half.
2966 - */
2967 - ca->dctcp_alpha = DCTCP_MAX_ALPHA;
2968 - }
2969 + ca->loss_cwnd = tp->snd_cwnd;
2970 + tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
2971 +}
2972 +
2973 +static void dctcp_state(struct sock *sk, u8 new_state)
2974 +{
2975 + if (new_state == TCP_CA_Recovery &&
2976 + new_state != inet_csk(sk)->icsk_ca_state)
2977 + dctcp_react_to_loss(sk);
2978 + /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
2979 + * one loss-adjustment per RTT.
2980 + */
2981 }
2982
2983 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
2984 @@ -237,6 +234,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
2985 case CA_EVENT_ECN_NO_CE:
2986 dctcp_ce_state_1_to_0(sk);
2987 break;
2988 + case CA_EVENT_LOSS:
2989 + dctcp_react_to_loss(sk);
2990 + break;
2991 default:
2992 /* Don't care for the rest. */
2993 break;
2994 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
2995 index 30fdf891940b..11101cf8693b 100644
2996 --- a/net/ipv4/tcp_ipv4.c
2997 +++ b/net/ipv4/tcp_ipv4.c
2998 @@ -2490,7 +2490,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
2999 {
3000 int cpu;
3001
3002 - module_put(net->ipv4.tcp_congestion_control->owner);
3003 + if (net->ipv4.tcp_congestion_control)
3004 + module_put(net->ipv4.tcp_congestion_control->owner);
3005
3006 for_each_possible_cpu(cpu)
3007 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
3008 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
3009 index faed98dab913..c4a7db62658e 100644
3010 --- a/net/ipv6/ip6_gre.c
3011 +++ b/net/ipv6/ip6_gre.c
3012 @@ -540,11 +540,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
3013 return PACKET_REJECT;
3014 }
3015
3016 -static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
3017 - struct tnl_ptk_info *tpi)
3018 +static int ip6erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
3019 + int gre_hdr_len)
3020 {
3021 struct erspan_base_hdr *ershdr;
3022 - struct erspan_metadata *pkt_md;
3023 const struct ipv6hdr *ipv6h;
3024 struct erspan_md2 *md2;
3025 struct ip6_tnl *tunnel;
3026 @@ -563,18 +562,16 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
3027 if (unlikely(!pskb_may_pull(skb, len)))
3028 return PACKET_REJECT;
3029
3030 - ershdr = (struct erspan_base_hdr *)skb->data;
3031 - pkt_md = (struct erspan_metadata *)(ershdr + 1);
3032 -
3033 if (__iptunnel_pull_header(skb, len,
3034 htons(ETH_P_TEB),
3035 false, false) < 0)
3036 return PACKET_REJECT;
3037
3038 if (tunnel->parms.collect_md) {
3039 + struct erspan_metadata *pkt_md, *md;
3040 struct metadata_dst *tun_dst;
3041 struct ip_tunnel_info *info;
3042 - struct erspan_metadata *md;
3043 + unsigned char *gh;
3044 __be64 tun_id;
3045 __be16 flags;
3046
3047 @@ -587,6 +584,14 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
3048 if (!tun_dst)
3049 return PACKET_REJECT;
3050
3051 + /* skb can be uncloned in __iptunnel_pull_header, so
3052 + * old pkt_md is no longer valid and we need to reset
3053 + * it
3054 + */
3055 + gh = skb_network_header(skb) +
3056 + skb_network_header_len(skb);
3057 + pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
3058 + sizeof(*ershdr));
3059 info = &tun_dst->u.tun_info;
3060 md = ip_tunnel_info_opts(info);
3061 md->version = ver;
3062 @@ -623,7 +628,7 @@ static int gre_rcv(struct sk_buff *skb)
3063
3064 if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
3065 tpi.proto == htons(ETH_P_ERSPAN2))) {
3066 - if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
3067 + if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
3068 return 0;
3069 goto out;
3070 }
3071 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3072 index 0bb87f3a10c7..eed9231c90ad 100644
3073 --- a/net/ipv6/ip6_output.c
3074 +++ b/net/ipv6/ip6_output.c
3075 @@ -587,7 +587,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
3076 inet6_sk(skb->sk) : NULL;
3077 struct ipv6hdr *tmp_hdr;
3078 struct frag_hdr *fh;
3079 - unsigned int mtu, hlen, left, len;
3080 + unsigned int mtu, hlen, left, len, nexthdr_offset;
3081 int hroom, troom;
3082 __be32 frag_id;
3083 int ptr, offset = 0, err = 0;
3084 @@ -598,6 +598,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
3085 goto fail;
3086 hlen = err;
3087 nexthdr = *prevhdr;
3088 + nexthdr_offset = prevhdr - skb_network_header(skb);
3089
3090 mtu = ip6_skb_dst_mtu(skb);
3091
3092 @@ -632,6 +633,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
3093 (err = skb_checksum_help(skb)))
3094 goto fail;
3095
3096 + prevhdr = skb_network_header(skb) + nexthdr_offset;
3097 hroom = LL_RESERVED_SPACE(rt->dst.dev);
3098 if (skb_has_frag_list(skb)) {
3099 unsigned int first_len = skb_pagelen(skb);
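
The ip6_fragment() fix records prevhdr as an offset from the network header before skb_checksum_help() can reallocate the header area, then re-derives the pointer afterwards. The same save-an-offset-not-a-pointer pattern in a userspace sketch:

/* Userspace sketch: offsets survive reallocation, raw pointers may not. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);
	char *field;
	size_t off;

	if (!buf)
		return 1;
	strcpy(buf, "hdr:payload");
	field = buf + 4;		/* pointer into the buffer */
	off = field - buf;		/* offsets survive reallocation ... */

	buf = realloc(buf, 4096);	/* ... the block may move here */
	if (!buf)
		return 1;
	field = buf + off;		/* re-derive instead of reusing */
	printf("%s\n", field);		/* "payload" */
	free(buf);
	return 0;
}
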
3100 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
3101 index 0c6403cf8b52..ade1390c6348 100644
3102 --- a/net/ipv6/ip6_tunnel.c
3103 +++ b/net/ipv6/ip6_tunnel.c
3104 @@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
3105 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
3106 eiph->daddr, eiph->saddr, 0, 0,
3107 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
3108 - if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
3109 + if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
3110 if (!IS_ERR(rt))
3111 ip_rt_put(rt);
3112 goto out;
3113 @@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
3114 } else {
3115 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
3116 skb2->dev) ||
3117 - skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
3118 + skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
3119 goto out;
3120 }
3121
3122 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
3123 index de9aa5cb295c..8f6cf8e6b5c1 100644
3124 --- a/net/ipv6/sit.c
3125 +++ b/net/ipv6/sit.c
3126 @@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
3127 !net_eq(tunnel->net, dev_net(tunnel->dev))))
3128 goto out;
3129
3130 + /* skb can be uncloned in iptunnel_pull_header, so
3131 + * old iph is no longer valid
3132 + */
3133 + iph = (const struct iphdr *)skb_mac_header(skb);
3134 err = IP_ECN_decapsulate(iph, skb);
3135 if (unlikely(err)) {
3136 if (log_ecn_error)
3137 diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
3138 index 571d824e4e24..b919db02c7f9 100644
3139 --- a/net/kcm/kcmsock.c
3140 +++ b/net/kcm/kcmsock.c
3141 @@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
3142 if (err)
3143 goto fail;
3144
3145 - err = sock_register(&kcm_family_ops);
3146 - if (err)
3147 - goto sock_register_fail;
3148 -
3149 err = register_pernet_device(&kcm_net_ops);
3150 if (err)
3151 goto net_ops_fail;
3152
3153 + err = sock_register(&kcm_family_ops);
3154 + if (err)
3155 + goto sock_register_fail;
3156 +
3157 err = kcm_proc_init();
3158 if (err)
3159 goto proc_init_fail;
3160 @@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
3161 return 0;
3162
3163 proc_init_fail:
3164 - unregister_pernet_device(&kcm_net_ops);
3165 -
3166 -net_ops_fail:
3167 sock_unregister(PF_KCM);
3168
3169 sock_register_fail:
3170 + unregister_pernet_device(&kcm_net_ops);
3171 +
3172 +net_ops_fail:
3173 proto_unregister(&kcm_proto);
3174
3175 fail:
3176 @@ -2090,8 +2090,8 @@ fail:
3177 static void __exit kcm_exit(void)
3178 {
3179 kcm_proc_exit();
3180 - unregister_pernet_device(&kcm_net_ops);
3181 sock_unregister(PF_KCM);
3182 + unregister_pernet_device(&kcm_net_ops);
3183 proto_unregister(&kcm_proto);
3184 destroy_workqueue(kcm_wq);
3185
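The kcm hunks above restore a strict ordering discipline: register_pernet_device() now comes before sock_register(), the error labels unwind in exact reverse order of what succeeded, and kcm_exit() mirrors the init order backwards. A self-contained sketch of that rule, with stand-in names and nothing kcm-specific:

#include <stdio.h>

static int register_a(void) { puts("a up"); return 0; }
static void unregister_a(void) { puts("a down"); }
static int register_b(void) { puts("b up"); return 0; }
static void unregister_b(void) { puts("b down"); }

static int subsys_init(void)
{
	int err;

	err = register_a();		/* acquired first ... */
	if (err)
		goto fail;

	err = register_b();		/* ... acquired second */
	if (err)
		goto a_fail;

	return 0;

a_fail:
	unregister_a();			/* unwind only what succeeded */
fail:
	return err;
}

static void subsys_exit(void)
{
	unregister_b();			/* released in reverse order */
	unregister_a();
}

int main(void)
{
	if (!subsys_init())
		subsys_exit();
	return 0;
}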
3186 diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
3187 index 650eb4fba2c5..841c472aae1c 100644
3188 --- a/net/netfilter/nf_conntrack_proto_gre.c
3189 +++ b/net/netfilter/nf_conntrack_proto_gre.c
3190 @@ -43,24 +43,12 @@
3191 #include <linux/netfilter/nf_conntrack_proto_gre.h>
3192 #include <linux/netfilter/nf_conntrack_pptp.h>
3193
3194 -enum grep_conntrack {
3195 - GRE_CT_UNREPLIED,
3196 - GRE_CT_REPLIED,
3197 - GRE_CT_MAX
3198 -};
3199 -
3200 static const unsigned int gre_timeouts[GRE_CT_MAX] = {
3201 [GRE_CT_UNREPLIED] = 30*HZ,
3202 [GRE_CT_REPLIED] = 180*HZ,
3203 };
3204
3205 static unsigned int proto_gre_net_id __read_mostly;
3206 -struct netns_proto_gre {
3207 - struct nf_proto_net nf;
3208 - rwlock_t keymap_lock;
3209 - struct list_head keymap_list;
3210 - unsigned int gre_timeouts[GRE_CT_MAX];
3211 -};
3212
3213 static inline struct netns_proto_gre *gre_pernet(struct net *net)
3214 {
3215 @@ -408,6 +396,8 @@ static int __init nf_ct_proto_gre_init(void)
3216 {
3217 int ret;
3218
3219 + BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0);
3220 +
3221 ret = register_pernet_subsys(&proto_gre_net_ops);
3222 if (ret < 0)
3223 goto out_pernet;
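The BUILD_BUG_ON added above pins down the assumption that makes sharing struct netns_proto_gre safe: the conntrack core treats the pernet area as a plain struct nf_proto_net and casts back to the containing struct, which is only valid while nf remains the first member. A compile-time sketch of that first-member invariant, with illustrative types:

#include <stddef.h>
#include <stdio.h>

struct nf_proto_net_like { int users; };

struct netns_proto_gre_like {
	struct nf_proto_net_like nf;	/* must stay the first member */
	unsigned int gre_timeouts[2];
};

/* Mirrors BUILD_BUG_ON(offsetof(..., nf) != 0) at compile time. */
_Static_assert(offsetof(struct netns_proto_gre_like, nf) == 0,
	       "nf must be the first member");

int main(void)
{
	struct netns_proto_gre_like net = { .nf = { .users = 1 } };
	struct nf_proto_net_like *base = &net.nf;

	/* The cast the generic code relies on: */
	struct netns_proto_gre_like *back =
		(struct netns_proto_gre_like *)base;

	printf("users=%d\n", back->nf.users);
	return 0;
}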
3224 diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
3225 index a30f8ba4b89a..70a7382b9787 100644
3226 --- a/net/netfilter/nfnetlink_cttimeout.c
3227 +++ b/net/netfilter/nfnetlink_cttimeout.c
3228 @@ -392,7 +392,8 @@ err:
3229 static int
3230 cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
3231 u32 seq, u32 type, int event,
3232 - const struct nf_conntrack_l4proto *l4proto)
3233 + const struct nf_conntrack_l4proto *l4proto,
3234 + const unsigned int *timeouts)
3235 {
3236 struct nlmsghdr *nlh;
3237 struct nfgenmsg *nfmsg;
3238 @@ -421,7 +422,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
3239 if (!nest_parms)
3240 goto nla_put_failure;
3241
3242 - ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
3243 + ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
3244 if (ret < 0)
3245 goto nla_put_failure;
3246
3247 @@ -444,6 +445,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
3248 struct netlink_ext_ack *extack)
3249 {
3250 const struct nf_conntrack_l4proto *l4proto;
3251 + unsigned int *timeouts = NULL;
3252 struct sk_buff *skb2;
3253 int ret, err;
3254 __u16 l3num;
3255 @@ -456,12 +458,55 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
3256 l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
3257 l4proto = nf_ct_l4proto_find_get(l3num, l4num);
3258
3259 - /* This protocol is not supported, skip. */
3260 - if (l4proto->l4proto != l4num) {
3261 - err = -EOPNOTSUPP;
3262 + err = -EOPNOTSUPP;
3263 + if (l4proto->l4proto != l4num)
3264 goto err;
3265 +
3266 + switch (l4proto->l4proto) {
3267 + case IPPROTO_ICMP:
3268 + timeouts = &net->ct.nf_ct_proto.icmp.timeout;
3269 + break;
3270 + case IPPROTO_TCP:
3271 + timeouts = net->ct.nf_ct_proto.tcp.timeouts;
3272 + break;
3273 + case IPPROTO_UDP: /* fallthrough */
3274 + case IPPROTO_UDPLITE:
3275 + timeouts = net->ct.nf_ct_proto.udp.timeouts;
3276 + break;
3277 + case IPPROTO_DCCP:
3278 +#ifdef CONFIG_NF_CT_PROTO_DCCP
3279 + timeouts = net->ct.nf_ct_proto.dccp.dccp_timeout;
3280 +#endif
3281 + break;
3282 + case IPPROTO_ICMPV6:
3283 + timeouts = &net->ct.nf_ct_proto.icmpv6.timeout;
3284 + break;
3285 + case IPPROTO_SCTP:
3286 +#ifdef CONFIG_NF_CT_PROTO_SCTP
3287 + timeouts = net->ct.nf_ct_proto.sctp.timeouts;
3288 +#endif
3289 + break;
3290 + case IPPROTO_GRE:
3291 +#ifdef CONFIG_NF_CT_PROTO_GRE
3292 + if (l4proto->net_id) {
3293 + struct netns_proto_gre *net_gre;
3294 +
3295 + net_gre = net_generic(net, *l4proto->net_id);
3296 + timeouts = net_gre->gre_timeouts;
3297 + }
3298 +#endif
3299 + break;
3300 + case 255:
3301 + timeouts = &net->ct.nf_ct_proto.generic.timeout;
3302 + break;
3303 + default:
3304 + WARN_ONCE(1, "Missing timeouts for proto %d", l4proto->l4proto);
3305 + break;
3306 }
3307
3308 + if (!timeouts)
3309 + goto err;
3310 +
3311 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3312 if (skb2 == NULL) {
3313 err = -ENOMEM;
3314 @@ -472,7 +517,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
3315 nlh->nlmsg_seq,
3316 NFNL_MSG_TYPE(nlh->nlmsg_type),
3317 IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
3318 - l4proto);
3319 + l4proto, timeouts);
3320 if (ret <= 0) {
3321 kfree_skb(skb2);
3322 err = -ENOMEM;
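The cttimeout change above stops handing NULL to obj_to_nlattr() and instead resolves the protocol's per-netns default timeout table up front, erroring out when none exists. A minimal sketch of that resolve-then-pass shape; the types and timeout values here are invented:

#include <stdio.h>

enum { PROTO_TCP = 6, PROTO_UDP = 17 };

struct netns_ct_like {
	unsigned int tcp_timeouts[4];
	unsigned int udp_timeouts[2];
};

static const unsigned int *
default_timeouts(const struct netns_ct_like *ct, int l4proto)
{
	switch (l4proto) {
	case PROTO_TCP:
		return ct->tcp_timeouts;
	case PROTO_UDP:
		return ct->udp_timeouts;
	default:
		return NULL;	/* caller maps this to an error */
	}
}

static int fill_info(const unsigned int *timeouts)
{
	if (!timeouts)
		return -1;	/* would be -EOPNOTSUPP in the kernel */
	printf("first default timeout: %u\n", timeouts[0]);
	return 0;
}

int main(void)
{
	struct netns_ct_like ct = {
		.tcp_timeouts = { 300, 120, 60, 30 },
		.udp_timeouts = { 30, 180 },
	};

	return fill_info(default_timeouts(&ct, PROTO_TCP)) ? 1 : 0;
}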
3323 diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
3324 index c7b6010b2c09..eab5e8eaddaa 100644
3325 --- a/net/openvswitch/flow_netlink.c
3326 +++ b/net/openvswitch/flow_netlink.c
3327 @@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
3328
3329 struct sw_flow_actions *acts;
3330 int new_acts_size;
3331 - int req_size = NLA_ALIGN(attr_len);
3332 + size_t req_size = NLA_ALIGN(attr_len);
3333 int next_offset = offsetof(struct sw_flow_actions, actions) +
3334 (*sfa)->actions_len;
3335
3336 if (req_size <= (ksize(*sfa) - next_offset))
3337 goto out;
3338
3339 - new_acts_size = ksize(*sfa) * 2;
3340 + new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
3341
3342 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
3343 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
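The reserve_sfa_size() hunk above fixes two related problems: req_size becomes size_t so a huge attr_len can no longer wrap a signed int, and the growth policy becomes max(next_offset + req_size, ksize * 2), because doubling alone can be smaller than what a single oversized attribute needs. Worked numbers under assumed sizes:

#include <stddef.h>
#include <stdio.h>

static size_t max_size(size_t a, size_t b) { return a > b ? a : b; }

int main(void)
{
	size_t ksize = 1024;		/* current allocation size */
	size_t next_offset = 900;	/* bytes already in use */
	size_t req_size = 1500;		/* one large aligned attribute */

	size_t need = next_offset + req_size;		/* 2400 */
	size_t old_policy = ksize * 2;			/* 2048: too small */
	size_t new_policy = max_size(need, ksize * 2);	/* 2400 */

	printf("need=%zu old=%zu new=%zu\n", need, old_policy, new_policy);
	return 0;
}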
3344 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
3345 index b9bbcf3d6c63..18bb522df282 100644
3346 --- a/net/rds/tcp.c
3347 +++ b/net/rds/tcp.c
3348 @@ -600,7 +600,7 @@ static void rds_tcp_kill_sock(struct net *net)
3349 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
3350 struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
3351
3352 - if (net != c_net || !tc->t_sock)
3353 + if (net != c_net)
3354 continue;
3355 if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
3356 list_move_tail(&tc->t_tcp_node, &tmp_list);
3357 diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
3358 index 6b67aa13d2dd..c7f5d630d97c 100644
3359 --- a/net/sched/act_sample.c
3360 +++ b/net/sched/act_sample.c
3361 @@ -43,8 +43,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
3362 struct tc_action_net *tn = net_generic(net, sample_net_id);
3363 struct nlattr *tb[TCA_SAMPLE_MAX + 1];
3364 struct psample_group *psample_group;
3365 + u32 psample_group_num, rate;
3366 struct tc_sample *parm;
3367 - u32 psample_group_num;
3368 struct tcf_sample *s;
3369 bool exists = false;
3370 int ret, err;
3371 @@ -80,6 +80,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
3372 return -EEXIST;
3373 }
3374
3375 + rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
3376 + if (!rate) {
3377 + NL_SET_ERR_MSG(extack, "invalid sample rate");
3378 + tcf_idr_release(*a, bind);
3379 + return -EINVAL;
3380 + }
3381 psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
3382 psample_group = psample_group_get(net, psample_group_num);
3383 if (!psample_group) {
3384 @@ -91,7 +97,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
3385
3386 spin_lock_bh(&s->tcf_lock);
3387 s->tcf_action = parm->action;
3388 - s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
3389 + s->rate = rate;
3390 s->psample_group_num = psample_group_num;
3391 RCU_INIT_POINTER(s->psample_group, psample_group);
3392
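The act_sample hunk above validates the rate before the action is committed, because the sampling fast path uses the rate as a modulus (emitting roughly one packet in every 'rate'), and a zero rate would divide by zero; the new selftest at the end of this patch exercises exactly that rejection. A sketch of the validate-at-configure-time shape, illustrative rather than the kernel API:

#include <stdint.h>
#include <stdio.h>

static int sample_init(uint32_t rate)
{
	if (!rate)		/* reject before the action exists */
		return -1;	/* -EINVAL in the kernel */
	printf("sampling 1/%u packets\n", rate);
	return 0;
}

int main(void)
{
	if (sample_init(0) < 0)
		puts("invalid sample rate rejected");
	return sample_init(100) ? 1 : 0;
}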
3393 diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
3394 index 856fa79d4ffd..621bc1d5b057 100644
3395 --- a/net/sched/cls_matchall.c
3396 +++ b/net/sched/cls_matchall.c
3397 @@ -126,6 +126,11 @@ static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
3398
3399 static void *mall_get(struct tcf_proto *tp, u32 handle)
3400 {
3401 + struct cls_mall_head *head = rtnl_dereference(tp->root);
3402 +
3403 + if (head && head->handle == handle)
3404 + return head;
3405 +
3406 return NULL;
3407 }
3408
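mall_get() used to return NULL unconditionally, which broke get/change-by-handle on matchall classifiers; since matchall keeps a single head, the lookup the hunk adds reduces to one handle comparison. A sketch with invented types:

#include <stddef.h>
#include <stdio.h>

struct mall_head_like { unsigned int handle; };

static void *mall_get_like(struct mall_head_like *head, unsigned int handle)
{
	if (head && head->handle == handle)
		return head;
	return NULL;		/* unknown handle */
}

int main(void)
{
	struct mall_head_like h = { .handle = 0x1 };

	printf("hit=%p\n", mall_get_like(&h, 0x1));
	printf("miss=%p\n", mall_get_like(&h, 0x2));
	return 0;
}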
3409 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
3410 index 1c9f079e8a50..d97b2b4b7a8b 100644
3411 --- a/net/sctp/protocol.c
3412 +++ b/net/sctp/protocol.c
3413 @@ -600,6 +600,7 @@ out:
3414 static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
3415 {
3416 /* No address mapping for V4 sockets */
3417 + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
3418 return sizeof(struct sockaddr_in);
3419 }
3420
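The sctp hunk above zeroes sin_zero because the converted address is later copied out to user space as a full sockaddr_in, and without the memset those 8 padding bytes would carry uninitialized kernel stack contents. A userspace illustration of clearing the padding before the structure leaves the function:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct sockaddr_in sa;	/* imagine stack garbage in here */

	sa.sin_family = AF_INET;
	sa.sin_port = htons(9);
	sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	/* Without this, whatever happened to be on the stack in
	 * sin_zero would be handed to the caller verbatim: */
	memset(sa.sin_zero, 0, sizeof(sa.sin_zero));

	printf("sockaddr_in ready, %zu padding bytes cleared\n",
	       sizeof(sa.sin_zero));
	return 0;
}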
3421 diff --git a/scripts/package/builddeb b/scripts/package/builddeb
3422 index 90c9a8ac7adb..0b31f4f1f92c 100755
3423 --- a/scripts/package/builddeb
3424 +++ b/scripts/package/builddeb
3425 @@ -81,7 +81,7 @@ else
3426 cp System.map "$tmpdir/boot/System.map-$version"
3427 cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
3428 fi
3429 -cp "$($MAKE -s image_name)" "$tmpdir/$installed_image_path"
3430 +cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
3431
3432 if grep -q "^CONFIG_OF=y" $KCONFIG_CONFIG ; then
3433 # Only some architectures with OF support have this target
3434 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
3435 index 92e6524a3a9d..b55cb96d1fed 100644
3436 --- a/sound/core/seq/seq_clientmgr.c
3437 +++ b/sound/core/seq/seq_clientmgr.c
3438 @@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
3439
3440 /* fill the info fields */
3441 if (client_info->name[0])
3442 - strlcpy(client->name, client_info->name, sizeof(client->name));
3443 + strscpy(client->name, client_info->name, sizeof(client->name));
3444
3445 client->filter = client_info->filter;
3446 client->event_lost = client_info->event_lost;
3447 @@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
3448 /* set queue name */
3449 if (!info->name[0])
3450 snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
3451 - strlcpy(q->name, info->name, sizeof(q->name));
3452 + strscpy(q->name, info->name, sizeof(q->name));
3453 snd_use_lock_free(&q->use_lock);
3454
3455 return 0;
3456 @@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
3457 queuefree(q);
3458 return -EPERM;
3459 }
3460 - strlcpy(q->name, info->name, sizeof(q->name));
3461 + strscpy(q->name, info->name, sizeof(q->name));
3462 queuefree(q);
3463
3464 return 0;
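The strlcpy()-to-strscpy() conversions above matter because these names arrive via ioctls from user space: strlcpy() computes strlen() of the source and can read past the destination size when the source is not NUL-terminated, while strscpy() never reads beyond the bound and reports truncation (the kernel version returns -E2BIG). A simplified userspace model of the strscpy behavior, assuming only those documented semantics:

#include <stddef.h>
#include <stdio.h>

/* Simplified model: returns chars copied, or -1 on truncation
 * (where the kernel strscpy() returns -E2BIG). */
static long strscpy_like(char *dst, const char *src, size_t size)
{
	size_t i;

	if (!size)
		return -1;
	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
	return src[i] ? -1 : (long)i;
}

int main(void)
{
	char name[8];
	long ret = strscpy_like(name, "Queue-999999", sizeof(name));

	printf("copied \"%s\", ret=%ld (truncated)\n", name, ret);
	return 0;
}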
3465 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
3466 index 7572b8cc7127..9bc8a7cb40ea 100644
3467 --- a/sound/pci/hda/hda_intel.c
3468 +++ b/sound/pci/hda/hda_intel.c
3469 @@ -2272,6 +2272,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
3470 SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
3471 /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
3472 SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
3473 + /* https://bugs.launchpad.net/bugs/1821663 */
3474 + SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0),
3475 /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
3476 SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
3477 /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
3478 @@ -2280,6 +2282,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
3479 SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
3480 /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
3481 SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
3482 + /* https://bugs.launchpad.net/bugs/1821663 */
3483 + SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
3484 {}
3485 };
3486 #endif /* CONFIG_PM */
3487 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3488 index 4c6321ec844d..b9d832bde23e 100644
3489 --- a/sound/pci/hda/patch_realtek.c
3490 +++ b/sound/pci/hda/patch_realtek.c
3491 @@ -1864,8 +1864,8 @@ enum {
3492 ALC887_FIXUP_BASS_CHMAP,
3493 ALC1220_FIXUP_GB_DUAL_CODECS,
3494 ALC1220_FIXUP_CLEVO_P950,
3495 - ALC1220_FIXUP_SYSTEM76_ORYP5,
3496 - ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
3497 + ALC1220_FIXUP_CLEVO_PB51ED,
3498 + ALC1220_FIXUP_CLEVO_PB51ED_PINS,
3499 };
3500
3501 static void alc889_fixup_coef(struct hda_codec *codec,
3502 @@ -2070,7 +2070,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
3503 static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
3504 const struct hda_fixup *fix, int action);
3505
3506 -static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
3507 +static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
3508 const struct hda_fixup *fix,
3509 int action)
3510 {
3511 @@ -2322,18 +2322,18 @@ static const struct hda_fixup alc882_fixups[] = {
3512 .type = HDA_FIXUP_FUNC,
3513 .v.func = alc1220_fixup_clevo_p950,
3514 },
3515 - [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
3516 + [ALC1220_FIXUP_CLEVO_PB51ED] = {
3517 .type = HDA_FIXUP_FUNC,
3518 - .v.func = alc1220_fixup_system76_oryp5,
3519 + .v.func = alc1220_fixup_clevo_pb51ed,
3520 },
3521 - [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
3522 + [ALC1220_FIXUP_CLEVO_PB51ED_PINS] = {
3523 .type = HDA_FIXUP_PINS,
3524 .v.pins = (const struct hda_pintbl[]) {
3525 { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
3526 {}
3527 },
3528 .chained = true,
3529 - .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
3530 + .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
3531 },
3532 };
3533
3534 @@ -2411,8 +2411,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
3535 SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
3536 SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
3537 SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
3538 - SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
3539 - SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
3540 + SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
3541 + SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
3542 + SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
3543 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
3544 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
3545 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
3546 @@ -5594,6 +5595,7 @@ enum {
3547 ALC233_FIXUP_ASUS_MIC_NO_PRESENCE,
3548 ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
3549 ALC233_FIXUP_LENOVO_MULTI_CODECS,
3550 + ALC233_FIXUP_ACER_HEADSET_MIC,
3551 ALC294_FIXUP_LENOVO_MIC_LOCATION,
3552 ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
3553 ALC700_FIXUP_INTEL_REFERENCE,
3554 @@ -6401,6 +6403,16 @@ static const struct hda_fixup alc269_fixups[] = {
3555 .type = HDA_FIXUP_FUNC,
3556 .v.func = alc233_alc662_fixup_lenovo_dual_codecs,
3557 },
3558 + [ALC233_FIXUP_ACER_HEADSET_MIC] = {
3559 + .type = HDA_FIXUP_VERBS,
3560 + .v.verbs = (const struct hda_verb[]) {
3561 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
3562 + { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
3563 + { }
3564 + },
3565 + .chained = true,
3566 + .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE
3567 + },
3568 [ALC294_FIXUP_LENOVO_MIC_LOCATION] = {
3569 .type = HDA_FIXUP_PINS,
3570 .v.pins = (const struct hda_pintbl[]) {
3571 @@ -6644,6 +6656,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3572 SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
3573 SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
3574 SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
3575 + SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
3576 SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
3577 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
3578 SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
3579 diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
3580 index 4daefa5b150a..38fd32ab443c 100644
3581 --- a/sound/soc/fsl/fsl_esai.c
3582 +++ b/sound/soc/fsl/fsl_esai.c
3583 @@ -54,6 +54,8 @@ struct fsl_esai {
3584 u32 fifo_depth;
3585 u32 slot_width;
3586 u32 slots;
3587 + u32 tx_mask;
3588 + u32 rx_mask;
3589 u32 hck_rate[2];
3590 u32 sck_rate[2];
3591 bool hck_dir[2];
3592 @@ -361,21 +363,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
3593 regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
3594 ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
3595
3596 - regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
3597 - ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
3598 - regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
3599 - ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
3600 -
3601 regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
3602 ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
3603
3604 - regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
3605 - ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
3606 - regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
3607 - ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
3608 -
3609 esai_priv->slot_width = slot_width;
3610 esai_priv->slots = slots;
3611 + esai_priv->tx_mask = tx_mask;
3612 + esai_priv->rx_mask = rx_mask;
3613
3614 return 0;
3615 }
3616 @@ -596,6 +590,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
3617 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
3618 u8 i, channels = substream->runtime->channels;
3619 u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
3620 + u32 mask;
3621
3622 switch (cmd) {
3623 case SNDRV_PCM_TRIGGER_START:
3624 @@ -608,15 +603,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
3625 for (i = 0; tx && i < channels; i++)
3626 regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
3627
3628 + /*
3629 + * If the TE/RE bits are set at the end of the enablement flow,
3630 + * a channel swap issue shows up in the multi data line case.
3631 + * To work around this issue, switch the bit enablement
3632 + * sequence to the sequence below:
3633 + * 1) clear xSMB & xSMA: done in probe and in the
3634 + * stop state.
3635 + * 2) set TE/RE
3636 + * 3) set xSMB
3637 + * 4) set xSMA: xSMA is the last one in this flow and
3638 + * triggers ESAI to start.
3639 + */
3640 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
3641 tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
3642 tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
3643 + mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
3644 +
3645 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
3646 + ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
3647 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
3648 + ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
3649 +
3650 break;
3651 case SNDRV_PCM_TRIGGER_SUSPEND:
3652 case SNDRV_PCM_TRIGGER_STOP:
3653 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
3654 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
3655 tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
3656 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
3657 + ESAI_xSMA_xS_MASK, 0);
3658 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
3659 + ESAI_xSMB_xS_MASK, 0);
3660
3661 /* Disable and reset FIFO */
3662 regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
3663 @@ -906,6 +924,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
3664 return ret;
3665 }
3666
3667 + esai_priv->tx_mask = 0xFFFFFFFF;
3668 + esai_priv->rx_mask = 0xFFFFFFFF;
3669 +
3670 + /* Clear the TSMA, TSMB, RSMA, RSMB */
3671 + regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
3672 + regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
3673 + regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
3674 + regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
3675 +
3676 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
3677 &fsl_esai_dai, 1);
3678 if (ret) {
3679 diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3680 index e662400873ec..6868e71e3a3f 100644
3681 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3682 +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3683 @@ -711,9 +711,17 @@ static int sst_soc_probe(struct snd_soc_component *component)
3684 return sst_dsp_init_v2_dpcm(component);
3685 }
3686
3687 +static void sst_soc_remove(struct snd_soc_component *component)
3688 +{
3689 + struct sst_data *drv = dev_get_drvdata(component->dev);
3690 +
3691 + drv->soc_card = NULL;
3692 +}
3693 +
3694 static const struct snd_soc_component_driver sst_soc_platform_drv = {
3695 .name = DRV_NAME,
3696 .probe = sst_soc_probe,
3697 + .remove = sst_soc_remove,
3698 .ops = &sst_platform_ops,
3699 .compr_ops = &sst_platform_compr_ops,
3700 .pcm_new = sst_pcm_new,
3701 diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
3702 index 3aca33c00039..618def9bdf0e 100644
3703 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
3704 +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
3705 @@ -143,6 +143,30 @@
3706 "$TC actions flush action sample"
3707 ]
3708 },
3709 + {
3710 + "id": "7571",
3711 + "name": "Add sample action with invalid rate",
3712 + "category": [
3713 + "actions",
3714 + "sample"
3715 + ],
3716 + "setup": [
3717 + [
3718 + "$TC actions flush action sample",
3719 + 0,
3720 + 1,
3721 + 255
3722 + ]
3723 + ],
3724 + "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
3725 + "expExitCode": "255",
3726 + "verifyCmd": "$TC actions get action sample index 2",
3727 + "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
3728 + "matchCount": "0",
3729 + "teardown": [
3730 + "$TC actions flush action sample"
3731 + ]
3732 + },
3733 {
3734 "id": "b6d4",
3735 "name": "Add sample action with mandatory arguments and invalid control action",