Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0141-5.4.42-all-fixes.patch

Parent Directory | Revision Log


Revision 3522 - (show annotations) (download)
Thu Jun 25 11:14:54 2020 UTC (3 years, 11 months ago) by niro
File size: 171929 byte(s)
-linux-5.4.42
1 diff --git a/Makefile b/Makefile
2 index a8c772b299aa..1bd1b17cd207 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 41
10 +SUBLEVEL = 42
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 @@ -707,12 +707,9 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
15 KBUILD_CFLAGS += -Os
16 endif
17
18 -ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED
19 -KBUILD_CFLAGS += -Wno-maybe-uninitialized
20 -endif
21 -
22 # Tell gcc to never replace conditional load with a non-conditional one
23 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
24 +KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races)
25
26 include scripts/Makefile.kcov
27 include scripts/Makefile.gcc-plugins
28 @@ -860,6 +857,17 @@ KBUILD_CFLAGS += -Wno-pointer-sign
29 # disable stringop warnings in gcc 8+
30 KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
31
32 +# We'll want to enable this eventually, but it's not going away for 5.7 at least
33 +KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds)
34 +KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
35 +KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
36 +
37 +# Another good warning that we'll want to enable eventually
38 +KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
39 +
40 +# Enabled with W=2, disabled by default as noisy
41 +KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized)
42 +
43 # disable invalid "can't wrap" optimizations for signed / pointers
44 KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
45
46 diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
47 index c6be65249f42..a6ef3d137c7a 100644
48 --- a/arch/arm/boot/dts/dra7.dtsi
49 +++ b/arch/arm/boot/dts/dra7.dtsi
50 @@ -172,6 +172,7 @@
51 #address-cells = <1>;
52 ranges = <0x51000000 0x51000000 0x3000
53 0x0 0x20000000 0x10000000>;
54 + dma-ranges;
55 /**
56 * To enable PCI endpoint mode, disable the pcie1_rc
57 * node and enable pcie1_ep mode.
58 @@ -185,7 +186,6 @@
59 device_type = "pci";
60 ranges = <0x81000000 0 0 0x03000 0 0x00010000
61 0x82000000 0 0x20013000 0x13000 0 0xffed000>;
62 - dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
63 bus-range = <0x00 0xff>;
64 #interrupt-cells = <1>;
65 num-lanes = <1>;
66 @@ -230,6 +230,7 @@
67 #address-cells = <1>;
68 ranges = <0x51800000 0x51800000 0x3000
69 0x0 0x30000000 0x10000000>;
70 + dma-ranges;
71 status = "disabled";
72 pcie2_rc: pcie@51800000 {
73 reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>;
74 @@ -240,7 +241,6 @@
75 device_type = "pci";
76 ranges = <0x81000000 0 0 0x03000 0 0x00010000
77 0x82000000 0 0x30013000 0x13000 0 0xffed000>;
78 - dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
79 bus-range = <0x00 0xff>;
80 #interrupt-cells = <1>;
81 num-lanes = <1>;
82 diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
83 index 0cd75dadf292..188639738dc3 100644
84 --- a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
85 +++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
86 @@ -75,8 +75,8 @@
87 imx27-phycard-s-rdk {
88 pinctrl_i2c1: i2c1grp {
89 fsl,pins = <
90 - MX27_PAD_I2C2_SDA__I2C2_SDA 0x0
91 - MX27_PAD_I2C2_SCL__I2C2_SCL 0x0
92 + MX27_PAD_I2C_DATA__I2C_DATA 0x0
93 + MX27_PAD_I2C_CLK__I2C_CLK 0x0
94 >;
95 };
96
97 diff --git a/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts
98 index 0d594e4bd559..a1173bf5bff5 100644
99 --- a/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts
100 +++ b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts
101 @@ -38,7 +38,7 @@
102 };
103
104 &switch_ports {
105 - /delete-node/ port@2;
106 + /delete-node/ port@3;
107 };
108
109 &touchscreen {
110 diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
111 index dd865f3c2eda..4447f45f0cba 100644
112 --- a/arch/arm/boot/dts/r8a73a4.dtsi
113 +++ b/arch/arm/boot/dts/r8a73a4.dtsi
114 @@ -131,7 +131,14 @@
115 cmt1: timer@e6130000 {
116 compatible = "renesas,r8a73a4-cmt1", "renesas,rcar-gen2-cmt1";
117 reg = <0 0xe6130000 0 0x1004>;
118 - interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
119 + interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
120 + <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
121 + <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
122 + <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
123 + <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
124 + <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
125 + <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
126 + <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
127 clocks = <&mstp3_clks R8A73A4_CLK_CMT1>;
128 clock-names = "fck";
129 power-domains = <&pd_c5>;
130 diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
131 index 12ffe73bf2bc..155f58e6d4e8 100644
132 --- a/arch/arm/boot/dts/r8a7740.dtsi
133 +++ b/arch/arm/boot/dts/r8a7740.dtsi
134 @@ -479,7 +479,7 @@
135 cpg_clocks: cpg_clocks@e6150000 {
136 compatible = "renesas,r8a7740-cpg-clocks";
137 reg = <0xe6150000 0x10000>;
138 - clocks = <&extal1_clk>, <&extalr_clk>;
139 + clocks = <&extal1_clk>, <&extal2_clk>, <&extalr_clk>;
140 #clock-cells = <1>;
141 clock-output-names = "system", "pllc0", "pllc1",
142 "pllc2", "r",
143 diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
144 index 0ee8a369c547..2199a54c720c 100644
145 --- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
146 +++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
147 @@ -2365,7 +2365,7 @@
148 reg = <0x0 0xff400000 0x0 0x40000>;
149 interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
150 clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
151 - clock-names = "ddr";
152 + clock-names = "otg";
153 phys = <&usb2_phy1>;
154 phy-names = "usb2-phy";
155 dr_mode = "peripheral";
156 diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
157 index 554863429aa6..e2094575f528 100644
158 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
159 +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
160 @@ -152,6 +152,10 @@
161 clock-latency = <50000>;
162 };
163
164 +&frddr_a {
165 + status = "okay";
166 +};
167 +
168 &frddr_b {
169 status = "okay";
170 };
171 diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
172 index 43c4db312146..ac3a3b333efa 100644
173 --- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
174 +++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
175 @@ -616,7 +616,7 @@
176 reg = <0x30bd0000 0x10000>;
177 interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
178 clocks = <&clk IMX8MN_CLK_SDMA1_ROOT>,
179 - <&clk IMX8MN_CLK_SDMA1_ROOT>;
180 + <&clk IMX8MN_CLK_AHB>;
181 clock-names = "ipg", "ahb";
182 #dma-cells = <3>;
183 fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
184 diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
185 index 461a47ea656d..e81cd83b138b 100644
186 --- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
187 +++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
188 @@ -1318,6 +1318,7 @@
189 ipmmu_vip0: mmu@e7b00000 {
190 compatible = "renesas,ipmmu-r8a77980";
191 reg = <0 0xe7b00000 0 0x1000>;
192 + renesas,ipmmu-main = <&ipmmu_mm 4>;
193 power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
194 #iommu-cells = <1>;
195 };
196 @@ -1325,6 +1326,7 @@
197 ipmmu_vip1: mmu@e7960000 {
198 compatible = "renesas,ipmmu-r8a77980";
199 reg = <0 0xe7960000 0 0x1000>;
200 + renesas,ipmmu-main = <&ipmmu_mm 11>;
201 power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
202 #iommu-cells = <1>;
203 };
204 diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
205 index 49c4b96da3d4..6abc6f4a86cf 100644
206 --- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
207 +++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
208 @@ -92,7 +92,7 @@
209 &i2c1 {
210 status = "okay";
211
212 - rk805: rk805@18 {
213 + rk805: pmic@18 {
214 compatible = "rockchip,rk805";
215 reg = <0x18>;
216 interrupt-parent = <&gpio2>;
217 diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
218 index 62936b432f9a..304fad1a0b57 100644
219 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
220 +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
221 @@ -169,7 +169,7 @@
222 &i2c1 {
223 status = "okay";
224
225 - rk805: rk805@18 {
226 + rk805: pmic@18 {
227 compatible = "rockchip,rk805";
228 reg = <0x18>;
229 interrupt-parent = <&gpio2>;
230 diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
231 index cede1ad81be2..cd97016b7c18 100644
232 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
233 +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
234 @@ -410,7 +410,7 @@
235 reset-names = "usb3-otg";
236 status = "disabled";
237
238 - usbdrd_dwc3_0: dwc3 {
239 + usbdrd_dwc3_0: usb@fe800000 {
240 compatible = "snps,dwc3";
241 reg = <0x0 0xfe800000 0x0 0x100000>;
242 interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
243 @@ -446,7 +446,7 @@
244 reset-names = "usb3-otg";
245 status = "disabled";
246
247 - usbdrd_dwc3_1: dwc3 {
248 + usbdrd_dwc3_1: usb@fe900000 {
249 compatible = "snps,dwc3";
250 reg = <0x0 0xfe900000 0x0 0x100000>;
251 interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;
252 diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
253 index 0df8493624e0..cc049ff5c6a5 100644
254 --- a/arch/arm64/kernel/machine_kexec.c
255 +++ b/arch/arm64/kernel/machine_kexec.c
256 @@ -189,6 +189,7 @@ void machine_kexec(struct kimage *kimage)
257 * the offline CPUs. Therefore, we must use the __* variant here.
258 */
259 __flush_icache_range((uintptr_t)reboot_code_buffer,
260 + (uintptr_t)reboot_code_buffer +
261 arm64_relocate_new_kernel_size);
262
263 /* Flush the kimage list and its buffers. */
264 diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
265 index 91c8f1d9bcee..1a2c80e8be84 100644
266 --- a/arch/powerpc/include/asm/book3s/32/kup.h
267 +++ b/arch/powerpc/include/asm/book3s/32/kup.h
268 @@ -75,7 +75,7 @@
269
270 .macro kuap_check current, gpr
271 #ifdef CONFIG_PPC_KUAP_DEBUG
272 - lwz \gpr2, KUAP(thread)
273 + lwz \gpr, KUAP(thread)
274 999: twnei \gpr, 0
275 EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
276 #endif
277 diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
278 index 33b16f4212f7..a4ee3a0e7d20 100644
279 --- a/arch/riscv/kernel/vdso/Makefile
280 +++ b/arch/riscv/kernel/vdso/Makefile
281 @@ -33,15 +33,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
282 $(call if_changed,vdsold)
283
284 # We also create a special relocatable object that should mirror the symbol
285 -# table and layout of the linked DSO. With ld -R we can then refer to
286 -# these symbols in the kernel code rather than hand-coded addresses.
287 +# table and layout of the linked DSO. With ld --just-symbols we can then
288 +# refer to these symbols in the kernel code rather than hand-coded addresses.
289
290 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
291 -Wl,--build-id -Wl,--hash-style=both
292 $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
293 $(call if_changed,vdsold)
294
295 -LDFLAGS_vdso-syms.o := -r -R
296 +LDFLAGS_vdso-syms.o := -r --just-symbols
297 $(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
298 $(call if_changed,ld)
299
300 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
301 index 91e29b6a86a5..9804a7957f4e 100644
302 --- a/arch/x86/include/asm/stackprotector.h
303 +++ b/arch/x86/include/asm/stackprotector.h
304 @@ -55,8 +55,13 @@
305 /*
306 * Initialize the stackprotector canary value.
307 *
308 - * NOTE: this must only be called from functions that never return,
309 + * NOTE: this must only be called from functions that never return
310 * and it must always be inlined.
311 + *
312 + * In addition, it should be called from a compilation unit for which
313 + * stack protector is disabled. Alternatively, the caller should not end
314 + * with a function call which gets tail-call optimized as that would
315 + * lead to checking a modified canary value.
316 */
317 static __always_inline void boot_init_stack_canary(void)
318 {
319 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
320 index 69881b2d446c..9674321ce3a3 100644
321 --- a/arch/x86/kernel/smpboot.c
322 +++ b/arch/x86/kernel/smpboot.c
323 @@ -262,6 +262,14 @@ static void notrace start_secondary(void *unused)
324
325 wmb();
326 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
327 +
328 + /*
329 + * Prevent tail call to cpu_startup_entry() because the stack protector
330 + * guard has been changed a couple of function calls up, in
331 + * boot_init_stack_canary() and must not be checked before tail calling
332 + * another function.
333 + */
334 + prevent_tail_call_optimization();
335 }
336
337 /**
338 diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
339 index fb37221a1532..647e6af0883d 100644
340 --- a/arch/x86/kernel/unwind_orc.c
341 +++ b/arch/x86/kernel/unwind_orc.c
342 @@ -608,23 +608,23 @@ EXPORT_SYMBOL_GPL(unwind_next_frame);
343 void __unwind_start(struct unwind_state *state, struct task_struct *task,
344 struct pt_regs *regs, unsigned long *first_frame)
345 {
346 - if (!orc_init)
347 - goto done;
348 -
349 memset(state, 0, sizeof(*state));
350 state->task = task;
351
352 + if (!orc_init)
353 + goto err;
354 +
355 /*
356 * Refuse to unwind the stack of a task while it's executing on another
357 * CPU. This check is racy, but that's ok: the unwinder has other
358 * checks to prevent it from going off the rails.
359 */
360 if (task_on_another_cpu(task))
361 - goto done;
362 + goto err;
363
364 if (regs) {
365 if (user_mode(regs))
366 - goto done;
367 + goto the_end;
368
369 state->ip = regs->ip;
370 state->sp = regs->sp;
371 @@ -657,6 +657,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
372 * generate some kind of backtrace if this happens.
373 */
374 void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
375 + state->error = true;
376 if (get_stack_info(next_page, state->task, &state->stack_info,
377 &state->stack_mask))
378 return;
379 @@ -682,8 +683,9 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
380
381 return;
382
383 -done:
384 +err:
385 + state->error = true;
386 +the_end:
387 state->stack_info.type = STACK_TYPE_UNKNOWN;
388 - return;
389 }
390 EXPORT_SYMBOL_GPL(__unwind_start);
391 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
392 index 56a0f9c18892..41408065574f 100644
393 --- a/arch/x86/kvm/x86.c
394 +++ b/arch/x86/kvm/x86.c
395 @@ -3682,7 +3682,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
396 unsigned bank_num = mcg_cap & 0xff, bank;
397
398 r = -EINVAL;
399 - if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
400 + if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
401 goto out;
402 if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
403 goto out;
404 diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
405 index 802ee5bba66c..0cebe5db691d 100644
406 --- a/arch/x86/xen/smp_pv.c
407 +++ b/arch/x86/xen/smp_pv.c
408 @@ -92,6 +92,7 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
409 cpu_bringup();
410 boot_init_stack_canary();
411 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
412 + prevent_tail_call_optimization();
413 }
414
415 void xen_smp_intr_free_pv(unsigned int cpu)
416 diff --git a/crypto/lrw.c b/crypto/lrw.c
417 index be829f6afc8e..3d40e1f32bea 100644
418 --- a/crypto/lrw.c
419 +++ b/crypto/lrw.c
420 @@ -289,7 +289,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
421 crypto_free_skcipher(ctx->child);
422 }
423
424 -static void free(struct skcipher_instance *inst)
425 +static void free_inst(struct skcipher_instance *inst)
426 {
427 crypto_drop_skcipher(skcipher_instance_ctx(inst));
428 kfree(inst);
429 @@ -401,7 +401,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
430 inst->alg.encrypt = encrypt;
431 inst->alg.decrypt = decrypt;
432
433 - inst->free = free;
434 + inst->free = free_inst;
435
436 err = skcipher_register_instance(tmpl, inst);
437 if (err)
438 diff --git a/crypto/xts.c b/crypto/xts.c
439 index ab117633d64e..9d72429f666e 100644
440 --- a/crypto/xts.c
441 +++ b/crypto/xts.c
442 @@ -328,7 +328,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
443 crypto_free_cipher(ctx->tweak);
444 }
445
446 -static void free(struct skcipher_instance *inst)
447 +static void free_inst(struct skcipher_instance *inst)
448 {
449 crypto_drop_skcipher(skcipher_instance_ctx(inst));
450 kfree(inst);
451 @@ -439,7 +439,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
452 inst->alg.encrypt = encrypt;
453 inst->alg.decrypt = decrypt;
454
455 - inst->free = free;
456 + inst->free = free_inst;
457
458 err = skcipher_register_instance(tmpl, inst);
459 if (err)
460 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
461 index 5e6c8bfc6612..5b53a66d403d 100644
462 --- a/drivers/acpi/ec.c
463 +++ b/drivers/acpi/ec.c
464 @@ -1962,23 +1962,31 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
465 acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
466 }
467
468 -bool acpi_ec_other_gpes_active(void)
469 -{
470 - return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX);
471 -}
472 -
473 bool acpi_ec_dispatch_gpe(void)
474 {
475 u32 ret;
476
477 if (!first_ec)
478 + return acpi_any_gpe_status_set(U32_MAX);
479 +
480 + /*
481 + * Report wakeup if the status bit is set for any enabled GPE other
482 + * than the EC one.
483 + */
484 + if (acpi_any_gpe_status_set(first_ec->gpe))
485 + return true;
486 +
487 + if (ec_no_wakeup)
488 return false;
489
490 + /*
491 + * Dispatch the EC GPE in-band, but do not report wakeup in any case
492 + * to allow the caller to process events properly after that.
493 + */
494 ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
495 - if (ret == ACPI_INTERRUPT_HANDLED) {
496 + if (ret == ACPI_INTERRUPT_HANDLED)
497 pm_pr_dbg("EC GPE dispatched\n");
498 - return true;
499 - }
500 +
501 return false;
502 }
503 #endif /* CONFIG_PM_SLEEP */
504 diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
505 index cbf7f34c3ce7..afe6636f9ad3 100644
506 --- a/drivers/acpi/internal.h
507 +++ b/drivers/acpi/internal.h
508 @@ -201,7 +201,6 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
509
510 #ifdef CONFIG_PM_SLEEP
511 void acpi_ec_flush_work(void);
512 -bool acpi_ec_other_gpes_active(void);
513 bool acpi_ec_dispatch_gpe(void);
514 #endif
515
516 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
517 index edad89e58c58..85514c0f3aa5 100644
518 --- a/drivers/acpi/sleep.c
519 +++ b/drivers/acpi/sleep.c
520 @@ -1010,20 +1010,10 @@ static bool acpi_s2idle_wake(void)
521 if (acpi_check_wakeup_handlers())
522 return true;
523
524 - /*
525 - * If the status bit is set for any enabled GPE other than the
526 - * EC one, the wakeup is regarded as a genuine one.
527 - */
528 - if (acpi_ec_other_gpes_active())
529 + /* Check non-EC GPE wakeups and dispatch the EC GPE. */
530 + if (acpi_ec_dispatch_gpe())
531 return true;
532
533 - /*
534 - * If the EC GPE status bit has not been set, the wakeup is
535 - * regarded as a spurious one.
536 - */
537 - if (!acpi_ec_dispatch_gpe())
538 - return false;
539 -
540 /*
541 * Cancel the wakeup and process all pending events in case
542 * there are any wakeup ones in there.
543 diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
544 index a55383b139df..0cf2fe290230 100644
545 --- a/drivers/block/virtio_blk.c
546 +++ b/drivers/block/virtio_blk.c
547 @@ -33,6 +33,15 @@ struct virtio_blk_vq {
548 } ____cacheline_aligned_in_smp;
549
550 struct virtio_blk {
551 + /*
552 + * This mutex must be held by anything that may run after
553 + * virtblk_remove() sets vblk->vdev to NULL.
554 + *
555 + * blk-mq, virtqueue processing, and sysfs attribute code paths are
556 + * shut down before vblk->vdev is set to NULL and therefore do not need
557 + * to hold this mutex.
558 + */
559 + struct mutex vdev_mutex;
560 struct virtio_device *vdev;
561
562 /* The disk structure for the kernel. */
563 @@ -44,6 +53,13 @@ struct virtio_blk {
564 /* Process context for config space updates */
565 struct work_struct config_work;
566
567 + /*
568 + * Tracks references from block_device_operations open/release and
569 + * virtio_driver probe/remove so this object can be freed once no
570 + * longer in use.
571 + */
572 + refcount_t refs;
573 +
574 /* What host tells us, plus 2 for header & tailer. */
575 unsigned int sg_elems;
576
577 @@ -388,10 +404,55 @@ out:
578 return err;
579 }
580
581 +static void virtblk_get(struct virtio_blk *vblk)
582 +{
583 + refcount_inc(&vblk->refs);
584 +}
585 +
586 +static void virtblk_put(struct virtio_blk *vblk)
587 +{
588 + if (refcount_dec_and_test(&vblk->refs)) {
589 + ida_simple_remove(&vd_index_ida, vblk->index);
590 + mutex_destroy(&vblk->vdev_mutex);
591 + kfree(vblk);
592 + }
593 +}
594 +
595 +static int virtblk_open(struct block_device *bd, fmode_t mode)
596 +{
597 + struct virtio_blk *vblk = bd->bd_disk->private_data;
598 + int ret = 0;
599 +
600 + mutex_lock(&vblk->vdev_mutex);
601 +
602 + if (vblk->vdev)
603 + virtblk_get(vblk);
604 + else
605 + ret = -ENXIO;
606 +
607 + mutex_unlock(&vblk->vdev_mutex);
608 + return ret;
609 +}
610 +
611 +static void virtblk_release(struct gendisk *disk, fmode_t mode)
612 +{
613 + struct virtio_blk *vblk = disk->private_data;
614 +
615 + virtblk_put(vblk);
616 +}
617 +
618 /* We provide getgeo only to please some old bootloader/partitioning tools */
619 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
620 {
621 struct virtio_blk *vblk = bd->bd_disk->private_data;
622 + int ret = 0;
623 +
624 + mutex_lock(&vblk->vdev_mutex);
625 +
626 + if (!vblk->vdev) {
627 + ret = -ENXIO;
628 + goto out;
629 + }
630
631 /* see if the host passed in geometry config */
632 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
633 @@ -407,12 +468,16 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
634 geo->sectors = 1 << 5;
635 geo->cylinders = get_capacity(bd->bd_disk) >> 11;
636 }
637 - return 0;
638 +out:
639 + mutex_unlock(&vblk->vdev_mutex);
640 + return ret;
641 }
642
643 static const struct block_device_operations virtblk_fops = {
644 .ioctl = virtblk_ioctl,
645 .owner = THIS_MODULE,
646 + .open = virtblk_open,
647 + .release = virtblk_release,
648 .getgeo = virtblk_getgeo,
649 };
650
651 @@ -767,6 +832,10 @@ static int virtblk_probe(struct virtio_device *vdev)
652 goto out_free_index;
653 }
654
655 + /* This reference is dropped in virtblk_remove(). */
656 + refcount_set(&vblk->refs, 1);
657 + mutex_init(&vblk->vdev_mutex);
658 +
659 vblk->vdev = vdev;
660 vblk->sg_elems = sg_elems;
661
662 @@ -932,8 +1001,6 @@ out:
663 static void virtblk_remove(struct virtio_device *vdev)
664 {
665 struct virtio_blk *vblk = vdev->priv;
666 - int index = vblk->index;
667 - int refc;
668
669 /* Make sure no work handler is accessing the device. */
670 flush_work(&vblk->config_work);
671 @@ -943,18 +1010,21 @@ static void virtblk_remove(struct virtio_device *vdev)
672
673 blk_mq_free_tag_set(&vblk->tag_set);
674
675 + mutex_lock(&vblk->vdev_mutex);
676 +
677 /* Stop all the virtqueues. */
678 vdev->config->reset(vdev);
679
680 - refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
681 + /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
682 + vblk->vdev = NULL;
683 +
684 put_disk(vblk->disk);
685 vdev->config->del_vqs(vdev);
686 kfree(vblk->vqs);
687 - kfree(vblk);
688
689 - /* Only free device id if we don't have any users */
690 - if (refc == 1)
691 - ida_simple_remove(&vd_index_ida, index);
692 + mutex_unlock(&vblk->vdev_mutex);
693 +
694 + virtblk_put(vblk);
695 }
696
697 #ifdef CONFIG_PM_SLEEP
698 diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
699 index 80b029713722..9728d1282e43 100644
700 --- a/drivers/clk/clk.c
701 +++ b/drivers/clk/clk.c
702 @@ -3448,6 +3448,9 @@ static int __clk_core_init(struct clk_core *core)
703 out:
704 clk_pm_runtime_put(core);
705 unlock:
706 + if (ret)
707 + hlist_del_init(&core->child_node);
708 +
709 clk_prepare_unlock();
710
711 if (!ret)
712 diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
713 index d17cfb7a3ff4..d7243c09cc84 100644
714 --- a/drivers/clk/rockchip/clk-rk3228.c
715 +++ b/drivers/clk/rockchip/clk-rk3228.c
716 @@ -156,8 +156,6 @@ PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" };
717 PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" };
718 PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" };
719
720 -PNAME(mux_aclk_gpu_pre_p) = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" };
721 -
722 PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
723 PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
724 PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
725 @@ -468,16 +466,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
726 RK2928_CLKSEL_CON(24), 6, 10, DFLAGS,
727 RK2928_CLKGATE_CON(2), 8, GFLAGS),
728
729 - GATE(0, "cpll_gpu", "cpll", 0,
730 - RK2928_CLKGATE_CON(3), 13, GFLAGS),
731 - GATE(0, "gpll_gpu", "gpll", 0,
732 - RK2928_CLKGATE_CON(3), 13, GFLAGS),
733 - GATE(0, "hdmiphy_gpu", "hdmiphy", 0,
734 - RK2928_CLKGATE_CON(3), 13, GFLAGS),
735 - GATE(0, "usb480m_gpu", "usb480m", 0,
736 + COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0,
737 + RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS,
738 RK2928_CLKGATE_CON(3), 13, GFLAGS),
739 - COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0,
740 - RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS),
741
742 COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_2plls_p, 0,
743 RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS,
744 @@ -582,8 +573,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
745 GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS),
746
747 /* PD_GPU */
748 - GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS),
749 - GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS),
750 + GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS),
751 + GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
752
753 /* PD_BUS */
754 GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),
755 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
756 index 45499e0b9f2f..d3d7c4ef7d04 100644
757 --- a/drivers/cpufreq/intel_pstate.c
758 +++ b/drivers/cpufreq/intel_pstate.c
759 @@ -1058,7 +1058,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
760
761 update_turbo_state();
762 if (global.turbo_disabled) {
763 - pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
764 + pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
765 mutex_unlock(&intel_pstate_limits_lock);
766 mutex_unlock(&intel_pstate_driver_lock);
767 return -EPERM;
768 diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
769 index e7d1e12bf464..89d90c456c0c 100644
770 --- a/drivers/dma/mmp_tdma.c
771 +++ b/drivers/dma/mmp_tdma.c
772 @@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
773 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
774 size);
775 tdmac->desc_arr = NULL;
776 + if (tdmac->status == DMA_ERROR)
777 + tdmac->status = DMA_COMPLETE;
778
779 return;
780 }
781 @@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
782 if (!desc)
783 goto err_out;
784
785 - mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
786 + if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
787 + goto err_out;
788
789 while (buf < buf_len) {
790 desc = &tdmac->desc_arr[i];
791 diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
792 index 581e7a290d98..a3b0b4c56a19 100644
793 --- a/drivers/dma/pch_dma.c
794 +++ b/drivers/dma/pch_dma.c
795 @@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
796 }
797
798 pci_set_master(pdev);
799 + pd->dma.dev = &pdev->dev;
800
801 err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
802 if (err) {
803 @@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
804 goto err_free_irq;
805 }
806
807 - pd->dma.dev = &pdev->dev;
808
809 INIT_LIST_HEAD(&pd->dma.channels);
810
811 diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
812 index 31f9f0e369b9..55b031d2c989 100644
813 --- a/drivers/firmware/efi/tpm.c
814 +++ b/drivers/firmware/efi/tpm.c
815 @@ -16,7 +16,7 @@
816 int efi_tpm_final_log_size;
817 EXPORT_SYMBOL(efi_tpm_final_log_size);
818
819 -static int tpm2_calc_event_log_size(void *data, int count, void *size_info)
820 +static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
821 {
822 struct tcg_pcr_event2_head *header;
823 int event_size, size = 0;
824 diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
825 index de5d1383f28d..3edc1762803a 100644
826 --- a/drivers/gpio/gpio-pca953x.c
827 +++ b/drivers/gpio/gpio-pca953x.c
828 @@ -528,7 +528,7 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
829 {
830 struct pca953x_chip *chip = gpiochip_get_data(gc);
831
832 - switch (config) {
833 + switch (pinconf_to_config_param(config)) {
834 case PIN_CONFIG_BIAS_PULL_UP:
835 case PIN_CONFIG_BIAS_PULL_DOWN:
836 return pca953x_gpio_set_pull_up_down(chip, offset, config);
837 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
838 index 143753d237e7..eaa5e7b7c19d 100644
839 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
840 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
841 @@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
842 u32 cpp;
843 u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
844 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
845 - AMDGPU_GEM_CREATE_VRAM_CLEARED |
846 - AMDGPU_GEM_CREATE_CPU_GTT_USWC;
847 + AMDGPU_GEM_CREATE_VRAM_CLEARED;
848
849 info = drm_get_format_info(adev->ddev, mode_cmd);
850 cpp = info->cpp[0];
851 diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
852 index c45304f1047c..4af9acc2dc4f 100644
853 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
854 +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
855 @@ -228,7 +228,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
856 u32 extra_bits = vmid & 0xf;
857
858 /* IB packet must end on a 8 DW boundary */
859 - cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
860 + cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
861
862 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
863 amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
864 @@ -811,7 +811,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
865 u32 pad_count;
866 int i;
867
868 - pad_count = (8 - (ib->length_dw & 0x7)) % 8;
869 + pad_count = (-ib->length_dw) & 7;
870 for (i = 0; i < pad_count; i++)
871 if (sdma && sdma->burst_nop && (i == 0))
872 ib->ptr[ib->length_dw++] =
873 diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
874 index 074a9a09c0a7..a5b60c9a2418 100644
875 --- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
876 +++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
877 @@ -73,6 +73,22 @@
878 #define SDMA_OP_AQL_COPY 0
879 #define SDMA_OP_AQL_BARRIER_OR 0
880
881 +#define SDMA_GCR_RANGE_IS_PA (1 << 18)
882 +#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16)
883 +#define SDMA_GCR_GL2_WB (1 << 15)
884 +#define SDMA_GCR_GL2_INV (1 << 14)
885 +#define SDMA_GCR_GL2_DISCARD (1 << 13)
886 +#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11)
887 +#define SDMA_GCR_GL2_US (1 << 10)
888 +#define SDMA_GCR_GL1_INV (1 << 9)
889 +#define SDMA_GCR_GLV_INV (1 << 8)
890 +#define SDMA_GCR_GLK_INV (1 << 7)
891 +#define SDMA_GCR_GLK_WB (1 << 6)
892 +#define SDMA_GCR_GLM_INV (1 << 5)
893 +#define SDMA_GCR_GLM_WB (1 << 4)
894 +#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2)
895 +#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0)
896 +
897 /*define for op field*/
898 #define SDMA_PKT_HEADER_op_offset 0
899 #define SDMA_PKT_HEADER_op_mask 0x000000FF
900 diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
901 index a10175838013..b6af67f6f214 100644
902 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
903 +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
904 @@ -255,7 +255,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
905 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
906
907 /* IB packet must end on a 8 DW boundary */
908 - sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
909 + sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
910
911 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
912 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
913 @@ -750,7 +750,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
914 u32 pad_count;
915 int i;
916
917 - pad_count = (8 - (ib->length_dw & 0x7)) % 8;
918 + pad_count = (-ib->length_dw) & 7;
919 for (i = 0; i < pad_count; i++)
920 if (sdma && sdma->burst_nop && (i == 0))
921 ib->ptr[ib->length_dw++] =
922 diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
923 index 5f4e2c616241..cd3ebed46d05 100644
924 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
925 +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
926 @@ -429,7 +429,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
927 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
928
929 /* IB packet must end on a 8 DW boundary */
930 - sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
931 + sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
932
933 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
934 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
935 @@ -1021,7 +1021,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
936 u32 pad_count;
937 int i;
938
939 - pad_count = (8 - (ib->length_dw & 0x7)) % 8;
940 + pad_count = (-ib->length_dw) & 7;
941 for (i = 0; i < pad_count; i++)
942 if (sdma && sdma->burst_nop && (i == 0))
943 ib->ptr[ib->length_dw++] =
944 diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
945 index 4554e72c8378..23de332f3c6e 100644
946 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
947 +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
948 @@ -698,7 +698,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
949 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
950
951 /* IB packet must end on a 8 DW boundary */
952 - sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
953 + sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
954
955 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
956 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
957 @@ -1579,7 +1579,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
958 u32 pad_count;
959 int i;
960
961 - pad_count = (8 - (ib->length_dw & 0x7)) % 8;
962 + pad_count = (-ib->length_dw) & 7;
963 for (i = 0; i < pad_count; i++)
964 if (sdma && sdma->burst_nop && (i == 0))
965 ib->ptr[ib->length_dw++] =
966 diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
967 index 8493bfbbc148..bd715012185c 100644
968 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
969 +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
970 @@ -382,8 +382,27 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
971 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
972 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
973
974 - /* IB packet must end on a 8 DW boundary */
975 - sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
976 + /* Invalidate L2, because if we don't do it, we might get stale cache
977 + * lines from previous IBs.
978 + */
979 + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
980 + amdgpu_ring_write(ring, 0);
981 + amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
982 + SDMA_GCR_GL2_WB |
983 + SDMA_GCR_GLM_INV |
984 + SDMA_GCR_GLM_WB) << 16);
985 + amdgpu_ring_write(ring, 0xffffff80);
986 + amdgpu_ring_write(ring, 0xffff);
987 +
988 + /* An IB packet must end on a 8 DW boundary--the next dword
989 + * must be on a 8-dword boundary. Our IB packet below is 6
990 + * dwords long, thus add x number of NOPs, such that, in
991 + * modular arithmetic,
992 + * wptr + 6 + x = 8k, k >= 0, which in C is,
993 + * (wptr + 6 + x) % 8 = 0.
994 + * The expression below, is a solution of x.
995 + */
996 + sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
997
998 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
999 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
1000 @@ -1086,10 +1105,10 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1001 }
1002
1003 /**
1004 - * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw
1005 - *
1006 + * sdma_v5_0_ring_pad_ib - pad the IB
1007 * @ib: indirect buffer to fill with padding
1008 *
1009 + * Pad the IB with NOPs to a boundary multiple of 8.
1010 */
1011 static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1012 {
1013 @@ -1097,7 +1116,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
1014 u32 pad_count;
1015 int i;
1016
1017 - pad_count = (8 - (ib->length_dw & 0x7)) % 8;
1018 + pad_count = (-ib->length_dw) & 0x7;
1019 for (i = 0; i < pad_count; i++)
1020 if (sdma && sdma->burst_nop && (i == 0))
1021 ib->ptr[ib->length_dw++] =
1022 @@ -1600,7 +1619,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
1023 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1024 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
1025 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
1026 - .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
1027 + .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
1028 .emit_ib = sdma_v5_0_ring_emit_ib,
1029 .emit_fence = sdma_v5_0_ring_emit_fence,
1030 .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
1031 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1032 index be61ae1430ed..99906435dcf7 100644
1033 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1034 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1035 @@ -6921,6 +6921,7 @@ static int dm_update_plane_state(struct dc *dc,
1036 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1037 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
1038 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
1039 + struct amdgpu_crtc *new_acrtc;
1040 bool needs_reset;
1041 int ret = 0;
1042
1043 @@ -6930,9 +6931,30 @@ static int dm_update_plane_state(struct dc *dc,
1044 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1045 dm_old_plane_state = to_dm_plane_state(old_plane_state);
1046
1047 - /*TODO Implement atomic check for cursor plane */
1048 - if (plane->type == DRM_PLANE_TYPE_CURSOR)
1049 + /*TODO Implement better atomic check for cursor plane */
1050 + if (plane->type == DRM_PLANE_TYPE_CURSOR) {
1051 + if (!enable || !new_plane_crtc ||
1052 + drm_atomic_plane_disabling(plane->state, new_plane_state))
1053 + return 0;
1054 +
1055 + new_acrtc = to_amdgpu_crtc(new_plane_crtc);
1056 +
1057 + if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
1058 + (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
1059 + DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
1060 + new_plane_state->crtc_w, new_plane_state->crtc_h);
1061 + return -EINVAL;
1062 + }
1063 +
1064 + if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
1065 + new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
1066 + DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
1067 + new_plane_state->crtc_x, new_plane_state->crtc_y);
1068 + return -EINVAL;
1069 + }
1070 +
1071 return 0;
1072 + }
1073
1074 needs_reset = should_reset_plane(state, plane, old_plane_state,
1075 new_plane_state);
1076 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
1077 index e933f6a369f9..083c42e521f5 100644
1078 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
1079 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
1080 @@ -2015,7 +2015,8 @@ static void dcn20_fpga_init_hw(struct dc *dc)
1081
1082 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
1083 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1084 - REG_WRITE(REFCLK_CNTL, 0);
1085 + if (REG(REFCLK_CNTL))
1086 + REG_WRITE(REFCLK_CNTL, 0);
1087 //
1088
1089
1090 diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
1091 index 161bf7caf3ae..bb7add5ea227 100644
1092 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
1093 +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
1094 @@ -247,7 +247,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
1095 .dram_channel_width_bytes = 4,
1096 .fabric_datapath_to_dcn_data_return_bytes = 32,
1097 .dcn_downspread_percent = 0.5,
1098 - .downspread_percent = 0.5,
1099 + .downspread_percent = 0.38,
1100 .dram_page_open_time_ns = 50.0,
1101 .dram_rw_turnaround_time_ns = 17.5,
1102 .dram_return_buffer_per_channel_bytes = 8192,
1103 diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
1104 index d306cc711997..8bb5fbef7de0 100644
1105 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
1106 +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
1107 @@ -1425,7 +1425,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
1108 if (!hwmgr)
1109 return -EINVAL;
1110
1111 - if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
1112 + if (!(hwmgr->not_vf && amdgpu_dpm) ||
1113 + !hwmgr->hwmgr_func->get_asic_baco_capability)
1114 return 0;
1115
1116 mutex_lock(&hwmgr->smu_lock);
1117 @@ -1459,7 +1460,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
1118 if (!hwmgr)
1119 return -EINVAL;
1120
1121 - if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
1122 + if (!(hwmgr->not_vf && amdgpu_dpm) ||
1123 + !hwmgr->hwmgr_func->set_asic_baco_state)
1124 return 0;
1125
1126 mutex_lock(&hwmgr->smu_lock);
1127 diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
1128 index 07a038f21619..caf6166622e4 100644
1129 --- a/drivers/gpu/drm/i915/display/intel_fbc.c
1130 +++ b/drivers/gpu/drm/i915/display/intel_fbc.c
1131 @@ -504,8 +504,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
1132 if (!ret)
1133 goto err_llb;
1134 else if (ret > 1) {
1135 - DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
1136 -
1137 + DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
1138 }
1139
1140 fbc->threshold = ret;
1141 diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
1142 index 6c79d16b381e..058dcd541644 100644
1143 --- a/drivers/gpu/drm/i915/gvt/scheduler.c
1144 +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
1145 @@ -374,7 +374,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
1146 for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
1147 struct i915_page_directory * const pd =
1148 i915_pd_entry(ppgtt->pd, i);
1149 -
1150 + /* skip now as current i915 ppgtt alloc won't allocate
1151 + top level pdp for non 4-level table, won't impact
1152 + shadow ppgtt. */
1153 + if (!pd)
1154 + break;
1155 px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
1156 }
1157 }
1158 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1159 index 3ccfc025fde2..ade607d93e45 100644
1160 --- a/drivers/gpu/drm/i915/intel_pm.c
1161 +++ b/drivers/gpu/drm/i915/intel_pm.c
1162 @@ -4784,7 +4784,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
1163 * WaIncreaseLatencyIPCEnabled: kbl,cfl
1164 * Display WA #1141: kbl,cfl
1165 */
1166 - if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) ||
1167 + if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
1168 dev_priv->ipc_enabled)
1169 latency += 4;
1170
1171 diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
1172 index 43688ecdd8a0..60ab7151b84d 100644
1173 --- a/drivers/gpu/drm/qxl/qxl_image.c
1174 +++ b/drivers/gpu/drm/qxl/qxl_image.c
1175 @@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
1176 break;
1177 default:
1178 DRM_ERROR("unsupported image bit depth\n");
1179 - return -EINVAL; /* TODO: cleanup */
1180 + qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
1181 + return -EINVAL;
1182 }
1183 image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
1184 image->u.bitmap.x = width;
1185 diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
1186 index f83522717488..4f944ace665d 100644
1187 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
1188 +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
1189 @@ -718,7 +718,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
1190 struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
1191 struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
1192 struct mipi_dsi_device *device = dsi->device;
1193 - union phy_configure_opts opts = { 0 };
1194 + union phy_configure_opts opts = { };
1195 struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
1196 u16 delay;
1197
1198 diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
1199 index 53b517dbe7e6..4af2fc309c28 100644
1200 --- a/drivers/hwmon/da9052-hwmon.c
1201 +++ b/drivers/hwmon/da9052-hwmon.c
1202 @@ -244,9 +244,9 @@ static ssize_t da9052_tsi_show(struct device *dev,
1203 int channel = to_sensor_dev_attr(devattr)->index;
1204 int ret;
1205
1206 - mutex_lock(&hwmon->hwmon_lock);
1207 + mutex_lock(&hwmon->da9052->auxadc_lock);
1208 ret = __da9052_read_tsi(dev, channel);
1209 - mutex_unlock(&hwmon->hwmon_lock);
1210 + mutex_unlock(&hwmon->da9052->auxadc_lock);
1211
1212 if (ret < 0)
1213 return ret;
1214 diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
1215 index 65b10efca2b8..7affe6b4ae21 100644
1216 --- a/drivers/infiniband/core/cache.c
1217 +++ b/drivers/infiniband/core/cache.c
1218 @@ -1542,8 +1542,11 @@ int ib_cache_setup_one(struct ib_device *device)
1219 if (err)
1220 return err;
1221
1222 - rdma_for_each_port (device, p)
1223 - ib_cache_update(device, p, true);
1224 + rdma_for_each_port (device, p) {
1225 + err = ib_cache_update(device, p, true);
1226 + if (err)
1227 + return err;
1228 + }
1229
1230 return 0;
1231 }
1232 diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
1233 index ef4b0c7061e4..244ebf285fc3 100644
1234 --- a/drivers/infiniband/core/nldev.c
1235 +++ b/drivers/infiniband/core/nldev.c
1236 @@ -1248,10 +1248,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1237
1238 has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
1239 ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
1240 - rdma_restrack_put(res);
1241 if (ret)
1242 goto err_free;
1243
1244 + rdma_restrack_put(res);
1245 nlmsg_end(msg, nlh);
1246 ib_device_put(device);
1247 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1248 diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
1249 index d82e0589cfd2..6b4e7235d2f5 100644
1250 --- a/drivers/infiniband/hw/cxgb4/cm.c
1251 +++ b/drivers/infiniband/hw/cxgb4/cm.c
1252 @@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
1253 srqidx = ABORT_RSS_SRQIDX_G(
1254 be32_to_cpu(req->srqidx_status));
1255 if (srqidx) {
1256 - complete_cached_srq_buffers(ep,
1257 - req->srqidx_status);
1258 + complete_cached_srq_buffers(ep, srqidx);
1259 } else {
1260 /* Hold ep ref until finish_peer_abort() */
1261 c4iw_get_ep(&ep->com);
1262 @@ -3878,8 +3877,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1263 return 0;
1264 }
1265
1266 - ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W,
1267 - TCB_RQ_START_S);
1268 + ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
1269 + TCB_RQ_START_S);
1270 cleanup:
1271 pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
1272
1273 diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
1274 index 13e4203497b3..a92346e88628 100644
1275 --- a/drivers/infiniband/hw/hfi1/user_sdma.c
1276 +++ b/drivers/infiniband/hw/hfi1/user_sdma.c
1277 @@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
1278
1279 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
1280 pq->state = SDMA_PKT_Q_ACTIVE;
1281 - /* Send the first N packets in the request to buy us some time */
1282 - ret = user_sdma_send_pkts(req, pcount);
1283 - if (unlikely(ret < 0 && ret != -EBUSY))
1284 - goto free_req;
1285
1286 /*
1287 * This is a somewhat blocking send implementation.
1288 diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
1289 index 55a1fbf0e670..ae8b97c30665 100644
1290 --- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
1291 +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
1292 @@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
1293 int arp_index;
1294
1295 arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
1296 - if (arp_index == -1)
1297 + if (arp_index < 0)
1298 return;
1299 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
1300 if (!cqp_request)
1301 diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
1302 index bd4aa04416c6..6e2b3e2f83f1 100644
1303 --- a/drivers/infiniband/hw/mlx4/qp.c
1304 +++ b/drivers/infiniband/hw/mlx4/qp.c
1305 @@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
1306 int send_size;
1307 int header_size;
1308 int spc;
1309 + int err;
1310 int i;
1311
1312 if (wr->wr.opcode != IB_WR_SEND)
1313 @@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
1314
1315 sqp->ud_header.lrh.virtual_lane = 0;
1316 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
1317 - ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
1318 + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
1319 + if (err)
1320 + return err;
1321 sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
1322 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
1323 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
1324 @@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
1325 }
1326 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
1327 if (!sqp->qp.ibqp.qp_num)
1328 - ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
1329 + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
1330 + &pkey);
1331 else
1332 - ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
1333 + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
1334 + &pkey);
1335 + if (err)
1336 + return err;
1337 +
1338 sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
1339 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
1340 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
1341 diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
1342 index 48f48122ddcb..6a413d73b95d 100644
1343 --- a/drivers/infiniband/sw/rxe/rxe_mmap.c
1344 +++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
1345 @@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
1346
1347 ip = kmalloc(sizeof(*ip), GFP_KERNEL);
1348 if (!ip)
1349 - return NULL;
1350 + return ERR_PTR(-ENOMEM);
1351
1352 size = PAGE_ALIGN(size);
1353
1354 diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
1355 index ff92704de32f..245040c3a35d 100644
1356 --- a/drivers/infiniband/sw/rxe/rxe_queue.c
1357 +++ b/drivers/infiniband/sw/rxe/rxe_queue.c
1358 @@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
1359
1360 if (outbuf) {
1361 ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
1362 - if (!ip)
1363 + if (IS_ERR(ip)) {
1364 + err = PTR_ERR(ip);
1365 goto err1;
1366 + }
1367
1368 - err = copy_to_user(outbuf, &ip->info, sizeof(ip->info));
1369 - if (err)
1370 + if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
1371 + err = -EFAULT;
1372 goto err2;
1373 + }
1374
1375 spin_lock_bh(&rxe->pending_lock);
1376 list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
1377 @@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
1378 err2:
1379 kfree(ip);
1380 err1:
1381 - return -EINVAL;
1382 + return err;
1383 }
1384
1385 inline void rxe_queue_reset(struct rxe_queue *q)
1386 diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
1387 index 95b41c0891d0..9d01b5dca519 100644
1388 --- a/drivers/mmc/core/block.c
1389 +++ b/drivers/mmc/core/block.c
1390 @@ -1417,6 +1417,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
1391 struct mmc_request *mrq = &mqrq->brq.mrq;
1392 struct request_queue *q = req->q;
1393 struct mmc_host *host = mq->card->host;
1394 + enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
1395 unsigned long flags;
1396 bool put_card;
1397 int err;
1398 @@ -1446,7 +1447,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
1399
1400 spin_lock_irqsave(&mq->lock, flags);
1401
1402 - mq->in_flight[mmc_issue_type(mq, req)] -= 1;
1403 + mq->in_flight[issue_type] -= 1;
1404
1405 put_card = (mmc_tot_in_flight(mq) == 0);
1406
1407 diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
1408 index 9edc08685e86..9c0ccb3744c2 100644
1409 --- a/drivers/mmc/core/queue.c
1410 +++ b/drivers/mmc/core/queue.c
1411 @@ -107,11 +107,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
1412 case MMC_ISSUE_DCMD:
1413 if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
1414 if (recovery_needed)
1415 - __mmc_cqe_recovery_notifier(mq);
1416 + mmc_cqe_recovery_notifier(mrq);
1417 return BLK_EH_RESET_TIMER;
1418 }
1419 - /* No timeout (XXX: huh? comment doesn't make much sense) */
1420 - blk_mq_complete_request(req);
1421 + /* The request has gone already */
1422 return BLK_EH_DONE;
1423 default:
1424 /* Timeout is handled by mmc core */
1425 @@ -125,18 +124,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
1426 struct request_queue *q = req->q;
1427 struct mmc_queue *mq = q->queuedata;
1428 unsigned long flags;
1429 - int ret;
1430 + bool ignore_tout;
1431
1432 spin_lock_irqsave(&mq->lock, flags);
1433 -
1434 - if (mq->recovery_needed || !mq->use_cqe)
1435 - ret = BLK_EH_RESET_TIMER;
1436 - else
1437 - ret = mmc_cqe_timed_out(req);
1438 -
1439 + ignore_tout = mq->recovery_needed || !mq->use_cqe;
1440 spin_unlock_irqrestore(&mq->lock, flags);
1441
1442 - return ret;
1443 + return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
1444 }
1445
1446 static void mmc_mq_recovery_handler(struct work_struct *work)
1447 diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
1448 index 1aee485d56d4..026ca9194ce5 100644
1449 --- a/drivers/mmc/host/alcor.c
1450 +++ b/drivers/mmc/host/alcor.c
1451 @@ -1104,7 +1104,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
1452
1453 if (ret) {
1454 dev_err(&pdev->dev, "Failed to get irq for data line\n");
1455 - return ret;
1456 + goto free_host;
1457 }
1458
1459 mutex_init(&host->cmd_mutex);
1460 @@ -1116,6 +1116,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
1461 dev_set_drvdata(&pdev->dev, host);
1462 mmc_add_host(mmc);
1463 return 0;
1464 +
1465 +free_host:
1466 + mmc_free_host(mmc);
1467 + return ret;
1468 }
1469
1470 static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
1471 diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
1472 index 1604f512c7bd..01fc437ed965 100644
1473 --- a/drivers/mmc/host/sdhci-acpi.c
1474 +++ b/drivers/mmc/host/sdhci-acpi.c
1475 @@ -602,10 +602,12 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
1476 }
1477
1478 static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
1479 - .chip = &sdhci_acpi_chip_amd,
1480 - .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
1481 - .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
1482 - SDHCI_QUIRK_32BIT_ADMA_SIZE,
1483 + .chip = &sdhci_acpi_chip_amd,
1484 + .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
1485 + .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
1486 + SDHCI_QUIRK_32BIT_DMA_SIZE |
1487 + SDHCI_QUIRK_32BIT_ADMA_SIZE,
1488 + .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
1489 .probe_slot = sdhci_acpi_emmc_amd_probe_slot,
1490 };
1491
1492 diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
1493 index ce15a05f23d4..fd76aa672e02 100644
1494 --- a/drivers/mmc/host/sdhci-pci-gli.c
1495 +++ b/drivers/mmc/host/sdhci-pci-gli.c
1496 @@ -26,6 +26,9 @@
1497 #define SDHCI_GLI_9750_DRIVING_2 GENMASK(27, 26)
1498 #define GLI_9750_DRIVING_1_VALUE 0xFFF
1499 #define GLI_9750_DRIVING_2_VALUE 0x3
1500 +#define SDHCI_GLI_9750_SEL_1 BIT(29)
1501 +#define SDHCI_GLI_9750_SEL_2 BIT(31)
1502 +#define SDHCI_GLI_9750_ALL_RST (BIT(24)|BIT(25)|BIT(28)|BIT(30))
1503
1504 #define SDHCI_GLI_9750_PLL 0x864
1505 #define SDHCI_GLI_9750_PLL_TX2_INV BIT(23)
1506 @@ -122,6 +125,8 @@ static void gli_set_9750(struct sdhci_host *host)
1507 GLI_9750_DRIVING_1_VALUE);
1508 driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2,
1509 GLI_9750_DRIVING_2_VALUE);
1510 + driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST);
1511 + driving_value |= SDHCI_GLI_9750_SEL_2;
1512 sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING);
1513
1514 sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4;
1515 @@ -334,6 +339,18 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
1516 return value;
1517 }
1518
1519 +#ifdef CONFIG_PM_SLEEP
1520 +static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
1521 +{
1522 + struct sdhci_pci_slot *slot = chip->slots[0];
1523 +
1524 + pci_free_irq_vectors(slot->chip->pdev);
1525 + gli_pcie_enable_msi(slot);
1526 +
1527 + return sdhci_pci_resume_host(chip);
1528 +}
1529 +#endif
1530 +
1531 static const struct sdhci_ops sdhci_gl9755_ops = {
1532 .set_clock = sdhci_set_clock,
1533 .enable_dma = sdhci_pci_enable_dma,
1534 @@ -348,6 +365,9 @@ const struct sdhci_pci_fixes sdhci_gl9755 = {
1535 .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
1536 .probe_slot = gli_probe_slot_gl9755,
1537 .ops = &sdhci_gl9755_ops,
1538 +#ifdef CONFIG_PM_SLEEP
1539 + .resume = sdhci_pci_gli_resume,
1540 +#endif
1541 };
1542
1543 static const struct sdhci_ops sdhci_gl9750_ops = {
1544 @@ -366,4 +386,7 @@ const struct sdhci_pci_fixes sdhci_gl9750 = {
1545 .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
1546 .probe_slot = gli_probe_slot_gl9750,
1547 .ops = &sdhci_gl9750_ops,
1548 +#ifdef CONFIG_PM_SLEEP
1549 + .resume = sdhci_pci_gli_resume,
1550 +#endif
1551 };
1552 diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
1553 index 925ed135a4d9..0df6c2b9484a 100644
1554 --- a/drivers/net/dsa/dsa_loop.c
1555 +++ b/drivers/net/dsa/dsa_loop.c
1556 @@ -356,6 +356,7 @@ static void __exit dsa_loop_exit(void)
1557 }
1558 module_exit(dsa_loop_exit);
1559
1560 +MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
1561 MODULE_LICENSE("GPL");
1562 MODULE_AUTHOR("Florian Fainelli");
1563 MODULE_DESCRIPTION("DSA loopback driver");
1564 diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1565 index bf5add954181..a935b20effa3 100644
1566 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1567 +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1568 @@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
1569 for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1570 addr = dpaa2_sg_get_addr(&sgt[i]);
1571 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
1572 - dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1573 + dma_unmap_page(dev, addr, priv->rx_buf_size,
1574 DMA_BIDIRECTIONAL);
1575
1576 free_pages((unsigned long)sg_vaddr, 0);
1577 @@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
1578 /* Get the address and length from the S/G entry */
1579 sg_addr = dpaa2_sg_get_addr(sge);
1580 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
1581 - dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
1582 + dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
1583 DMA_BIDIRECTIONAL);
1584
1585 sg_length = dpaa2_sg_get_len(sge);
1586 @@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
1587 (page_address(page) - page_address(head_page));
1588
1589 skb_add_rx_frag(skb, i - 1, head_page, page_offset,
1590 - sg_length, DPAA2_ETH_RX_BUF_SIZE);
1591 + sg_length, priv->rx_buf_size);
1592 }
1593
1594 if (dpaa2_sg_is_final(sge))
1595 @@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
1596
1597 for (i = 0; i < count; i++) {
1598 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
1599 - dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
1600 + dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
1601 DMA_BIDIRECTIONAL);
1602 free_pages((unsigned long)vaddr, 0);
1603 }
1604 @@ -331,7 +331,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
1605 break;
1606 case XDP_REDIRECT:
1607 dma_unmap_page(priv->net_dev->dev.parent, addr,
1608 - DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
1609 + priv->rx_buf_size, DMA_BIDIRECTIONAL);
1610 ch->buf_count--;
1611 xdp.data_hard_start = vaddr;
1612 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
1613 @@ -370,7 +370,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
1614 trace_dpaa2_rx_fd(priv->net_dev, fd);
1615
1616 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
1617 - dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1618 + dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
1619 DMA_BIDIRECTIONAL);
1620
1621 fas = dpaa2_get_fas(vaddr, false);
1622 @@ -389,13 +389,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
1623 return;
1624 }
1625
1626 - dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1627 + dma_unmap_page(dev, addr, priv->rx_buf_size,
1628 DMA_BIDIRECTIONAL);
1629 skb = build_linear_skb(ch, fd, vaddr);
1630 } else if (fd_format == dpaa2_fd_sg) {
1631 WARN_ON(priv->xdp_prog);
1632
1633 - dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1634 + dma_unmap_page(dev, addr, priv->rx_buf_size,
1635 DMA_BIDIRECTIONAL);
1636 skb = build_frag_skb(priv, ch, buf_data);
1637 free_pages((unsigned long)vaddr, 0);
1638 @@ -963,7 +963,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
1639 if (!page)
1640 goto err_alloc;
1641
1642 - addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
1643 + addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
1644 DMA_BIDIRECTIONAL);
1645 if (unlikely(dma_mapping_error(dev, addr)))
1646 goto err_map;
1647 @@ -973,7 +973,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
1648 /* tracing point */
1649 trace_dpaa2_eth_buf_seed(priv->net_dev,
1650 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
1651 - addr, DPAA2_ETH_RX_BUF_SIZE,
1652 + addr, priv->rx_buf_size,
1653 bpid);
1654 }
1655
1656 @@ -1680,7 +1680,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
1657 int mfl, linear_mfl;
1658
1659 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1660 - linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
1661 + linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
1662 dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
1663
1664 if (mfl > linear_mfl) {
1665 @@ -2432,6 +2432,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
1666 else
1667 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
1668
1669 + /* We need to ensure that the buffer size seen by WRIOP is a multiple
1670 + * of 64 or 256 bytes depending on the WRIOP version.
1671 + */
1672 + priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
1673 +
1674 /* tx buffer */
1675 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
1676 buf_layout.pass_timestamp = true;
1677 @@ -3096,7 +3101,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
1678 pools_params.num_dpbp = 1;
1679 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
1680 pools_params.pools[0].backup_pool = 0;
1681 - pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
1682 + pools_params.pools[0].buffer_size = priv->rx_buf_size;
1683 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
1684 if (err) {
1685 dev_err(dev, "dpni_set_pools() failed\n");
1686 diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
1687 index 8a0e65b3267f..4570ed53c6c7 100644
1688 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
1689 +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
1690 @@ -373,6 +373,7 @@ struct dpaa2_eth_priv {
1691 u16 tx_data_offset;
1692
1693 struct fsl_mc_device *dpbp_dev;
1694 + u16 rx_buf_size;
1695 u16 bpid;
1696 struct iommu_domain *iommu_domain;
1697
1698 diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
1699 index dc9a6c36cac0..e4d9fb0e72bf 100644
1700 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
1701 +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
1702 @@ -590,7 +590,7 @@ static int num_rules(struct dpaa2_eth_priv *priv)
1703
1704 static int update_cls_rule(struct net_device *net_dev,
1705 struct ethtool_rx_flow_spec *new_fs,
1706 - int location)
1707 + unsigned int location)
1708 {
1709 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1710 struct dpaa2_eth_cls_rule *rule;
1711 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
1712 index 8995e32dd1c0..992908e6eebf 100644
1713 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
1714 +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
1715 @@ -45,6 +45,8 @@
1716
1717 #define MGMT_MSG_TIMEOUT 5000
1718
1719 +#define SET_FUNC_PORT_MGMT_TIMEOUT 25000
1720 +
1721 #define mgmt_to_pfhwdev(pf_mgmt) \
1722 container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
1723
1724 @@ -238,12 +240,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
1725 u8 *buf_in, u16 in_size,
1726 u8 *buf_out, u16 *out_size,
1727 enum mgmt_direction_type direction,
1728 - u16 resp_msg_id)
1729 + u16 resp_msg_id, u32 timeout)
1730 {
1731 struct hinic_hwif *hwif = pf_to_mgmt->hwif;
1732 struct pci_dev *pdev = hwif->pdev;
1733 struct hinic_recv_msg *recv_msg;
1734 struct completion *recv_done;
1735 + unsigned long timeo;
1736 u16 msg_id;
1737 int err;
1738
1739 @@ -267,8 +270,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
1740 goto unlock_sync_msg;
1741 }
1742
1743 - if (!wait_for_completion_timeout(recv_done,
1744 - msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
1745 + timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
1746 +
1747 + if (!wait_for_completion_timeout(recv_done, timeo)) {
1748 dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
1749 err = -ETIMEDOUT;
1750 goto unlock_sync_msg;
1751 @@ -342,6 +346,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
1752 {
1753 struct hinic_hwif *hwif = pf_to_mgmt->hwif;
1754 struct pci_dev *pdev = hwif->pdev;
1755 + u32 timeout = 0;
1756
1757 if (sync != HINIC_MGMT_MSG_SYNC) {
1758 dev_err(&pdev->dev, "Invalid MGMT msg type\n");
1759 @@ -353,9 +358,12 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
1760 return -EINVAL;
1761 }
1762
1763 + if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
1764 + timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
1765 +
1766 return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
1767 buf_out, out_size, MGMT_DIRECT_SEND,
1768 - MSG_NOT_RESP);
1769 + MSG_NOT_RESP, timeout);
1770 }
1771
1772 /**
1773 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
1774 index 42d00b049c6e..3f739ce40201 100644
1775 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
1776 +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
1777 @@ -483,7 +483,6 @@ static int hinic_close(struct net_device *netdev)
1778 {
1779 struct hinic_dev *nic_dev = netdev_priv(netdev);
1780 unsigned int flags;
1781 - int err;
1782
1783 down(&nic_dev->mgmt_lock);
1784
1785 @@ -497,20 +496,9 @@ static int hinic_close(struct net_device *netdev)
1786
1787 up(&nic_dev->mgmt_lock);
1788
1789 - err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
1790 - if (err) {
1791 - netif_err(nic_dev, drv, netdev,
1792 - "Failed to set func port state\n");
1793 - nic_dev->flags |= (flags & HINIC_INTF_UP);
1794 - return err;
1795 - }
1796 + hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
1797
1798 - err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
1799 - if (err) {
1800 - netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
1801 - nic_dev->flags |= (flags & HINIC_INTF_UP);
1802 - return err;
1803 - }
1804 + hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
1805
1806 if (nic_dev->flags & HINIC_RSS_ENABLE) {
1807 hinic_rss_deinit(nic_dev);
1808 diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
1809 index e1651756bf9d..f70bb81e1ed6 100644
1810 --- a/drivers/net/ethernet/moxa/moxart_ether.c
1811 +++ b/drivers/net/ethernet/moxa/moxart_ether.c
1812 @@ -564,7 +564,7 @@ static int moxart_remove(struct platform_device *pdev)
1813 struct net_device *ndev = platform_get_drvdata(pdev);
1814
1815 unregister_netdev(ndev);
1816 - free_irq(ndev->irq, ndev);
1817 + devm_free_irq(&pdev->dev, ndev->irq, ndev);
1818 moxart_mac_free_memory(ndev);
1819 free_netdev(ndev);
1820
1821 diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
1822 index 51fa82b429a3..40970352d208 100644
1823 --- a/drivers/net/ethernet/natsemi/jazzsonic.c
1824 +++ b/drivers/net/ethernet/natsemi/jazzsonic.c
1825 @@ -235,11 +235,13 @@ static int jazz_sonic_probe(struct platform_device *pdev)
1826
1827 err = register_netdev(dev);
1828 if (err)
1829 - goto out1;
1830 + goto undo_probe1;
1831
1832 return 0;
1833
1834 -out1:
1835 +undo_probe1:
1836 + dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
1837 + lp->descriptors, lp->descriptors_laddr);
1838 release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
1839 out:
1840 free_netdev(dev);
1841 diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
1842 index 354efffac0f9..bdbf0726145e 100644
1843 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c
1844 +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
1845 @@ -333,8 +333,10 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
1846 goto err_free_alink;
1847
1848 alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
1849 - if (!alink->prio_map)
1850 + if (!alink->prio_map) {
1851 + err = -ENOMEM;
1852 goto err_free_alink;
1853 + }
1854
1855 /* This is a multi-host app, make sure MAC/PHY is up, but don't
1856 * make the MAC/PHY state follow the state of any of the ports.
1857 diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
1858 index 3bc6d1ef29ec..6fa9852e3f97 100644
1859 --- a/drivers/net/ethernet/realtek/r8169_main.c
1860 +++ b/drivers/net/ethernet/realtek/r8169_main.c
1861 @@ -2202,6 +2202,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
1862 { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
1863 { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
1864 { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
1865 + /* RTL8401, reportedly works if treated as RTL8101e */
1866 + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 },
1867 { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
1868 { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
1869 { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
1870 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
1871 index e0a5fe83d8e0..bfc4a92f1d92 100644
1872 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
1873 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
1874 @@ -75,6 +75,11 @@ struct ethqos_emac_por {
1875 unsigned int value;
1876 };
1877
1878 +struct ethqos_emac_driver_data {
1879 + const struct ethqos_emac_por *por;
1880 + unsigned int num_por;
1881 +};
1882 +
1883 struct qcom_ethqos {
1884 struct platform_device *pdev;
1885 void __iomem *rgmii_base;
1886 @@ -171,6 +176,11 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = {
1887 { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
1888 };
1889
1890 +static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
1891 + .por = emac_v2_3_0_por,
1892 + .num_por = ARRAY_SIZE(emac_v2_3_0_por),
1893 +};
1894 +
1895 static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
1896 {
1897 unsigned int val;
1898 @@ -442,6 +452,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
1899 struct device_node *np = pdev->dev.of_node;
1900 struct plat_stmmacenet_data *plat_dat;
1901 struct stmmac_resources stmmac_res;
1902 + const struct ethqos_emac_driver_data *data;
1903 struct qcom_ethqos *ethqos;
1904 struct resource *res;
1905 int ret;
1906 @@ -471,7 +482,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
1907 goto err_mem;
1908 }
1909
1910 - ethqos->por = of_device_get_match_data(&pdev->dev);
1911 + data = of_device_get_match_data(&pdev->dev);
1912 + ethqos->por = data->por;
1913 + ethqos->num_por = data->num_por;
1914
1915 ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
1916 if (IS_ERR(ethqos->rgmii_clk)) {
1917 @@ -526,7 +539,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
1918 }
1919
1920 static const struct of_device_id qcom_ethqos_match[] = {
1921 - { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
1922 + { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
1923 { }
1924 };
1925 MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
1926 diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
1927 index 001def4509c2..fed3e395f18e 100644
1928 --- a/drivers/net/phy/microchip_t1.c
1929 +++ b/drivers/net/phy/microchip_t1.c
1930 @@ -3,9 +3,21 @@
1931
1932 #include <linux/kernel.h>
1933 #include <linux/module.h>
1934 +#include <linux/delay.h>
1935 #include <linux/mii.h>
1936 #include <linux/phy.h>
1937
1938 +/* External Register Control Register */
1939 +#define LAN87XX_EXT_REG_CTL (0x14)
1940 +#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
1941 +#define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800)
1942 +
1943 +/* External Register Read Data Register */
1944 +#define LAN87XX_EXT_REG_RD_DATA (0x15)
1945 +
1946 +/* External Register Write Data Register */
1947 +#define LAN87XX_EXT_REG_WR_DATA (0x16)
1948 +
1949 /* Interrupt Source Register */
1950 #define LAN87XX_INTERRUPT_SOURCE (0x18)
1951
1952 @@ -14,9 +26,160 @@
1953 #define LAN87XX_MASK_LINK_UP (0x0004)
1954 #define LAN87XX_MASK_LINK_DOWN (0x0002)
1955
1956 +/* phyaccess nested types */
1957 +#define PHYACC_ATTR_MODE_READ 0
1958 +#define PHYACC_ATTR_MODE_WRITE 1
1959 +#define PHYACC_ATTR_MODE_MODIFY 2
1960 +
1961 +#define PHYACC_ATTR_BANK_SMI 0
1962 +#define PHYACC_ATTR_BANK_MISC 1
1963 +#define PHYACC_ATTR_BANK_PCS 2
1964 +#define PHYACC_ATTR_BANK_AFE 3
1965 +#define PHYACC_ATTR_BANK_MAX 7
1966 +
1967 #define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
1968 #define DRIVER_DESC "Microchip LAN87XX T1 PHY driver"
1969
1970 +struct access_ereg_val {
1971 + u8 mode;
1972 + u8 bank;
1973 + u8 offset;
1974 + u16 val;
1975 + u16 mask;
1976 +};
1977 +
1978 +static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
1979 + u8 offset, u16 val)
1980 +{
1981 + u16 ereg = 0;
1982 + int rc = 0;
1983 +
1984 + if (mode > PHYACC_ATTR_MODE_WRITE || bank > PHYACC_ATTR_BANK_MAX)
1985 + return -EINVAL;
1986 +
1987 + if (bank == PHYACC_ATTR_BANK_SMI) {
1988 + if (mode == PHYACC_ATTR_MODE_WRITE)
1989 + rc = phy_write(phydev, offset, val);
1990 + else
1991 + rc = phy_read(phydev, offset);
1992 + return rc;
1993 + }
1994 +
1995 + if (mode == PHYACC_ATTR_MODE_WRITE) {
1996 + ereg = LAN87XX_EXT_REG_CTL_WR_CTL;
1997 + rc = phy_write(phydev, LAN87XX_EXT_REG_WR_DATA, val);
1998 + if (rc < 0)
1999 + return rc;
2000 + } else {
2001 + ereg = LAN87XX_EXT_REG_CTL_RD_CTL;
2002 + }
2003 +
2004 + ereg |= (bank << 8) | offset;
2005 +
2006 + rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg);
2007 + if (rc < 0)
2008 + return rc;
2009 +
2010 + if (mode == PHYACC_ATTR_MODE_READ)
2011 + rc = phy_read(phydev, LAN87XX_EXT_REG_RD_DATA);
2012 +
2013 + return rc;
2014 +}
2015 +
2016 +static int access_ereg_modify_changed(struct phy_device *phydev,
2017 + u8 bank, u8 offset, u16 val, u16 mask)
2018 +{
2019 + int new = 0, rc = 0;
2020 +
2021 + if (bank > PHYACC_ATTR_BANK_MAX)
2022 + return -EINVAL;
2023 +
2024 + rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, bank, offset, val);
2025 + if (rc < 0)
2026 + return rc;
2027 +
2028 + new = val | (rc & (mask ^ 0xFFFF));
2029 + rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, bank, offset, new);
2030 +
2031 + return rc;
2032 +}
2033 +
2034 +static int lan87xx_phy_init(struct phy_device *phydev)
2035 +{
2036 + static const struct access_ereg_val init[] = {
2037 + /* TX Amplitude = 5 */
2038 + {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B,
2039 + 0x000A, 0x001E},
2040 + /* Clear SMI interrupts */
2041 + {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18,
2042 + 0, 0},
2043 + /* Clear MISC interrupts */
2044 + {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08,
2045 + 0, 0},
2046 + /* Turn on TC10 Ring Oscillator (ROSC) */
2047 + {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20,
2048 + 0x0020, 0x0020},
2049 + /* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */
2050 + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20,
2051 + 0x283C, 0},
2052 + /* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */
2053 + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21,
2054 + 0x274F, 0},
2055 + /* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep,
2056 + * and Wake_In to wake PHY
2057 + */
2058 + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20,
2059 + 0x80A7, 0},
2060 + /* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer
2061 + * to 128 uS
2062 + */
2063 + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x24,
2064 + 0xF110, 0},
2065 + /* Enable HW Init */
2066 + {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A,
2067 + 0x0100, 0x0100},
2068 + };
2069 + int rc, i;
2070 +
2071 + /* Start manual initialization procedures in Managed Mode */
2072 + rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
2073 + 0x1a, 0x0000, 0x0100);
2074 + if (rc < 0)
2075 + return rc;
2076 +
2077 + /* Soft Reset the SMI block */
2078 + rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
2079 + 0x00, 0x8000, 0x8000);
2080 + if (rc < 0)
2081 + return rc;
2082 +
2083 + /* Check to see if the self-clearing bit is cleared */
2084 + usleep_range(1000, 2000);
2085 + rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
2086 + PHYACC_ATTR_BANK_SMI, 0x00, 0);
2087 + if (rc < 0)
2088 + return rc;
2089 + if ((rc & 0x8000) != 0)
2090 + return -ETIMEDOUT;
2091 +
2092 + /* PHY Initialization */
2093 + for (i = 0; i < ARRAY_SIZE(init); i++) {
2094 + if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) {
2095 + rc = access_ereg_modify_changed(phydev, init[i].bank,
2096 + init[i].offset,
2097 + init[i].val,
2098 + init[i].mask);
2099 + } else {
2100 + rc = access_ereg(phydev, init[i].mode, init[i].bank,
2101 + init[i].offset, init[i].val);
2102 + }
2103 + if (rc < 0)
2104 + return rc;
2105 + }
2106 +
2107 + return 0;
2108 +}
2109 +
2110 static int lan87xx_phy_config_intr(struct phy_device *phydev)
2111 {
2112 int rc, val = 0;
2113 @@ -40,6 +203,13 @@ static int lan87xx_phy_ack_interrupt(struct phy_device *phydev)
2114 return rc < 0 ? rc : 0;
2115 }
2116
2117 +static int lan87xx_config_init(struct phy_device *phydev)
2118 +{
2119 + int rc = lan87xx_phy_init(phydev);
2120 +
2121 + return rc < 0 ? rc : 0;
2122 +}
2123 +
2124 static struct phy_driver microchip_t1_phy_driver[] = {
2125 {
2126 .phy_id = 0x0007c150,
2127 @@ -48,6 +218,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
2128
2129 .features = PHY_BASIC_T1_FEATURES,
2130
2131 + .config_init = lan87xx_config_init,
2132 .config_aneg = genphy_config_aneg,
2133
2134 .ack_interrupt = lan87xx_phy_ack_interrupt,
2135 diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
2136 index ea890d802ffe..54e5d4f9622c 100644
2137 --- a/drivers/net/phy/phy.c
2138 +++ b/drivers/net/phy/phy.c
2139 @@ -1160,9 +1160,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
2140 /* Restart autonegotiation so the new modes get sent to the
2141 * link partner.
2142 */
2143 - ret = phy_restart_aneg(phydev);
2144 - if (ret < 0)
2145 - return ret;
2146 + if (phydev->autoneg == AUTONEG_ENABLE) {
2147 + ret = phy_restart_aneg(phydev);
2148 + if (ret < 0)
2149 + return ret;
2150 + }
2151 }
2152
2153 return 0;
2154 diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
2155 index a44dd3c8af63..087b01684135 100644
2156 --- a/drivers/net/ppp/pppoe.c
2157 +++ b/drivers/net/ppp/pppoe.c
2158 @@ -492,6 +492,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
2159 if (!skb)
2160 goto out;
2161
2162 + if (skb->pkt_type != PACKET_HOST)
2163 + goto abort;
2164 +
2165 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
2166 goto abort;
2167
2168 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2169 index 5a635f028bdc..030d30603c29 100644
2170 --- a/drivers/net/virtio_net.c
2171 +++ b/drivers/net/virtio_net.c
2172 @@ -1231,9 +1231,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
2173 break;
2174 } while (rq->vq->num_free);
2175 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2176 - u64_stats_update_begin(&rq->stats.syncp);
2177 + unsigned long flags;
2178 +
2179 + flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2180 rq->stats.kicks++;
2181 - u64_stats_update_end(&rq->stats.syncp);
2182 + u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2183 }
2184
2185 return !oom;
2186 diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
2187 index 606fe216f902..cae7caf5ab28 100644
2188 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c
2189 +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
2190 @@ -1297,6 +1297,7 @@ static const struct gpio_chip byt_gpio_chip = {
2191 .direction_output = byt_gpio_direction_output,
2192 .get = byt_gpio_get,
2193 .set = byt_gpio_set,
2194 + .set_config = gpiochip_generic_config,
2195 .dbg_show = byt_gpio_dbg_show,
2196 };
2197
2198 diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
2199 index 2c419fa5d1c1..8f06445a8e39 100644
2200 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c
2201 +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
2202 @@ -1474,11 +1474,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
2203 struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
2204 struct irq_chip *chip = irq_desc_get_chip(desc);
2205 unsigned long pending;
2206 + unsigned long flags;
2207 u32 intr_line;
2208
2209 chained_irq_enter(chip, desc);
2210
2211 + raw_spin_lock_irqsave(&chv_lock, flags);
2212 pending = readl(pctrl->regs + CHV_INTSTAT);
2213 + raw_spin_unlock_irqrestore(&chv_lock, flags);
2214 +
2215 for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
2216 unsigned irq, offset;
2217
2218 diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
2219 index d936e7aa74c4..7b7736abe9d8 100644
2220 --- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
2221 +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
2222 @@ -15,17 +15,18 @@
2223
2224 #include "pinctrl-intel.h"
2225
2226 -#define SPT_PAD_OWN 0x020
2227 -#define SPT_PADCFGLOCK 0x0a0
2228 -#define SPT_HOSTSW_OWN 0x0d0
2229 -#define SPT_GPI_IS 0x100
2230 -#define SPT_GPI_IE 0x120
2231 +#define SPT_PAD_OWN 0x020
2232 +#define SPT_H_PADCFGLOCK 0x090
2233 +#define SPT_LP_PADCFGLOCK 0x0a0
2234 +#define SPT_HOSTSW_OWN 0x0d0
2235 +#define SPT_GPI_IS 0x100
2236 +#define SPT_GPI_IE 0x120
2237
2238 #define SPT_COMMUNITY(b, s, e) \
2239 { \
2240 .barno = (b), \
2241 .padown_offset = SPT_PAD_OWN, \
2242 - .padcfglock_offset = SPT_PADCFGLOCK, \
2243 + .padcfglock_offset = SPT_LP_PADCFGLOCK, \
2244 .hostown_offset = SPT_HOSTSW_OWN, \
2245 .is_offset = SPT_GPI_IS, \
2246 .ie_offset = SPT_GPI_IE, \
2247 @@ -47,7 +48,7 @@
2248 { \
2249 .barno = (b), \
2250 .padown_offset = SPT_PAD_OWN, \
2251 - .padcfglock_offset = SPT_PADCFGLOCK, \
2252 + .padcfglock_offset = SPT_H_PADCFGLOCK, \
2253 .hostown_offset = SPT_HOSTSW_OWN, \
2254 .is_offset = SPT_GPI_IS, \
2255 .ie_offset = SPT_GPI_IE, \
2256 diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
2257 index 763da0be10d6..44320322037d 100644
2258 --- a/drivers/pinctrl/qcom/pinctrl-msm.c
2259 +++ b/drivers/pinctrl/qcom/pinctrl-msm.c
2260 @@ -688,7 +688,7 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
2261
2262 pol = msm_readl_intr_cfg(pctrl, g);
2263 pol ^= BIT(g->intr_polarity_bit);
2264 - msm_writel_intr_cfg(val, pctrl, g);
2265 + msm_writel_intr_cfg(pol, pctrl, g);
2266
2267 val2 = msm_readl_io(pctrl, g) & BIT(g->in_bit);
2268 intstat = msm_readl_intr_status(pctrl, g);
2269 diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
2270 index 4fc2056bd227..e615dc240150 100644
2271 --- a/drivers/s390/net/ism_drv.c
2272 +++ b/drivers/s390/net/ism_drv.c
2273 @@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2274
2275 ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
2276 ISM_NR_DMBS);
2277 - if (!ism->smcd)
2278 + if (!ism->smcd) {
2279 + ret = -ENOMEM;
2280 goto err_resource;
2281 + }
2282
2283 ism->smcd->priv = ism;
2284 ret = ism_dev_init(ism);
2285 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
2286 index 94af30f768f7..9c6bf13daaee 100644
2287 --- a/drivers/scsi/sg.c
2288 +++ b/drivers/scsi/sg.c
2289 @@ -689,8 +689,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
2290 hp->flags = input_size; /* structure abuse ... */
2291 hp->pack_id = old_hdr.pack_id;
2292 hp->usr_ptr = NULL;
2293 - if (__copy_from_user(cmnd, buf, cmd_size))
2294 + if (__copy_from_user(cmnd, buf, cmd_size)) {
2295 + sg_remove_request(sfp, srp);
2296 return -EFAULT;
2297 + }
2298 /*
2299 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
2300 * but is is possible that the app intended SG_DXFER_TO_DEV, because there
2301 diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
2302 index f624cc87cbab..856c34010021 100644
2303 --- a/drivers/usb/cdns3/gadget.c
2304 +++ b/drivers/usb/cdns3/gadget.c
2305 @@ -2105,7 +2105,7 @@ found:
2306 link_trb = priv_req->trb;
2307
2308 /* Update ring only if removed request is on pending_req_list list */
2309 - if (req_on_hw_ring) {
2310 + if (req_on_hw_ring && link_trb) {
2311 link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
2312 ((priv_req->end_trb + 1) * TRB_SIZE));
2313 link_trb->control = (link_trb->control & TRB_CYCLE) |
2314 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
2315 index 6ca40d135430..e26a6f18f421 100644
2316 --- a/drivers/usb/core/devio.c
2317 +++ b/drivers/usb/core/devio.c
2318 @@ -217,6 +217,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
2319 {
2320 struct usb_memory *usbm = NULL;
2321 struct usb_dev_state *ps = file->private_data;
2322 + struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
2323 size_t size = vma->vm_end - vma->vm_start;
2324 void *mem;
2325 unsigned long flags;
2326 @@ -250,11 +251,19 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
2327 usbm->vma_use_count = 1;
2328 INIT_LIST_HEAD(&usbm->memlist);
2329
2330 - if (remap_pfn_range(vma, vma->vm_start,
2331 - virt_to_phys(usbm->mem) >> PAGE_SHIFT,
2332 - size, vma->vm_page_prot) < 0) {
2333 - dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
2334 - return -EAGAIN;
2335 + if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
2336 + if (remap_pfn_range(vma, vma->vm_start,
2337 + virt_to_phys(usbm->mem) >> PAGE_SHIFT,
2338 + size, vma->vm_page_prot) < 0) {
2339 + dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
2340 + return -EAGAIN;
2341 + }
2342 + } else {
2343 + if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle,
2344 + size)) {
2345 + dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
2346 + return -EAGAIN;
2347 + }
2348 }
2349
2350 vma->vm_flags |= VM_IO;
2351 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2352 index 847c85430b05..4d3de33885ff 100644
2353 --- a/drivers/usb/core/hub.c
2354 +++ b/drivers/usb/core/hub.c
2355 @@ -38,6 +38,7 @@
2356
2357 #define USB_VENDOR_GENESYS_LOGIC 0x05e3
2358 #define USB_VENDOR_SMSC 0x0424
2359 +#define USB_PRODUCT_USB5534B 0x5534
2360 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
2361 #define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
2362
2363 @@ -5506,8 +5507,11 @@ out_hdev_lock:
2364 }
2365
2366 static const struct usb_device_id hub_id_table[] = {
2367 - { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
2368 + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
2369 + | USB_DEVICE_ID_MATCH_PRODUCT
2370 + | USB_DEVICE_ID_MATCH_INT_CLASS,
2371 .idVendor = USB_VENDOR_SMSC,
2372 + .idProduct = USB_PRODUCT_USB5534B,
2373 .bInterfaceClass = USB_CLASS_HUB,
2374 .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
2375 { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
2376 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2377 index 3d30dec42c81..c30c5b1c478c 100644
2378 --- a/drivers/usb/dwc3/gadget.c
2379 +++ b/drivers/usb/dwc3/gadget.c
2380 @@ -2480,9 +2480,6 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
2381 for_each_sg(sg, s, pending, i) {
2382 trb = &dep->trb_pool[dep->trb_dequeue];
2383
2384 - if (trb->ctrl & DWC3_TRB_CTRL_HWO)
2385 - break;
2386 -
2387 req->sg = sg_next(s);
2388 req->num_pending_sgs--;
2389
2390 diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
2391 index ab9ac48a751a..a7709d126b29 100644
2392 --- a/drivers/usb/gadget/configfs.c
2393 +++ b/drivers/usb/gadget/configfs.c
2394 @@ -260,6 +260,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
2395 char *name;
2396 int ret;
2397
2398 + if (strlen(page) < len)
2399 + return -EOVERFLOW;
2400 +
2401 name = kstrdup(page, GFP_KERNEL);
2402 if (!name)
2403 return -ENOMEM;
2404 diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
2405 index dd81fd538cb8..a748ed0842e8 100644
2406 --- a/drivers/usb/gadget/legacy/audio.c
2407 +++ b/drivers/usb/gadget/legacy/audio.c
2408 @@ -300,8 +300,10 @@ static int audio_bind(struct usb_composite_dev *cdev)
2409 struct usb_descriptor_header *usb_desc;
2410
2411 usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
2412 - if (!usb_desc)
2413 + if (!usb_desc) {
2414 + status = -ENOMEM;
2415 goto fail;
2416 + }
2417 usb_otg_descriptor_init(cdev->gadget, usb_desc);
2418 otg_desc[0] = usb_desc;
2419 otg_desc[1] = NULL;
2420 diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
2421 index 8d7a556ece30..563363aba48f 100644
2422 --- a/drivers/usb/gadget/legacy/cdc2.c
2423 +++ b/drivers/usb/gadget/legacy/cdc2.c
2424 @@ -179,8 +179,10 @@ static int cdc_bind(struct usb_composite_dev *cdev)
2425 struct usb_descriptor_header *usb_desc;
2426
2427 usb_desc = usb_otg_descriptor_alloc(gadget);
2428 - if (!usb_desc)
2429 + if (!usb_desc) {
2430 + status = -ENOMEM;
2431 goto fail1;
2432 + }
2433 usb_otg_descriptor_init(gadget, usb_desc);
2434 otg_desc[0] = usb_desc;
2435 otg_desc[1] = NULL;
2436 diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
2437 index c61e71ba7045..0f1b45e3abd1 100644
2438 --- a/drivers/usb/gadget/legacy/ncm.c
2439 +++ b/drivers/usb/gadget/legacy/ncm.c
2440 @@ -156,8 +156,10 @@ static int gncm_bind(struct usb_composite_dev *cdev)
2441 struct usb_descriptor_header *usb_desc;
2442
2443 usb_desc = usb_otg_descriptor_alloc(gadget);
2444 - if (!usb_desc)
2445 + if (!usb_desc) {
2446 + status = -ENOMEM;
2447 goto fail;
2448 + }
2449 usb_otg_descriptor_init(gadget, usb_desc);
2450 otg_desc[0] = usb_desc;
2451 otg_desc[1] = NULL;
2452 diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
2453 index 247de0faaeb7..5980540a8fff 100644
2454 --- a/drivers/usb/gadget/udc/net2272.c
2455 +++ b/drivers/usb/gadget/udc/net2272.c
2456 @@ -2647,6 +2647,8 @@ net2272_plat_probe(struct platform_device *pdev)
2457 err_req:
2458 release_mem_region(base, len);
2459 err:
2460 + kfree(dev);
2461 +
2462 return ret;
2463 }
2464
2465 diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
2466 index 315b4552693c..52c625c02341 100644
2467 --- a/drivers/usb/host/xhci-plat.c
2468 +++ b/drivers/usb/host/xhci-plat.c
2469 @@ -363,6 +363,7 @@ static int xhci_plat_remove(struct platform_device *dev)
2470 struct clk *reg_clk = xhci->reg_clk;
2471 struct usb_hcd *shared_hcd = xhci->shared_hcd;
2472
2473 + pm_runtime_get_sync(&dev->dev);
2474 xhci->xhc_state |= XHCI_STATE_REMOVING;
2475
2476 usb_remove_hcd(shared_hcd);
2477 @@ -376,8 +377,9 @@ static int xhci_plat_remove(struct platform_device *dev)
2478 clk_disable_unprepare(reg_clk);
2479 usb_put_hcd(hcd);
2480
2481 - pm_runtime_set_suspended(&dev->dev);
2482 pm_runtime_disable(&dev->dev);
2483 + pm_runtime_put_noidle(&dev->dev);
2484 + pm_runtime_set_suspended(&dev->dev);
2485
2486 return 0;
2487 }
2488 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2489 index a54f8f3234f9..49894541ea9a 100644
2490 --- a/drivers/usb/host/xhci-ring.c
2491 +++ b/drivers/usb/host/xhci-ring.c
2492 @@ -3421,8 +3421,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2493 /* New sg entry */
2494 --num_sgs;
2495 sent_len -= block_len;
2496 - if (num_sgs != 0) {
2497 - sg = sg_next(sg);
2498 + sg = sg_next(sg);
2499 + if (num_sgs != 0 && sg) {
2500 block_len = sg_dma_len(sg);
2501 addr = (u64) sg_dma_address(sg);
2502 addr += sent_len;
2503 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2504 index c8494fa5e19d..4b8632eda2bd 100644
2505 --- a/fs/cifs/cifssmb.c
2506 +++ b/fs/cifs/cifssmb.c
2507 @@ -2135,8 +2135,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
2508 }
2509 }
2510
2511 + kref_put(&wdata2->refcount, cifs_writedata_release);
2512 if (rc) {
2513 - kref_put(&wdata2->refcount, cifs_writedata_release);
2514 if (is_retryable_error(rc))
2515 continue;
2516 i += nr_pages;
2517 diff --git a/fs/exec.c b/fs/exec.c
2518 index fc2870f2aca9..d62cd1d71098 100644
2519 --- a/fs/exec.c
2520 +++ b/fs/exec.c
2521 @@ -1274,6 +1274,8 @@ int flush_old_exec(struct linux_binprm * bprm)
2522 */
2523 set_mm_exe_file(bprm->mm, bprm->file);
2524
2525 + would_dump(bprm, bprm->file);
2526 +
2527 /*
2528 * Release all of the old mmap stuff
2529 */
2530 @@ -1817,8 +1819,6 @@ static int __do_execve_file(int fd, struct filename *filename,
2531 if (retval < 0)
2532 goto out;
2533
2534 - would_dump(bprm, bprm->file);
2535 -
2536 retval = exec_binprm(bprm);
2537 if (retval < 0)
2538 goto out;
2539 diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
2540 index f63df54a08c6..adbb8fef2216 100644
2541 --- a/fs/gfs2/bmap.c
2542 +++ b/fs/gfs2/bmap.c
2543 @@ -528,10 +528,12 @@ lower_metapath:
2544
2545 /* Advance in metadata tree. */
2546 (mp->mp_list[hgt])++;
2547 - if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
2548 - if (!hgt)
2549 + if (hgt) {
2550 + if (mp->mp_list[hgt] >= sdp->sd_inptrs)
2551 + goto lower_metapath;
2552 + } else {
2553 + if (mp->mp_list[hgt] >= sdp->sd_diptrs)
2554 break;
2555 - goto lower_metapath;
2556 }
2557
2558 fill_up_metapath:
2559 @@ -876,10 +878,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
2560 ret = -ENOENT;
2561 goto unlock;
2562 } else {
2563 - /* report a hole */
2564 iomap->offset = pos;
2565 iomap->length = length;
2566 - goto do_alloc;
2567 + goto hole_found;
2568 }
2569 }
2570 iomap->length = size;
2571 @@ -933,8 +934,6 @@ unlock:
2572 return ret;
2573
2574 do_alloc:
2575 - iomap->addr = IOMAP_NULL_ADDR;
2576 - iomap->type = IOMAP_HOLE;
2577 if (flags & IOMAP_REPORT) {
2578 if (pos >= size)
2579 ret = -ENOENT;
2580 @@ -956,6 +955,9 @@ do_alloc:
2581 if (pos < size && height == ip->i_height)
2582 ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
2583 }
2584 +hole_found:
2585 + iomap->addr = IOMAP_NULL_ADDR;
2586 + iomap->type = IOMAP_HOLE;
2587 goto out;
2588 }
2589
2590 diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
2591 index 7ca84be20cf6..8303b44a5068 100644
2592 --- a/fs/gfs2/lops.c
2593 +++ b/fs/gfs2/lops.c
2594 @@ -264,7 +264,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
2595 struct super_block *sb = sdp->sd_vfs;
2596 struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
2597
2598 - bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9);
2599 + bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
2600 bio_set_dev(bio, sb->s_bdev);
2601 bio->bi_end_io = end_io;
2602 bio->bi_private = sdp;
2603 @@ -504,7 +504,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
2604 unsigned int bsize = sdp->sd_sb.sb_bsize, off;
2605 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
2606 unsigned int shift = PAGE_SHIFT - bsize_shift;
2607 - unsigned int readahead_blocks = BIO_MAX_PAGES << shift;
2608 + unsigned int max_bio_size = 2 * 1024 * 1024;
2609 struct gfs2_journal_extent *je;
2610 int sz, ret = 0;
2611 struct bio *bio = NULL;
2612 @@ -532,12 +532,17 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
2613 off = 0;
2614 }
2615
2616 - if (!bio || (bio_chained && !off)) {
2617 + if (!bio || (bio_chained && !off) ||
2618 + bio->bi_iter.bi_size >= max_bio_size) {
2619 /* start new bio */
2620 } else {
2621 - sz = bio_add_page(bio, page, bsize, off);
2622 - if (sz == bsize)
2623 - goto block_added;
2624 + sector_t sector = dblock << sdp->sd_fsb2bb_shift;
2625 +
2626 + if (bio_end_sector(bio) == sector) {
2627 + sz = bio_add_page(bio, page, bsize, off);
2628 + if (sz == bsize)
2629 + goto block_added;
2630 + }
2631 if (off) {
2632 unsigned int blocks =
2633 (PAGE_SIZE - off) >> bsize_shift;
2634 @@ -563,7 +568,7 @@ block_added:
2635 off += bsize;
2636 if (off == PAGE_SIZE)
2637 page = NULL;
2638 - if (blocks_submitted < blocks_read + readahead_blocks) {
2639 + if (blocks_submitted < 2 * max_bio_size >> bsize_shift) {
2640 /* Keep at least one bio in flight */
2641 continue;
2642 }
2643 diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
2644 index 15f271401dcc..573b1da9342c 100644
2645 --- a/fs/nfs/fscache-index.c
2646 +++ b/fs/nfs/fscache-index.c
2647 @@ -84,8 +84,10 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
2648 return FSCACHE_CHECKAUX_OBSOLETE;
2649
2650 memset(&auxdata, 0, sizeof(auxdata));
2651 - auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
2652 - auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
2653 + auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
2654 + auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
2655 + auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
2656 + auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
2657
2658 if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
2659 auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
2660 diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
2661 index a6dcc2151e77..7d6721ec31d4 100644
2662 --- a/fs/nfs/fscache.c
2663 +++ b/fs/nfs/fscache.c
2664 @@ -188,7 +188,8 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
2665 /* create a cache index for looking up filehandles */
2666 nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
2667 &nfs_fscache_super_index_def,
2668 - key, sizeof(*key) + ulen,
2669 + &key->key,
2670 + sizeof(key->key) + ulen,
2671 NULL, 0,
2672 nfss, 0, true);
2673 dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
2674 @@ -226,6 +227,19 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
2675 }
2676 }
2677
2678 +static void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
2679 + struct nfs_inode *nfsi)
2680 +{
2681 + memset(auxdata, 0, sizeof(*auxdata));
2682 + auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
2683 + auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
2684 + auxdata->ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
2685 + auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
2686 +
2687 + if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
2688 + auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
2689 +}
2690 +
2691 /*
2692 * Initialise the per-inode cache cookie pointer for an NFS inode.
2693 */
2694 @@ -239,12 +253,7 @@ void nfs_fscache_init_inode(struct inode *inode)
2695 if (!(nfss->fscache && S_ISREG(inode->i_mode)))
2696 return;
2697
2698 - memset(&auxdata, 0, sizeof(auxdata));
2699 - auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
2700 - auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
2701 -
2702 - if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
2703 - auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
2704 + nfs_fscache_update_auxdata(&auxdata, nfsi);
2705
2706 nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
2707 &nfs_fscache_inode_object_def,
2708 @@ -264,9 +273,7 @@ void nfs_fscache_clear_inode(struct inode *inode)
2709
2710 dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
2711
2712 - memset(&auxdata, 0, sizeof(auxdata));
2713 - auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
2714 - auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
2715 + nfs_fscache_update_auxdata(&auxdata, nfsi);
2716 fscache_relinquish_cookie(cookie, &auxdata, false);
2717 nfsi->fscache = NULL;
2718 }
2719 @@ -306,9 +313,7 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp)
2720 if (!fscache_cookie_valid(cookie))
2721 return;
2722
2723 - memset(&auxdata, 0, sizeof(auxdata));
2724 - auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
2725 - auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
2726 + nfs_fscache_update_auxdata(&auxdata, nfsi);
2727
2728 if (inode_is_open_for_write(inode)) {
2729 dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
2730 diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
2731 index ad041cfbf9ec..6754c8607230 100644
2732 --- a/fs/nfs/fscache.h
2733 +++ b/fs/nfs/fscache.h
2734 @@ -62,9 +62,11 @@ struct nfs_fscache_key {
2735 * cache object.
2736 */
2737 struct nfs_fscache_inode_auxdata {
2738 - struct timespec mtime;
2739 - struct timespec ctime;
2740 - u64 change_attr;
2741 + s64 mtime_sec;
2742 + s64 mtime_nsec;
2743 + s64 ctime_sec;
2744 + s64 ctime_nsec;
2745 + u64 change_attr;
2746 };
2747
2748 /*
2749 diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
2750 index cb7c10e9721e..a2593b787cc7 100644
2751 --- a/fs/nfs/mount_clnt.c
2752 +++ b/fs/nfs/mount_clnt.c
2753 @@ -32,6 +32,7 @@
2754 #define MNT_fhs_status_sz (1)
2755 #define MNT_fhandle_sz XDR_QUADLEN(NFS2_FHSIZE)
2756 #define MNT_fhandle3_sz (1 + XDR_QUADLEN(NFS3_FHSIZE))
2757 +#define MNT_fhandlev3_sz XDR_QUADLEN(NFS3_FHSIZE)
2758 #define MNT_authflav3_sz (1 + NFS_MAX_SECFLAVORS)
2759
2760 /*
2761 @@ -39,7 +40,7 @@
2762 */
2763 #define MNT_enc_dirpath_sz encode_dirpath_sz
2764 #define MNT_dec_mountres_sz (MNT_status_sz + MNT_fhandle_sz)
2765 -#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandle_sz + \
2766 +#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandlev3_sz + \
2767 MNT_authflav3_sz)
2768
2769 /*
2770 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2771 index b53bcf40e2a7..ea680f619438 100644
2772 --- a/fs/nfs/nfs4state.c
2773 +++ b/fs/nfs/nfs4state.c
2774 @@ -733,9 +733,9 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
2775 state = new;
2776 state->owner = owner;
2777 atomic_inc(&owner->so_count);
2778 - list_add_rcu(&state->inode_states, &nfsi->open_states);
2779 ihold(inode);
2780 state->inode = inode;
2781 + list_add_rcu(&state->inode_states, &nfsi->open_states);
2782 spin_unlock(&inode->i_lock);
2783 /* Note: The reclaim code dictates that we add stateless
2784 * and read-only stateids to the end of the list */
2785 diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
2786 index f5d30573f4a9..deb13f0a0f7d 100644
2787 --- a/fs/notify/fanotify/fanotify.c
2788 +++ b/fs/notify/fanotify/fanotify.c
2789 @@ -171,6 +171,13 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
2790 if (!fsnotify_iter_should_report_type(iter_info, type))
2791 continue;
2792 mark = iter_info->marks[type];
2793 + /*
2794 + * If the event is on dir and this mark doesn't care about
2795 + * events on dir, don't send it!
2796 + */
2797 + if (event_mask & FS_ISDIR && !(mark->mask & FS_ISDIR))
2798 + continue;
2799 +
2800 /*
2801 * If the event is for a child and this mark doesn't care about
2802 * events on a child, don't send it!
2803 @@ -203,10 +210,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
2804 user_mask &= ~FAN_ONDIR;
2805 }
2806
2807 - if (event_mask & FS_ISDIR &&
2808 - !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
2809 - return 0;
2810 -
2811 return test_mask & user_mask;
2812 }
2813
2814 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
2815 index 034b0a644efc..448c91bf543b 100644
2816 --- a/include/linux/compiler.h
2817 +++ b/include/linux/compiler.h
2818 @@ -356,4 +356,10 @@ static inline void *offset_to_ptr(const int *off)
2819 /* &a[0] degrades to a pointer: a different type from an array */
2820 #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
2821
2822 +/*
2823 + * This is needed in functions which generate the stack canary, see
2824 + * arch/x86/kernel/smpboot.c::start_secondary() for an example.
2825 + */
2826 +#define prevent_tail_call_optimization() mb()
2827 +
2828 #endif /* __LINUX_COMPILER_H */
2829 diff --git a/include/linux/fs.h b/include/linux/fs.h
2830 index 06668379109e..5bd384dbdca5 100644
2831 --- a/include/linux/fs.h
2832 +++ b/include/linux/fs.h
2833 @@ -978,7 +978,7 @@ struct file_handle {
2834 __u32 handle_bytes;
2835 int handle_type;
2836 /* file identifier */
2837 - unsigned char f_handle[0];
2838 + unsigned char f_handle[];
2839 };
2840
2841 static inline struct file *get_file(struct file *f)
2842 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
2843 index 8faca7b52543..fb5b2a41bd45 100644
2844 --- a/include/linux/memcontrol.h
2845 +++ b/include/linux/memcontrol.h
2846 @@ -793,6 +793,8 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
2847 atomic_long_inc(&memcg->memory_events[event]);
2848 cgroup_file_notify(&memcg->events_file);
2849
2850 + if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2851 + break;
2852 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
2853 break;
2854 } while ((memcg = parent_mem_cgroup(memcg)) &&
2855 diff --git a/include/linux/pnp.h b/include/linux/pnp.h
2856 index 3b12fd28af78..fc4df3ccefc9 100644
2857 --- a/include/linux/pnp.h
2858 +++ b/include/linux/pnp.h
2859 @@ -220,10 +220,8 @@ struct pnp_card {
2860 #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list)
2861 #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list)
2862 #define to_pnp_card(n) container_of(n, struct pnp_card, dev)
2863 -#define pnp_for_each_card(card) \
2864 - for((card) = global_to_pnp_card(pnp_cards.next); \
2865 - (card) != global_to_pnp_card(&pnp_cards); \
2866 - (card) = global_to_pnp_card((card)->global_list.next))
2867 +#define pnp_for_each_card(card) \
2868 + list_for_each_entry(card, &pnp_cards, global_list)
2869
2870 struct pnp_card_link {
2871 struct pnp_card *card;
2872 @@ -276,14 +274,9 @@ struct pnp_dev {
2873 #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list)
2874 #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list)
2875 #define to_pnp_dev(n) container_of(n, struct pnp_dev, dev)
2876 -#define pnp_for_each_dev(dev) \
2877 - for((dev) = global_to_pnp_dev(pnp_global.next); \
2878 - (dev) != global_to_pnp_dev(&pnp_global); \
2879 - (dev) = global_to_pnp_dev((dev)->global_list.next))
2880 -#define card_for_each_dev(card,dev) \
2881 - for((dev) = card_to_pnp_dev((card)->devices.next); \
2882 - (dev) != card_to_pnp_dev(&(card)->devices); \
2883 - (dev) = card_to_pnp_dev((dev)->card_list.next))
2884 +#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list)
2885 +#define card_for_each_dev(card, dev) \
2886 + list_for_each_entry(dev, &(card)->devices, card_list)
2887 #define pnp_dev_name(dev) (dev)->name
2888
2889 static inline void *pnp_get_drvdata(struct pnp_dev *pdev)
2890 @@ -437,14 +430,10 @@ struct pnp_protocol {
2891 };
2892
2893 #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list)
2894 -#define protocol_for_each_card(protocol,card) \
2895 - for((card) = protocol_to_pnp_card((protocol)->cards.next); \
2896 - (card) != protocol_to_pnp_card(&(protocol)->cards); \
2897 - (card) = protocol_to_pnp_card((card)->protocol_list.next))
2898 -#define protocol_for_each_dev(protocol,dev) \
2899 - for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \
2900 - (dev) != protocol_to_pnp_dev(&(protocol)->devices); \
2901 - (dev) = protocol_to_pnp_dev((dev)->protocol_list.next))
2902 +#define protocol_for_each_card(protocol, card) \
2903 + list_for_each_entry(card, &(protocol)->cards, protocol_list)
2904 +#define protocol_for_each_dev(protocol, dev) \
2905 + list_for_each_entry(dev, &(protocol)->devices, protocol_list)
2906
2907 extern struct bus_type pnp_bus_type;
2908
2909 diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
2910 index 7eb6a8754f19..a3adbe593505 100644
2911 --- a/include/linux/skmsg.h
2912 +++ b/include/linux/skmsg.h
2913 @@ -186,6 +186,7 @@ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
2914 dst->sg.data[which] = src->sg.data[which];
2915 dst->sg.data[which].length = size;
2916 dst->sg.size += size;
2917 + src->sg.size -= size;
2918 src->sg.data[which].length -= size;
2919 src->sg.data[which].offset += size;
2920 }
2921 diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
2922 index 5ac5db4d295f..d4326d6662a4 100644
2923 --- a/include/linux/sunrpc/gss_api.h
2924 +++ b/include/linux/sunrpc/gss_api.h
2925 @@ -22,6 +22,7 @@
2926 struct gss_ctx {
2927 struct gss_api_mech *mech_type;
2928 void *internal_ctx_id;
2929 + unsigned int slack, align;
2930 };
2931
2932 #define GSS_C_NO_BUFFER ((struct xdr_netobj) 0)
2933 @@ -67,6 +68,7 @@ u32 gss_wrap(
2934 u32 gss_unwrap(
2935 struct gss_ctx *ctx_id,
2936 int offset,
2937 + int len,
2938 struct xdr_buf *inbuf);
2939 u32 gss_delete_sec_context(
2940 struct gss_ctx **ctx_id);
2941 @@ -127,6 +129,7 @@ struct gss_api_ops {
2942 u32 (*gss_unwrap)(
2943 struct gss_ctx *ctx_id,
2944 int offset,
2945 + int len,
2946 struct xdr_buf *buf);
2947 void (*gss_delete_sec_context)(
2948 void *internal_ctx_id);
2949 diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
2950 index 02c0412e368c..07930bc9ad60 100644
2951 --- a/include/linux/sunrpc/gss_krb5.h
2952 +++ b/include/linux/sunrpc/gss_krb5.h
2953 @@ -83,7 +83,7 @@ struct gss_krb5_enctype {
2954 u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
2955 struct xdr_buf *buf,
2956 struct page **pages); /* v2 encryption function */
2957 - u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
2958 + u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
2959 struct xdr_buf *buf, u32 *headskip,
2960 u32 *tailskip); /* v2 decryption function */
2961 };
2962 @@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
2963 struct xdr_buf *outbuf, struct page **pages);
2964
2965 u32
2966 -gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
2967 +gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
2968 struct xdr_buf *buf);
2969
2970
2971 @@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
2972 struct page **pages);
2973
2974 u32
2975 -gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
2976 +gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
2977 struct xdr_buf *buf, u32 *plainoffset,
2978 u32 *plainlen);
2979
2980 diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
2981 index f33e5013bdfb..9db6097c22c5 100644
2982 --- a/include/linux/sunrpc/xdr.h
2983 +++ b/include/linux/sunrpc/xdr.h
2984 @@ -186,6 +186,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
2985 extern void xdr_shift_buf(struct xdr_buf *, size_t);
2986 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
2987 extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
2988 +extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
2989 extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int);
2990 extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
2991 extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
2992 diff --git a/include/linux/tty.h b/include/linux/tty.h
2993 index bd5fe0e907e8..a99e9b8e4e31 100644
2994 --- a/include/linux/tty.h
2995 +++ b/include/linux/tty.h
2996 @@ -66,7 +66,7 @@ struct tty_buffer {
2997 int read;
2998 int flags;
2999 /* Data points here */
3000 - unsigned long data[0];
3001 + unsigned long data[];
3002 };
3003
3004 /* Values for .flags field of tty_buffer */
3005 diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
3006 index 9f551f3b69c6..90690e37a56f 100644
3007 --- a/include/net/netfilter/nf_conntrack.h
3008 +++ b/include/net/netfilter/nf_conntrack.h
3009 @@ -87,7 +87,7 @@ struct nf_conn {
3010 struct hlist_node nat_bysource;
3011 #endif
3012 /* all members below initialized via memset */
3013 - u8 __nfct_init_offset[0];
3014 + struct { } __nfct_init_offset;
3015
3016 /* If we were expected by an expectation, this will be it */
3017 struct nf_conn *master;
3018 diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
3019 index 9fb7cf1cdf36..3d03756e1069 100644
3020 --- a/include/net/sch_generic.h
3021 +++ b/include/net/sch_generic.h
3022 @@ -407,6 +407,7 @@ struct tcf_block {
3023 struct mutex lock;
3024 struct list_head chain_list;
3025 u32 index; /* block index for shared blocks */
3026 + u32 classid; /* which class this block belongs to */
3027 refcount_t refcnt;
3028 struct net *net;
3029 struct Qdisc *q;
3030 diff --git a/include/net/tcp.h b/include/net/tcp.h
3031 index cce285f70c8e..7cf1b4972c66 100644
3032 --- a/include/net/tcp.h
3033 +++ b/include/net/tcp.h
3034 @@ -1401,6 +1401,19 @@ static inline int tcp_full_space(const struct sock *sk)
3035 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
3036 }
3037
3038 +/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
3039 + * If 87.5 % (7/8) of the space has been consumed, we want to override
3040 + * SO_RCVLOWAT constraint, since we are receiving skbs with too small
3041 + * len/truesize ratio.
3042 + */
3043 +static inline bool tcp_rmem_pressure(const struct sock *sk)
3044 +{
3045 + int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
3046 + int threshold = rcvbuf - (rcvbuf >> 3);
3047 +
3048 + return atomic_read(&sk->sk_rmem_alloc) > threshold;
3049 +}
3050 +
3051 extern void tcp_openreq_init_rwin(struct request_sock *req,
3052 const struct sock *sk_listener,
3053 const struct dst_entry *dst);
3054 diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
3055 index a36b7227a15a..334842daa904 100644
3056 --- a/include/sound/rawmidi.h
3057 +++ b/include/sound/rawmidi.h
3058 @@ -61,6 +61,7 @@ struct snd_rawmidi_runtime {
3059 size_t avail_min; /* min avail for wakeup */
3060 size_t avail; /* max used buffer for wakeup */
3061 size_t xruns; /* over/underruns counter */
3062 + int buffer_ref; /* buffer reference count */
3063 /* misc */
3064 spinlock_t lock;
3065 wait_queue_head_t sleep;
3066 diff --git a/init/Kconfig b/init/Kconfig
3067 index 0bffc8fdbf3d..6db3e310a5e4 100644
3068 --- a/init/Kconfig
3069 +++ b/init/Kconfig
3070 @@ -36,22 +36,6 @@ config TOOLS_SUPPORT_RELR
3071 config CC_HAS_ASM_INLINE
3072 def_bool $(success,echo 'void foo(void) { asm inline (""); }' | $(CC) -x c - -c -o /dev/null)
3073
3074 -config CC_HAS_WARN_MAYBE_UNINITIALIZED
3075 - def_bool $(cc-option,-Wmaybe-uninitialized)
3076 - help
3077 - GCC >= 4.7 supports this option.
3078 -
3079 -config CC_DISABLE_WARN_MAYBE_UNINITIALIZED
3080 - bool
3081 - depends on CC_HAS_WARN_MAYBE_UNINITIALIZED
3082 - default CC_IS_GCC && GCC_VERSION < 40900 # unreliable for GCC < 4.9
3083 - help
3084 - GCC's -Wmaybe-uninitialized is not reliable by definition.
3085 - Lots of false positive warnings are produced in some cases.
3086 -
3087 - If this option is enabled, -Wno-maybe-uninitialzed is passed
3088 - to the compiler to suppress maybe-uninitialized warnings.
3089 -
3090 config CONSTRUCTORS
3091 bool
3092 depends on !UML
3093 @@ -1226,14 +1210,12 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
3094 config CC_OPTIMIZE_FOR_PERFORMANCE_O3
3095 bool "Optimize more for performance (-O3)"
3096 depends on ARC
3097 - imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives
3098 help
3099 Choosing this option will pass "-O3" to your compiler to optimize
3100 the kernel yet more for performance.
3101
3102 config CC_OPTIMIZE_FOR_SIZE
3103 bool "Optimize for size (-Os)"
3104 - imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives
3105 help
3106 Choosing this option will pass "-Os" to your compiler resulting
3107 in a smaller kernel.
3108 diff --git a/init/initramfs.c b/init/initramfs.c
3109 index c47dad0884f7..5feee4f616d5 100644
3110 --- a/init/initramfs.c
3111 +++ b/init/initramfs.c
3112 @@ -534,7 +534,7 @@ void __weak free_initrd_mem(unsigned long start, unsigned long end)
3113 }
3114
3115 #ifdef CONFIG_KEXEC_CORE
3116 -static bool kexec_free_initrd(void)
3117 +static bool __init kexec_free_initrd(void)
3118 {
3119 unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
3120 unsigned long crashk_end = (unsigned long)__va(crashk_res.end);
3121 diff --git a/init/main.c b/init/main.c
3122 index 5cbb9fe937e0..8c7d6b8ee6bd 100644
3123 --- a/init/main.c
3124 +++ b/init/main.c
3125 @@ -782,6 +782,8 @@ asmlinkage __visible void __init start_kernel(void)
3126
3127 /* Do the rest non-__init'ed, we're now alive */
3128 arch_call_rest_init();
3129 +
3130 + prevent_tail_call_optimization();
3131 }
3132
3133 /* Call all constructor functions linked into the kernel. */
3134 diff --git a/ipc/util.c b/ipc/util.c
3135 index 594871610d45..1821b6386d3b 100644
3136 --- a/ipc/util.c
3137 +++ b/ipc/util.c
3138 @@ -764,21 +764,21 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
3139 total++;
3140 }
3141
3142 - *new_pos = pos + 1;
3143 + ipc = NULL;
3144 if (total >= ids->in_use)
3145 - return NULL;
3146 + goto out;
3147
3148 for (; pos < ipc_mni; pos++) {
3149 ipc = idr_find(&ids->ipcs_idr, pos);
3150 if (ipc != NULL) {
3151 rcu_read_lock();
3152 ipc_lock_object(ipc);
3153 - return ipc;
3154 + break;
3155 }
3156 }
3157 -
3158 - /* Out of range - return NULL to terminate iteration */
3159 - return NULL;
3160 +out:
3161 + *new_pos = pos + 1;
3162 + return ipc;
3163 }
3164
3165 static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
3166 diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
3167 index 14f4a76b44d5..946cfdd3b2cc 100644
3168 --- a/kernel/bpf/syscall.c
3169 +++ b/kernel/bpf/syscall.c
3170 @@ -1146,8 +1146,10 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
3171 if (err)
3172 goto free_value;
3173
3174 - if (copy_to_user(uvalue, value, value_size) != 0)
3175 + if (copy_to_user(uvalue, value, value_size) != 0) {
3176 + err = -EFAULT;
3177 goto free_value;
3178 + }
3179
3180 err = 0;
3181
3182 diff --git a/kernel/fork.c b/kernel/fork.c
3183 index 27c0ef30002e..9180f4416dba 100644
3184 --- a/kernel/fork.c
3185 +++ b/kernel/fork.c
3186 @@ -2412,11 +2412,11 @@ long do_fork(unsigned long clone_flags,
3187 int __user *child_tidptr)
3188 {
3189 struct kernel_clone_args args = {
3190 - .flags = (clone_flags & ~CSIGNAL),
3191 + .flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
3192 .pidfd = parent_tidptr,
3193 .child_tid = child_tidptr,
3194 .parent_tid = parent_tidptr,
3195 - .exit_signal = (clone_flags & CSIGNAL),
3196 + .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
3197 .stack = stack_start,
3198 .stack_size = stack_size,
3199 };
3200 @@ -2434,8 +2434,9 @@ long do_fork(unsigned long clone_flags,
3201 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
3202 {
3203 struct kernel_clone_args args = {
3204 - .flags = ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL),
3205 - .exit_signal = (flags & CSIGNAL),
3206 + .flags = ((lower_32_bits(flags) | CLONE_VM |
3207 + CLONE_UNTRACED) & ~CSIGNAL),
3208 + .exit_signal = (lower_32_bits(flags) & CSIGNAL),
3209 .stack = (unsigned long)fn,
3210 .stack_size = (unsigned long)arg,
3211 };
3212 @@ -2496,11 +2497,11 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
3213 #endif
3214 {
3215 struct kernel_clone_args args = {
3216 - .flags = (clone_flags & ~CSIGNAL),
3217 + .flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
3218 .pidfd = parent_tidptr,
3219 .child_tid = child_tidptr,
3220 .parent_tid = parent_tidptr,
3221 - .exit_signal = (clone_flags & CSIGNAL),
3222 + .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
3223 .stack = newsp,
3224 .tls = tls,
3225 };
3226 diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
3227 index e08527f50d2a..f3f2fc8ad81a 100644
3228 --- a/kernel/trace/Kconfig
3229 +++ b/kernel/trace/Kconfig
3230 @@ -371,7 +371,6 @@ config PROFILE_ANNOTATED_BRANCHES
3231 config PROFILE_ALL_BRANCHES
3232 bool "Profile all if conditionals" if !FORTIFY_SOURCE
3233 select TRACE_BRANCH_PROFILING
3234 - imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives
3235 help
3236 This tracer profiles all branch conditions. Every if ()
3237 taken in the kernel is recorded whether it hit or miss.
3238 diff --git a/kernel/umh.c b/kernel/umh.c
3239 index 11bf5eea474c..3474d6aa55d8 100644
3240 --- a/kernel/umh.c
3241 +++ b/kernel/umh.c
3242 @@ -475,6 +475,12 @@ static void umh_clean_and_save_pid(struct subprocess_info *info)
3243 {
3244 struct umh_info *umh_info = info->data;
3245
3246 + /* cleanup if umh_pipe_setup() was successful but exec failed */
3247 + if (info->pid && info->retval) {
3248 + fput(umh_info->pipe_to_umh);
3249 + fput(umh_info->pipe_from_umh);
3250 + }
3251 +
3252 argv_free(info->argv);
3253 umh_info->pid = info->pid;
3254 }
3255 diff --git a/mm/shmem.c b/mm/shmem.c
3256 index e71b15da1985..98802ca76a5c 100644
3257 --- a/mm/shmem.c
3258 +++ b/mm/shmem.c
3259 @@ -2183,7 +2183,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
3260 struct shmem_inode_info *info = SHMEM_I(inode);
3261 int retval = -ENOMEM;
3262
3263 - spin_lock_irq(&info->lock);
3264 + /*
3265 + * What serializes the accesses to info->flags?
3266 + * ipc_lock_object() when called from shmctl_do_lock(),
3267 + * no serialization needed when called from shm_destroy().
3268 + */
3269 if (lock && !(info->flags & VM_LOCKED)) {
3270 if (!user_shm_lock(inode->i_size, user))
3271 goto out_nomem;
3272 @@ -2198,7 +2202,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
3273 retval = 0;
3274
3275 out_nomem:
3276 - spin_unlock_irq(&info->lock);
3277 return retval;
3278 }
3279
3280 diff --git a/net/core/dev.c b/net/core/dev.c
3281 index 8ad1e8f00958..120b994af31c 100644
3282 --- a/net/core/dev.c
3283 +++ b/net/core/dev.c
3284 @@ -8595,11 +8595,13 @@ static void netdev_sync_lower_features(struct net_device *upper,
3285 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
3286 &feature, lower->name);
3287 lower->wanted_features &= ~feature;
3288 - netdev_update_features(lower);
3289 + __netdev_update_features(lower);
3290
3291 if (unlikely(lower->features & feature))
3292 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
3293 &feature, lower->name);
3294 + else
3295 + netdev_features_change(lower);
3296 }
3297 }
3298 }
3299 diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
3300 index 246a258b1fac..af0130039f37 100644
3301 --- a/net/core/drop_monitor.c
3302 +++ b/net/core/drop_monitor.c
3303 @@ -212,6 +212,7 @@ static void sched_send_work(struct timer_list *t)
3304 static void trace_drop_common(struct sk_buff *skb, void *location)
3305 {
3306 struct net_dm_alert_msg *msg;
3307 + struct net_dm_drop_point *point;
3308 struct nlmsghdr *nlh;
3309 struct nlattr *nla;
3310 int i;
3311 @@ -230,11 +231,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
3312 nlh = (struct nlmsghdr *)dskb->data;
3313 nla = genlmsg_data(nlmsg_data(nlh));
3314 msg = nla_data(nla);
3315 + point = msg->points;
3316 for (i = 0; i < msg->entries; i++) {
3317 - if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
3318 - msg->points[i].count++;
3319 + if (!memcmp(&location, &point->pc, sizeof(void *))) {
3320 + point->count++;
3321 goto out;
3322 }
3323 + point++;
3324 }
3325 if (msg->entries == dm_hit_limit)
3326 goto out;
3327 @@ -243,8 +246,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
3328 */
3329 __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
3330 nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
3331 - memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
3332 - msg->points[msg->entries].count = 1;
3333 + memcpy(point->pc, &location, sizeof(void *));
3334 + point->count = 1;
3335 msg->entries++;
3336
3337 if (!timer_pending(&data->send_timer)) {
3338 diff --git a/net/core/filter.c b/net/core/filter.c
3339 index d59dbc88fef5..f1f2304822e3 100644
3340 --- a/net/core/filter.c
3341 +++ b/net/core/filter.c
3342 @@ -2590,8 +2590,8 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
3343 }
3344 pop = 0;
3345 } else if (pop >= sge->length - a) {
3346 - sge->length = a;
3347 pop -= (sge->length - a);
3348 + sge->length = a;
3349 }
3350 }
3351
3352 diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
3353 index 256b7954b720..8618242c677a 100644
3354 --- a/net/core/netprio_cgroup.c
3355 +++ b/net/core/netprio_cgroup.c
3356 @@ -236,6 +236,8 @@ static void net_prio_attach(struct cgroup_taskset *tset)
3357 struct task_struct *p;
3358 struct cgroup_subsys_state *css;
3359
3360 + cgroup_sk_alloc_disable();
3361 +
3362 cgroup_taskset_for_each(p, css, tset) {
3363 void *v = (void *)(unsigned long)css->cgroup->id;
3364
3365 diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
3366 index 716d265ba8ca..0f7f38c29579 100644
3367 --- a/net/dsa/dsa2.c
3368 +++ b/net/dsa/dsa2.c
3369 @@ -461,18 +461,12 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
3370
3371 err = dsa_port_setup(dp);
3372 if (err)
3373 - goto ports_teardown;
3374 + continue;
3375 }
3376 }
3377
3378 return 0;
3379
3380 -ports_teardown:
3381 - for (i = 0; i < port; i++)
3382 - dsa_port_teardown(&ds->ports[i]);
3383 -
3384 - dsa_switch_teardown(ds);
3385 -
3386 switch_teardown:
3387 for (i = 0; i < device; i++) {
3388 ds = dst->ds[i];
3389 diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
3390 index 0bd10a1f477f..a23094b050f8 100644
3391 --- a/net/ipv4/cipso_ipv4.c
3392 +++ b/net/ipv4/cipso_ipv4.c
3393 @@ -1258,7 +1258,8 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
3394 return ret_val;
3395 }
3396
3397 - secattr->flags |= NETLBL_SECATTR_MLS_CAT;
3398 + if (secattr->attr.mls.cat)
3399 + secattr->flags |= NETLBL_SECATTR_MLS_CAT;
3400 }
3401
3402 return 0;
3403 @@ -1439,7 +1440,8 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
3404 return ret_val;
3405 }
3406
3407 - secattr->flags |= NETLBL_SECATTR_MLS_CAT;
3408 + if (secattr->attr.mls.cat)
3409 + secattr->flags |= NETLBL_SECATTR_MLS_CAT;
3410 }
3411
3412 return 0;
3413 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3414 index fe34e9e0912a..558ddf7ab395 100644
3415 --- a/net/ipv4/route.c
3416 +++ b/net/ipv4/route.c
3417 @@ -914,7 +914,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
3418 /* Check for load limit; set rate_last to the latest sent
3419 * redirect.
3420 */
3421 - if (peer->rate_tokens == 0 ||
3422 + if (peer->n_redirects == 0 ||
3423 time_after(jiffies,
3424 (peer->rate_last +
3425 (ip_rt_redirect_load << peer->n_redirects)))) {
3426 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
3427 index e378ff17f8c6..fe3cdeddd097 100644
3428 --- a/net/ipv4/tcp.c
3429 +++ b/net/ipv4/tcp.c
3430 @@ -477,9 +477,17 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
3431 static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
3432 int target, struct sock *sk)
3433 {
3434 - return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
3435 - (sk->sk_prot->stream_memory_read ?
3436 - sk->sk_prot->stream_memory_read(sk) : false);
3437 + int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
3438 +
3439 + if (avail > 0) {
3440 + if (avail >= target)
3441 + return true;
3442 + if (tcp_rmem_pressure(sk))
3443 + return true;
3444 + }
3445 + if (sk->sk_prot->stream_memory_read)
3446 + return sk->sk_prot->stream_memory_read(sk);
3447 + return false;
3448 }
3449
3450 /*
3451 @@ -1757,10 +1765,11 @@ static int tcp_zerocopy_receive(struct sock *sk,
3452
3453 down_read(&current->mm->mmap_sem);
3454
3455 - ret = -EINVAL;
3456 vma = find_vma(current->mm, address);
3457 - if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops)
3458 - goto out;
3459 + if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) {
3460 + up_read(&current->mm->mmap_sem);
3461 + return -EINVAL;
3462 + }
3463 zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
3464
3465 tp = tcp_sk(sk);
3466 @@ -2149,13 +2158,15 @@ skip_copy:
3467 tp->urg_data = 0;
3468 tcp_fast_path_check(sk);
3469 }
3470 - if (used + offset < skb->len)
3471 - continue;
3472
3473 if (TCP_SKB_CB(skb)->has_rxtstamp) {
3474 tcp_update_recv_tstamps(skb, &tss);
3475 cmsg_flags |= 2;
3476 }
3477 +
3478 + if (used + offset < skb->len)
3479 + continue;
3480 +
3481 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
3482 goto found_fin_ok;
3483 if (!(flags & MSG_PEEK))
3484 diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
3485 index 8a01428f80c1..69b025408390 100644
3486 --- a/net/ipv4/tcp_bpf.c
3487 +++ b/net/ipv4/tcp_bpf.c
3488 @@ -121,14 +121,17 @@ int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
3489 struct sk_psock *psock;
3490 int copied, ret;
3491
3492 + if (unlikely(flags & MSG_ERRQUEUE))
3493 + return inet_recv_error(sk, msg, len, addr_len);
3494 +
3495 psock = sk_psock_get(sk);
3496 if (unlikely(!psock))
3497 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
3498 - if (unlikely(flags & MSG_ERRQUEUE))
3499 - return inet_recv_error(sk, msg, len, addr_len);
3500 if (!skb_queue_empty(&sk->sk_receive_queue) &&
3501 - sk_psock_queue_empty(psock))
3502 + sk_psock_queue_empty(psock)) {
3503 + sk_psock_put(sk, psock);
3504 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
3505 + }
3506 lock_sock(sk);
3507 msg_bytes_ready:
3508 copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
3509 @@ -200,7 +203,6 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
3510
3511 if (!ret) {
3512 msg->sg.start = i;
3513 - msg->sg.size -= apply_bytes;
3514 sk_psock_queue_msg(psock, tmp);
3515 sk_psock_data_ready(sk, psock);
3516 } else {
3517 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3518 index 5af22c9712a6..677facbeed26 100644
3519 --- a/net/ipv4/tcp_input.c
3520 +++ b/net/ipv4/tcp_input.c
3521 @@ -4751,7 +4751,8 @@ void tcp_data_ready(struct sock *sk)
3522 const struct tcp_sock *tp = tcp_sk(sk);
3523 int avail = tp->rcv_nxt - tp->copied_seq;
3524
3525 - if (avail < sk->sk_rcvlowat && !sock_flag(sk, SOCK_DONE))
3526 + if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
3527 + !sock_flag(sk, SOCK_DONE))
3528 return;
3529
3530 sk->sk_data_ready(sk);
3531 diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
3532 index 221c81f85cbf..8d3f66c310db 100644
3533 --- a/net/ipv6/calipso.c
3534 +++ b/net/ipv6/calipso.c
3535 @@ -1047,7 +1047,8 @@ static int calipso_opt_getattr(const unsigned char *calipso,
3536 goto getattr_return;
3537 }
3538
3539 - secattr->flags |= NETLBL_SECATTR_MLS_CAT;
3540 + if (secattr->attr.mls.cat)
3541 + secattr->flags |= NETLBL_SECATTR_MLS_CAT;
3542 }
3543
3544 secattr->type = NETLBL_NLTYPE_CALIPSO;
3545 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3546 index c81d8e9e5169..3b4af0a8bca6 100644
3547 --- a/net/ipv6/route.c
3548 +++ b/net/ipv6/route.c
3549 @@ -2728,8 +2728,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
3550 const struct in6_addr *daddr, *saddr;
3551 struct rt6_info *rt6 = (struct rt6_info *)dst;
3552
3553 - if (dst_metric_locked(dst, RTAX_MTU))
3554 - return;
3555 + /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
3556 + * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
3557 + * [see also comment in rt6_mtu_change_route()]
3558 + */
3559
3560 if (iph) {
3561 daddr = &iph->daddr;
3562 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
3563 index 5cd610b547e0..c2ad462f33f1 100644
3564 --- a/net/netfilter/nf_conntrack_core.c
3565 +++ b/net/netfilter/nf_conntrack_core.c
3566 @@ -1381,9 +1381,9 @@ __nf_conntrack_alloc(struct net *net,
3567 ct->status = 0;
3568 ct->timeout = 0;
3569 write_pnet(&ct->ct_net, net);
3570 - memset(&ct->__nfct_init_offset[0], 0,
3571 + memset(&ct->__nfct_init_offset, 0,
3572 offsetof(struct nf_conn, proto) -
3573 - offsetof(struct nf_conn, __nfct_init_offset[0]));
3574 + offsetof(struct nf_conn, __nfct_init_offset));
3575
3576 nf_ct_zone_add(ct, zone);
3577
3578 diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
3579 index a9f804f7a04a..ee7c29e0a9d7 100644
3580 --- a/net/netfilter/nft_set_rbtree.c
3581 +++ b/net/netfilter/nft_set_rbtree.c
3582 @@ -33,6 +33,11 @@ static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
3583 (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
3584 }
3585
3586 +static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
3587 +{
3588 + return !nft_rbtree_interval_end(rbe);
3589 +}
3590 +
3591 static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
3592 const struct nft_rbtree_elem *interval)
3593 {
3594 @@ -64,7 +69,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
3595 if (interval &&
3596 nft_rbtree_equal(set, this, interval) &&
3597 nft_rbtree_interval_end(rbe) &&
3598 - !nft_rbtree_interval_end(interval))
3599 + nft_rbtree_interval_start(interval))
3600 continue;
3601 interval = rbe;
3602 } else if (d > 0)
3603 @@ -74,6 +79,10 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
3604 parent = rcu_dereference_raw(parent->rb_left);
3605 continue;
3606 }
3607 +
3608 + if (nft_set_elem_expired(&rbe->ext))
3609 + return false;
3610 +
3611 if (nft_rbtree_interval_end(rbe)) {
3612 if (nft_set_is_anonymous(set))
3613 return false;
3614 @@ -89,7 +98,8 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
3615
3616 if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
3617 nft_set_elem_active(&interval->ext, genmask) &&
3618 - !nft_rbtree_interval_end(interval)) {
3619 + !nft_set_elem_expired(&interval->ext) &&
3620 + nft_rbtree_interval_start(interval)) {
3621 *ext = &interval->ext;
3622 return true;
3623 }
3624 @@ -149,6 +159,9 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
3625 continue;
3626 }
3627
3628 + if (nft_set_elem_expired(&rbe->ext))
3629 + return false;
3630 +
3631 if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
3632 (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
3633 (flags & NFT_SET_ELEM_INTERVAL_END)) {
3634 @@ -165,6 +178,7 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
3635
3636 if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
3637 nft_set_elem_active(&interval->ext, genmask) &&
3638 + !nft_set_elem_expired(&interval->ext) &&
3639 ((!nft_rbtree_interval_end(interval) &&
3640 !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
3641 (nft_rbtree_interval_end(interval) &&
3642 @@ -224,9 +238,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
3643 p = &parent->rb_right;
3644 else {
3645 if (nft_rbtree_interval_end(rbe) &&
3646 - !nft_rbtree_interval_end(new)) {
3647 + nft_rbtree_interval_start(new)) {
3648 p = &parent->rb_left;
3649 - } else if (!nft_rbtree_interval_end(rbe) &&
3650 + } else if (nft_rbtree_interval_start(rbe) &&
3651 nft_rbtree_interval_end(new)) {
3652 p = &parent->rb_right;
3653 } else if (nft_set_elem_active(&rbe->ext, genmask)) {
3654 @@ -317,10 +331,10 @@ static void *nft_rbtree_deactivate(const struct net *net,
3655 parent = parent->rb_right;
3656 else {
3657 if (nft_rbtree_interval_end(rbe) &&
3658 - !nft_rbtree_interval_end(this)) {
3659 + nft_rbtree_interval_start(this)) {
3660 parent = parent->rb_left;
3661 continue;
3662 - } else if (!nft_rbtree_interval_end(rbe) &&
3663 + } else if (nft_rbtree_interval_start(rbe) &&
3664 nft_rbtree_interval_end(this)) {
3665 parent = parent->rb_right;
3666 continue;
3667 @@ -350,6 +364,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
3668
3669 if (iter->count < iter->skip)
3670 goto cont;
3671 + if (nft_set_elem_expired(&rbe->ext))
3672 + goto cont;
3673 if (!nft_set_elem_active(&rbe->ext, iter->genmask))
3674 goto cont;
3675
3676 diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
3677 index 409a3ae47ce2..5e1239cef000 100644
3678 --- a/net/netlabel/netlabel_kapi.c
3679 +++ b/net/netlabel/netlabel_kapi.c
3680 @@ -734,6 +734,12 @@ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap,
3681 if ((off & (BITS_PER_LONG - 1)) != 0)
3682 return -EINVAL;
3683
3684 + /* a null catmap is equivalent to an empty one */
3685 + if (!catmap) {
3686 + *offset = (u32)-1;
3687 + return 0;
3688 + }
3689 +
3690 if (off < catmap->startbit) {
3691 off = catmap->startbit;
3692 *offset = off;
3693 diff --git a/net/rds/message.c b/net/rds/message.c
3694 index 50f13f1d4ae0..2d43e13d6dd5 100644
3695 --- a/net/rds/message.c
3696 +++ b/net/rds/message.c
3697 @@ -308,26 +308,20 @@ out:
3698 /*
3699 * RDS ops use this to grab SG entries from the rm's sg pool.
3700 */
3701 -struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
3702 - int *ret)
3703 +struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
3704 {
3705 struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
3706 struct scatterlist *sg_ret;
3707
3708 - if (WARN_ON(!ret))
3709 - return NULL;
3710 -
3711 if (nents <= 0) {
3712 pr_warn("rds: alloc sgs failed! nents <= 0\n");
3713 - *ret = -EINVAL;
3714 - return NULL;
3715 + return ERR_PTR(-EINVAL);
3716 }
3717
3718 if (rm->m_used_sgs + nents > rm->m_total_sgs) {
3719 pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
3720 rm->m_total_sgs, rm->m_used_sgs, nents);
3721 - *ret = -ENOMEM;
3722 - return NULL;
3723 + return ERR_PTR(-ENOMEM);
3724 }
3725
3726 sg_ret = &sg_first[rm->m_used_sgs];
3727 @@ -343,7 +337,6 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
3728 unsigned int i;
3729 int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
3730 int extra_bytes = num_sgs * sizeof(struct scatterlist);
3731 - int ret;
3732
3733 rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
3734 if (!rm)
3735 @@ -352,10 +345,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
3736 set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
3737 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
3738 rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
3739 - rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
3740 - if (!rm->data.op_sg) {
3741 + rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
3742 + if (IS_ERR(rm->data.op_sg)) {
3743 rds_message_put(rm);
3744 - return ERR_PTR(ret);
3745 + return ERR_CAST(rm->data.op_sg);
3746 }
3747
3748 for (i = 0; i < rm->data.op_nents; ++i) {
3749 diff --git a/net/rds/rdma.c b/net/rds/rdma.c
3750 index 916f5ec373d8..8e10f954a22f 100644
3751 --- a/net/rds/rdma.c
3752 +++ b/net/rds/rdma.c
3753 @@ -624,9 +624,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
3754 op->op_active = 1;
3755 op->op_recverr = rs->rs_recverr;
3756 WARN_ON(!nr_pages);
3757 - op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
3758 - if (!op->op_sg)
3759 + op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
3760 + if (IS_ERR(op->op_sg)) {
3761 + ret = PTR_ERR(op->op_sg);
3762 goto out_pages;
3763 + }
3764
3765 if (op->op_notify || op->op_recverr) {
3766 /* We allocate an uninitialized notifier here, because
3767 @@ -828,9 +830,11 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
3768 rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
3769 rm->atomic.op_active = 1;
3770 rm->atomic.op_recverr = rs->rs_recverr;
3771 - rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
3772 - if (!rm->atomic.op_sg)
3773 + rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
3774 + if (IS_ERR(rm->atomic.op_sg)) {
3775 + ret = PTR_ERR(rm->atomic.op_sg);
3776 goto err;
3777 + }
3778
3779 /* verify 8 byte-aligned */
3780 if (args->local_addr & 0x7) {
3781 diff --git a/net/rds/rds.h b/net/rds/rds.h
3782 index 53e86911773a..2ac5b5e55901 100644
3783 --- a/net/rds/rds.h
3784 +++ b/net/rds/rds.h
3785 @@ -849,8 +849,7 @@ rds_conn_connecting(struct rds_connection *conn)
3786
3787 /* message.c */
3788 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
3789 -struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
3790 - int *ret);
3791 +struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
3792 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
3793 bool zcopy);
3794 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
3795 diff --git a/net/rds/send.c b/net/rds/send.c
3796 index 82dcd8b84fe7..68e2bdb08fd0 100644
3797 --- a/net/rds/send.c
3798 +++ b/net/rds/send.c
3799 @@ -1274,9 +1274,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
3800
3801 /* Attach data to the rm */
3802 if (payload_len) {
3803 - rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
3804 - if (!rm->data.op_sg)
3805 + rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
3806 + if (IS_ERR(rm->data.op_sg)) {
3807 + ret = PTR_ERR(rm->data.op_sg);
3808 goto out;
3809 + }
3810 ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
3811 if (ret)
3812 goto out;
3813 diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
3814 index c2cdd0fc2e70..68c8fc6f535c 100644
3815 --- a/net/sched/cls_api.c
3816 +++ b/net/sched/cls_api.c
3817 @@ -2005,6 +2005,7 @@ replay:
3818 err = PTR_ERR(block);
3819 goto errout;
3820 }
3821 + block->classid = parent;
3822
3823 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
3824 if (chain_index > TC_ACT_EXT_VAL_MASK) {
3825 @@ -2547,12 +2548,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
3826 return skb->len;
3827
3828 parent = tcm->tcm_parent;
3829 - if (!parent) {
3830 + if (!parent)
3831 q = dev->qdisc;
3832 - parent = q->handle;
3833 - } else {
3834 + else
3835 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3836 - }
3837 if (!q)
3838 goto out;
3839 cops = q->ops->cl_ops;
3840 @@ -2568,6 +2567,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
3841 block = cops->tcf_block(q, cl, NULL);
3842 if (!block)
3843 goto out;
3844 + parent = block->classid;
3845 if (tcf_block_shared(block))
3846 q = NULL;
3847 }
3848 diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
3849 index ff5fcb3e1208..5fc6c028f89c 100644
3850 --- a/net/sunrpc/auth_gss/auth_gss.c
3851 +++ b/net/sunrpc/auth_gss/auth_gss.c
3852 @@ -2030,7 +2030,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
3853 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
3854 struct kvec *head = rqstp->rq_rcv_buf.head;
3855 struct rpc_auth *auth = cred->cr_auth;
3856 - unsigned int savedlen = rcv_buf->len;
3857 u32 offset, opaque_len, maj_stat;
3858 __be32 *p;
3859
3860 @@ -2041,9 +2040,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
3861 offset = (u8 *)(p) - (u8 *)head->iov_base;
3862 if (offset + opaque_len > rcv_buf->len)
3863 goto unwrap_failed;
3864 - rcv_buf->len = offset + opaque_len;
3865
3866 - maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
3867 + maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset,
3868 + offset + opaque_len, rcv_buf);
3869 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
3870 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
3871 if (maj_stat != GSS_S_COMPLETE)
3872 @@ -2057,10 +2056,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
3873 */
3874 xdr_init_decode(xdr, rcv_buf, p, rqstp);
3875
3876 - auth->au_rslack = auth->au_verfsize + 2 +
3877 - XDR_QUADLEN(savedlen - rcv_buf->len);
3878 - auth->au_ralign = auth->au_verfsize + 2 +
3879 - XDR_QUADLEN(savedlen - rcv_buf->len);
3880 + auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack;
3881 + auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align;
3882 +
3883 return 0;
3884 unwrap_failed:
3885 trace_rpcgss_unwrap_failed(task);
3886 diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
3887 index 6f2d30d7b766..e7180da1fc6a 100644
3888 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
3889 +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
3890 @@ -851,8 +851,8 @@ out_err:
3891 }
3892
3893 u32
3894 -gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
3895 - u32 *headskip, u32 *tailskip)
3896 +gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
3897 + struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
3898 {
3899 struct xdr_buf subbuf;
3900 u32 ret = 0;
3901 @@ -881,7 +881,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
3902
3903 /* create a segment skipping the header and leaving out the checksum */
3904 xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
3905 - (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
3906 + (len - offset - GSS_KRB5_TOK_HDR_LEN -
3907 kctx->gk5e->cksumlength));
3908
3909 nblocks = (subbuf.len + blocksize - 1) / blocksize;
3910 @@ -926,7 +926,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
3911 goto out_err;
3912
3913 /* Get the packet's hmac value */
3914 - ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
3915 + ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
3916 pkt_hmac, kctx->gk5e->cksumlength);
3917 if (ret)
3918 goto out_err;
3919 diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
3920 index 14a0aff0cd84..683755d95075 100644
3921 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
3922 +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
3923 @@ -261,7 +261,9 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
3924 }
3925
3926 static u32
3927 -gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
3928 +gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
3929 + struct xdr_buf *buf, unsigned int *slack,
3930 + unsigned int *align)
3931 {
3932 int signalg;
3933 int sealalg;
3934 @@ -279,12 +281,13 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
3935 u32 conflen = kctx->gk5e->conflen;
3936 int crypt_offset;
3937 u8 *cksumkey;
3938 + unsigned int saved_len = buf->len;
3939
3940 dprintk("RPC: gss_unwrap_kerberos\n");
3941
3942 ptr = (u8 *)buf->head[0].iov_base + offset;
3943 if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
3944 - buf->len - offset))
3945 + len - offset))
3946 return GSS_S_DEFECTIVE_TOKEN;
3947
3948 if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
3949 @@ -324,6 +327,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
3950 (!kctx->initiate && direction != 0))
3951 return GSS_S_BAD_SIG;
3952
3953 + buf->len = len;
3954 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
3955 struct crypto_sync_skcipher *cipher;
3956 int err;
3957 @@ -376,11 +380,15 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
3958 data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
3959 memmove(orig_start, data_start, data_len);
3960 buf->head[0].iov_len -= (data_start - orig_start);
3961 - buf->len -= (data_start - orig_start);
3962 + buf->len = len - (data_start - orig_start);
3963
3964 if (gss_krb5_remove_padding(buf, blocksize))
3965 return GSS_S_DEFECTIVE_TOKEN;
3966
3967 + /* slack must include room for krb5 padding */
3968 + *slack = XDR_QUADLEN(saved_len - buf->len);
3969 + /* The GSS blob always precedes the RPC message payload */
3970 + *align = *slack;
3971 return GSS_S_COMPLETE;
3972 }
3973
3974 @@ -486,7 +494,9 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
3975 }
3976
3977 static u32
3978 -gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
3979 +gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
3980 + struct xdr_buf *buf, unsigned int *slack,
3981 + unsigned int *align)
3982 {
3983 s32 now;
3984 u8 *ptr;
3985 @@ -532,7 +542,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
3986 if (rrc != 0)
3987 rotate_left(offset + 16, buf, rrc);
3988
3989 - err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
3990 + err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf,
3991 &headskip, &tailskip);
3992 if (err)
3993 return GSS_S_FAILURE;
3994 @@ -542,7 +552,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
3995 * it against the original
3996 */
3997 err = read_bytes_from_xdr_buf(buf,
3998 - buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
3999 + len - GSS_KRB5_TOK_HDR_LEN - tailskip,
4000 decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
4001 if (err) {
4002 dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
4003 @@ -568,18 +578,19 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
4004 * Note that buf->head[0].iov_len may indicate the available
4005 * head buffer space rather than that actually occupied.
4006 */
4007 - movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
4008 + movelen = min_t(unsigned int, buf->head[0].iov_len, len);
4009 movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
4010 - if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
4011 - buf->head[0].iov_len)
4012 - return GSS_S_FAILURE;
4013 + BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
4014 + buf->head[0].iov_len);
4015 memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
4016 buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
4017 - buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
4018 + buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip;
4019
4020 /* Trim off the trailing "extra count" and checksum blob */
4021 - buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
4022 + xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
4023
4024 + *align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
4025 + *slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
4026 return GSS_S_COMPLETE;
4027 }
4028
4029 @@ -603,7 +614,8 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
4030 }
4031
4032 u32
4033 -gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
4034 +gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
4035 + int len, struct xdr_buf *buf)
4036 {
4037 struct krb5_ctx *kctx = gctx->internal_ctx_id;
4038
4039 @@ -613,9 +625,11 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
4040 case ENCTYPE_DES_CBC_RAW:
4041 case ENCTYPE_DES3_CBC_RAW:
4042 case ENCTYPE_ARCFOUR_HMAC:
4043 - return gss_unwrap_kerberos_v1(kctx, offset, buf);
4044 + return gss_unwrap_kerberos_v1(kctx, offset, len, buf,
4045 + &gctx->slack, &gctx->align);
4046 case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
4047 case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
4048 - return gss_unwrap_kerberos_v2(kctx, offset, buf);
4049 + return gss_unwrap_kerberos_v2(kctx, offset, len, buf,
4050 + &gctx->slack, &gctx->align);
4051 }
4052 }
4053 diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
4054 index 82060099a429..8fa924c8e282 100644
4055 --- a/net/sunrpc/auth_gss/gss_mech_switch.c
4056 +++ b/net/sunrpc/auth_gss/gss_mech_switch.c
4057 @@ -438,10 +438,11 @@ gss_wrap(struct gss_ctx *ctx_id,
4058 u32
4059 gss_unwrap(struct gss_ctx *ctx_id,
4060 int offset,
4061 + int len,
4062 struct xdr_buf *buf)
4063 {
4064 return ctx_id->mech_type->gm_ops
4065 - ->gss_unwrap(ctx_id, offset, buf);
4066 + ->gss_unwrap(ctx_id, offset, len, buf);
4067 }
4068
4069
4070 diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
4071 index ed20fa8a6f70..d9f7439e2431 100644
4072 --- a/net/sunrpc/auth_gss/svcauth_gss.c
4073 +++ b/net/sunrpc/auth_gss/svcauth_gss.c
4074 @@ -897,7 +897,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
4075 if (svc_getnl(&buf->head[0]) != seq)
4076 goto out;
4077 /* trim off the mic and padding at the end before returning */
4078 - buf->len -= 4 + round_up_to_quad(mic.len);
4079 + xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
4080 stat = 0;
4081 out:
4082 kfree(mic.data);
4083 @@ -925,7 +925,7 @@ static int
4084 unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
4085 {
4086 u32 priv_len, maj_stat;
4087 - int pad, saved_len, remaining_len, offset;
4088 + int pad, remaining_len, offset;
4089
4090 clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
4091
4092 @@ -945,12 +945,8 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
4093 buf->len -= pad;
4094 fix_priv_head(buf, pad);
4095
4096 - /* Maybe it would be better to give gss_unwrap a length parameter: */
4097 - saved_len = buf->len;
4098 - buf->len = priv_len;
4099 - maj_stat = gss_unwrap(ctx, 0, buf);
4100 + maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
4101 pad = priv_len - buf->len;
4102 - buf->len = saved_len;
4103 buf->len -= pad;
4104 /* The upper layers assume the buffer is aligned on 4-byte boundaries.
4105 * In the krb5p case, at least, the data ends up offset, so we need to
4106 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
4107 index f7f78566be46..f1088ca39d44 100644
4108 --- a/net/sunrpc/clnt.c
4109 +++ b/net/sunrpc/clnt.c
4110 @@ -2422,6 +2422,11 @@ rpc_check_timeout(struct rpc_task *task)
4111 {
4112 struct rpc_clnt *clnt = task->tk_client;
4113
4114 + if (RPC_SIGNALLED(task)) {
4115 + rpc_call_rpcerror(task, -ERESTARTSYS);
4116 + return;
4117 + }
4118 +
4119 if (xprt_adjust_timeout(task->tk_rqstp) == 0)
4120 return;
4121
4122 diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
4123 index f3104be8ff5d..451ca7ec321c 100644
4124 --- a/net/sunrpc/xdr.c
4125 +++ b/net/sunrpc/xdr.c
4126 @@ -1150,6 +1150,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
4127 }
4128 EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
4129
4130 +/**
4131 + * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
4132 + * @buf: buf to be trimmed
4133 + * @len: number of bytes to reduce "buf" by
4134 + *
4135 + * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
4136 + * that it's possible that we'll trim less than that amount if the xdr_buf is
4137 + * too small, or if (for instance) it's all in the head and the parser has
4138 + * already read too far into it.
4139 + */
4140 +void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
4141 +{
4142 + size_t cur;
4143 + unsigned int trim = len;
4144 +
4145 + if (buf->tail[0].iov_len) {
4146 + cur = min_t(size_t, buf->tail[0].iov_len, trim);
4147 + buf->tail[0].iov_len -= cur;
4148 + trim -= cur;
4149 + if (!trim)
4150 + goto fix_len;
4151 + }
4152 +
4153 + if (buf->page_len) {
4154 + cur = min_t(unsigned int, buf->page_len, trim);
4155 + buf->page_len -= cur;
4156 + trim -= cur;
4157 + if (!trim)
4158 + goto fix_len;
4159 + }
4160 +
4161 + if (buf->head[0].iov_len) {
4162 + cur = min_t(size_t, buf->head[0].iov_len, trim);
4163 + buf->head[0].iov_len -= cur;
4164 + trim -= cur;
4165 + }
4166 +fix_len:
4167 + buf->len -= (len - trim);
4168 +}
4169 +EXPORT_SYMBOL_GPL(xdr_buf_trim);
4170 +
4171 static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
4172 {
4173 unsigned int this_len;
4174 diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
4175 index 8a12a7538d63..94db4683cfaf 100644
4176 --- a/sound/core/rawmidi.c
4177 +++ b/sound/core/rawmidi.c
4178 @@ -97,6 +97,17 @@ static void snd_rawmidi_input_event_work(struct work_struct *work)
4179 runtime->event(runtime->substream);
4180 }
4181
4182 +/* buffer refcount management: call with runtime->lock held */
4183 +static inline void snd_rawmidi_buffer_ref(struct snd_rawmidi_runtime *runtime)
4184 +{
4185 + runtime->buffer_ref++;
4186 +}
4187 +
4188 +static inline void snd_rawmidi_buffer_unref(struct snd_rawmidi_runtime *runtime)
4189 +{
4190 + runtime->buffer_ref--;
4191 +}
4192 +
4193 static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
4194 {
4195 struct snd_rawmidi_runtime *runtime;
4196 @@ -646,6 +657,11 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
4197 if (!newbuf)
4198 return -ENOMEM;
4199 spin_lock_irq(&runtime->lock);
4200 + if (runtime->buffer_ref) {
4201 + spin_unlock_irq(&runtime->lock);
4202 + kvfree(newbuf);
4203 + return -EBUSY;
4204 + }
4205 oldbuf = runtime->buffer;
4206 runtime->buffer = newbuf;
4207 runtime->buffer_size = params->buffer_size;
4208 @@ -945,8 +961,10 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
4209 long result = 0, count1;
4210 struct snd_rawmidi_runtime *runtime = substream->runtime;
4211 unsigned long appl_ptr;
4212 + int err = 0;
4213
4214 spin_lock_irqsave(&runtime->lock, flags);
4215 + snd_rawmidi_buffer_ref(runtime);
4216 while (count > 0 && runtime->avail) {
4217 count1 = runtime->buffer_size - runtime->appl_ptr;
4218 if (count1 > count)
4219 @@ -965,16 +983,19 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
4220 if (userbuf) {
4221 spin_unlock_irqrestore(&runtime->lock, flags);
4222 if (copy_to_user(userbuf + result,
4223 - runtime->buffer + appl_ptr, count1)) {
4224 - return result > 0 ? result : -EFAULT;
4225 - }
4226 + runtime->buffer + appl_ptr, count1))
4227 + err = -EFAULT;
4228 spin_lock_irqsave(&runtime->lock, flags);
4229 + if (err)
4230 + goto out;
4231 }
4232 result += count1;
4233 count -= count1;
4234 }
4235 + out:
4236 + snd_rawmidi_buffer_unref(runtime);
4237 spin_unlock_irqrestore(&runtime->lock, flags);
4238 - return result;
4239 + return result > 0 ? result : err;
4240 }
4241
4242 long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream,
4243 @@ -1268,6 +1289,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
4244 return -EAGAIN;
4245 }
4246 }
4247 + snd_rawmidi_buffer_ref(runtime);
4248 while (count > 0 && runtime->avail > 0) {
4249 count1 = runtime->buffer_size - runtime->appl_ptr;
4250 if (count1 > count)
4251 @@ -1299,6 +1321,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
4252 }
4253 __end:
4254 count1 = runtime->avail < runtime->buffer_size;
4255 + snd_rawmidi_buffer_unref(runtime);
4256 spin_unlock_irqrestore(&runtime->lock, flags);
4257 if (count1)
4258 snd_rawmidi_output_trigger(substream, 1);
4259 diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
4260 index 16c7f6605511..26e7cb555d3c 100644
4261 --- a/sound/firewire/amdtp-stream-trace.h
4262 +++ b/sound/firewire/amdtp-stream-trace.h
4263 @@ -66,8 +66,7 @@ TRACE_EVENT(amdtp_packet,
4264 __entry->irq,
4265 __entry->index,
4266 __print_array(__get_dynamic_array(cip_header),
4267 - __get_dynamic_array_len(cip_header),
4268 - sizeof(u8)))
4269 + __get_dynamic_array_len(cip_header), 1))
4270 );
4271
4272 #endif
4273 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4274 index 663168ddce72..d48263d1f6a2 100644
4275 --- a/sound/pci/hda/patch_hdmi.c
4276 +++ b/sound/pci/hda/patch_hdmi.c
4277 @@ -2234,7 +2234,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
4278
4279 for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
4280 struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
4281 + struct hdmi_eld *pin_eld = &per_pin->sink_eld;
4282
4283 + pin_eld->eld_valid = false;
4284 hdmi_present_sense(per_pin, 0);
4285 }
4286
4287 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4288 index 64270983ab7d..004d2f638cf2 100644
4289 --- a/sound/pci/hda/patch_realtek.c
4290 +++ b/sound/pci/hda/patch_realtek.c
4291 @@ -5743,6 +5743,15 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
4292 }
4293 }
4294
4295 +static void alc225_fixup_s3_pop_noise(struct hda_codec *codec,
4296 + const struct hda_fixup *fix, int action)
4297 +{
4298 + if (action != HDA_FIXUP_ACT_PRE_PROBE)
4299 + return;
4300 +
4301 + codec->power_save_node = 1;
4302 +}
4303 +
4304 /* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */
4305 static void alc274_fixup_bind_dacs(struct hda_codec *codec,
4306 const struct hda_fixup *fix, int action)
4307 @@ -5847,6 +5856,7 @@ enum {
4308 ALC269_FIXUP_HP_LINE1_MIC1_LED,
4309 ALC269_FIXUP_INV_DMIC,
4310 ALC269_FIXUP_LENOVO_DOCK,
4311 + ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST,
4312 ALC269_FIXUP_NO_SHUTUP,
4313 ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
4314 ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
4315 @@ -5932,6 +5942,7 @@ enum {
4316 ALC233_FIXUP_ACER_HEADSET_MIC,
4317 ALC294_FIXUP_LENOVO_MIC_LOCATION,
4318 ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
4319 + ALC225_FIXUP_S3_POP_NOISE,
4320 ALC700_FIXUP_INTEL_REFERENCE,
4321 ALC274_FIXUP_DELL_BIND_DACS,
4322 ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
4323 @@ -5967,6 +5978,7 @@ enum {
4324 ALC294_FIXUP_ASUS_DUAL_SPK,
4325 ALC285_FIXUP_THINKPAD_HEADSET_JACK,
4326 ALC294_FIXUP_ASUS_HPE,
4327 + ALC294_FIXUP_ASUS_COEF_1B,
4328 ALC285_FIXUP_HP_GPIO_LED,
4329 };
4330
4331 @@ -6165,6 +6177,12 @@ static const struct hda_fixup alc269_fixups[] = {
4332 .chained = true,
4333 .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
4334 },
4335 + [ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST] = {
4336 + .type = HDA_FIXUP_FUNC,
4337 + .v.func = alc269_fixup_limit_int_mic_boost,
4338 + .chained = true,
4339 + .chain_id = ALC269_FIXUP_LENOVO_DOCK,
4340 + },
4341 [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
4342 .type = HDA_FIXUP_FUNC,
4343 .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
4344 @@ -6817,6 +6835,12 @@ static const struct hda_fixup alc269_fixups[] = {
4345 { }
4346 },
4347 .chained = true,
4348 + .chain_id = ALC225_FIXUP_S3_POP_NOISE
4349 + },
4350 + [ALC225_FIXUP_S3_POP_NOISE] = {
4351 + .type = HDA_FIXUP_FUNC,
4352 + .v.func = alc225_fixup_s3_pop_noise,
4353 + .chained = true,
4354 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
4355 },
4356 [ALC700_FIXUP_INTEL_REFERENCE] = {
4357 @@ -7089,6 +7113,17 @@ static const struct hda_fixup alc269_fixups[] = {
4358 .chained = true,
4359 .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
4360 },
4361 + [ALC294_FIXUP_ASUS_COEF_1B] = {
4362 + .type = HDA_FIXUP_VERBS,
4363 + .v.verbs = (const struct hda_verb[]) {
4364 + /* Set bit 10 to correct noisy output after reboot from
4365 + * Windows 10 (due to pop noise reduction?)
4366 + */
4367 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x1b },
4368 + { 0x20, AC_VERB_SET_PROC_COEF, 0x4e4b },
4369 + { }
4370 + },
4371 + },
4372 [ALC285_FIXUP_HP_GPIO_LED] = {
4373 .type = HDA_FIXUP_FUNC,
4374 .v.func = alc285_fixup_hp_gpio_led,
4375 @@ -7260,6 +7295,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4376 SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
4377 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
4378 SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
4379 + SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
4380 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
4381 SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
4382 SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4383 @@ -7301,7 +7337,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4384 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
4385 SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
4386 SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
4387 - SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
4388 + SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
4389 SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
4390 SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
4391 SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
4392 @@ -7440,6 +7476,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4393 {.id = ALC269_FIXUP_HEADSET_MODE, .name = "headset-mode"},
4394 {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
4395 {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
4396 + {.id = ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, .name = "lenovo-dock-limit-boost"},
4397 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
4398 {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
4399 {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
4400 @@ -8113,8 +8150,6 @@ static int patch_alc269(struct hda_codec *codec)
4401 spec->gen.mixer_nid = 0;
4402 break;
4403 case 0x10ec0225:
4404 - codec->power_save_node = 1;
4405 - /* fall through */
4406 case 0x10ec0295:
4407 case 0x10ec0299:
4408 spec->codec_variant = ALC269_TYPE_ALC225;
4409 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
4410 index 5a81c444a18b..092720ce2c55 100644
4411 --- a/sound/usb/quirks.c
4412 +++ b/sound/usb/quirks.c
4413 @@ -1592,13 +1592,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
4414 && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
4415 msleep(20);
4416
4417 - /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here,
4418 - * otherwise requests like get/set frequency return as failed despite
4419 - * actually succeeding.
4420 + /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny
4421 + * delay here, otherwise requests like get/set frequency return as
4422 + * failed despite actually succeeding.
4423 */
4424 if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
4425 chip->usb_id == USB_ID(0x046d, 0x0a46) ||
4426 - chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
4427 + chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
4428 + chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
4429 (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
4430 usleep_range(1000, 2000);
4431 }
4432 diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
4433 index b6403712c2f4..281cc65276e0 100644
4434 --- a/tools/lib/bpf/libbpf.c
4435 +++ b/tools/lib/bpf/libbpf.c
4436 @@ -5905,62 +5905,104 @@ void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
4437 }
4438 }
4439
4440 -int libbpf_num_possible_cpus(void)
4441 +int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
4442 {
4443 - static const char *fcpu = "/sys/devices/system/cpu/possible";
4444 - int len = 0, n = 0, il = 0, ir = 0;
4445 - unsigned int start = 0, end = 0;
4446 - int tmp_cpus = 0;
4447 - static int cpus;
4448 - char buf[128];
4449 - int error = 0;
4450 - int fd = -1;
4451 + int err = 0, n, len, start, end = -1;
4452 + bool *tmp;
4453
4454 - tmp_cpus = READ_ONCE(cpus);
4455 - if (tmp_cpus > 0)
4456 - return tmp_cpus;
4457 + *mask = NULL;
4458 + *mask_sz = 0;
4459 +
4460 + /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
4461 + while (*s) {
4462 + if (*s == ',' || *s == '\n') {
4463 + s++;
4464 + continue;
4465 + }
4466 + n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
4467 + if (n <= 0 || n > 2) {
4468 + pr_warning("Failed to get CPU range %s: %d\n", s, n);
4469 + err = -EINVAL;
4470 + goto cleanup;
4471 + } else if (n == 1) {
4472 + end = start;
4473 + }
4474 + if (start < 0 || start > end) {
4475 + pr_warning("Invalid CPU range [%d,%d] in %s\n",
4476 + start, end, s);
4477 + err = -EINVAL;
4478 + goto cleanup;
4479 + }
4480 + tmp = realloc(*mask, end + 1);
4481 + if (!tmp) {
4482 + err = -ENOMEM;
4483 + goto cleanup;
4484 + }
4485 + *mask = tmp;
4486 + memset(tmp + *mask_sz, 0, start - *mask_sz);
4487 + memset(tmp + start, 1, end - start + 1);
4488 + *mask_sz = end + 1;
4489 + s += len;
4490 + }
4491 + if (!*mask_sz) {
4492 + pr_warning("Empty CPU range\n");
4493 + return -EINVAL;
4494 + }
4495 + return 0;
4496 +cleanup:
4497 + free(*mask);
4498 + *mask = NULL;
4499 + return err;
4500 +}
4501 +
4502 +int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
4503 +{
4504 + int fd, err = 0, len;
4505 + char buf[128];
4506
4507 fd = open(fcpu, O_RDONLY);
4508 if (fd < 0) {
4509 - error = errno;
4510 - pr_warning("Failed to open file %s: %s\n",
4511 - fcpu, strerror(error));
4512 - return -error;
4513 + err = -errno;
4514 + pr_warning("Failed to open cpu mask file %s: %d\n", fcpu, err);
4515 + return err;
4516 }
4517 len = read(fd, buf, sizeof(buf));
4518 close(fd);
4519 if (len <= 0) {
4520 - error = len ? errno : EINVAL;
4521 - pr_warning("Failed to read # of possible cpus from %s: %s\n",
4522 - fcpu, strerror(error));
4523 - return -error;
4524 + err = len ? -errno : -EINVAL;
4525 + pr_warning("Failed to read cpu mask from %s: %d\n", fcpu, err);
4526 + return err;
4527 }
4528 - if (len == sizeof(buf)) {
4529 - pr_warning("File %s size overflow\n", fcpu);
4530 - return -EOVERFLOW;
4531 + if (len >= sizeof(buf)) {
4532 + pr_warning("CPU mask is too big in file %s\n", fcpu);
4533 + return -E2BIG;
4534 }
4535 buf[len] = '\0';
4536
4537 - for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
4538 - /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
4539 - if (buf[ir] == ',' || buf[ir] == '\0') {
4540 - buf[ir] = '\0';
4541 - n = sscanf(&buf[il], "%u-%u", &start, &end);
4542 - if (n <= 0) {
4543 - pr_warning("Failed to get # CPUs from %s\n",
4544 - &buf[il]);
4545 - return -EINVAL;
4546 - } else if (n == 1) {
4547 - end = start;
4548 - }
4549 - tmp_cpus += end - start + 1;
4550 - il = ir + 1;
4551 - }
4552 - }
4553 - if (tmp_cpus <= 0) {
4554 - pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
4555 - return -EINVAL;
4556 + return parse_cpu_mask_str(buf, mask, mask_sz);
4557 +}
4558 +
4559 +int libbpf_num_possible_cpus(void)
4560 +{
4561 + static const char *fcpu = "/sys/devices/system/cpu/possible";
4562 + static int cpus;
4563 + int err, n, i, tmp_cpus;
4564 + bool *mask;
4565 +
4566 + tmp_cpus = READ_ONCE(cpus);
4567 + if (tmp_cpus > 0)
4568 + return tmp_cpus;
4569 +
4570 + err = parse_cpu_mask_file(fcpu, &mask, &n);
4571 + if (err)
4572 + return err;
4573 +
4574 + tmp_cpus = 0;
4575 + for (i = 0; i < n; i++) {
4576 + if (mask[i])
4577 + tmp_cpus++;
4578 }
4579 + free(mask);
4580
4581 WRITE_ONCE(cpus, tmp_cpus);
4582 return tmp_cpus;
4583 diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
4584 index 98216a69c32f..92940ae26ada 100644
4585 --- a/tools/lib/bpf/libbpf_internal.h
4586 +++ b/tools/lib/bpf/libbpf_internal.h
4587 @@ -63,6 +63,8 @@ do { \
4588 #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
4589 #define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
4590
4591 +int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
4592 +int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
4593 int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
4594 const char *str_sec, size_t str_len);
4595
4596 diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
4597 index 1735faf17536..437cb93e72ac 100644
4598 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
4599 +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
4600 @@ -52,7 +52,7 @@ retry:
4601 if (pmu_fd < 0 && errno == ENOENT) {
4602 printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
4603 test__skip();
4604 - goto cleanup;
4605 + goto close_prog;
4606 }
4607 if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
4608 pmu_fd, errno))
4609 diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c
4610 index 8941a41c2a55..cce6d605c017 100644
4611 --- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c
4612 +++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c
4613 @@ -1,7 +1,7 @@
4614 // SPDX-License-Identifier: GPL-2.0
4615
4616 #include <linux/bpf.h>
4617 -#include <bpf/bpf_helpers.h>
4618 +#include "bpf_helpers.h"
4619
4620 #define MAX_STACK_RAWTP 10
4621
4622 diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c
4623 index 079d0f5a2909..7e4c91f2238d 100644
4624 --- a/tools/testing/selftests/bpf/test_select_reuseport.c
4625 +++ b/tools/testing/selftests/bpf/test_select_reuseport.c
4626 @@ -668,12 +668,12 @@ static void cleanup_per_test(void)
4627
4628 for (i = 0; i < NR_RESULTS; i++) {
4629 err = bpf_map_update_elem(result_map, &i, &zero, BPF_ANY);
4630 - RET_IF(err, "reset elem in result_map",
4631 + CHECK(err, "reset elem in result_map",
4632 "i:%u err:%d errno:%d\n", i, err, errno);
4633 }
4634
4635 err = bpf_map_update_elem(linum_map, &zero, &zero, BPF_ANY);
4636 - RET_IF(err, "reset line number in linum_map", "err:%d errno:%d\n",
4637 + CHECK(err, "reset line number in linum_map", "err:%d errno:%d\n",
4638 err, errno);
4639
4640 for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++)
4641 diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
4642 index 063ecb290a5a..144308a757b7 100755
4643 --- a/tools/testing/selftests/ftrace/ftracetest
4644 +++ b/tools/testing/selftests/ftrace/ftracetest
4645 @@ -29,8 +29,25 @@ err_ret=1
4646 # kselftest skip code is 4
4647 err_skip=4
4648
4649 +# cgroup RT scheduling prevents chrt commands from succeeding, which
4650 +# induces failures in test wakeup tests. Disable for the duration of
4651 +# the tests.
4652 +
4653 +readonly sched_rt_runtime=/proc/sys/kernel/sched_rt_runtime_us
4654 +
4655 +sched_rt_runtime_orig=$(cat $sched_rt_runtime)
4656 +
4657 +setup() {
4658 + echo -1 > $sched_rt_runtime
4659 +}
4660 +
4661 +cleanup() {
4662 + echo $sched_rt_runtime_orig > $sched_rt_runtime
4663 +}
4664 +
4665 errexit() { # message
4666 echo "Error: $1" 1>&2
4667 + cleanup
4668 exit $err_ret
4669 }
4670
4671 @@ -39,6 +56,8 @@ if [ `id -u` -ne 0 ]; then
4672 errexit "this must be run by root user"
4673 fi
4674
4675 +setup
4676 +
4677 # Utilities
4678 absdir() { # file_path
4679 (cd `dirname $1`; pwd)
4680 @@ -235,6 +254,7 @@ TOTAL_RESULT=0
4681
4682 INSTANCE=
4683 CASENO=0
4684 +
4685 testcase() { # testfile
4686 CASENO=$((CASENO+1))
4687 desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:`
4688 @@ -406,5 +426,7 @@ prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w`
4689 prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w`
4690 prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w`
4691
4692 +cleanup
4693 +
4694 # if no error, return 0
4695 exit $TOTAL_RESULT
4696 diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
4697 index 1bcb67dcae26..81490ecaaa92 100644
4698 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
4699 +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
4700 @@ -38,7 +38,7 @@ for width in 64 32 16 8; do
4701 echo 0 > events/kprobes/testprobe/enable
4702
4703 : "Confirm the arguments is recorded in given types correctly"
4704 - ARGS=`grep "testprobe" trace | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'`
4705 + ARGS=`grep "testprobe" trace | head -n 1 | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'`
4706 check_types $ARGS $width
4707
4708 : "Clear event for next loop"
4709 diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
4710 index 5945f062d749..d63881f60e1a 100644
4711 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
4712 +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
4713 @@ -422,11 +422,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
4714 VGIC_ACCESS_32bit),
4715 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
4716 vgic_mmio_read_active, vgic_mmio_write_sactive,
4717 - NULL, vgic_mmio_uaccess_write_sactive, 1,
4718 + vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
4719 VGIC_ACCESS_32bit),
4720 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
4721 vgic_mmio_read_active, vgic_mmio_write_cactive,
4722 - NULL, vgic_mmio_uaccess_write_cactive, 1,
4723 + vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
4724 VGIC_ACCESS_32bit),
4725 REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
4726 vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
4727 diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
4728 index 7dfd15dbb308..4c5909e38f78 100644
4729 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
4730 +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
4731 @@ -491,11 +491,11 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
4732 VGIC_ACCESS_32bit),
4733 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
4734 vgic_mmio_read_active, vgic_mmio_write_sactive,
4735 - NULL, vgic_mmio_uaccess_write_sactive, 1,
4736 + vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
4737 VGIC_ACCESS_32bit),
4738 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
4739 vgic_mmio_read_active, vgic_mmio_write_cactive,
4740 - NULL, vgic_mmio_uaccess_write_cactive,
4741 + vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
4742 1, VGIC_ACCESS_32bit),
4743 REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
4744 vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
4745 @@ -563,12 +563,12 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
4746 VGIC_ACCESS_32bit),
4747 REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
4748 vgic_mmio_read_active, vgic_mmio_write_sactive,
4749 - NULL, vgic_mmio_uaccess_write_sactive,
4750 - 4, VGIC_ACCESS_32bit),
4751 + vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
4752 + VGIC_ACCESS_32bit),
4753 REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
4754 vgic_mmio_read_active, vgic_mmio_write_cactive,
4755 - NULL, vgic_mmio_uaccess_write_cactive,
4756 - 4, VGIC_ACCESS_32bit),
4757 + vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
4758 + VGIC_ACCESS_32bit),
4759 REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
4760 vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
4761 VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
4762 diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
4763 index 7eacf00e5abe..fb1dcd397b93 100644
4764 --- a/virt/kvm/arm/vgic/vgic-mmio.c
4765 +++ b/virt/kvm/arm/vgic/vgic-mmio.c
4766 @@ -300,8 +300,39 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
4767 }
4768 }
4769
4770 -unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
4771 - gpa_t addr, unsigned int len)
4772 +
4773 +/*
4774 + * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
4775 + * is not queued on some running VCPU's LRs, because then the change to the
4776 + * active state can be overwritten when the VCPU's state is synced coming back
4777 + * from the guest.
4778 + *
4779 + * For shared interrupts as well as GICv3 private interrupts, we have to
4780 + * stop all the VCPUs because interrupts can be migrated while we don't hold
4781 + * the IRQ locks and we don't want to be chasing moving targets.
4782 + *
4783 + * For GICv2 private interrupts we don't have to do anything because
4784 + * userspace accesses to the VGIC state already require all VCPUs to be
4785 + * stopped, and only the VCPU itself can modify its private interrupts
4786 + * active state, which guarantees that the VCPU is not running.
4787 + */
4788 +static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
4789 +{
4790 + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
4791 + intid >= VGIC_NR_PRIVATE_IRQS)
4792 + kvm_arm_halt_guest(vcpu->kvm);
4793 +}
4794 +
4795 +/* See vgic_access_active_prepare */
4796 +static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
4797 +{
4798 + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
4799 + intid >= VGIC_NR_PRIVATE_IRQS)
4800 + kvm_arm_resume_guest(vcpu->kvm);
4801 +}
4802 +
4803 +static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
4804 + gpa_t addr, unsigned int len)
4805 {
4806 u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
4807 u32 value = 0;
4808 @@ -311,6 +342,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
4809 for (i = 0; i < len * 8; i++) {
4810 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
4811
4812 + /*
4813 + * Even for HW interrupts, don't evaluate the HW state as
4814 + * all the guest is interested in is the virtual state.
4815 + */
4816 if (irq->active)
4817 value |= (1U << i);
4818
4819 @@ -320,6 +355,29 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
4820 return value;
4821 }
4822
4823 +unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
4824 + gpa_t addr, unsigned int len)
4825 +{
4826 + u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
4827 + u32 val;
4828 +
4829 + mutex_lock(&vcpu->kvm->lock);
4830 + vgic_access_active_prepare(vcpu, intid);
4831 +
4832 + val = __vgic_mmio_read_active(vcpu, addr, len);
4833 +
4834 + vgic_access_active_finish(vcpu, intid);
4835 + mutex_unlock(&vcpu->kvm->lock);
4836 +
4837 + return val;
4838 +}
4839 +
4840 +unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
4841 + gpa_t addr, unsigned int len)
4842 +{
4843 + return __vgic_mmio_read_active(vcpu, addr, len);
4844 +}
4845 +
4846 /* Must be called with irq->irq_lock held */
4847 static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
4848 bool active, bool is_uaccess)
4849 @@ -371,36 +429,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
4850 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
4851 }
4852
4853 -/*
4854 - * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
4855 - * is not queued on some running VCPU's LRs, because then the change to the
4856 - * active state can be overwritten when the VCPU's state is synced coming back
4857 - * from the guest.
4858 - *
4859 - * For shared interrupts, we have to stop all the VCPUs because interrupts can
4860 - * be migrated while we don't hold the IRQ locks and we don't want to be
4861 - * chasing moving targets.
4862 - *
4863 - * For private interrupts we don't have to do anything because userspace
4864 - * accesses to the VGIC state already require all VCPUs to be stopped, and
4865 - * only the VCPU itself can modify its private interrupts active state, which
4866 - * guarantees that the VCPU is not running.
4867 - */
4868 -static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
4869 -{
4870 - if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
4871 - intid >= VGIC_NR_PRIVATE_IRQS)
4872 - kvm_arm_halt_guest(vcpu->kvm);
4873 -}
4874 -
4875 -/* See vgic_change_active_prepare */
4876 -static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
4877 -{
4878 - if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
4879 - intid >= VGIC_NR_PRIVATE_IRQS)
4880 - kvm_arm_resume_guest(vcpu->kvm);
4881 -}
4882 -
4883 static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
4884 gpa_t addr, unsigned int len,
4885 unsigned long val)
4886 @@ -422,11 +450,11 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
4887 u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
4888
4889 mutex_lock(&vcpu->kvm->lock);
4890 - vgic_change_active_prepare(vcpu, intid);
4891 + vgic_access_active_prepare(vcpu, intid);
4892
4893 __vgic_mmio_write_cactive(vcpu, addr, len, val);
4894
4895 - vgic_change_active_finish(vcpu, intid);
4896 + vgic_access_active_finish(vcpu, intid);
4897 mutex_unlock(&vcpu->kvm->lock);
4898 }
4899
4900 @@ -459,11 +487,11 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
4901 u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
4902
4903 mutex_lock(&vcpu->kvm->lock);
4904 - vgic_change_active_prepare(vcpu, intid);
4905 + vgic_access_active_prepare(vcpu, intid);
4906
4907 __vgic_mmio_write_sactive(vcpu, addr, len, val);
4908
4909 - vgic_change_active_finish(vcpu, intid);
4910 + vgic_access_active_finish(vcpu, intid);
4911 mutex_unlock(&vcpu->kvm->lock);
4912 }
4913
4914 diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
4915 index 836f418f1ee8..b6aff5252429 100644
4916 --- a/virt/kvm/arm/vgic/vgic-mmio.h
4917 +++ b/virt/kvm/arm/vgic/vgic-mmio.h
4918 @@ -157,6 +157,9 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
4919 unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
4920 gpa_t addr, unsigned int len);
4921
4922 +unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
4923 + gpa_t addr, unsigned int len);
4924 +
4925 void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
4926 gpa_t addr, unsigned int len,
4927 unsigned long val);