Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0149-5.4.50-all-fixes.patch

Revision 3535
Wed Jul 8 10:50:05 2020 UTC by niro
File size: 183383 byte(s)
-linux-5.4.50
1 niro 3535 diff --git a/Makefile b/Makefile
2     index 72230ad23299..380e398b2995 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 4
9     -SUBLEVEL = 49
10     +SUBLEVEL = 50
11     EXTRAVERSION =
12     NAME = Kleptomaniac Octopus
13    
14     diff --git a/arch/arm/boot/dts/am335x-pocketbeagle.dts b/arch/arm/boot/dts/am335x-pocketbeagle.dts
15     index ff4f919d22f6..abf2badce53d 100644
16     --- a/arch/arm/boot/dts/am335x-pocketbeagle.dts
17     +++ b/arch/arm/boot/dts/am335x-pocketbeagle.dts
18     @@ -88,7 +88,6 @@
19     AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
20     AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
21     AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
22     - AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4) /* (B12) mcasp0_aclkr.mmc0_sdwp */
23     >;
24     };
25    
26     diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
27     index da6d70f09ef1..418e6b97cb2e 100644
28     --- a/arch/arm/boot/dts/bcm-nsp.dtsi
29     +++ b/arch/arm/boot/dts/bcm-nsp.dtsi
30     @@ -257,10 +257,10 @@
31     status = "disabled";
32     };
33    
34     - mailbox: mailbox@25000 {
35     + mailbox: mailbox@25c00 {
36     compatible = "brcm,iproc-fa2-mbox";
37     - reg = <0x25000 0x445>;
38     - interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
39     + reg = <0x25c00 0x400>;
40     + interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
41     #mbox-cells = <1>;
42     brcm,rx-status-len = <32>;
43     brcm,use-bcm-hdr;
44     diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts
45     index 8047e8cdb3af..4548d87534e3 100644
46     --- a/arch/arm/boot/dts/omap4-duovero-parlor.dts
47     +++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts
48     @@ -139,7 +139,7 @@
49     ethernet@gpmc {
50     reg = <5 0 0xff>;
51     interrupt-parent = <&gpio2>;
52     - interrupts = <12 IRQ_TYPE_EDGE_FALLING>; /* gpio_44 */
53     + interrupts = <12 IRQ_TYPE_LEVEL_LOW>; /* gpio_44 */
54    
55     phy-mode = "mii";
56    
57     diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
58     index f057df813f83..e9962b48e30c 100644
59     --- a/arch/arm/mach-imx/pm-imx5.c
60     +++ b/arch/arm/mach-imx/pm-imx5.c
61     @@ -295,14 +295,14 @@ static int __init imx_suspend_alloc_ocram(
62     if (!ocram_pool) {
63     pr_warn("%s: ocram pool unavailable!\n", __func__);
64     ret = -ENODEV;
65     - goto put_node;
66     + goto put_device;
67     }
68    
69     ocram_base = gen_pool_alloc(ocram_pool, size);
70     if (!ocram_base) {
71     pr_warn("%s: unable to alloc ocram!\n", __func__);
72     ret = -ENOMEM;
73     - goto put_node;
74     + goto put_device;
75     }
76    
77     phys = gen_pool_virt_to_phys(ocram_pool, ocram_base);
78     @@ -312,6 +312,8 @@ static int __init imx_suspend_alloc_ocram(
79     if (virt_out)
80     *virt_out = virt;
81    
82     +put_device:
83     + put_device(&pdev->dev);
84     put_node:
85     of_node_put(node);
86    
87     diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
88     index 203664c40d3d..eb74aa182661 100644
89     --- a/arch/arm/mach-omap2/omap_hwmod.c
90     +++ b/arch/arm/mach-omap2/omap_hwmod.c
91     @@ -3535,7 +3535,7 @@ static const struct omap_hwmod_reset dra7_reset_quirks[] = {
92     };
93    
94     static const struct omap_hwmod_reset omap_reset_quirks[] = {
95     - { .match = "dss", .len = 3, .reset = omap_dss_reset, },
96     + { .match = "dss_core", .len = 8, .reset = omap_dss_reset, },
97     { .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, },
98     { .match = "i2c", .len = 3, .reset = omap_i2c_reset, },
99     { .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, },
100     diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
101     index 13137451b438..b9f8b7aac8ff 100644
102     --- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
103     +++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
104     @@ -231,7 +231,7 @@
105    
106     ldo1_reg: LDO1 {
107     regulator-name = "LDO1";
108     - regulator-min-microvolt = <3000000>;
109     + regulator-min-microvolt = <1600000>;
110     regulator-max-microvolt = <3300000>;
111     regulator-boot-on;
112     regulator-always-on;
113     @@ -239,7 +239,7 @@
114    
115     ldo2_reg: LDO2 {
116     regulator-name = "LDO2";
117     - regulator-min-microvolt = <900000>;
118     + regulator-min-microvolt = <800000>;
119     regulator-max-microvolt = <900000>;
120     regulator-boot-on;
121     regulator-always-on;
122     diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
123     index 11c705d225d0..9ad1d43b8ce7 100644
124     --- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
125     +++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
126     @@ -268,7 +268,7 @@
127    
128     ldo1_reg: LDO1 {
129     regulator-name = "LDO1";
130     - regulator-min-microvolt = <3000000>;
131     + regulator-min-microvolt = <1600000>;
132     regulator-max-microvolt = <3300000>;
133     regulator-boot-on;
134     regulator-always-on;
135     @@ -276,7 +276,7 @@
136    
137     ldo2_reg: LDO2 {
138     regulator-name = "LDO2";
139     - regulator-min-microvolt = <900000>;
140     + regulator-min-microvolt = <800000>;
141     regulator-max-microvolt = <900000>;
142     regulator-boot-on;
143     regulator-always-on;
144     diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
145     index 1765e5284994..04b982a2799e 100644
146     --- a/arch/arm64/kernel/fpsimd.c
147     +++ b/arch/arm64/kernel/fpsimd.c
148     @@ -338,7 +338,7 @@ static unsigned int find_supported_vector_length(unsigned int vl)
149     return sve_vl_from_vq(__bit_to_vq(bit));
150     }
151    
152     -#ifdef CONFIG_SYSCTL
153     +#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
154    
155     static int sve_proc_do_default_vl(struct ctl_table *table, int write,
156     void __user *buffer, size_t *lenp,
157     @@ -384,9 +384,9 @@ static int __init sve_sysctl_init(void)
158     return 0;
159     }
160    
161     -#else /* ! CONFIG_SYSCTL */
162     +#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
163     static int __init sve_sysctl_init(void) { return 0; }
164     -#endif /* ! CONFIG_SYSCTL */
165     +#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
166    
167     #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
168     (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
169     diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
170     index 0bbac612146e..666b225aeb3a 100644
171     --- a/arch/arm64/kernel/perf_regs.c
172     +++ b/arch/arm64/kernel/perf_regs.c
173     @@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
174     return 0;
175    
176     /*
177     - * Compat (i.e. 32 bit) mode:
178     - * - PC has been set in the pt_regs struct in kernel_entry,
179     - * - Handle SP and LR here.
180     + * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
181     + * we're stuck with it for ABI compatability reasons.
182     + *
183     + * For a 32-bit consumer inspecting a 32-bit task, then it will look at
184     + * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
185     + * These correspond directly to a prefix of the registers saved in our
186     + * 'struct pt_regs', with the exception of the PC, so we copy that down
187     + * (x15 corresponds to SP_hyp in the architecture).
188     + *
189     + * So far, so good.
190     + *
191     + * The oddity arises when a 64-bit consumer looks at a 32-bit task and
192     + * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
193     + * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
194     + * PC registers would normally live. The initial idea was to allow a
195     + * 64-bit unwinder to unwind a 32-bit task and, although it's not clear
196     + * how well that works in practice, somebody might be relying on it.
197     + *
198     + * At the time we make a sample, we don't know whether the consumer is
199     + * 32-bit or 64-bit, so we have to cater for both possibilities.
200     */
201     if (compat_user_mode(regs)) {
202     if ((u32)idx == PERF_REG_ARM64_SP)
203     return regs->compat_sp;
204     if ((u32)idx == PERF_REG_ARM64_LR)
205     return regs->compat_lr;
206     + if (idx == 15)
207     + return regs->pc;
208     }
209    
210     if ((u32)idx == PERF_REG_ARM64_SP)
211     diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
212     index d969bab4a26b..262e5bbb2776 100644
213     --- a/arch/riscv/include/asm/cmpxchg.h
214     +++ b/arch/riscv/include/asm/cmpxchg.h
215     @@ -179,7 +179,7 @@
216     " bnez %1, 0b\n" \
217     "1:\n" \
218     : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
219     - : "rJ" (__old), "rJ" (__new) \
220     + : "rJ" ((long)__old), "rJ" (__new) \
221     : "memory"); \
222     break; \
223     case 8: \
224     @@ -224,7 +224,7 @@
225     RISCV_ACQUIRE_BARRIER \
226     "1:\n" \
227     : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
228     - : "rJ" (__old), "rJ" (__new) \
229     + : "rJ" ((long)__old), "rJ" (__new) \
230     : "memory"); \
231     break; \
232     case 8: \
233     @@ -270,7 +270,7 @@
234     " bnez %1, 0b\n" \
235     "1:\n" \
236     : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
237     - : "rJ" (__old), "rJ" (__new) \
238     + : "rJ" ((long)__old), "rJ" (__new) \
239     : "memory"); \
240     break; \
241     case 8: \
242     @@ -316,7 +316,7 @@
243     " fence rw, rw\n" \
244     "1:\n" \
245     : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
246     - : "rJ" (__old), "rJ" (__new) \
247     + : "rJ" ((long)__old), "rJ" (__new) \
248     : "memory"); \
249     break; \
250     case 8: \
251     diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
252     index f3619f59d85c..12f8a7fce78b 100644
253     --- a/arch/riscv/kernel/sys_riscv.c
254     +++ b/arch/riscv/kernel/sys_riscv.c
255     @@ -8,6 +8,7 @@
256     #include <linux/syscalls.h>
257     #include <asm/unistd.h>
258     #include <asm/cacheflush.h>
259     +#include <asm-generic/mman-common.h>
260    
261     static long riscv_sys_mmap(unsigned long addr, unsigned long len,
262     unsigned long prot, unsigned long flags,
263     @@ -16,6 +17,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
264     {
265     if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
266     return -EINVAL;
267     +
268     + if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
269     + if (unlikely(!(prot & PROT_READ)))
270     + return -EINVAL;
271     +
272     return ksys_mmap_pgoff(addr, len, prot, flags, fd,
273     offset >> (PAGE_SHIFT - page_shift_offset));
274     }
275     diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
276     index 169d7604eb80..f3ba84fa9bd1 100644
277     --- a/arch/s390/include/asm/vdso.h
278     +++ b/arch/s390/include/asm/vdso.h
279     @@ -36,6 +36,7 @@ struct vdso_data {
280     __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
281     __u32 ts_dir; /* TOD steering direction 0x64 */
282     __u64 ts_end; /* TOD steering end 0x68 */
283     + __u32 hrtimer_res; /* hrtimer resolution 0x70 */
284     };
285    
286     struct vdso_per_cpu_data {
287     diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
288     index b6628586ab70..a65cb4924bdb 100644
289     --- a/arch/s390/kernel/asm-offsets.c
290     +++ b/arch/s390/kernel/asm-offsets.c
291     @@ -76,6 +76,7 @@ int main(void)
292     OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
293     OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
294     OFFSET(__VDSO_TS_END, vdso_data, ts_end);
295     + OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res);
296     OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
297     OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
298     OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
299     @@ -87,7 +88,6 @@ int main(void)
300     DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
301     DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
302     DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
303     - DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
304     DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
305     BLANK();
306     /* idle data offsets */
307     diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
308     index bc85987727f0..c544b7a11ebb 100644
309     --- a/arch/s390/kernel/entry.S
310     +++ b/arch/s390/kernel/entry.S
311     @@ -368,9 +368,9 @@ ENTRY(system_call)
312     jnz .Lsysc_nr_ok
313     # svc 0: system call number in %r1
314     llgfr %r1,%r1 # clear high word in r1
315     + sth %r1,__PT_INT_CODE+2(%r11)
316     cghi %r1,NR_syscalls
317     jnl .Lsysc_nr_ok
318     - sth %r1,__PT_INT_CODE+2(%r11)
319     slag %r8,%r1,3
320     .Lsysc_nr_ok:
321     xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
322     diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
323     index ad71132374f0..5aa786063eb3 100644
324     --- a/arch/s390/kernel/ptrace.c
325     +++ b/arch/s390/kernel/ptrace.c
326     @@ -324,6 +324,25 @@ static inline void __poke_user_per(struct task_struct *child,
327     child->thread.per_user.end = data;
328     }
329    
330     +static void fixup_int_code(struct task_struct *child, addr_t data)
331     +{
332     + struct pt_regs *regs = task_pt_regs(child);
333     + int ilc = regs->int_code >> 16;
334     + u16 insn;
335     +
336     + if (ilc > 6)
337     + return;
338     +
339     + if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
340     + &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
341     + return;
342     +
343     + /* double check that tracee stopped on svc instruction */
344     + if ((insn >> 8) != 0xa)
345     + return;
346     +
347     + regs->int_code = 0x20000 | (data & 0xffff);
348     +}
349     /*
350     * Write a word to the user area of a process at location addr. This
351     * operation does have an additional problem compared to peek_user.
352     @@ -335,7 +354,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
353     struct user *dummy = NULL;
354     addr_t offset;
355    
356     +
357     if (addr < (addr_t) &dummy->regs.acrs) {
358     + struct pt_regs *regs = task_pt_regs(child);
359     /*
360     * psw and gprs are stored on the stack
361     */
362     @@ -353,7 +374,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
363     /* Invalid addressing mode bits */
364     return -EINVAL;
365     }
366     - *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
367     +
368     + if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
369     + addr == offsetof(struct user, regs.gprs[2]))
370     + fixup_int_code(child, data);
371     + *(addr_t *)((addr_t) &regs->psw + addr) = data;
372    
373     } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
374     /*
375     @@ -719,6 +744,10 @@ static int __poke_user_compat(struct task_struct *child,
376     regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
377     (__u64)(tmp & PSW32_ADDR_AMODE);
378     } else {
379     +
380     + if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
381     + addr == offsetof(struct compat_user, regs.gprs[2]))
382     + fixup_int_code(child, data);
383     /* gpr 0-15 */
384     *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
385     }
386     @@ -844,11 +873,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
387     * call number to gprs[2].
388     */
389     if (test_thread_flag(TIF_SYSCALL_TRACE) &&
390     - (tracehook_report_syscall_entry(regs) ||
391     - regs->gprs[2] >= NR_syscalls)) {
392     + tracehook_report_syscall_entry(regs)) {
393     /*
394     - * Tracing decided this syscall should not happen or the
395     - * debugger stored an invalid system call number. Skip
396     + * Tracing decided this syscall should not happen. Skip
397     * the system call and the system call restart handling.
398     */
399     clear_pt_regs_flag(regs, PIF_SYSCALL);
400     diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
401     index e8766beee5ad..8ea9db599d38 100644
402     --- a/arch/s390/kernel/time.c
403     +++ b/arch/s390/kernel/time.c
404     @@ -310,6 +310,7 @@ void update_vsyscall(struct timekeeper *tk)
405    
406     vdso_data->tk_mult = tk->tkr_mono.mult;
407     vdso_data->tk_shift = tk->tkr_mono.shift;
408     + vdso_data->hrtimer_res = hrtimer_resolution;
409     smp_wmb();
410     ++vdso_data->tb_update_count;
411     }
412     diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
413     index bec19e7e6e1c..4a66a1cb919b 100644
414     --- a/arch/s390/kernel/vdso64/Makefile
415     +++ b/arch/s390/kernel/vdso64/Makefile
416     @@ -18,8 +18,8 @@ KBUILD_AFLAGS_64 += -m64 -s
417    
418     KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
419     KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
420     -KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
421     - -Wl,--hash-style=both
422     +ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \
423     + --hash-style=both --build-id -T
424    
425     $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
426     $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
427     @@ -37,8 +37,8 @@ KASAN_SANITIZE := n
428     $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
429    
430     # link rule for the .so file, .lds has to be first
431     -$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
432     - $(call if_changed,vdso64ld)
433     +$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) FORCE
434     + $(call if_changed,ld)
435    
436     # strip rule for the .so file
437     $(obj)/%.so: OBJCOPYFLAGS := -S
438     @@ -50,8 +50,6 @@ $(obj-vdso64): %.o: %.S FORCE
439     $(call if_changed_dep,vdso64as)
440    
441     # actual build commands
442     -quiet_cmd_vdso64ld = VDSO64L $@
443     - cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
444     quiet_cmd_vdso64as = VDSO64A $@
445     cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
446    
447     diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
448     index 081435398e0a..0c79caa32b59 100644
449     --- a/arch/s390/kernel/vdso64/clock_getres.S
450     +++ b/arch/s390/kernel/vdso64/clock_getres.S
451     @@ -17,12 +17,14 @@
452     .type __kernel_clock_getres,@function
453     __kernel_clock_getres:
454     CFI_STARTPROC
455     - larl %r1,4f
456     + larl %r1,3f
457     + lg %r0,0(%r1)
458     cghi %r2,__CLOCK_REALTIME_COARSE
459     je 0f
460     cghi %r2,__CLOCK_MONOTONIC_COARSE
461     je 0f
462     - larl %r1,3f
463     + larl %r1,_vdso_data
464     + llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1)
465     cghi %r2,__CLOCK_REALTIME
466     je 0f
467     cghi %r2,__CLOCK_MONOTONIC
468     @@ -36,7 +38,6 @@ __kernel_clock_getres:
469     jz 2f
470     0: ltgr %r3,%r3
471     jz 1f /* res == NULL */
472     - lg %r0,0(%r1)
473     xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
474     stg %r0,8(%r3) /* store tp->tv_usec */
475     1: lghi %r2,0
476     @@ -45,6 +46,5 @@ __kernel_clock_getres:
477     svc 0
478     br %r14
479     CFI_ENDPROC
480     -3: .quad __CLOCK_REALTIME_RES
481     -4: .quad __CLOCK_COARSE_RES
482     +3: .quad __CLOCK_COARSE_RES
483     .size __kernel_clock_getres,.-__kernel_clock_getres
484     diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
485     index 60f7205ebe40..646dd58169ec 100644
486     --- a/arch/sparc/kernel/ptrace_32.c
487     +++ b/arch/sparc/kernel/ptrace_32.c
488     @@ -168,12 +168,17 @@ static int genregs32_set(struct task_struct *target,
489     if (ret || !count)
490     return ret;
491     ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
492     - &regs->y,
493     + &regs->npc,
494     34 * sizeof(u32), 35 * sizeof(u32));
495     if (ret || !count)
496     return ret;
497     + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
498     + &regs->y,
499     + 35 * sizeof(u32), 36 * sizeof(u32));
500     + if (ret || !count)
501     + return ret;
502     return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
503     - 35 * sizeof(u32), 38 * sizeof(u32));
504     + 36 * sizeof(u32), 38 * sizeof(u32));
505     }
506    
507     static int fpregs32_get(struct task_struct *target,
508     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
509     index 7d91a3f5b26a..742de9d97ba1 100644
510     --- a/arch/x86/include/asm/kvm_host.h
511     +++ b/arch/x86/include/asm/kvm_host.h
512     @@ -1160,7 +1160,7 @@ struct kvm_x86_ops {
513     void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
514     struct kvm_memory_slot *slot,
515     gfn_t offset, unsigned long mask);
516     - int (*write_log_dirty)(struct kvm_vcpu *vcpu);
517     + int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
518    
519     /* pmu operations of sub-arch */
520     const struct kvm_pmu_ops *pmu_ops;
521     diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
522     index 650df6d21049..9b3f25e14608 100644
523     --- a/arch/x86/kernel/cpu/common.c
524     +++ b/arch/x86/kernel/cpu/common.c
525     @@ -366,6 +366,9 @@ out:
526     cr4_clear_bits(X86_CR4_UMIP);
527     }
528    
529     +/* These bits should not change their value after CPU init is finished. */
530     +static const unsigned long cr4_pinned_mask =
531     + X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
532     static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
533     static unsigned long cr4_pinned_bits __ro_after_init;
534    
535     @@ -390,20 +393,20 @@ EXPORT_SYMBOL(native_write_cr0);
536    
537     void native_write_cr4(unsigned long val)
538     {
539     - unsigned long bits_missing = 0;
540     + unsigned long bits_changed = 0;
541    
542     set_register:
543     asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
544    
545     if (static_branch_likely(&cr_pinning)) {
546     - if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
547     - bits_missing = ~val & cr4_pinned_bits;
548     - val |= bits_missing;
549     + if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
550     + bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
551     + val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
552     goto set_register;
553     }
554     - /* Warn after we've set the missing bits. */
555     - WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
556     - bits_missing);
557     + /* Warn after we've corrected the changed bits. */
558     + WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
559     + bits_changed);
560     }
561     }
562     EXPORT_SYMBOL(native_write_cr4);
563     @@ -415,7 +418,7 @@ void cr4_init(void)
564     if (boot_cpu_has(X86_FEATURE_PCID))
565     cr4 |= X86_CR4_PCIDE;
566     if (static_branch_likely(&cr_pinning))
567     - cr4 |= cr4_pinned_bits;
568     + cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
569    
570     __write_cr4(cr4);
571    
572     @@ -430,10 +433,7 @@ void cr4_init(void)
573     */
574     static void __init setup_cr_pinning(void)
575     {
576     - unsigned long mask;
577     -
578     - mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
579     - cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
580     + cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
581     static_key_enable(&cr_pinning.key);
582     }
583    
584     diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
585     index 20856d80dce3..54b711bc0607 100644
586     --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
587     +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
588     @@ -1027,6 +1027,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
589     _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
590     if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
591     _r_cdp = NULL;
592     + _d_cdp = NULL;
593     ret = -EINVAL;
594     }
595    
596     diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c
597     index c222f283b456..32b4dc9030aa 100644
598     --- a/arch/x86/kernel/cpu/umwait.c
599     +++ b/arch/x86/kernel/cpu/umwait.c
600     @@ -17,12 +17,6 @@
601     */
602     static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
603    
604     -u32 get_umwait_control_msr(void)
605     -{
606     - return umwait_control_cached;
607     -}
608     -EXPORT_SYMBOL_GPL(get_umwait_control_msr);
609     -
610     /*
611     * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
612     * hardware or BIOS before kernel boot.
613     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
614     index a3824ae9a634..aab02ea2d2cb 100644
615     --- a/arch/x86/kvm/mmu.c
616     +++ b/arch/x86/kvm/mmu.c
617     @@ -1819,10 +1819,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
618     * Emulate arch specific page modification logging for the
619     * nested hypervisor
620     */
621     -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
622     +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
623     {
624     if (kvm_x86_ops->write_log_dirty)
625     - return kvm_x86_ops->write_log_dirty(vcpu);
626     + return kvm_x86_ops->write_log_dirty(vcpu, l2_gpa);
627    
628     return 0;
629     }
630     diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
631     index d55674f44a18..6f2208cf30df 100644
632     --- a/arch/x86/kvm/mmu.h
633     +++ b/arch/x86/kvm/mmu.h
634     @@ -209,7 +209,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
635     void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
636     bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
637     struct kvm_memory_slot *slot, u64 gfn);
638     -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
639     +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
640    
641     int kvm_mmu_post_init_vm(struct kvm *kvm);
642     void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
643     diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
644     index 4e3f137ffa8c..a20fc1ba607f 100644
645     --- a/arch/x86/kvm/paging_tmpl.h
646     +++ b/arch/x86/kvm/paging_tmpl.h
647     @@ -220,7 +220,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte)
648     static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
649     struct kvm_mmu *mmu,
650     struct guest_walker *walker,
651     - int write_fault)
652     + gpa_t addr, int write_fault)
653     {
654     unsigned level, index;
655     pt_element_t pte, orig_pte;
656     @@ -245,7 +245,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
657     !(pte & PT_GUEST_DIRTY_MASK)) {
658     trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
659     #if PTTYPE == PTTYPE_EPT
660     - if (kvm_arch_write_log_dirty(vcpu))
661     + if (kvm_arch_write_log_dirty(vcpu, addr))
662     return -EINVAL;
663     #endif
664     pte |= PT_GUEST_DIRTY_MASK;
665     @@ -442,7 +442,8 @@ retry_walk:
666     (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
667    
668     if (unlikely(!accessed_dirty)) {
669     - ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
670     + ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
671     + addr, write_fault);
672     if (unlikely(ret < 0))
673     goto error;
674     else if (ret)
675     diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
676     index 5fac01865a2d..10e6471896cd 100644
677     --- a/arch/x86/kvm/vmx/vmx.c
678     +++ b/arch/x86/kvm/vmx/vmx.c
679     @@ -6427,23 +6427,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
680     msrs[i].host, false);
681     }
682    
683     -static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
684     -{
685     - u32 host_umwait_control;
686     -
687     - if (!vmx_has_waitpkg(vmx))
688     - return;
689     -
690     - host_umwait_control = get_umwait_control_msr();
691     -
692     - if (vmx->msr_ia32_umwait_control != host_umwait_control)
693     - add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
694     - vmx->msr_ia32_umwait_control,
695     - host_umwait_control, false);
696     - else
697     - clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
698     -}
699     -
700     static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
701     {
702     struct vcpu_vmx *vmx = to_vmx(vcpu);
703     @@ -6533,7 +6516,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
704     pt_guest_enter(vmx);
705    
706     atomic_switch_perf_msrs(vmx);
707     - atomic_switch_umwait_control_msr(vmx);
708    
709     if (enable_preemption_timer)
710     vmx_update_hv_timer(vcpu);
711     @@ -7272,11 +7254,11 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
712     kvm_flush_pml_buffers(kvm);
713     }
714    
715     -static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
716     +static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
717     {
718     struct vmcs12 *vmcs12;
719     struct vcpu_vmx *vmx = to_vmx(vcpu);
720     - gpa_t gpa, dst;
721     + gpa_t dst;
722    
723     if (is_guest_mode(vcpu)) {
724     WARN_ON_ONCE(vmx->nested.pml_full);
725     @@ -7295,7 +7277,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
726     return 1;
727     }
728    
729     - gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
730     + gpa &= ~0xFFFull;
731     dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
732    
733     if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
734     diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
735     index 295c5f83842e..a1919ec7fd10 100644
736     --- a/arch/x86/kvm/vmx/vmx.h
737     +++ b/arch/x86/kvm/vmx/vmx.h
738     @@ -14,8 +14,6 @@
739     extern const u32 vmx_msr_index[];
740     extern u64 host_efer;
741    
742     -extern u32 get_umwait_control_msr(void);
743     -
744     #define MSR_TYPE_R 1
745     #define MSR_TYPE_W 2
746     #define MSR_TYPE_RW 3
747     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
748     index fff279fb173b..eed1866ae4d3 100644
749     --- a/arch/x86/kvm/x86.c
750     +++ b/arch/x86/kvm/x86.c
751     @@ -2753,7 +2753,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
752     return kvm_mtrr_set_msr(vcpu, msr, data);
753     case MSR_IA32_APICBASE:
754     return kvm_set_apic_base(vcpu, msr_info);
755     - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
756     + case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
757     return kvm_x2apic_msr_write(vcpu, msr, data);
758     case MSR_IA32_TSCDEADLINE:
759     kvm_set_lapic_tscdeadline_msr(vcpu, data);
760     @@ -3057,7 +3057,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
761     case MSR_IA32_APICBASE:
762     msr_info->data = kvm_get_apic_base(vcpu);
763     break;
764     - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
765     + case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
766     return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
767     break;
768     case MSR_IA32_TSCDEADLINE:
769     diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
770     index fff28c6f73a2..b0dfac3d3df7 100644
771     --- a/arch/x86/lib/usercopy_64.c
772     +++ b/arch/x86/lib/usercopy_64.c
773     @@ -24,6 +24,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
774     asm volatile(
775     " testq %[size8],%[size8]\n"
776     " jz 4f\n"
777     + " .align 16\n"
778     "0: movq $0,(%[dst])\n"
779     " addq $8,%[dst]\n"
780     " decl %%ecx ; jnz 0b\n"
781     diff --git a/block/bio-integrity.c b/block/bio-integrity.c
782     index bf62c25cde8f..ae07dd78e951 100644
783     --- a/block/bio-integrity.c
784     +++ b/block/bio-integrity.c
785     @@ -278,7 +278,6 @@ bool bio_integrity_prep(struct bio *bio)
786    
787     if (ret == 0) {
788     printk(KERN_ERR "could not attach integrity payload\n");
789     - kfree(buf);
790     status = BLK_STS_RESOURCE;
791     goto err_end_io;
792     }
793     diff --git a/block/blk-mq.c b/block/blk-mq.c
794     index 0550366e25d8..f1b930a300a3 100644
795     --- a/block/blk-mq.c
796     +++ b/block/blk-mq.c
797     @@ -3279,7 +3279,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
798    
799     if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
800     nr_hw_queues = nr_cpu_ids;
801     - if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
802     + if (nr_hw_queues < 1)
803     + return;
804     + if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
805     return;
806    
807     list_for_each_entry(q, &set->tag_list, tag_set_list)
808     diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
809     index 57d9d574d4dd..01738d8e888e 100644
810     --- a/drivers/acpi/acpi_configfs.c
811     +++ b/drivers/acpi/acpi_configfs.c
812     @@ -11,6 +11,7 @@
813     #include <linux/module.h>
814     #include <linux/configfs.h>
815     #include <linux/acpi.h>
816     +#include <linux/security.h>
817    
818     #include "acpica/accommon.h"
819     #include "acpica/actables.h"
820     @@ -28,7 +29,10 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg,
821     {
822     const struct acpi_table_header *header = data;
823     struct acpi_table *table;
824     - int ret;
825     + int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
826     +
827     + if (ret)
828     + return ret;
829    
830     table = container_of(cfg, struct acpi_table, cfg);
831    
832     diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
833     index 3a89909b50a6..76c668c05fa0 100644
834     --- a/drivers/acpi/sysfs.c
835     +++ b/drivers/acpi/sysfs.c
836     @@ -938,13 +938,13 @@ static void __exit interrupt_stats_exit(void)
837     }
838    
839     static ssize_t
840     -acpi_show_profile(struct device *dev, struct device_attribute *attr,
841     +acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
842     char *buf)
843     {
844     return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
845     }
846    
847     -static const struct device_attribute pm_profile_attr =
848     +static const struct kobj_attribute pm_profile_attr =
849     __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
850    
851     static ssize_t hotplug_enabled_show(struct kobject *kobj,
852     diff --git a/drivers/android/binder.c b/drivers/android/binder.c
853     index 34a6de65aa7e..5e6586af21b7 100644
854     --- a/drivers/android/binder.c
855     +++ b/drivers/android/binder.c
856     @@ -4688,8 +4688,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
857    
858     static void binder_free_proc(struct binder_proc *proc)
859     {
860     + struct binder_device *device;
861     +
862     BUG_ON(!list_empty(&proc->todo));
863     BUG_ON(!list_empty(&proc->delivered_death));
864     + device = container_of(proc->context, struct binder_device, context);
865     + if (refcount_dec_and_test(&device->ref)) {
866     + kfree(proc->context->name);
867     + kfree(device);
868     + }
869     binder_alloc_deferred_release(&proc->alloc);
870     put_task_struct(proc->tsk);
871     binder_stats_deleted(BINDER_STAT_PROC);
872     @@ -5408,7 +5415,6 @@ static int binder_node_release(struct binder_node *node, int refs)
873     static void binder_deferred_release(struct binder_proc *proc)
874     {
875     struct binder_context *context = proc->context;
876     - struct binder_device *device;
877     struct rb_node *n;
878     int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
879    
880     @@ -5425,12 +5431,6 @@ static void binder_deferred_release(struct binder_proc *proc)
881     context->binder_context_mgr_node = NULL;
882     }
883     mutex_unlock(&context->context_mgr_node_lock);
884     - device = container_of(proc->context, struct binder_device, context);
885     - if (refcount_dec_and_test(&device->ref)) {
886     - kfree(context->name);
887     - kfree(device);
888     - }
889     - proc->context = NULL;
890     binder_inner_proc_lock(proc);
891     /*
892     * Make sure proc stays alive after we
893     diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
894     index 5af34a3201ed..5596c9b6ebf2 100644
895     --- a/drivers/ata/libata-scsi.c
896     +++ b/drivers/ata/libata-scsi.c
897     @@ -3978,12 +3978,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
898     {
899     struct scsi_cmnd *scmd = qc->scsicmd;
900     const u8 *cdb = scmd->cmnd;
901     - const u8 *p;
902     u8 pg, spg;
903     unsigned six_byte, pg_len, hdr_len, bd_len;
904     int len;
905     u16 fp = (u16)-1;
906     u8 bp = 0xff;
907     + u8 buffer[64];
908     + const u8 *p = buffer;
909    
910     VPRINTK("ENTER\n");
911    
912     @@ -4017,12 +4018,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
913     if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
914     goto invalid_param_len;
915    
916     - p = page_address(sg_page(scsi_sglist(scmd)));
917     -
918     /* Move past header and block descriptors. */
919     if (len < hdr_len)
920     goto invalid_param_len;
921    
922     + if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd),
923     + buffer, sizeof(buffer)))
924     + goto invalid_param_len;
925     +
926     if (six_byte)
927     bd_len = p[3];
928     else
929     diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
930     index 3495e1733a8e..c35b7b993133 100644
931     --- a/drivers/ata/sata_rcar.c
932     +++ b/drivers/ata/sata_rcar.c
933     @@ -905,7 +905,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
934     pm_runtime_enable(dev);
935     ret = pm_runtime_get_sync(dev);
936     if (ret < 0)
937     - goto err_pm_disable;
938     + goto err_pm_put;
939    
940     host = ata_host_alloc(dev, 1);
941     if (!host) {
942     @@ -935,7 +935,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
943    
944     err_pm_put:
945     pm_runtime_put(dev);
946     -err_pm_disable:
947     pm_runtime_disable(dev);
948     return ret;
949     }
950     @@ -989,8 +988,10 @@ static int sata_rcar_resume(struct device *dev)
951     int ret;
952    
953     ret = pm_runtime_get_sync(dev);
954     - if (ret < 0)
955     + if (ret < 0) {
956     + pm_runtime_put(dev);
957     return ret;
958     + }
959    
960     if (priv->type == RCAR_GEN3_SATA) {
961     sata_rcar_init_module(priv);
962     @@ -1015,8 +1016,10 @@ static int sata_rcar_restore(struct device *dev)
963     int ret;
964    
965     ret = pm_runtime_get_sync(dev);
966     - if (ret < 0)
967     + if (ret < 0) {
968     + pm_runtime_put(dev);
969     return ret;
970     + }
971    
972     sata_rcar_setup_port(host);
973    
974     diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
975     index 59f911e57719..508bbd6ea439 100644
976     --- a/drivers/base/regmap/regmap.c
977     +++ b/drivers/base/regmap/regmap.c
978     @@ -1356,6 +1356,7 @@ void regmap_exit(struct regmap *map)
979     if (map->hwlock)
980     hwspin_lock_free(map->hwlock);
981     kfree_const(map->name);
982     + kfree(map->patch);
983     kfree(map);
984     }
985     EXPORT_SYMBOL_GPL(regmap_exit);
986     diff --git a/drivers/block/loop.c b/drivers/block/loop.c
987     index 57ed6b70d295..565e35e69f24 100644
988     --- a/drivers/block/loop.c
989     +++ b/drivers/block/loop.c
990     @@ -1284,7 +1284,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
991     if (lo->lo_offset != info->lo_offset ||
992     lo->lo_sizelimit != info->lo_sizelimit) {
993     sync_blockdev(lo->lo_device);
994     - kill_bdev(lo->lo_device);
995     + invalidate_bdev(lo->lo_device);
996     }
997    
998     /* I/O need to be drained during transfer transition */
999     @@ -1558,12 +1558,12 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
1000    
1001     if (lo->lo_queue->limits.logical_block_size != arg) {
1002     sync_blockdev(lo->lo_device);
1003     - kill_bdev(lo->lo_device);
1004     + invalidate_bdev(lo->lo_device);
1005     }
1006    
1007     blk_mq_freeze_queue(lo->lo_queue);
1008    
1009     - /* kill_bdev should have truncated all the pages */
1010     + /* invalidate_bdev should have truncated all the pages */
1011     if (lo->lo_queue->limits.logical_block_size != arg &&
1012     lo->lo_device->bd_inode->i_mapping->nrpages) {
1013     err = -EAGAIN;
1014     diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
1015     index f0bc0841cbc4..553c0e279621 100644
1016     --- a/drivers/bus/ti-sysc.c
1017     +++ b/drivers/bus/ti-sysc.c
1018     @@ -880,10 +880,13 @@ static int sysc_enable_module(struct device *dev)
1019     regbits = ddata->cap->regbits;
1020     reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1021    
1022     - /* Set CLOCKACTIVITY, we only use it for ick */
1023     + /*
1024     + * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
1025     + * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
1026     + * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
1027     + */
1028     if (regbits->clkact_shift >= 0 &&
1029     - (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
1030     - ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
1031     + (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
1032     reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
1033    
1034     /* Set SIDLE mode */
1035     @@ -938,6 +941,9 @@ set_autoidle:
1036     sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
1037     }
1038    
1039     + /* Flush posted write */
1040     + sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1041     +
1042     if (ddata->module_enable_quirk)
1043     ddata->module_enable_quirk(ddata);
1044    
1045     @@ -1018,6 +1024,9 @@ set_sidle:
1046     reg |= 1 << regbits->autoidle_shift;
1047     sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
1048    
1049     + /* Flush posted write */
1050     + sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1051     +
1052     return 0;
1053     }
1054    
1055     diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
1056     index a67430010aa6..5c7d3dfcfdd0 100644
1057     --- a/drivers/char/hw_random/ks-sa-rng.c
1058     +++ b/drivers/char/hw_random/ks-sa-rng.c
1059     @@ -208,6 +208,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
1060     ret = pm_runtime_get_sync(dev);
1061     if (ret < 0) {
1062     dev_err(dev, "Failed to enable SA power-domain\n");
1063     + pm_runtime_put_noidle(dev);
1064     pm_runtime_disable(dev);
1065     return ret;
1066     }
1067     diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
1068     index 6282ee2f361c..a8901f90a61a 100644
1069     --- a/drivers/clk/sifive/fu540-prci.c
1070     +++ b/drivers/clk/sifive/fu540-prci.c
1071     @@ -586,7 +586,10 @@ static int sifive_fu540_prci_probe(struct platform_device *pdev)
1072     struct __prci_data *pd;
1073     int r;
1074    
1075     - pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1076     + pd = devm_kzalloc(dev,
1077     + struct_size(pd, hw_clks.hws,
1078     + ARRAY_SIZE(__prci_init_clocks)),
1079     + GFP_KERNEL);
1080     if (!pd)
1081     return -ENOMEM;
1082    
1083     diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
1084     index d6dd5f503fa2..e8f71a50ba89 100644
1085     --- a/drivers/firmware/efi/esrt.c
1086     +++ b/drivers/firmware/efi/esrt.c
1087     @@ -181,7 +181,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
1088     rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
1089     "entry%d", entry_num);
1090     if (rc) {
1091     - kfree(entry);
1092     + kobject_put(&entry->kobj);
1093     return rc;
1094     }
1095     }
1096     diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
1097     index bd715012185c..23fc16dc92b4 100644
1098     --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
1099     +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
1100     @@ -1273,8 +1273,12 @@ static int sdma_v5_0_sw_fini(void *handle)
1101     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1102     int i;
1103    
1104     - for (i = 0; i < adev->sdma.num_instances; i++)
1105     + for (i = 0; i < adev->sdma.num_instances; i++) {
1106     + if (adev->sdma.instance[i].fw != NULL)
1107     + release_firmware(adev->sdma.instance[i].fw);
1108     +
1109     amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1110     + }
1111    
1112     return 0;
1113     }
1114     diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1115     index 40e3fc0c6942..aa0a617b8d44 100644
1116     --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1117     +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1118     @@ -312,6 +312,7 @@ struct kfd_process *kfd_create_process(struct file *filep)
1119     (int)process->lead_thread->pid);
1120     if (ret) {
1121     pr_warn("Creating procfs pid directory failed");
1122     + kobject_put(process->kobj);
1123     goto out;
1124     }
1125    
1126     diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1127     index 207435fa4f2c..51d07a4561ce 100644
1128     --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1129     +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
1130     @@ -1862,7 +1862,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
1131    
1132     kfree(rgb_regamma);
1133     rgb_regamma_alloc_fail:
1134     - kvfree(rgb_user);
1135     + kfree(rgb_user);
1136     rgb_user_alloc_fail:
1137     return ret;
1138     }
1139     diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
1140     index d9e62ca65ab8..bd2e577c701f 100644
1141     --- a/drivers/gpu/drm/radeon/ni_dpm.c
1142     +++ b/drivers/gpu/drm/radeon/ni_dpm.c
1143     @@ -2128,7 +2128,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev)
1144     if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
1145     ret = -EINVAL;
1146    
1147     - if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
1148     + if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
1149     ret = -EINVAL;
1150    
1151     if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
1152     diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
1153     index 1529849e217e..7cdba77b1420 100644
1154     --- a/drivers/gpu/drm/rcar-du/Kconfig
1155     +++ b/drivers/gpu/drm/rcar-du/Kconfig
1156     @@ -23,6 +23,7 @@ config DRM_RCAR_DW_HDMI
1157     config DRM_RCAR_LVDS
1158     tristate "R-Car DU LVDS Encoder Support"
1159     depends on DRM && DRM_BRIDGE && OF
1160     + select DRM_KMS_HELPER
1161     select DRM_PANEL
1162     select OF_FLATTREE
1163     select OF_OVERLAY
1164     diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
1165     index e0c256922d4f..977d6f524649 100644
1166     --- a/drivers/i2c/busses/i2c-fsi.c
1167     +++ b/drivers/i2c/busses/i2c-fsi.c
1168     @@ -98,7 +98,7 @@
1169     #define I2C_STAT_DAT_REQ BIT(25)
1170     #define I2C_STAT_CMD_COMP BIT(24)
1171     #define I2C_STAT_STOP_ERR BIT(23)
1172     -#define I2C_STAT_MAX_PORT GENMASK(19, 16)
1173     +#define I2C_STAT_MAX_PORT GENMASK(22, 16)
1174     #define I2C_STAT_ANY_INT BIT(15)
1175     #define I2C_STAT_SCL_IN BIT(11)
1176     #define I2C_STAT_SDA_IN BIT(10)
1177     diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
1178     index dbc43cfec19d..331f7cca9bab 100644
1179     --- a/drivers/i2c/busses/i2c-tegra.c
1180     +++ b/drivers/i2c/busses/i2c-tegra.c
1181     @@ -1719,14 +1719,9 @@ static int tegra_i2c_remove(struct platform_device *pdev)
1182     static int __maybe_unused tegra_i2c_suspend(struct device *dev)
1183     {
1184     struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
1185     - int err;
1186    
1187     i2c_mark_adapter_suspended(&i2c_dev->adapter);
1188    
1189     - err = pm_runtime_force_suspend(dev);
1190     - if (err < 0)
1191     - return err;
1192     -
1193     return 0;
1194     }
1195    
1196     @@ -1747,10 +1742,6 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev)
1197     if (err)
1198     return err;
1199    
1200     - err = pm_runtime_force_resume(dev);
1201     - if (err < 0)
1202     - return err;
1203     -
1204     i2c_mark_adapter_resumed(&i2c_dev->adapter);
1205    
1206     return 0;
1207     diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
1208     index 3ac426a8ab5a..c2ae8c8cd429 100644
1209     --- a/drivers/i2c/i2c-core-smbus.c
1210     +++ b/drivers/i2c/i2c-core-smbus.c
1211     @@ -495,6 +495,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
1212     break;
1213     case I2C_SMBUS_BLOCK_DATA:
1214     case I2C_SMBUS_BLOCK_PROC_CALL:
1215     + if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) {
1216     + dev_err(&adapter->dev,
1217     + "Invalid block size returned: %d\n",
1218     + msg[1].buf[0]);
1219     + status = -EPROTO;
1220     + goto cleanup;
1221     + }
1222     for (i = 0; i < msg[1].buf[0] + 1; i++)
1223     data->block[i] = msg[1].buf[i];
1224     break;
1225     diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1226     index 8f776b7de45e..e3cd9d2b0dd2 100644
1227     --- a/drivers/infiniband/core/cma.c
1228     +++ b/drivers/infiniband/core/cma.c
1229     @@ -1631,6 +1631,8 @@ static struct rdma_id_private *cma_find_listener(
1230     {
1231     struct rdma_id_private *id_priv, *id_priv_dev;
1232    
1233     + lockdep_assert_held(&lock);
1234     +
1235     if (!bind_list)
1236     return ERR_PTR(-EINVAL);
1237    
1238     @@ -1677,6 +1679,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
1239     }
1240     }
1241    
1242     + mutex_lock(&lock);
1243     /*
1244     * Net namespace might be getting deleted while route lookup,
1245     * cm_id lookup is in progress. Therefore, perform netdevice
1246     @@ -1718,6 +1721,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
1247     id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
1248     err:
1249     rcu_read_unlock();
1250     + mutex_unlock(&lock);
1251     if (IS_ERR(id_priv) && *net_dev) {
1252     dev_put(*net_dev);
1253     *net_dev = NULL;
1254     @@ -2473,6 +2477,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
1255     struct net *net = id_priv->id.route.addr.dev_addr.net;
1256     int ret;
1257    
1258     + lockdep_assert_held(&lock);
1259     +
1260     if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
1261     return;
1262    
1263     @@ -3245,6 +3251,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
1264     u64 sid, mask;
1265     __be16 port;
1266    
1267     + lockdep_assert_held(&lock);
1268     +
1269     addr = cma_src_addr(id_priv);
1270     port = htons(bind_list->port);
1271    
1272     @@ -3273,6 +3281,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
1273     struct rdma_bind_list *bind_list;
1274     int ret;
1275    
1276     + lockdep_assert_held(&lock);
1277     +
1278     bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1279     if (!bind_list)
1280     return -ENOMEM;
1281     @@ -3299,6 +3309,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
1282     struct sockaddr *saddr = cma_src_addr(id_priv);
1283     __be16 dport = cma_port(daddr);
1284    
1285     + lockdep_assert_held(&lock);
1286     +
1287     hlist_for_each_entry(cur_id, &bind_list->owners, node) {
1288     struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
1289     struct sockaddr *cur_saddr = cma_src_addr(cur_id);
1290     @@ -3338,6 +3350,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
1291     unsigned int rover;
1292     struct net *net = id_priv->id.route.addr.dev_addr.net;
1293    
1294     + lockdep_assert_held(&lock);
1295     +
1296     inet_get_local_port_range(net, &low, &high);
1297     remaining = (high - low) + 1;
1298     rover = prandom_u32() % remaining + low;
1299     @@ -3385,6 +3399,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
1300     struct rdma_id_private *cur_id;
1301     struct sockaddr *addr, *cur_addr;
1302    
1303     + lockdep_assert_held(&lock);
1304     +
1305     addr = cma_src_addr(id_priv);
1306     hlist_for_each_entry(cur_id, &bind_list->owners, node) {
1307     if (id_priv == cur_id)
1308     @@ -3415,6 +3431,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
1309     unsigned short snum;
1310     int ret;
1311    
1312     + lockdep_assert_held(&lock);
1313     +
1314     snum = ntohs(cma_port(cma_src_addr(id_priv)));
1315     if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
1316     return -EACCES;
1317     diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
1318     index 9947d16edef2..2284930b5f91 100644
1319     --- a/drivers/infiniband/core/mad.c
1320     +++ b/drivers/infiniband/core/mad.c
1321     @@ -639,10 +639,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
1322     xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
1323    
1324     flush_workqueue(port_priv->wq);
1325     - ib_cancel_rmpp_recvs(mad_agent_priv);
1326    
1327     deref_mad_agent(mad_agent_priv);
1328     wait_for_completion(&mad_agent_priv->comp);
1329     + ib_cancel_rmpp_recvs(mad_agent_priv);
1330    
1331     ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
1332    
1333     @@ -2960,6 +2960,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
1334     DMA_FROM_DEVICE);
1335     if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
1336     sg_list.addr))) {
1337     + kfree(mad_priv);
1338     ret = -ENOMEM;
1339     break;
1340     }
1341     diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
1342     index d268bf9c42ee..c29da2f4e339 100644
1343     --- a/drivers/infiniband/hw/hfi1/debugfs.c
1344     +++ b/drivers/infiniband/hw/hfi1/debugfs.c
1345     @@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
1346     static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
1347     {
1348     struct hfi1_pportdata *ppd;
1349     - int ret;
1350    
1351     ppd = private2ppd(fp);
1352    
1353     - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
1354     - if (ret) /* failed - release the module */
1355     - module_put(THIS_MODULE);
1356     -
1357     - return ret;
1358     + return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
1359     }
1360    
1361     static int i2c1_debugfs_open(struct inode *in, struct file *fp)
1362     @@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
1363     ppd = private2ppd(fp);
1364    
1365     release_chip_resource(ppd->dd, i2c_target(target));
1366     - module_put(THIS_MODULE);
1367    
1368     return 0;
1369     }
1370     @@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp)
1371     static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
1372     {
1373     struct hfi1_pportdata *ppd;
1374     - int ret;
1375     -
1376     - if (!try_module_get(THIS_MODULE))
1377     - return -ENODEV;
1378    
1379     ppd = private2ppd(fp);
1380    
1381     - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
1382     - if (ret) /* failed - release the module */
1383     - module_put(THIS_MODULE);
1384     -
1385     - return ret;
1386     + return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
1387     }
1388    
1389     static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
1390     @@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
1391     ppd = private2ppd(fp);
1392    
1393     release_chip_resource(ppd->dd, i2c_target(target));
1394     - module_put(THIS_MODULE);
1395    
1396     return 0;
1397     }
1398     diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
1399     index 5e9732990be5..a7a926b7b562 100644
1400     --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
1401     +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
1402     @@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
1403     if (params->cm_info) {
1404     event.ird = params->cm_info->ird;
1405     event.ord = params->cm_info->ord;
1406     - event.private_data_len = params->cm_info->private_data_len;
1407     - event.private_data = (void *)params->cm_info->private_data;
1408     + /* Only connect_request and reply events carry valid private
1409     + * data; for the remaining events it may be left over from
1410     + * connection establishment. CONNECT_REQUEST is issued via
1411     + * qedr_iw_mpa_request
1412     + */
1413     + if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
1414     + event.private_data_len =
1415     + params->cm_info->private_data_len;
1416     + event.private_data =
1417     + (void *)params->cm_info->private_data;
1418     + }
1419     }
1420    
1421     if (ep->cm_id)
1422     diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
1423     index d35465389357..19556c62c7ea 100644
1424     --- a/drivers/infiniband/sw/rdmavt/qp.c
1425     +++ b/drivers/infiniband/sw/rdmavt/qp.c
1426     @@ -1196,7 +1196,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
1427     err = alloc_ud_wq_attr(qp, rdi->dparms.node);
1428     if (err) {
1429     ret = (ERR_PTR(err));
1430     - goto bail_driver_priv;
1431     + goto bail_rq_rvt;
1432     }
1433    
1434     err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
1435     @@ -1300,9 +1300,11 @@ bail_qpn:
1436     rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1437    
1438     bail_rq_wq:
1439     - rvt_free_rq(&qp->r_rq);
1440     free_ud_wq_attr(qp);
1441    
1442     +bail_rq_rvt:
1443     + rvt_free_rq(&qp->r_rq);
1444     +
1445     bail_driver_priv:
1446     rdi->driver_f.qp_priv_free(rdi, qp);
1447    
1448     diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
1449     index c0a887240325..0520e70084f9 100644
1450     --- a/drivers/infiniband/sw/siw/siw_qp_rx.c
1451     +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
1452     @@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
1453     break;
1454    
1455     bytes = min(bytes, len);
1456     - if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
1457     + if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
1458     + bytes) {
1459     copied += bytes;
1460     offset += bytes;
1461     len -= bytes;
1462     diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
1463     index 9e393b9c5091..30ac0ba55864 100644
1464     --- a/drivers/iommu/dmar.c
1465     +++ b/drivers/iommu/dmar.c
1466     @@ -898,7 +898,8 @@ int __init detect_intel_iommu(void)
1467     if (!ret)
1468     ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
1469     &validate_drhd_cb);
1470     - if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
1471     + if (!ret && !no_iommu && !iommu_detected &&
1472     + (!dmar_disabled || dmar_platform_optin())) {
1473     iommu_detected = 1;
1474     /* Make sure ACS will be enabled */
1475     pci_request_acs();
1476     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1477     index 773ac2b0d606..6366b5fbb3a4 100644
1478     --- a/drivers/iommu/intel-iommu.c
1479     +++ b/drivers/iommu/intel-iommu.c
1480     @@ -611,6 +611,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
1481     return g_iommus[iommu_id];
1482     }
1483    
1484     +static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
1485     +{
1486     + return sm_supported(iommu) ?
1487     + ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
1488     +}
1489     +
1490     static void domain_update_iommu_coherency(struct dmar_domain *domain)
1491     {
1492     struct dmar_drhd_unit *drhd;
1493     @@ -622,7 +628,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
1494    
1495     for_each_domain_iommu(i, domain) {
1496     found = true;
1497     - if (!ecap_coherent(g_iommus[i]->ecap)) {
1498     + if (!iommu_paging_structure_coherency(g_iommus[i])) {
1499     domain->iommu_coherency = 0;
1500     break;
1501     }
1502     @@ -633,7 +639,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
1503     /* No hardware attached; use lowest common denominator */
1504     rcu_read_lock();
1505     for_each_active_iommu(iommu, drhd) {
1506     - if (!ecap_coherent(iommu->ecap)) {
1507     + if (!iommu_paging_structure_coherency(iommu)) {
1508     domain->iommu_coherency = 0;
1509     break;
1510     }
1511     @@ -2090,7 +2096,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1512    
1513     context_set_fault_enable(context);
1514     context_set_present(context);
1515     - domain_flush_cache(domain, context, sizeof(*context));
1516     + if (!ecap_coherent(iommu->ecap))
1517     + clflush_cache_range(context, sizeof(*context));
1518    
1519     /*
1520     * It's a non-present to present mapping. If hardware doesn't cache
1521     diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
1522     index 0d6ca723257f..67eb4e972cc3 100644
1523     --- a/drivers/md/dm-writecache.c
1524     +++ b/drivers/md/dm-writecache.c
1525     @@ -279,6 +279,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
1526     while (daa-- && i < p) {
1527     pages[i++] = pfn_t_to_page(pfn);
1528     pfn.val++;
1529     + if (!(i & 15))
1530     + cond_resched();
1531     }
1532     } while (i < p);
1533     wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
1534     @@ -805,6 +807,8 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
1535     writecache_wait_for_ios(wc, WRITE);
1536     discarded_something = true;
1537     }
1538     + if (!writecache_entry_is_committed(wc, e))
1539     + wc->uncommitted_blocks--;
1540     writecache_free_entry(wc, e);
1541     }
1542    
1543     diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
1544     index d4bbcdfd691a..aa693c8e285a 100644
1545     --- a/drivers/net/ethernet/atheros/alx/main.c
1546     +++ b/drivers/net/ethernet/atheros/alx/main.c
1547     @@ -1249,8 +1249,12 @@ out_disable_adv_intr:
1548    
1549     static void __alx_stop(struct alx_priv *alx)
1550     {
1551     - alx_halt(alx);
1552     alx_free_irq(alx);
1553     +
1554     + cancel_work_sync(&alx->link_check_wk);
1555     + cancel_work_sync(&alx->reset_wk);
1556     +
1557     + alx_halt(alx);
1558     alx_free_rings(alx);
1559     alx_free_napis(alx);
1560     }
1561     @@ -1858,9 +1862,6 @@ static void alx_remove(struct pci_dev *pdev)
1562     struct alx_priv *alx = pci_get_drvdata(pdev);
1563     struct alx_hw *hw = &alx->hw;
1564    
1565     - cancel_work_sync(&alx->link_check_wk);
1566     - cancel_work_sync(&alx->reset_wk);
1567     -
1568     /* restore permanent mac address */
1569     alx_set_macaddr(hw, hw->perm_addr);
1570    
1571     diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1572     index 3d3b1005d076..03f82786c0b9 100644
1573     --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1574     +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1575     @@ -1591,11 +1591,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1576     goto out;
1577     }
1578    
1579     - if (skb_padto(skb, ETH_ZLEN)) {
1580     - ret = NETDEV_TX_OK;
1581     - goto out;
1582     - }
1583     -
1584     /* Retain how many bytes will be sent on the wire, without TSB inserted
1585     * by transmit checksum offload
1586     */
1587     @@ -1644,6 +1639,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1588     len_stat = (size << DMA_BUFLENGTH_SHIFT) |
1589     (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
1590    
1591     + /* Note: if we ever change from DMA_TX_APPEND_CRC below we
1592     + * will need to restore software padding of "runt" packets
1593     + */
1594     if (!i) {
1595     len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
1596     if (skb->ip_summed == CHECKSUM_PARTIAL)
1597     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1598     index ca3aa1250dd1..e12ba81288e6 100644
1599     --- a/drivers/net/ethernet/broadcom/tg3.c
1600     +++ b/drivers/net/ethernet/broadcom/tg3.c
1601     @@ -18176,8 +18176,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
1602    
1603     rtnl_lock();
1604    
1605     - /* We probably don't have netdev yet */
1606     - if (!netdev || !netif_running(netdev))
1607     + /* Could be second call or maybe we don't have netdev yet */
1608     + if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
1609     goto done;
1610    
1611     /* We needn't recover from permanent error */
1612     diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
1613     index e6fe2870137b..a440c1cf0b61 100644
1614     --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
1615     +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
1616     @@ -506,41 +506,20 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
1617     }
1618     EXPORT_SYMBOL(cxgb4_select_ntuple);
1619    
1620     -/*
1621     - * Called when address resolution fails for an L2T entry to handle packets
1622     - * on the arpq head. If a packet specifies a failure handler it is invoked,
1623     - * otherwise the packet is sent to the device.
1624     - */
1625     -static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
1626     -{
1627     - struct sk_buff *skb;
1628     -
1629     - while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
1630     - const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
1631     -
1632     - spin_unlock(&e->lock);
1633     - if (cb->arp_err_handler)
1634     - cb->arp_err_handler(cb->handle, skb);
1635     - else
1636     - t4_ofld_send(adap, skb);
1637     - spin_lock(&e->lock);
1638     - }
1639     -}
1640     -
1641     /*
1642     * Called when the host's neighbor layer makes a change to some entry that is
1643     * loaded into the HW L2 table.
1644     */
1645     void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
1646     {
1647     - struct l2t_entry *e;
1648     - struct sk_buff_head *arpq = NULL;
1649     - struct l2t_data *d = adap->l2t;
1650     unsigned int addr_len = neigh->tbl->key_len;
1651     u32 *addr = (u32 *) neigh->primary_key;
1652     - int ifidx = neigh->dev->ifindex;
1653     - int hash = addr_hash(d, addr, addr_len, ifidx);
1654     + int hash, ifidx = neigh->dev->ifindex;
1655     + struct sk_buff_head *arpq = NULL;
1656     + struct l2t_data *d = adap->l2t;
1657     + struct l2t_entry *e;
1658    
1659     + hash = addr_hash(d, addr, addr_len, ifidx);
1660     read_lock_bh(&d->lock);
1661     for (e = d->l2tab[hash].first; e; e = e->next)
1662     if (!addreq(e, addr) && e->ifindex == ifidx) {
1663     @@ -573,8 +552,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
1664     write_l2e(adap, e, 0);
1665     }
1666    
1667     - if (arpq)
1668     - handle_failed_resolution(adap, e);
1669     + if (arpq) {
1670     + struct sk_buff *skb;
1671     +
1672     + /* Address resolution failed for this L2T entry;
1673     + * handle the packets on the arpq head. If a
1674     + * packet specifies a failure handler it is invoked,
1675     + * otherwise the packet is sent to the device.
1676     + */
1677     + while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
1678     + const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
1679     +
1680     + spin_unlock(&e->lock);
1681     + if (cb->arp_err_handler)
1682     + cb->arp_err_handler(cb->handle, skb);
1683     + else
1684     + t4_ofld_send(adap, skb);
1685     + spin_lock(&e->lock);
1686     + }
1687     + }
1688     spin_unlock_bh(&e->lock);
1689     }
1690    
1691     diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
1692     index b6ff89307409..4ef4d41b0d8d 100644
1693     --- a/drivers/net/ethernet/freescale/enetc/enetc.c
1694     +++ b/drivers/net/ethernet/freescale/enetc/enetc.c
1695     @@ -254,7 +254,7 @@ static irqreturn_t enetc_msix(int irq, void *data)
1696     /* disable interrupts */
1697     enetc_wr_reg(v->rbier, 0);
1698    
1699     - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
1700     + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
1701     enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
1702    
1703     napi_schedule_irqoff(&v->napi);
1704     @@ -290,7 +290,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
1705     /* enable interrupts */
1706     enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
1707    
1708     - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
1709     + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
1710     enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
1711     ENETC_TBIER_TXTIE);
1712    
1713     diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
1714     index c5be4ebd8437..aa32a5b04112 100644
1715     --- a/drivers/net/ethernet/ibm/ibmveth.c
1716     +++ b/drivers/net/ethernet/ibm/ibmveth.c
1717     @@ -1682,7 +1682,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1718     }
1719    
1720     netdev->min_mtu = IBMVETH_MIN_MTU;
1721     - netdev->max_mtu = ETH_MAX_MTU;
1722     + netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
1723    
1724     memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
1725    
1726     diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
1727     index 5a42ddeecfe5..4f503b9a674c 100644
1728     --- a/drivers/net/ethernet/ibm/ibmvnic.c
1729     +++ b/drivers/net/ethernet/ibm/ibmvnic.c
1730     @@ -779,12 +779,13 @@ static int ibmvnic_login(struct net_device *netdev)
1731     struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1732     unsigned long timeout = msecs_to_jiffies(30000);
1733     int retry_count = 0;
1734     + int retries = 10;
1735     bool retry;
1736     int rc;
1737    
1738     do {
1739     retry = false;
1740     - if (retry_count > IBMVNIC_MAX_QUEUES) {
1741     + if (retry_count > retries) {
1742     netdev_warn(netdev, "Login attempts exceeded\n");
1743     return -1;
1744     }
1745     @@ -799,11 +800,23 @@ static int ibmvnic_login(struct net_device *netdev)
1746    
1747     if (!wait_for_completion_timeout(&adapter->init_done,
1748     timeout)) {
1749     - netdev_warn(netdev, "Login timed out\n");
1750     - return -1;
1751     + netdev_warn(netdev, "Login timed out, retrying...\n");
1752     + retry = true;
1753     + adapter->init_done_rc = 0;
1754     + retry_count++;
1755     + continue;
1756     }
1757    
1758     - if (adapter->init_done_rc == PARTIALSUCCESS) {
1759     + if (adapter->init_done_rc == ABORTED) {
1760     + netdev_warn(netdev, "Login aborted, retrying...\n");
1761     + retry = true;
1762     + adapter->init_done_rc = 0;
1763     + retry_count++;
1764     + /* FW or device may be busy, so
1765     + * wait a bit before retrying login
1766     + */
1767     + msleep(500);
1768     + } else if (adapter->init_done_rc == PARTIALSUCCESS) {
1769     retry_count++;
1770     release_sub_crqs(adapter, 1);
1771    
1772     diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1773     index cf5d447af7db..0f136f1af5d1 100644
1774     --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1775     +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1776     @@ -1541,7 +1541,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port)
1777     for (q = 0; q < port->ntxqs; q++)
1778     for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
1779     *pstats++ += mvpp2_read_index(port->priv,
1780     - MVPP22_CTRS_TX_CTR(port->id, i),
1781     + MVPP22_CTRS_TX_CTR(port->id, q),
1782     mvpp2_ethtool_txq_regs[i].offset);
1783    
1784     /* Rxqs are numbered from 0 from the user standpoint, but not from the
1785     @@ -1550,7 +1550,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port)
1786     for (q = 0; q < port->nrxqs; q++)
1787     for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
1788     *pstats++ += mvpp2_read_index(port->priv,
1789     - port->first_rxq + i,
1790     + port->first_rxq + q,
1791     mvpp2_ethtool_rxq_regs[i].offset);
1792     }
1793    
1794     diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
1795     index 8e1bdf58b9e7..1d6dfba0c034 100644
1796     --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
1797     +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
1798     @@ -396,7 +396,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
1799     vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
1800     }
1801    
1802     - iids->vf_cids += vf_cids * p_mngr->vf_count;
1803     + iids->vf_cids = vf_cids;
1804     iids->tids += vf_tids * p_mngr->vf_count;
1805    
1806     DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1807     diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
1808     index 0bf91df80d47..ecd14474a603 100644
1809     --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
1810     +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
1811     @@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
1812    
1813     void qed_resc_free(struct qed_dev *cdev)
1814     {
1815     + struct qed_rdma_info *rdma_info;
1816     + struct qed_hwfn *p_hwfn;
1817     int i;
1818    
1819     if (IS_VF(cdev)) {
1820     @@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev)
1821     qed_llh_free(cdev);
1822    
1823     for_each_hwfn(cdev, i) {
1824     - struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1825     + p_hwfn = cdev->hwfns + i;
1826     + rdma_info = p_hwfn->p_rdma_info;
1827    
1828     qed_cxt_mngr_free(p_hwfn);
1829     qed_qm_info_free(p_hwfn);
1830     @@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev)
1831     qed_ooo_free(p_hwfn);
1832     }
1833    
1834     - if (QED_IS_RDMA_PERSONALITY(p_hwfn))
1835     + if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
1836     + qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
1837     qed_rdma_info_free(p_hwfn);
1838     + }
1839    
1840     qed_iov_free(p_hwfn);
1841     qed_l2_free(p_hwfn);
1842     diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
1843     index 65ec16a31658..2b3102a2fe5c 100644
1844     --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
1845     +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
1846     @@ -2832,8 +2832,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
1847     if (rc)
1848     return rc;
1849    
1850     - qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
1851     -
1852     return qed_iwarp_ll2_stop(p_hwfn);
1853     }
1854    
1855     diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
1856     index e49fada85410..83817bb50e9f 100644
1857     --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
1858     +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
1859     @@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
1860     break;
1861     }
1862     }
1863     - qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
1864     }
1865    
1866     static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
1867     diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
1868     index 856051f50eb7..adc2c8f3d48e 100644
1869     --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
1870     +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
1871     @@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
1872     mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
1873     }
1874    
1875     +#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90
1876     +#define QED_VF_CHANNEL_USLEEP_DELAY 100
1877     +#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
1878     +#define QED_VF_CHANNEL_MSLEEP_DELAY 25
1879     +
1880     static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
1881     {
1882     union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
1883     struct ustorm_trigger_vf_zone trigger;
1884     struct ustorm_vf_zone *zone_data;
1885     - int rc = 0, time = 100;
1886     + int iter, rc = 0;
1887    
1888     zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
1889    
1890     @@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
1891     REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
1892    
1893     /* When PF would be done with the response, it would write back to the
1894     - * `done' address. Poll until then.
1895     + * `done' address from a coherent DMA zone. Poll until then.
1896     */
1897     - while ((!*done) && time) {
1898     - msleep(25);
1899     - time--;
1900     +
1901     + iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
1902     + while (!*done && iter--) {
1903     + udelay(QED_VF_CHANNEL_USLEEP_DELAY);
1904     + dma_rmb();
1905     + }
1906     +
1907     + iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
1908     + while (!*done && iter--) {
1909     + msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
1910     + dma_rmb();
1911     }
1912    
1913     if (!*done) {
1914     diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
1915     index 1da6b5bda80a..2c3d654c8454 100644
1916     --- a/drivers/net/ethernet/qlogic/qede/qede_main.c
1917     +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
1918     @@ -1158,7 +1158,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
1919    
1920     /* PTP not supported on VFs */
1921     if (!is_vf)
1922     - qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
1923     + qede_ptp_enable(edev);
1924    
1925     edev->ops->register_ops(cdev, &qede_ll_ops, edev);
1926    
1927     @@ -1247,6 +1247,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1928     if (system_state == SYSTEM_POWER_OFF)
1929     return;
1930     qed_ops->common->remove(cdev);
1931     + edev->cdev = NULL;
1932    
1933     /* Since this can happen out-of-sync with other flows,
1934     * don't release the netdevice until after slowpath stop
1935     diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
1936     index f815435cf106..2d3b2fa92df5 100644
1937     --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
1938     +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
1939     @@ -411,6 +411,7 @@ void qede_ptp_disable(struct qede_dev *edev)
1940     if (ptp->tx_skb) {
1941     dev_kfree_skb_any(ptp->tx_skb);
1942     ptp->tx_skb = NULL;
1943     + clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
1944     }
1945    
1946     /* Disable PTP in HW */
1947     @@ -422,7 +423,7 @@ void qede_ptp_disable(struct qede_dev *edev)
1948     edev->ptp = NULL;
1949     }
1950    
1951     -static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
1952     +static int qede_ptp_init(struct qede_dev *edev)
1953     {
1954     struct qede_ptp *ptp;
1955     int rc;
1956     @@ -443,25 +444,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
1957     /* Init work queue for Tx timestamping */
1958     INIT_WORK(&ptp->work, qede_ptp_task);
1959    
1960     - /* Init cyclecounter and timecounter. This is done only in the first
1961     - * load. If done in every load, PTP application will fail when doing
1962     - * unload / load (e.g. MTU change) while it is running.
1963     - */
1964     - if (init_tc) {
1965     - memset(&ptp->cc, 0, sizeof(ptp->cc));
1966     - ptp->cc.read = qede_ptp_read_cc;
1967     - ptp->cc.mask = CYCLECOUNTER_MASK(64);
1968     - ptp->cc.shift = 0;
1969     - ptp->cc.mult = 1;
1970     -
1971     - timecounter_init(&ptp->tc, &ptp->cc,
1972     - ktime_to_ns(ktime_get_real()));
1973     - }
1974     + /* Init cyclecounter and timecounter */
1975     + memset(&ptp->cc, 0, sizeof(ptp->cc));
1976     + ptp->cc.read = qede_ptp_read_cc;
1977     + ptp->cc.mask = CYCLECOUNTER_MASK(64);
1978     + ptp->cc.shift = 0;
1979     + ptp->cc.mult = 1;
1980    
1981     - return rc;
1982     + timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
1983     +
1984     + return 0;
1985     }
1986    
1987     -int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
1988     +int qede_ptp_enable(struct qede_dev *edev)
1989     {
1990     struct qede_ptp *ptp;
1991     int rc;
1992     @@ -482,7 +477,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
1993    
1994     edev->ptp = ptp;
1995    
1996     - rc = qede_ptp_init(edev, init_tc);
1997     + rc = qede_ptp_init(edev);
1998     if (rc)
1999     goto err1;
2000    
2001     diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
2002     index 691a14c4b2c5..89c7f3cf3ee2 100644
2003     --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
2004     +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
2005     @@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
2006     void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
2007     int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
2008     void qede_ptp_disable(struct qede_dev *edev);
2009     -int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
2010     +int qede_ptp_enable(struct qede_dev *edev);
2011     int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
2012    
2013     static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
2014     diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
2015     index 2d873ae8a234..668ccc9d49f8 100644
2016     --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
2017     +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
2018     @@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)
2019    
2020     qede_rdma_cleanup_event(edev);
2021     destroy_workqueue(edev->rdma_info.rdma_wq);
2022     + edev->rdma_info.rdma_wq = NULL;
2023     }
2024    
2025     int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
2026     @@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev,
2027     if (edev->rdma_info.exp_recovery)
2028     return;
2029    
2030     - if (!edev->rdma_info.qedr_dev)
2031     + if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
2032     return;
2033    
2034     /* We don't want the cleanup flow to start while we're allocating and
2035     diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
2036     index 786b158bd305..5abb3f9684ff 100644
2037     --- a/drivers/net/ethernet/rocker/rocker_main.c
2038     +++ b/drivers/net/ethernet/rocker/rocker_main.c
2039     @@ -647,10 +647,10 @@ static int rocker_dma_rings_init(struct rocker *rocker)
2040     err_dma_event_ring_bufs_alloc:
2041     rocker_dma_ring_destroy(rocker, &rocker->event_ring);
2042     err_dma_event_ring_create:
2043     + rocker_dma_cmd_ring_waits_free(rocker);
2044     +err_dma_cmd_ring_waits_alloc:
2045     rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
2046     PCI_DMA_BIDIRECTIONAL);
2047     -err_dma_cmd_ring_waits_alloc:
2048     - rocker_dma_cmd_ring_waits_free(rocker);
2049     err_dma_cmd_ring_bufs_alloc:
2050     rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
2051     return err;
2052     diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
2053     index cac75c7d1d01..19d9d78a6df2 100644
2054     --- a/drivers/net/geneve.c
2055     +++ b/drivers/net/geneve.c
2056     @@ -1649,6 +1649,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
2057     geneve->collect_md = metadata;
2058     geneve->use_udp6_rx_checksums = use_udp6_rx_checksums;
2059     geneve->ttl_inherit = ttl_inherit;
2060     + geneve->df = df;
2061     geneve_unquiesce(geneve, gs4, gs6);
2062    
2063     return 0;
2064     diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
2065     index 0907c3d8d94a..dba52a5c378a 100644
2066     --- a/drivers/net/phy/phy_device.c
2067     +++ b/drivers/net/phy/phy_device.c
2068     @@ -797,8 +797,10 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
2069    
2070     /* Grab the bits from PHYIR2, and put them in the lower half */
2071     phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
2072     - if (phy_reg < 0)
2073     - return -EIO;
2074     + if (phy_reg < 0) {
2075     + /* returning -ENODEV doesn't stop bus scanning */
2076     + return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO;
2077     + }
2078    
2079     *phy_id |= phy_reg;
2080    
2081     diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
2082     index daa54486ab09..df2f7cc6dc03 100644
2083     --- a/drivers/net/usb/ax88179_178a.c
2084     +++ b/drivers/net/usb/ax88179_178a.c
2085     @@ -1387,10 +1387,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2086     }
2087    
2088     if (pkt_cnt == 0) {
2089     - /* Skip IP alignment psudo header */
2090     - skb_pull(skb, 2);
2091     skb->len = pkt_len;
2092     - skb_set_tail_pointer(skb, pkt_len);
2093     + /* Skip IP alignment pseudo header */
2094     + skb_pull(skb, 2);
2095     + skb_set_tail_pointer(skb, skb->len);
2096     skb->truesize = pkt_len + sizeof(struct sk_buff);
2097     ax88179_rx_checksum(skb, pkt_hdr);
2098     return 1;
2099     @@ -1399,8 +1399,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2100     ax_skb = skb_clone(skb, GFP_ATOMIC);
2101     if (ax_skb) {
2102     ax_skb->len = pkt_len;
2103     - ax_skb->data = skb->data + 2;
2104     - skb_set_tail_pointer(ax_skb, pkt_len);
2105     + /* Skip IP alignment pseudo header */
2106     + skb_pull(ax_skb, 2);
2107     + skb_set_tail_pointer(ax_skb, ax_skb->len);
2108     ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
2109     ax88179_rx_checksum(ax_skb, pkt_hdr);
2110     usbnet_skb_return(dev, ax_skb);
2111     diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
2112     index 56caddeabb5e..772eb05e57af 100644
2113     --- a/drivers/nvme/host/multipath.c
2114     +++ b/drivers/nvme/host/multipath.c
2115     @@ -413,11 +413,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
2116     {
2117     struct nvme_ns_head *head = ns->head;
2118    
2119     - lockdep_assert_held(&ns->head->lock);
2120     -
2121     if (!head->disk)
2122     return;
2123    
2124     + mutex_lock(&head->lock);
2125     if (!(head->disk->flags & GENHD_FL_UP))
2126     device_add_disk(&head->subsys->dev, head->disk,
2127     nvme_ns_id_attr_groups);
2128     @@ -430,9 +429,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
2129     __nvme_find_path(head, node);
2130     srcu_read_unlock(&head->srcu, srcu_idx);
2131     }
2132     + mutex_unlock(&head->lock);
2133    
2134     - synchronize_srcu(&ns->head->srcu);
2135     - kblockd_schedule_work(&ns->head->requeue_work);
2136     + synchronize_srcu(&head->srcu);
2137     + kblockd_schedule_work(&head->requeue_work);
2138     }
2139    
2140     static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
2141     @@ -483,14 +483,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
2142     static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
2143     struct nvme_ns *ns)
2144     {
2145     - mutex_lock(&ns->head->lock);
2146     ns->ana_grpid = le32_to_cpu(desc->grpid);
2147     ns->ana_state = desc->state;
2148     clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
2149    
2150     if (nvme_state_is_live(ns->ana_state))
2151     nvme_mpath_set_live(ns);
2152     - mutex_unlock(&ns->head->lock);
2153     }
2154    
2155     static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
2156     @@ -661,10 +659,8 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
2157     nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
2158     mutex_unlock(&ns->ctrl->ana_lock);
2159     } else {
2160     - mutex_lock(&ns->head->lock);
2161     ns->ana_state = NVME_ANA_OPTIMIZED;
2162     nvme_mpath_set_live(ns);
2163     - mutex_unlock(&ns->head->lock);
2164     }
2165     }
2166    
2167     diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
2168     index f1fece5b9c06..3769ad08eadf 100644
2169     --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
2170     +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
2171     @@ -170,6 +170,7 @@ struct pmic_gpio_state {
2172     struct regmap *map;
2173     struct pinctrl_dev *ctrl;
2174     struct gpio_chip chip;
2175     + struct irq_chip irq;
2176     };
2177    
2178     static const struct pinconf_generic_params pmic_gpio_bindings[] = {
2179     @@ -917,16 +918,6 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
2180     return 0;
2181     }
2182    
2183     -static struct irq_chip pmic_gpio_irq_chip = {
2184     - .name = "spmi-gpio",
2185     - .irq_ack = irq_chip_ack_parent,
2186     - .irq_mask = irq_chip_mask_parent,
2187     - .irq_unmask = irq_chip_unmask_parent,
2188     - .irq_set_type = irq_chip_set_type_parent,
2189     - .irq_set_wake = irq_chip_set_wake_parent,
2190     - .flags = IRQCHIP_MASK_ON_SUSPEND,
2191     -};
2192     -
2193     static int pmic_gpio_domain_translate(struct irq_domain *domain,
2194     struct irq_fwspec *fwspec,
2195     unsigned long *hwirq,
2196     @@ -1053,8 +1044,16 @@ static int pmic_gpio_probe(struct platform_device *pdev)
2197     if (!parent_domain)
2198     return -ENXIO;
2199    
2200     + state->irq.name = "spmi-gpio",
2201     + state->irq.irq_ack = irq_chip_ack_parent,
2202     + state->irq.irq_mask = irq_chip_mask_parent,
2203     + state->irq.irq_unmask = irq_chip_unmask_parent,
2204     + state->irq.irq_set_type = irq_chip_set_type_parent,
2205     + state->irq.irq_set_wake = irq_chip_set_wake_parent,
2206     + state->irq.flags = IRQCHIP_MASK_ON_SUSPEND,
2207     +
2208     girq = &state->chip.irq;
2209     - girq->chip = &pmic_gpio_irq_chip;
2210     + girq->chip = &state->irq;
2211     girq->default_type = IRQ_TYPE_NONE;
2212     girq->handler = handle_level_irq;
2213     girq->fwnode = of_node_to_fwnode(state->dev->of_node);
2214     diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
2215     index e9a7cbb9aa33..01bcef2c01bc 100644
2216     --- a/drivers/pinctrl/tegra/pinctrl-tegra.c
2217     +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
2218     @@ -685,8 +685,8 @@ static int tegra_pinctrl_resume(struct device *dev)
2219     }
2220    
2221     const struct dev_pm_ops tegra_pinctrl_pm = {
2222     - .suspend = &tegra_pinctrl_suspend,
2223     - .resume = &tegra_pinctrl_resume
2224     + .suspend_noirq = &tegra_pinctrl_suspend,
2225     + .resume_noirq = &tegra_pinctrl_resume
2226     };
2227    
2228     static bool gpio_node_has_range(const char *compatible)
2229     diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
2230     index 689537927f6f..4c8e8b472287 100644
2231     --- a/drivers/regulator/pfuze100-regulator.c
2232     +++ b/drivers/regulator/pfuze100-regulator.c
2233     @@ -209,6 +209,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
2234    
2235     };
2236    
2237     +static const struct regulator_ops pfuze3000_sw_regulator_ops = {
2238     + .enable = regulator_enable_regmap,
2239     + .disable = regulator_disable_regmap,
2240     + .is_enabled = regulator_is_enabled_regmap,
2241     + .list_voltage = regulator_list_voltage_table,
2242     + .map_voltage = regulator_map_voltage_ascend,
2243     + .set_voltage_sel = regulator_set_voltage_sel_regmap,
2244     + .get_voltage_sel = regulator_get_voltage_sel_regmap,
2245     + .set_voltage_time_sel = regulator_set_voltage_time_sel,
2246     + .set_ramp_delay = pfuze100_set_ramp_delay,
2247     +
2248     +};
2249     +
2250     #define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \
2251     [_chip ## _ ## _name] = { \
2252     .desc = { \
2253     @@ -318,23 +331,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
2254     .stby_mask = 0x20, \
2255     }
2256    
2257     -
2258     -#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \
2259     - .desc = { \
2260     - .name = #_name,\
2261     - .n_voltages = ((max) - (min)) / (step) + 1, \
2262     - .ops = &pfuze100_sw_regulator_ops, \
2263     - .type = REGULATOR_VOLTAGE, \
2264     - .id = _chip ## _ ## _name, \
2265     - .owner = THIS_MODULE, \
2266     - .min_uV = (min), \
2267     - .uV_step = (step), \
2268     - .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
2269     - .vsel_mask = 0x7, \
2270     - }, \
2271     - .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
2272     - .stby_mask = 0x7, \
2273     -}
2274     +/* No linar case for the some switches of PFUZE3000 */
2275     +#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages) \
2276     + [_chip ## _ ## _name] = { \
2277     + .desc = { \
2278     + .name = #_name, \
2279     + .n_voltages = ARRAY_SIZE(voltages), \
2280     + .ops = &pfuze3000_sw_regulator_ops, \
2281     + .type = REGULATOR_VOLTAGE, \
2282     + .id = _chip ## _ ## _name, \
2283     + .owner = THIS_MODULE, \
2284     + .volt_table = voltages, \
2285     + .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
2286     + .vsel_mask = (mask), \
2287     + .enable_reg = (base) + PFUZE100_MODE_OFFSET, \
2288     + .enable_mask = 0xf, \
2289     + .enable_val = 0x8, \
2290     + .enable_time = 500, \
2291     + }, \
2292     + .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
2293     + .stby_mask = (mask), \
2294     + .sw_reg = true, \
2295     + }
2296    
2297     #define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \
2298     .desc = { \
2299     @@ -391,9 +409,9 @@ static struct pfuze_regulator pfuze200_regulators[] = {
2300     };
2301    
2302     static struct pfuze_regulator pfuze3000_regulators[] = {
2303     - PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
2304     + PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
2305     PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
2306     - PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
2307     + PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
2308     PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
2309     PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
2310     PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
2311     @@ -407,8 +425,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
2312     };
2313    
2314     static struct pfuze_regulator pfuze3001_regulators[] = {
2315     - PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
2316     - PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
2317     + PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
2318     + PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
2319     PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
2320     PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
2321     PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
2322     diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
2323     index fe70e9875bde..5043f0fcf399 100644
2324     --- a/drivers/s390/net/qeth_core_main.c
2325     +++ b/drivers/s390/net/qeth_core_main.c
2326     @@ -4163,9 +4163,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
2327     int fallback = *(int *)reply->param;
2328    
2329     QETH_CARD_TEXT(card, 4, "setaccb");
2330     - if (cmd->hdr.return_code)
2331     - return -EIO;
2332     - qeth_setadpparms_inspect_rc(cmd);
2333    
2334     access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
2335     QETH_CARD_TEXT_(card, 2, "rc=%d",
2336     @@ -4175,7 +4172,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
2337     QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
2338     access_ctrl_req->subcmd_code, CARD_DEVID(card),
2339     cmd->data.setadapterparms.hdr.return_code);
2340     - switch (cmd->data.setadapterparms.hdr.return_code) {
2341     + switch (qeth_setadpparms_inspect_rc(cmd)) {
2342     case SET_ACCESS_CTRL_RC_SUCCESS:
2343     if (card->options.isolation == ISOLATION_MODE_NONE) {
2344     dev_info(&card->gdev->dev,
2345     diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
2346     index cb84125ab80d..08dc2efb7d8a 100644
2347     --- a/drivers/s390/scsi/zfcp_erp.c
2348     +++ b/drivers/s390/scsi/zfcp_erp.c
2349     @@ -576,7 +576,10 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
2350     ZFCP_STATUS_ERP_TIMEDOUT)) {
2351     req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
2352     zfcp_dbf_rec_run("erscf_1", act);
2353     - req->erp_action = NULL;
2354     + /* lock-free concurrent access with
2355     + * zfcp_erp_timeout_handler()
2356     + */
2357     + WRITE_ONCE(req->erp_action, NULL);
2358     }
2359     if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
2360     zfcp_dbf_rec_run("erscf_2", act);
2361     @@ -612,8 +615,14 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
2362     void zfcp_erp_timeout_handler(struct timer_list *t)
2363     {
2364     struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
2365     - struct zfcp_erp_action *act = fsf_req->erp_action;
2366     + struct zfcp_erp_action *act;
2367    
2368     + if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
2369     + return;
2370     + /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */
2371     + act = READ_ONCE(fsf_req->erp_action);
2372     + if (!act)
2373     + return;
2374     zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
2375     }
2376    
2377     diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
2378     index 14d9f41977f1..95abffd9ad10 100644
2379     --- a/drivers/scsi/lpfc/lpfc_init.c
2380     +++ b/drivers/scsi/lpfc/lpfc_init.c
2381     @@ -11542,7 +11542,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
2382     lpfc_sli4_xri_exchange_busy_wait(phba);
2383    
2384     /* per-phba callback de-registration for hotplug event */
2385     - lpfc_cpuhp_remove(phba);
2386     + if (phba->pport)
2387     + lpfc_cpuhp_remove(phba);
2388    
2389     /* Disable PCI subsystem interrupt */
2390     lpfc_sli4_disable_intr(phba);
2391     diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
2392     index 84bb4a048016..a44de4c5dcf6 100644
2393     --- a/drivers/scsi/qla2xxx/qla_gs.c
2394     +++ b/drivers/scsi/qla2xxx/qla_gs.c
2395     @@ -3638,7 +3638,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
2396     qla2x00_clear_loop_id(fcport);
2397     fcport->flags |= FCF_FABRIC_DEVICE;
2398     } else if (fcport->d_id.b24 != rp->id.b24 ||
2399     - fcport->scan_needed) {
2400     + (fcport->scan_needed &&
2401     + fcport->port_type != FCT_INITIATOR &&
2402     + fcport->port_type != FCT_NVME_INITIATOR)) {
2403     qlt_schedule_sess_for_deletion(fcport);
2404     }
2405     fcport->d_id.b24 = rp->id.b24;
2406     diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
2407     index ea3ea2a6b314..f6678ba6d4bc 100644
2408     --- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
2409     +++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
2410     @@ -1845,12 +1845,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
2411     pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
2412     if (!pIE)
2413     return _FAIL;
2414     + if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates))
2415     + return _FAIL;
2416    
2417     memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
2418     supportRateNum = ie_len;
2419    
2420     pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
2421     - if (pIE)
2422     + if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum))
2423     memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
2424    
2425     return _SUCCESS;
2426     diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
2427     index f8e43a6faea9..cdcc64ea2554 100644
2428     --- a/drivers/tty/hvc/hvc_console.c
2429     +++ b/drivers/tty/hvc/hvc_console.c
2430     @@ -75,8 +75,6 @@ static LIST_HEAD(hvc_structs);
2431     */
2432     static DEFINE_MUTEX(hvc_structs_mutex);
2433    
2434     -/* Mutex to serialize hvc_open */
2435     -static DEFINE_MUTEX(hvc_open_mutex);
2436     /*
2437     * This value is used to assign a tty->index value to a hvc_struct based
2438     * upon order of exposure via hvc_probe(), when we can not match it to
2439     @@ -348,24 +346,16 @@ static int hvc_install(struct tty_driver *driver, struct tty_struct *tty)
2440     */
2441     static int hvc_open(struct tty_struct *tty, struct file * filp)
2442     {
2443     - struct hvc_struct *hp;
2444     + struct hvc_struct *hp = tty->driver_data;
2445     unsigned long flags;
2446     int rc = 0;
2447    
2448     - mutex_lock(&hvc_open_mutex);
2449     -
2450     - hp = tty->driver_data;
2451     - if (!hp) {
2452     - rc = -EIO;
2453     - goto out;
2454     - }
2455     -
2456     spin_lock_irqsave(&hp->port.lock, flags);
2457     /* Check and then increment for fast path open. */
2458     if (hp->port.count++ > 0) {
2459     spin_unlock_irqrestore(&hp->port.lock, flags);
2460     hvc_kick();
2461     - goto out;
2462     + return 0;
2463     } /* else count == 0 */
2464     spin_unlock_irqrestore(&hp->port.lock, flags);
2465    
2466     @@ -393,8 +383,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
2467     /* Force wakeup of the polling thread */
2468     hvc_kick();
2469    
2470     -out:
2471     - mutex_unlock(&hvc_open_mutex);
2472     return rc;
2473     }
2474    
2475     diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
2476     index e71240b386b4..da4c5eb03d7e 100644
2477     --- a/drivers/usb/cdns3/ep0.c
2478     +++ b/drivers/usb/cdns3/ep0.c
2479     @@ -327,7 +327,8 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
2480     if (!set || (tmode & 0xff) != 0)
2481     return -EINVAL;
2482    
2483     - switch (tmode >> 8) {
2484     + tmode >>= 8;
2485     + switch (tmode) {
2486     case TEST_J:
2487     case TEST_K:
2488     case TEST_SE0_NAK:
2489     @@ -711,15 +712,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
2490     int ret = 0;
2491     u8 zlp = 0;
2492    
2493     + spin_lock_irqsave(&priv_dev->lock, flags);
2494     trace_cdns3_ep0_queue(priv_dev, request);
2495    
2496     /* cancel the request if controller receive new SETUP packet. */
2497     - if (cdns3_check_new_setup(priv_dev))
2498     + if (cdns3_check_new_setup(priv_dev)) {
2499     + spin_unlock_irqrestore(&priv_dev->lock, flags);
2500     return -ECONNRESET;
2501     + }
2502    
2503     /* send STATUS stage. Should be called only for SET_CONFIGURATION */
2504     if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
2505     - spin_lock_irqsave(&priv_dev->lock, flags);
2506     cdns3_select_ep(priv_dev, 0x00);
2507    
2508     erdy_sent = !priv_dev->hw_configured_flag;
2509     @@ -744,7 +747,6 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
2510     return 0;
2511     }
2512    
2513     - spin_lock_irqsave(&priv_dev->lock, flags);
2514     if (!list_empty(&priv_ep->pending_req_list)) {
2515     dev_err(priv_dev->dev,
2516     "can't handle multiple requests for ep0\n");
2517     diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h
2518     index e92348c9b4d7..7cc8bebaa07d 100644
2519     --- a/drivers/usb/cdns3/trace.h
2520     +++ b/drivers/usb/cdns3/trace.h
2521     @@ -150,7 +150,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep0_irq,
2522     __dynamic_array(char, str, CDNS3_MSG_MAX)
2523     ),
2524     TP_fast_assign(
2525     - __entry->ep_dir = priv_dev->ep0_data_dir;
2526     + __entry->ep_dir = priv_dev->selected_ep;
2527     __entry->ep_sts = ep_sts;
2528     ),
2529     TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str),
2530     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2531     index f67088bb8218..d5187b50fc82 100644
2532     --- a/drivers/usb/class/cdc-acm.c
2533     +++ b/drivers/usb/class/cdc-acm.c
2534     @@ -1689,6 +1689,8 @@ static int acm_pre_reset(struct usb_interface *intf)
2535    
2536     static const struct usb_device_id acm_ids[] = {
2537     /* quirky and broken devices */
2538     + { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. (formerly SMSC) */
2539     + .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */
2540     { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
2541     .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
2542     { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
2543     diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
2544     index 3e8efe759c3e..e0b77674869c 100644
2545     --- a/drivers/usb/core/quirks.c
2546     +++ b/drivers/usb/core/quirks.c
2547     @@ -218,11 +218,12 @@ static const struct usb_device_id usb_quirk_list[] = {
2548     /* Logitech HD Webcam C270 */
2549     { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
2550    
2551     - /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
2552     + /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */
2553     { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
2554     { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
2555     { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
2556     { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
2557     + { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT },
2558    
2559     /* Logitech ConferenceCam CC3000e */
2560     { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
2561     diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
2562     index 7fd0900a9cb0..f7528f732b2a 100644
2563     --- a/drivers/usb/dwc2/gadget.c
2564     +++ b/drivers/usb/dwc2/gadget.c
2565     @@ -4886,12 +4886,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
2566     epnum, 0);
2567     }
2568    
2569     - ret = usb_add_gadget_udc(dev, &hsotg->gadget);
2570     - if (ret) {
2571     - dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
2572     - hsotg->ctrl_req);
2573     - return ret;
2574     - }
2575     dwc2_hsotg_dump(hsotg);
2576    
2577     return 0;
2578     diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
2579     index 3c6ce09a6db5..15e55808cf4e 100644
2580     --- a/drivers/usb/dwc2/platform.c
2581     +++ b/drivers/usb/dwc2/platform.c
2582     @@ -507,6 +507,17 @@ static int dwc2_driver_probe(struct platform_device *dev)
2583     if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
2584     dwc2_lowlevel_hw_disable(hsotg);
2585    
2586     +#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
2587     + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
2588     + /* Postponed adding a new gadget to the udc class driver list */
2589     + if (hsotg->gadget_enabled) {
2590     + retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
2591     + if (retval) {
2592     + dwc2_hsotg_remove(hsotg);
2593     + goto error;
2594     + }
2595     + }
2596     +#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
2597     return 0;
2598    
2599     error:
2600     diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
2601     index cafde053788b..80a1b52c656e 100644
2602     --- a/drivers/usb/gadget/udc/mv_udc_core.c
2603     +++ b/drivers/usb/gadget/udc/mv_udc_core.c
2604     @@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev)
2605     return 0;
2606    
2607     err_create_workqueue:
2608     - destroy_workqueue(udc->qwork);
2609     + if (udc->qwork)
2610     + destroy_workqueue(udc->qwork);
2611     err_destroy_dma:
2612     dma_pool_destroy(udc->dtd_pool);
2613     err_free_dma:
2614     diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
2615     index 01debfd03d4a..84d59a611511 100644
2616     --- a/drivers/usb/host/ehci-exynos.c
2617     +++ b/drivers/usb/host/ehci-exynos.c
2618     @@ -203,9 +203,8 @@ static int exynos_ehci_probe(struct platform_device *pdev)
2619     hcd->rsrc_len = resource_size(res);
2620    
2621     irq = platform_get_irq(pdev, 0);
2622     - if (!irq) {
2623     - dev_err(&pdev->dev, "Failed to get IRQ\n");
2624     - err = -ENODEV;
2625     + if (irq < 0) {
2626     + err = irq;
2627     goto fail_io;
2628     }
2629    
2630     diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
2631     index b0882c13a1d1..66713c253765 100644
2632     --- a/drivers/usb/host/ehci-pci.c
2633     +++ b/drivers/usb/host/ehci-pci.c
2634     @@ -216,6 +216,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
2635     ehci_info(ehci, "applying MosChip frame-index workaround\n");
2636     ehci->frame_index_bug = 1;
2637     break;
2638     + case PCI_VENDOR_ID_HUAWEI:
2639     + /* Synopsys HC bug */
2640     + if (pdev->device == 0xa239) {
2641     + ehci_info(ehci, "applying Synopsys HC workaround\n");
2642     + ehci->has_synopsys_hc_bug = 1;
2643     + }
2644     + break;
2645     }
2646    
2647     /* optional debug port, normally in the first BAR */
2648     diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
2649     index cff965240327..b91d50da6127 100644
2650     --- a/drivers/usb/host/ohci-sm501.c
2651     +++ b/drivers/usb/host/ohci-sm501.c
2652     @@ -191,6 +191,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
2653     struct resource *mem;
2654    
2655     usb_remove_hcd(hcd);
2656     + iounmap(hcd->regs);
2657     release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
2658     usb_put_hcd(hcd);
2659     mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2660     diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
2661     index b18a6baef204..85f1ff0399a9 100644
2662     --- a/drivers/usb/host/xhci-mtk.c
2663     +++ b/drivers/usb/host/xhci-mtk.c
2664     @@ -592,6 +592,9 @@ static int xhci_mtk_remove(struct platform_device *dev)
2665     struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2666     struct usb_hcd *shared_hcd = xhci->shared_hcd;
2667    
2668     + pm_runtime_put_noidle(&dev->dev);
2669     + pm_runtime_disable(&dev->dev);
2670     +
2671     usb_remove_hcd(shared_hcd);
2672     xhci->shared_hcd = NULL;
2673     device_init_wakeup(&dev->dev, false);
2674     @@ -602,8 +605,6 @@ static int xhci_mtk_remove(struct platform_device *dev)
2675     xhci_mtk_sch_exit(mtk);
2676     xhci_mtk_clks_disable(mtk);
2677     xhci_mtk_ldos_disable(mtk);
2678     - pm_runtime_put_sync(&dev->dev);
2679     - pm_runtime_disable(&dev->dev);
2680    
2681     return 0;
2682     }
2683     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2684     index 81b54a3d2910..11a65854d3f0 100644
2685     --- a/drivers/usb/host/xhci.c
2686     +++ b/drivers/usb/host/xhci.c
2687     @@ -1430,6 +1430,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
2688     xhci->devs[slot_id]->out_ctx, ep_index);
2689    
2690     ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
2691     + ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
2692     ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
2693     ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
2694    
2695     @@ -4390,6 +4391,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
2696     int hird, exit_latency;
2697     int ret;
2698    
2699     + if (xhci->quirks & XHCI_HW_LPM_DISABLE)
2700     + return -EPERM;
2701     +
2702     if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
2703     !udev->lpm_capable)
2704     return -EPERM;
2705     @@ -4412,7 +4416,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
2706     xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
2707     enable ? "enable" : "disable", port_num + 1);
2708    
2709     - if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
2710     + if (enable) {
2711     /* Host supports BESL timeout instead of HIRD */
2712     if (udev->usb2_hw_lpm_besl_capable) {
2713     /* if device doesn't have a preferred BESL value use a
2714     @@ -4471,6 +4475,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
2715     mutex_lock(hcd->bandwidth_mutex);
2716     xhci_change_max_exit_latency(xhci, udev, 0);
2717     mutex_unlock(hcd->bandwidth_mutex);
2718     + readl_poll_timeout(ports[port_num]->addr, pm_val,
2719     + (pm_val & PORT_PLS_MASK) == XDEV_U0,
2720     + 100, 10000);
2721     return 0;
2722     }
2723     }
2724     diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2725     index 23a1abdc2b43..c656b41b57b5 100644
2726     --- a/drivers/usb/host/xhci.h
2727     +++ b/drivers/usb/host/xhci.h
2728     @@ -716,7 +716,7 @@ struct xhci_ep_ctx {
2729     * 4 - TRB error
2730     * 5-7 - reserved
2731     */
2732     -#define EP_STATE_MASK (0xf)
2733     +#define EP_STATE_MASK (0x7)
2734     #define EP_STATE_DISABLED 0
2735     #define EP_STATE_RUNNING 1
2736     #define EP_STATE_HALTED 2
2737     diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
2738     index 86637cd066cf..05cdad13933b 100644
2739     --- a/drivers/usb/renesas_usbhs/fifo.c
2740     +++ b/drivers/usb/renesas_usbhs/fifo.c
2741     @@ -803,7 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
2742     return info->dma_map_ctrl(chan->device->dev, pkt, map);
2743     }
2744    
2745     -static void usbhsf_dma_complete(void *arg);
2746     +static void usbhsf_dma_complete(void *arg,
2747     + const struct dmaengine_result *result);
2748     static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
2749     {
2750     struct usbhs_pipe *pipe = pkt->pipe;
2751     @@ -813,6 +814,7 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
2752     struct dma_chan *chan;
2753     struct device *dev = usbhs_priv_to_dev(priv);
2754     enum dma_transfer_direction dir;
2755     + dma_cookie_t cookie;
2756    
2757     fifo = usbhs_pipe_to_fifo(pipe);
2758     if (!fifo)
2759     @@ -827,11 +829,11 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
2760     if (!desc)
2761     return;
2762    
2763     - desc->callback = usbhsf_dma_complete;
2764     - desc->callback_param = pipe;
2765     + desc->callback_result = usbhsf_dma_complete;
2766     + desc->callback_param = pkt;
2767    
2768     - pkt->cookie = dmaengine_submit(desc);
2769     - if (pkt->cookie < 0) {
2770     + cookie = dmaengine_submit(desc);
2771     + if (cookie < 0) {
2772     dev_err(dev, "Failed to submit dma descriptor\n");
2773     return;
2774     }
2775     @@ -1152,12 +1154,10 @@ static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
2776     struct dma_chan *chan, int dtln)
2777     {
2778     struct usbhs_pipe *pipe = pkt->pipe;
2779     - struct dma_tx_state state;
2780     size_t received_size;
2781     int maxp = usbhs_pipe_get_maxpacket(pipe);
2782    
2783     - dmaengine_tx_status(chan, pkt->cookie, &state);
2784     - received_size = pkt->length - state.residue;
2785     + received_size = pkt->length - pkt->dma_result->residue;
2786    
2787     if (dtln) {
2788     received_size -= USBHS_USB_DMAC_XFER_SIZE;
2789     @@ -1363,13 +1363,16 @@ static int usbhsf_irq_ready(struct usbhs_priv *priv,
2790     return 0;
2791     }
2792    
2793     -static void usbhsf_dma_complete(void *arg)
2794     +static void usbhsf_dma_complete(void *arg,
2795     + const struct dmaengine_result *result)
2796     {
2797     - struct usbhs_pipe *pipe = arg;
2798     + struct usbhs_pkt *pkt = arg;
2799     + struct usbhs_pipe *pipe = pkt->pipe;
2800     struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
2801     struct device *dev = usbhs_priv_to_dev(priv);
2802     int ret;
2803    
2804     + pkt->dma_result = result;
2805     ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
2806     if (ret < 0)
2807     dev_err(dev, "dma_complete run_error %d : %d\n",
2808     diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
2809     index c3d3cc35cee0..4a7dc23ce3d3 100644
2810     --- a/drivers/usb/renesas_usbhs/fifo.h
2811     +++ b/drivers/usb/renesas_usbhs/fifo.h
2812     @@ -50,7 +50,7 @@ struct usbhs_pkt {
2813     struct usbhs_pkt *pkt);
2814     struct work_struct work;
2815     dma_addr_t dma;
2816     - dma_cookie_t cookie;
2817     + const struct dmaengine_result *dma_result;
2818     void *buf;
2819     int length;
2820     int trans;
2821     diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
2822     index 017389021b96..b56a0880a044 100644
2823     --- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
2824     +++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
2825     @@ -179,26 +179,6 @@ out:
2826     return tcpci_irq(chip->tcpci);
2827     }
2828    
2829     -static int rt1711h_init_alert(struct rt1711h_chip *chip,
2830     - struct i2c_client *client)
2831     -{
2832     - int ret;
2833     -
2834     - /* Disable chip interrupts before requesting irq */
2835     - ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
2836     - if (ret < 0)
2837     - return ret;
2838     -
2839     - ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
2840     - rt1711h_irq,
2841     - IRQF_ONESHOT | IRQF_TRIGGER_LOW,
2842     - dev_name(chip->dev), chip);
2843     - if (ret < 0)
2844     - return ret;
2845     - enable_irq_wake(client->irq);
2846     - return 0;
2847     -}
2848     -
2849     static int rt1711h_sw_reset(struct rt1711h_chip *chip)
2850     {
2851     int ret;
2852     @@ -260,7 +240,8 @@ static int rt1711h_probe(struct i2c_client *client,
2853     if (ret < 0)
2854     return ret;
2855    
2856     - ret = rt1711h_init_alert(chip, client);
2857     + /* Disable chip interrupts before requesting irq */
2858     + ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
2859     if (ret < 0)
2860     return ret;
2861    
2862     @@ -271,6 +252,14 @@ static int rt1711h_probe(struct i2c_client *client,
2863     if (IS_ERR_OR_NULL(chip->tcpci))
2864     return PTR_ERR(chip->tcpci);
2865    
2866     + ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
2867     + rt1711h_irq,
2868     + IRQF_ONESHOT | IRQF_TRIGGER_LOW,
2869     + dev_name(chip->dev), chip);
2870     + if (ret < 0)
2871     + return ret;
2872     + enable_irq_wake(client->irq);
2873     +
2874     return 0;
2875     }
2876    
2877     diff --git a/fs/afs/cell.c b/fs/afs/cell.c
2878     index 78ba5f932287..296b489861a9 100644
2879     --- a/fs/afs/cell.c
2880     +++ b/fs/afs/cell.c
2881     @@ -154,10 +154,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
2882     return ERR_PTR(-ENOMEM);
2883     }
2884    
2885     + cell->name = kmalloc(namelen + 1, GFP_KERNEL);
2886     + if (!cell->name) {
2887     + kfree(cell);
2888     + return ERR_PTR(-ENOMEM);
2889     + }
2890     +
2891     cell->net = net;
2892     cell->name_len = namelen;
2893     for (i = 0; i < namelen; i++)
2894     cell->name[i] = tolower(name[i]);
2895     + cell->name[i] = 0;
2896    
2897     atomic_set(&cell->usage, 2);
2898     INIT_WORK(&cell->manager, afs_manage_cell);
2899     @@ -203,6 +210,7 @@ parse_failed:
2900     if (ret == -EINVAL)
2901     printk(KERN_ERR "kAFS: bad VL server IP address\n");
2902     error:
2903     + kfree(cell->name);
2904     kfree(cell);
2905     _leave(" = %d", ret);
2906     return ERR_PTR(ret);
2907     @@ -483,6 +491,7 @@ static void afs_cell_destroy(struct rcu_head *rcu)
2908    
2909     afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
2910     key_put(cell->anonymous_key);
2911     + kfree(cell->name);
2912     kfree(cell);
2913    
2914     _leave(" [destroyed]");
2915     diff --git a/fs/afs/internal.h b/fs/afs/internal.h
2916     index 555ad7c9afcb..7fe88d918b23 100644
2917     --- a/fs/afs/internal.h
2918     +++ b/fs/afs/internal.h
2919     @@ -397,7 +397,7 @@ struct afs_cell {
2920     struct afs_vlserver_list __rcu *vl_servers;
2921    
2922     u8 name_len; /* Length of name */
2923     - char name[64 + 1]; /* Cell name, case-flattened and NUL-padded */
2924     + char *name; /* Cell name, case-flattened and NUL-padded */
2925     };
2926    
2927     /*
2928     diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
2929     index c2dd94e1b274..42d69e77f89d 100644
2930     --- a/fs/btrfs/block-group.c
2931     +++ b/fs/btrfs/block-group.c
2932     @@ -910,7 +910,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
2933     path = btrfs_alloc_path();
2934     if (!path) {
2935     ret = -ENOMEM;
2936     - goto out_put_group;
2937     + goto out;
2938     }
2939    
2940     /*
2941     @@ -948,7 +948,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
2942     ret = btrfs_orphan_add(trans, BTRFS_I(inode));
2943     if (ret) {
2944     btrfs_add_delayed_iput(inode);
2945     - goto out_put_group;
2946     + goto out;
2947     }
2948     clear_nlink(inode);
2949     /* One for the block groups ref */
2950     @@ -971,13 +971,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
2951    
2952     ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
2953     if (ret < 0)
2954     - goto out_put_group;
2955     + goto out;
2956     if (ret > 0)
2957     btrfs_release_path(path);
2958     if (ret == 0) {
2959     ret = btrfs_del_item(trans, tree_root, path);
2960     if (ret)
2961     - goto out_put_group;
2962     + goto out;
2963     btrfs_release_path(path);
2964     }
2965    
2966     @@ -986,6 +986,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
2967     &fs_info->block_group_cache_tree);
2968     RB_CLEAR_NODE(&block_group->cache_node);
2969    
2970     + /* Once for the block groups rbtree */
2971     + btrfs_put_block_group(block_group);
2972     +
2973     if (fs_info->first_logical_byte == block_group->key.objectid)
2974     fs_info->first_logical_byte = (u64)-1;
2975     spin_unlock(&fs_info->block_group_cache_lock);
2976     @@ -1094,10 +1097,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
2977    
2978     ret = remove_block_group_free_space(trans, block_group);
2979     if (ret)
2980     - goto out_put_group;
2981     -
2982     - /* Once for the block groups rbtree */
2983     - btrfs_put_block_group(block_group);
2984     + goto out;
2985    
2986     ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2987     if (ret > 0)
2988     @@ -1120,10 +1120,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
2989     free_extent_map(em);
2990     }
2991    
2992     -out_put_group:
2993     +out:
2994     /* Once for the lookup reference */
2995     btrfs_put_block_group(block_group);
2996     -out:
2997     if (remove_rsv)
2998     btrfs_delayed_refs_rsv_release(fs_info, 1);
2999     btrfs_free_path(path);
3000     diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
3001     index 6d2c277c6e0a..36cd210ee2ef 100644
3002     --- a/fs/btrfs/ctree.h
3003     +++ b/fs/btrfs/ctree.h
3004     @@ -940,6 +940,8 @@ enum {
3005     BTRFS_ROOT_DEAD_RELOC_TREE,
3006     /* Mark dead root stored on device whose cleanup needs to be resumed */
3007     BTRFS_ROOT_DEAD_TREE,
3008     + /* The root has a log tree. Used only for subvolume roots. */
3009     + BTRFS_ROOT_HAS_LOG_TREE,
3010     };
3011    
3012     /*
3013     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3014     index 127cdecbe872..280c45c91ddc 100644
3015     --- a/fs/btrfs/inode.c
3016     +++ b/fs/btrfs/inode.c
3017     @@ -975,6 +975,7 @@ static noinline int cow_file_range(struct inode *inode,
3018     u64 num_bytes;
3019     unsigned long ram_size;
3020     u64 cur_alloc_size = 0;
3021     + u64 min_alloc_size;
3022     u64 blocksize = fs_info->sectorsize;
3023     struct btrfs_key ins;
3024     struct extent_map *em;
3025     @@ -1025,10 +1026,26 @@ static noinline int cow_file_range(struct inode *inode,
3026     btrfs_drop_extent_cache(BTRFS_I(inode), start,
3027     start + num_bytes - 1, 0);
3028    
3029     + /*
3030     + * Relocation relies on the relocated extents to have exactly the same
3031     + * size as the original extents. Normally writeback for relocation data
3032     + * extents follows a NOCOW path because relocation preallocates the
3033     + * extents. However, due to an operation such as scrub turning a block
3034     + * group to RO mode, it may fallback to COW mode, so we must make sure
3035     + * an extent allocated during COW has exactly the requested size and can
3036     + * not be split into smaller extents, otherwise relocation breaks and
3037     + * fails during the stage where it updates the bytenr of file extent
3038     + * items.
3039     + */
3040     + if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3041     + min_alloc_size = num_bytes;
3042     + else
3043     + min_alloc_size = fs_info->sectorsize;
3044     +
3045     while (num_bytes > 0) {
3046     cur_alloc_size = num_bytes;
3047     ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
3048     - fs_info->sectorsize, 0, alloc_hint,
3049     + min_alloc_size, 0, alloc_hint,
3050     &ins, 1, 1);
3051     if (ret < 0)
3052     goto out_unlock;
3053     @@ -1328,6 +1345,8 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
3054     int *page_started, unsigned long *nr_written)
3055     {
3056     const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode));
3057     + const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid ==
3058     + BTRFS_DATA_RELOC_TREE_OBJECTID);
3059     const u64 range_bytes = end + 1 - start;
3060     struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3061     u64 range_start = start;
3062     @@ -1358,18 +1377,23 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
3063     * data space info, which we incremented in the step above.
3064     *
3065     * If we need to fallback to cow and the inode corresponds to a free
3066     - * space cache inode, we must also increment bytes_may_use of the data
3067     - * space_info for the same reason. Space caches always get a prealloc
3068     + * space cache inode or an inode of the data relocation tree, we must
3069     + * also increment bytes_may_use of the data space_info for the same
3070     + * reason. Space caches and relocated data extents always get a prealloc
3071     * extent for them, however scrub or balance may have set the block
3072     - * group that contains that extent to RO mode.
3073     + * group that contains that extent to RO mode and therefore force COW
3074     + * when starting writeback.
3075     */
3076     count = count_range_bits(io_tree, &range_start, end, range_bytes,
3077     EXTENT_NORESERVE, 0);
3078     - if (count > 0 || is_space_ino) {
3079     - const u64 bytes = is_space_ino ? range_bytes : count;
3080     + if (count > 0 || is_space_ino || is_reloc_ino) {
3081     + u64 bytes = count;
3082     struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3083     struct btrfs_space_info *sinfo = fs_info->data_sinfo;
3084    
3085     + if (is_space_ino || is_reloc_ino)
3086     + bytes = range_bytes;
3087     +
3088     spin_lock(&sinfo->lock);
3089     btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
3090     spin_unlock(&sinfo->lock);
3091     @@ -8833,9 +8857,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3092     dio_data.overwrite = 1;
3093     inode_unlock(inode);
3094     relock = true;
3095     - } else if (iocb->ki_flags & IOCB_NOWAIT) {
3096     - ret = -EAGAIN;
3097     - goto out;
3098     }
3099     ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
3100     offset, count);
3101     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3102     index 7d464b049507..f46afbff668e 100644
3103     --- a/fs/btrfs/tree-log.c
3104     +++ b/fs/btrfs/tree-log.c
3105     @@ -167,6 +167,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
3106     if (ret)
3107     goto out;
3108    
3109     + set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
3110     clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
3111     root->log_start_pid = current->pid;
3112     }
3113     @@ -193,6 +194,9 @@ static int join_running_log_trans(struct btrfs_root *root)
3114     {
3115     int ret = -ENOENT;
3116    
3117     + if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
3118     + return ret;
3119     +
3120     mutex_lock(&root->log_mutex);
3121     if (root->log_root) {
3122     ret = 0;
3123     @@ -3327,6 +3331,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3124     if (root->log_root) {
3125     free_log_tree(trans, root->log_root);
3126     root->log_root = NULL;
3127     + clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
3128     }
3129     return 0;
3130     }
3131     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3132     index 58915d882285..7ccbfc656478 100644
3133     --- a/fs/cifs/smb2ops.c
3134     +++ b/fs/cifs/smb2ops.c
3135     @@ -736,6 +736,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
3136     /* close extra handle outside of crit sec */
3137     SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3138     }
3139     + rc = 0;
3140     goto oshr_free;
3141     }
3142    
3143     @@ -2969,6 +2970,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
3144     trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3145     ses->Suid, offset, len);
3146    
3147     + /*
3148     + * We zero the range through ioctl, so we need remove the page caches
3149     + * first, otherwise the data may be inconsistent with the server.
3150     + */
3151     + truncate_pagecache_range(inode, offset, offset + len - 1);
3152    
3153     /* if file not oplocked can't be sure whether asking to extend size */
3154     if (!CIFS_CACHE_READ(cifsi))
3155     @@ -3035,6 +3041,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
3156     return rc;
3157     }
3158    
3159     + /*
3160     + * We implement the punch hole through ioctl, so we need remove the page
3161     + * caches first, otherwise the data may be inconsistent with the server.
3162     + */
3163     + truncate_pagecache_range(inode, offset, offset + len - 1);
3164     +
3165     cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
3166    
3167     fsctl_buf.FileOffset = cpu_to_le64(offset);
3168     diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
3169     index faf950189bd7..568d5a493876 100644
3170     --- a/fs/erofs/zdata.h
3171     +++ b/fs/erofs/zdata.h
3172     @@ -148,22 +148,22 @@ static inline void z_erofs_onlinepage_init(struct page *page)
3173     static inline void z_erofs_onlinepage_fixup(struct page *page,
3174     uintptr_t index, bool down)
3175     {
3176     - unsigned long *p, o, v, id;
3177     -repeat:
3178     - p = &page_private(page);
3179     - o = READ_ONCE(*p);
3180     + union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
3181     + int orig, orig_index, val;
3182    
3183     - id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
3184     - if (id) {
3185     +repeat:
3186     + orig = atomic_read(u.o);
3187     + orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
3188     + if (orig_index) {
3189     if (!index)
3190     return;
3191    
3192     - DBG_BUGON(id != index);
3193     + DBG_BUGON(orig_index != index);
3194     }
3195    
3196     - v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
3197     - ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
3198     - if (cmpxchg(p, o, v) != o)
3199     + val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
3200     + ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
3201     + if (atomic_cmpxchg(u.o, orig, val) != orig)
3202     goto repeat;
3203     }
3204    
3205     diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
3206     index 6b0bf4ebd812..70cf8c5760c7 100644
3207     --- a/fs/nfs/direct.c
3208     +++ b/fs/nfs/direct.c
3209     @@ -367,8 +367,6 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
3210     {
3211     struct inode *inode = dreq->inode;
3212    
3213     - inode_dio_end(inode);
3214     -
3215     if (dreq->iocb) {
3216     long res = (long) dreq->error;
3217     if (dreq->count != 0) {
3218     @@ -380,7 +378,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
3219    
3220     complete(&dreq->completion);
3221    
3222     + igrab(inode);
3223     nfs_direct_req_release(dreq);
3224     + inode_dio_end(inode);
3225     + iput(inode);
3226     }
3227    
3228     static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
3229     @@ -510,8 +511,10 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
3230     * generic layer handle the completion.
3231     */
3232     if (requested_bytes == 0) {
3233     - inode_dio_end(inode);
3234     + igrab(inode);
3235     nfs_direct_req_release(dreq);
3236     + inode_dio_end(inode);
3237     + iput(inode);
3238     return result < 0 ? result : -EIO;
3239     }
3240    
3241     @@ -923,8 +926,10 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
3242     * generic layer handle the completion.
3243     */
3244     if (requested_bytes == 0) {
3245     - inode_dio_end(inode);
3246     + igrab(inode);
3247     nfs_direct_req_release(dreq);
3248     + inode_dio_end(inode);
3249     + iput(inode);
3250     return result < 0 ? result : -EIO;
3251     }
3252    
3253     diff --git a/fs/nfs/file.c b/fs/nfs/file.c
3254     index 95dc90570786..7b3136753205 100644
3255     --- a/fs/nfs/file.c
3256     +++ b/fs/nfs/file.c
3257     @@ -83,6 +83,7 @@ nfs_file_release(struct inode *inode, struct file *filp)
3258     dprintk("NFS: release(%pD2)\n", filp);
3259    
3260     nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
3261     + inode_dio_wait(inode);
3262     nfs_file_clear_open_context(filp);
3263     return 0;
3264     }
3265     diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
3266     index 5657b7f2611f..1741d902b0d8 100644
3267     --- a/fs/nfs/flexfilelayout/flexfilelayout.c
3268     +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
3269     @@ -984,9 +984,8 @@ retry:
3270     goto out_mds;
3271    
3272     /* Use a direct mapping of ds_idx to pgio mirror_idx */
3273     - if (WARN_ON_ONCE(pgio->pg_mirror_count !=
3274     - FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
3275     - goto out_mds;
3276     + if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
3277     + goto out_eagain;
3278    
3279     for (i = 0; i < pgio->pg_mirror_count; i++) {
3280     mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
3281     @@ -1008,7 +1007,10 @@ retry:
3282     (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
3283     pgio->pg_maxretrans = io_maxretrans;
3284     return;
3285     -
3286     +out_eagain:
3287     + pnfs_generic_pg_cleanup(pgio);
3288     + pgio->pg_error = -EAGAIN;
3289     + return;
3290     out_mds:
3291     trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
3292     0, NFS4_MAX_UINT64, IOMODE_RW,
3293     @@ -1018,6 +1020,7 @@ out_mds:
3294     pgio->pg_lseg = NULL;
3295     pgio->pg_maxretrans = 0;
3296     nfs_pageio_reset_write_mds(pgio);
3297     + pgio->pg_error = -EAGAIN;
3298     }
3299    
3300     static unsigned int
3301     diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
3302     index 8a2e284ccfcd..e2c34c704185 100644
3303     --- a/fs/ocfs2/dlmglue.c
3304     +++ b/fs/ocfs2/dlmglue.c
3305     @@ -689,6 +689,12 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
3306     &ocfs2_nfs_sync_lops, osb);
3307     }
3308    
3309     +static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
3310     +{
3311     + ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
3312     + init_rwsem(&osb->nfs_sync_rwlock);
3313     +}
3314     +
3315     void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
3316     {
3317     struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
3318     @@ -2855,6 +2861,11 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
3319     if (ocfs2_is_hard_readonly(osb))
3320     return -EROFS;
3321    
3322     + if (ex)
3323     + down_write(&osb->nfs_sync_rwlock);
3324     + else
3325     + down_read(&osb->nfs_sync_rwlock);
3326     +
3327     if (ocfs2_mount_local(osb))
3328     return 0;
3329    
3330     @@ -2873,6 +2884,10 @@ void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
3331     if (!ocfs2_mount_local(osb))
3332     ocfs2_cluster_unlock(osb, lockres,
3333     ex ? LKM_EXMODE : LKM_PRMODE);
3334     + if (ex)
3335     + up_write(&osb->nfs_sync_rwlock);
3336     + else
3337     + up_read(&osb->nfs_sync_rwlock);
3338     }
3339    
3340     int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
3341     @@ -3340,7 +3355,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
3342     local:
3343     ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
3344     ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
3345     - ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
3346     + ocfs2_nfs_sync_lock_init(osb);
3347     ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
3348    
3349     osb->cconn = conn;
3350     diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
3351     index 9150cfa4df7d..9461bd3e1c0c 100644
3352     --- a/fs/ocfs2/ocfs2.h
3353     +++ b/fs/ocfs2/ocfs2.h
3354     @@ -394,6 +394,7 @@ struct ocfs2_super
3355     struct ocfs2_lock_res osb_super_lockres;
3356     struct ocfs2_lock_res osb_rename_lockres;
3357     struct ocfs2_lock_res osb_nfs_sync_lockres;
3358     + struct rw_semaphore nfs_sync_rwlock;
3359     struct ocfs2_lock_res osb_trim_fs_lockres;
3360     struct mutex obs_trim_fs_mutex;
3361     struct ocfs2_dlm_debug *osb_dlm_debug;
3362     diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
3363     index 0db4a7ec58a2..dcef83c8796d 100644
3364     --- a/fs/ocfs2/ocfs2_fs.h
3365     +++ b/fs/ocfs2/ocfs2_fs.h
3366     @@ -290,7 +290,7 @@
3367     #define OCFS2_MAX_SLOTS 255
3368    
3369     /* Slot map indicator for an empty slot */
3370     -#define OCFS2_INVALID_SLOT -1
3371     +#define OCFS2_INVALID_SLOT ((u16)-1)
3372    
3373     #define OCFS2_VOL_UUID_LEN 16
3374     #define OCFS2_MAX_VOL_LABEL_LEN 64
3375     @@ -326,8 +326,8 @@ struct ocfs2_system_inode_info {
3376     enum {
3377     BAD_BLOCK_SYSTEM_INODE = 0,
3378     GLOBAL_INODE_ALLOC_SYSTEM_INODE,
3379     +#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE
3380     SLOT_MAP_SYSTEM_INODE,
3381     -#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
3382     HEARTBEAT_SYSTEM_INODE,
3383     GLOBAL_BITMAP_SYSTEM_INODE,
3384     USER_QUOTA_SYSTEM_INODE,
3385     diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
3386     index 69c21a3843af..503e724d39f5 100644
3387     --- a/fs/ocfs2/suballoc.c
3388     +++ b/fs/ocfs2/suballoc.c
3389     @@ -2827,9 +2827,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
3390     goto bail;
3391     }
3392    
3393     - inode_alloc_inode =
3394     - ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
3395     - suballoc_slot);
3396     + if (suballoc_slot == (u16)OCFS2_INVALID_SLOT)
3397     + inode_alloc_inode = ocfs2_get_system_file_inode(osb,
3398     + GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
3399     + else
3400     + inode_alloc_inode = ocfs2_get_system_file_inode(osb,
3401     + INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
3402     if (!inode_alloc_inode) {
3403     /* the error code could be inaccurate, but we are not able to
3404     * get the correct one. */
3405     diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
3406     index 533b04aaf6f6..0a36f532cf86 100644
3407     --- a/fs/xfs/libxfs/xfs_alloc.c
3408     +++ b/fs/xfs/libxfs/xfs_alloc.c
3409     @@ -2598,6 +2598,13 @@ xfs_agf_verify(
3410     be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
3411     return __this_address;
3412    
3413     + if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks)
3414     + return __this_address;
3415     +
3416     + if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
3417     + be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))
3418     + return __this_address;
3419     +
3420     if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
3421     be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
3422     be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
3423     @@ -2609,6 +2616,10 @@ xfs_agf_verify(
3424     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
3425     return __this_address;
3426    
3427     + if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
3428     + be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length))
3429     + return __this_address;
3430     +
3431     /*
3432     * during growfs operations, the perag is not fully initialised,
3433     * so we can't use it for any useful checking. growfs ensures we can't
3434     @@ -2622,6 +2633,11 @@ xfs_agf_verify(
3435     be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
3436     return __this_address;
3437    
3438     + if (xfs_sb_version_hasreflink(&mp->m_sb) &&
3439     + be32_to_cpu(agf->agf_refcount_blocks) >
3440     + be32_to_cpu(agf->agf_length))
3441     + return __this_address;
3442     +
3443     if (xfs_sb_version_hasreflink(&mp->m_sb) &&
3444     (be32_to_cpu(agf->agf_refcount_level) < 1 ||
3445     be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
3446     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3447     index b580a35f50ea..ec3081ab04c0 100644
3448     --- a/include/linux/netdevice.h
3449     +++ b/include/linux/netdevice.h
3450     @@ -3043,7 +3043,7 @@ static inline int dev_recursion_level(void)
3451     return this_cpu_read(softnet_data.xmit.recursion);
3452     }
3453    
3454     -#define XMIT_RECURSION_LIMIT 10
3455     +#define XMIT_RECURSION_LIMIT 8
3456     static inline bool dev_xmit_recursion(void)
3457     {
3458     return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
3459     diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
3460     index 733fad7dfbed..6d15040c642c 100644
3461     --- a/include/linux/qed/qed_chain.h
3462     +++ b/include/linux/qed/qed_chain.h
3463     @@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
3464    
3465     static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
3466     {
3467     + u16 elem_per_page = p_chain->elem_per_page;
3468     + u32 prod = p_chain->u.chain16.prod_idx;
3469     + u32 cons = p_chain->u.chain16.cons_idx;
3470     u16 used;
3471    
3472     - used = (u16) (((u32)0x10000 +
3473     - (u32)p_chain->u.chain16.prod_idx) -
3474     - (u32)p_chain->u.chain16.cons_idx);
3475     + if (prod < cons)
3476     + prod += (u32)U16_MAX + 1;
3477     +
3478     + used = (u16)(prod - cons);
3479     if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
3480     - used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
3481     - p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
3482     + used -= prod / elem_per_page - cons / elem_per_page;
3483    
3484     return (u16)(p_chain->capacity - used);
3485     }
3486    
3487     static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
3488     {
3489     + u16 elem_per_page = p_chain->elem_per_page;
3490     + u64 prod = p_chain->u.chain32.prod_idx;
3491     + u64 cons = p_chain->u.chain32.cons_idx;
3492     u32 used;
3493    
3494     - used = (u32) (((u64)0x100000000ULL +
3495     - (u64)p_chain->u.chain32.prod_idx) -
3496     - (u64)p_chain->u.chain32.cons_idx);
3497     + if (prod < cons)
3498     + prod += (u64)U32_MAX + 1;
3499     +
3500     + used = (u32)(prod - cons);
3501     if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
3502     - used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
3503     - p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
3504     + used -= (u32)(prod / elem_per_page - cons / elem_per_page);
3505    
3506     return p_chain->capacity - used;
3507     }
3508     diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
3509     index 131ea1bad458..eccfd3a4e4c8 100644
3510     --- a/include/linux/tpm_eventlog.h
3511     +++ b/include/linux/tpm_eventlog.h
3512     @@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs {
3513     u16 digest_size;
3514     } __packed;
3515    
3516     +#define TCG_SPECID_SIG "Spec ID Event03"
3517     +
3518     struct tcg_efi_specid_event_head {
3519     u8 signature[16];
3520     u32 platform_class;
3521     @@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
3522     int i;
3523     int j;
3524     u32 count, event_type;
3525     + const u8 zero_digest[sizeof(event_header->digest)] = {0};
3526    
3527     marker = event;
3528     marker_start = marker;
3529     @@ -198,10 +201,19 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
3530     count = READ_ONCE(event->count);
3531     event_type = READ_ONCE(event->event_type);
3532    
3533     + /* Verify that it's the log header */
3534     + if (event_header->pcr_idx != 0 ||
3535     + event_header->event_type != NO_ACTION ||
3536     + memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) {
3537     + size = 0;
3538     + goto out;
3539     + }
3540     +
3541     efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
3542    
3543     /* Check if event is malformed. */
3544     - if (count > efispecid->num_algs) {
3545     + if (memcmp(efispecid->signature, TCG_SPECID_SIG,
3546     + sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) {
3547     size = 0;
3548     goto out;
3549     }
3550     diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
3551     index 823afc42a3aa..06e1deeef464 100644
3552     --- a/include/net/sctp/constants.h
3553     +++ b/include/net/sctp/constants.h
3554     @@ -341,11 +341,13 @@ enum {
3555     ipv4_is_anycast_6to4(a))
3556    
3557     /* Flags used for the bind address copy functions. */
3558     -#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by
3559     +#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by
3560     local sock family */
3561     -#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by
3562     +#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by
3563     + local sock family */
3564     +#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by
3565     peer */
3566     -#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by
3567     +#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by
3568     peer */
3569    
3570     /* Reasons to retransmit. */
3571     diff --git a/include/net/sock.h b/include/net/sock.h
3572     index 6c5a3809483e..8263bbf756a2 100644
3573     --- a/include/net/sock.h
3574     +++ b/include/net/sock.h
3575     @@ -1803,7 +1803,6 @@ static inline int sk_rx_queue_get(const struct sock *sk)
3576    
3577     static inline void sk_set_socket(struct sock *sk, struct socket *sock)
3578     {
3579     - sk_tx_queue_clear(sk);
3580     sk->sk_socket = sock;
3581     }
3582    
3583     diff --git a/include/net/xfrm.h b/include/net/xfrm.h
3584     index aa08a7a5f6ac..fb391c00c19a 100644
3585     --- a/include/net/xfrm.h
3586     +++ b/include/net/xfrm.h
3587     @@ -1012,6 +1012,7 @@ struct xfrm_offload {
3588     #define XFRM_GRO 32
3589     #define XFRM_ESP_NO_TRAILER 64
3590     #define XFRM_DEV_RESUME 128
3591     +#define XFRM_XMIT 256
3592    
3593     __u32 status;
3594     #define CRYPTO_SUCCESS 1
3595     diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
3596     index 869e2e1860e8..b701af27a779 100644
3597     --- a/kernel/bpf/cgroup.c
3598     +++ b/kernel/bpf/cgroup.c
3599     @@ -966,16 +966,23 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
3600    
3601     static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
3602     {
3603     - if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
3604     + if (unlikely(max_optlen < 0))
3605     return -EINVAL;
3606    
3607     + if (unlikely(max_optlen > PAGE_SIZE)) {
3608     + /* We don't expose optvals that are greater than PAGE_SIZE
3609     + * to the BPF program.
3610     + */
3611     + max_optlen = PAGE_SIZE;
3612     + }
3613     +
3614     ctx->optval = kzalloc(max_optlen, GFP_USER);
3615     if (!ctx->optval)
3616     return -ENOMEM;
3617    
3618     ctx->optval_end = ctx->optval + max_optlen;
3619    
3620     - return 0;
3621     + return max_optlen;
3622     }
3623    
3624     static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
3625     @@ -1009,13 +1016,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
3626     */
3627     max_optlen = max_t(int, 16, *optlen);
3628    
3629     - ret = sockopt_alloc_buf(&ctx, max_optlen);
3630     - if (ret)
3631     - return ret;
3632     + max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
3633     + if (max_optlen < 0)
3634     + return max_optlen;
3635    
3636     ctx.optlen = *optlen;
3637    
3638     - if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
3639     + if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
3640     ret = -EFAULT;
3641     goto out;
3642     }
3643     @@ -1043,8 +1050,14 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
3644     /* export any potential modifications */
3645     *level = ctx.level;
3646     *optname = ctx.optname;
3647     - *optlen = ctx.optlen;
3648     - *kernel_optval = ctx.optval;
3649     +
3650     + /* optlen == 0 from BPF indicates that we should
3651     + * use original userspace data.
3652     + */
3653     + if (ctx.optlen != 0) {
3654     + *optlen = ctx.optlen;
3655     + *kernel_optval = ctx.optval;
3656     + }
3657     }
3658    
3659     out:
3660     @@ -1076,12 +1089,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
3661     __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
3662     return retval;
3663    
3664     - ret = sockopt_alloc_buf(&ctx, max_optlen);
3665     - if (ret)
3666     - return ret;
3667     -
3668     ctx.optlen = max_optlen;
3669    
3670     + max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
3671     + if (max_optlen < 0)
3672     + return max_optlen;
3673     +
3674     if (!retval) {
3675     /* If kernel getsockopt finished successfully,
3676     * copy whatever was returned to the user back
3677     @@ -1095,10 +1108,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
3678     goto out;
3679     }
3680    
3681     - if (ctx.optlen > max_optlen)
3682     - ctx.optlen = max_optlen;
3683     -
3684     - if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
3685     + if (copy_from_user(ctx.optval, optval,
3686     + min(ctx.optlen, max_optlen)) != 0) {
3687     ret = -EFAULT;
3688     goto out;
3689     }
3690     @@ -1127,10 +1138,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
3691     goto out;
3692     }
3693    
3694     - if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
3695     - put_user(ctx.optlen, optlen)) {
3696     - ret = -EFAULT;
3697     - goto out;
3698     + if (ctx.optlen != 0) {
3699     + if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
3700     + put_user(ctx.optlen, optlen)) {
3701     + ret = -EFAULT;
3702     + goto out;
3703     + }
3704     }
3705    
3706     ret = ctx.retval;
3707     diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
3708     index b4b6b77f309c..6684696fa457 100644
3709     --- a/kernel/bpf/devmap.c
3710     +++ b/kernel/bpf/devmap.c
3711     @@ -88,12 +88,13 @@ struct bpf_dtab {
3712     static DEFINE_SPINLOCK(dev_map_lock);
3713     static LIST_HEAD(dev_map_list);
3714    
3715     -static struct hlist_head *dev_map_create_hash(unsigned int entries)
3716     +static struct hlist_head *dev_map_create_hash(unsigned int entries,
3717     + int numa_node)
3718     {
3719     int i;
3720     struct hlist_head *hash;
3721    
3722     - hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
3723     + hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
3724     if (hash != NULL)
3725     for (i = 0; i < entries; i++)
3726     INIT_HLIST_HEAD(&hash[i]);
3727     @@ -151,7 +152,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
3728     INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
3729    
3730     if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
3731     - dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
3732     + dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
3733     + dtab->map.numa_node);
3734     if (!dtab->dev_index_head)
3735     goto free_percpu;
3736    
3737     @@ -249,7 +251,7 @@ static void dev_map_free(struct bpf_map *map)
3738     }
3739     }
3740    
3741     - kfree(dtab->dev_index_head);
3742     + bpf_map_area_free(dtab->dev_index_head);
3743     } else {
3744     for (i = 0; i < dtab->map.max_entries; i++) {
3745     struct bpf_dtab_netdev *dev;
3746     diff --git a/kernel/kprobes.c b/kernel/kprobes.c
3747     index 195ecb955fcc..950a5cfd262c 100644
3748     --- a/kernel/kprobes.c
3749     +++ b/kernel/kprobes.c
3750     @@ -326,7 +326,8 @@ struct kprobe *get_kprobe(void *addr)
3751     struct kprobe *p;
3752    
3753     head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
3754     - hlist_for_each_entry_rcu(p, head, hlist) {
3755     + hlist_for_each_entry_rcu(p, head, hlist,
3756     + lockdep_is_held(&kprobe_mutex)) {
3757     if (p->addr == addr)
3758     return p;
3759     }
3760     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
3761     index 361cbc2dc966..7238ef445daf 100644
3762     --- a/kernel/sched/core.c
3763     +++ b/kernel/sched/core.c
3764     @@ -4447,7 +4447,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
3765     */
3766     if (dl_prio(prio)) {
3767     if (!dl_prio(p->normal_prio) ||
3768     - (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
3769     + (pi_task && dl_prio(pi_task->prio) &&
3770     + dl_entity_preempt(&pi_task->dl, &p->dl))) {
3771     p->dl.dl_boosted = 1;
3772     queue_flag |= ENQUEUE_REPLENISH;
3773     } else
3774     diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
3775     index 08bdee0480b3..4cb00538a207 100644
3776     --- a/kernel/sched/deadline.c
3777     +++ b/kernel/sched/deadline.c
3778     @@ -2693,6 +2693,7 @@ void __dl_clear_params(struct task_struct *p)
3779     dl_se->dl_bw = 0;
3780     dl_se->dl_density = 0;
3781    
3782     + dl_se->dl_boosted = 0;
3783     dl_se->dl_throttled = 0;
3784     dl_se->dl_yielded = 0;
3785     dl_se->dl_non_contending = 0;
3786     diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
3787     index a677aa84ccb6..eaee960153e1 100644
3788     --- a/kernel/trace/blktrace.c
3789     +++ b/kernel/trace/blktrace.c
3790     @@ -3,6 +3,9 @@
3791     * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
3792     *
3793     */
3794     +
3795     +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3796     +
3797     #include <linux/kernel.h>
3798     #include <linux/blkdev.h>
3799     #include <linux/blktrace_api.h>
3800     @@ -495,6 +498,16 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
3801     */
3802     strreplace(buts->name, '/', '_');
3803    
3804     + /*
3805     + * bdev can be NULL, as with scsi-generic, this is a helpful as
3806     + * we can be.
3807     + */
3808     + if (q->blk_trace) {
3809     + pr_warn("Concurrent blktraces are not allowed on %s\n",
3810     + buts->name);
3811     + return -EBUSY;
3812     + }
3813     +
3814     bt = kzalloc(sizeof(*bt), GFP_KERNEL);
3815     if (!bt)
3816     return -ENOMEM;
3817     diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
3818     index 4bf050fcfe3b..9a2581fe7ed5 100644
3819     --- a/kernel/trace/ring_buffer.c
3820     +++ b/kernel/trace/ring_buffer.c
3821     @@ -2358,7 +2358,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
3822     if (unlikely(info->add_timestamp)) {
3823     bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
3824    
3825     - event = rb_add_time_stamp(event, info->delta, abs);
3826     + event = rb_add_time_stamp(event, abs ? info->delta : delta, abs);
3827     length -= RB_LEN_TIME_EXTEND;
3828     delta = 0;
3829     }
3830     diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
3831     index de840de87a18..e913d41a4194 100644
3832     --- a/kernel/trace/trace_events_trigger.c
3833     +++ b/kernel/trace/trace_events_trigger.c
3834     @@ -216,11 +216,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
3835    
3836     static int trigger_process_regex(struct trace_event_file *file, char *buff)
3837     {
3838     - char *command, *next = buff;
3839     + char *command, *next;
3840     struct event_command *p;
3841     int ret = -EINVAL;
3842    
3843     + next = buff = skip_spaces(buff);
3844     command = strsep(&next, ": \t");
3845     + if (next) {
3846     + next = skip_spaces(next);
3847     + if (!*next)
3848     + next = NULL;
3849     + }
3850     command = (command[0] != '!') ? command : command + 1;
3851    
3852     mutex_lock(&trigger_cmd_mutex);
3853     @@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops,
3854     int ret;
3855    
3856     /* separate the trigger from the filter (t:n [if filter]) */
3857     - if (param && isdigit(param[0]))
3858     + if (param && isdigit(param[0])) {
3859     trigger = strsep(&param, " \t");
3860     + if (param) {
3861     + param = skip_spaces(param);
3862     + if (!*param)
3863     + param = NULL;
3864     + }
3865     + }
3866    
3867     trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
3868    
3869     @@ -1368,6 +1380,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
3870     trigger = strsep(&param, " \t");
3871     if (!trigger)
3872     return -EINVAL;
3873     + if (param) {
3874     + param = skip_spaces(param);
3875     + if (!*param)
3876     + param = NULL;
3877     + }
3878    
3879     system = strsep(&trigger, ":");
3880     if (!trigger)
3881     diff --git a/lib/test_objagg.c b/lib/test_objagg.c
3882     index 72c1abfa154d..da137939a410 100644
3883     --- a/lib/test_objagg.c
3884     +++ b/lib/test_objagg.c
3885     @@ -979,10 +979,10 @@ err_check_expect_stats2:
3886     err_world2_obj_get:
3887     for (i--; i >= 0; i--)
3888     world_obj_put(&world2, objagg, hints_case->key_ids[i]);
3889     - objagg_hints_put(hints);
3890     - objagg_destroy(objagg2);
3891     i = hints_case->key_ids_count;
3892     + objagg_destroy(objagg2);
3893     err_check_expect_hints_stats:
3894     + objagg_hints_put(hints);
3895     err_hints_get:
3896     err_check_expect_stats:
3897     err_world_obj_get:
3898     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3899     index 0d6f3ea86738..a3f4c35bb5fa 100644
3900     --- a/mm/memcontrol.c
3901     +++ b/mm/memcontrol.c
3902     @@ -2895,8 +2895,10 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
3903     return;
3904    
3905     cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
3906     - if (!cw)
3907     + if (!cw) {
3908     + css_put(&memcg->css);
3909     return;
3910     + }
3911    
3912     cw->memcg = memcg;
3913     cw->cachep = cachep;
3914     diff --git a/mm/slab_common.c b/mm/slab_common.c
3915     index ade6c257d4b4..8c1ffbf7de45 100644
3916     --- a/mm/slab_common.c
3917     +++ b/mm/slab_common.c
3918     @@ -1740,7 +1740,7 @@ void kzfree(const void *p)
3919     if (unlikely(ZERO_OR_NULL_PTR(mem)))
3920     return;
3921     ks = ksize(mem);
3922     - memset(mem, 0, ks);
3923     + memzero_explicit(mem, ks);
3924     kfree(mem);
3925     }
3926     EXPORT_SYMBOL(kzfree);
3927     diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
3928     index ce2ab14ee605..cecb4223440e 100644
3929     --- a/net/bridge/br_private.h
3930     +++ b/net/bridge/br_private.h
3931     @@ -208,8 +208,8 @@ struct net_bridge_port_group {
3932     struct rcu_head rcu;
3933     struct timer_list timer;
3934     struct br_ip addr;
3935     + unsigned char eth_addr[ETH_ALEN] __aligned(2);
3936     unsigned char flags;
3937     - unsigned char eth_addr[ETH_ALEN];
3938     };
3939    
3940     struct net_bridge_mdb_entry {
3941     diff --git a/net/core/dev.c b/net/core/dev.c
3942     index 204d87e7c9b1..727965565d31 100644
3943     --- a/net/core/dev.c
3944     +++ b/net/core/dev.c
3945     @@ -3832,10 +3832,12 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
3946    
3947     local_bh_disable();
3948    
3949     + dev_xmit_recursion_inc();
3950     HARD_TX_LOCK(dev, txq, smp_processor_id());
3951     if (!netif_xmit_frozen_or_drv_stopped(txq))
3952     ret = netdev_start_xmit(skb, dev, txq, false);
3953     HARD_TX_UNLOCK(dev, txq);
3954     + dev_xmit_recursion_dec();
3955    
3956     local_bh_enable();
3957    
3958     @@ -9114,6 +9116,13 @@ int register_netdevice(struct net_device *dev)
3959     rcu_barrier();
3960    
3961     dev->reg_state = NETREG_UNREGISTERED;
3962     + /* We should put the kobject that hold in
3963     + * netdev_unregister_kobject(), otherwise
3964     + * the net device cannot be freed when
3965     + * driver calls free_netdev(), because the
3966     + * kobject is being hold.
3967     + */
3968     + kobject_put(&dev->dev.kobj);
3969     }
3970     /*
3971     * Prevent userspace races by waiting until the network
3972     diff --git a/net/core/sock.c b/net/core/sock.c
3973     index 0adf7a9e5a90..078791a6649a 100644
3974     --- a/net/core/sock.c
3975     +++ b/net/core/sock.c
3976     @@ -709,7 +709,7 @@ bool sk_mc_loop(struct sock *sk)
3977     return inet6_sk(sk)->mc_loop;
3978     #endif
3979     }
3980     - WARN_ON(1);
3981     + WARN_ON_ONCE(1);
3982     return true;
3983     }
3984     EXPORT_SYMBOL(sk_mc_loop);
3985     @@ -1679,6 +1679,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
3986     cgroup_sk_alloc(&sk->sk_cgrp_data);
3987     sock_update_classid(&sk->sk_cgrp_data);
3988     sock_update_netprioidx(&sk->sk_cgrp_data);
3989     + sk_tx_queue_clear(sk);
3990     }
3991    
3992     return sk;
3993     @@ -1895,6 +1896,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
3994     */
3995     sk_refcnt_debug_inc(newsk);
3996     sk_set_socket(newsk, NULL);
3997     + sk_tx_queue_clear(newsk);
3998     RCU_INIT_POINTER(newsk->sk_wq, NULL);
3999    
4000     if (newsk->sk_prot->sockets_allocated)
4001     diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
4002     index 01588eef0cee..b1b3220917ca 100644
4003     --- a/net/ipv4/fib_semantics.c
4004     +++ b/net/ipv4/fib_semantics.c
4005     @@ -1100,7 +1100,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
4006     if (fl4.flowi4_scope < RT_SCOPE_LINK)
4007     fl4.flowi4_scope = RT_SCOPE_LINK;
4008    
4009     - if (table)
4010     + if (table && table != RT_TABLE_MAIN)
4011     tbl = fib_get_table(net, table);
4012    
4013     if (tbl)
4014     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
4015     index cd4b84310d92..a0b4dc54f8a6 100644
4016     --- a/net/ipv4/ip_tunnel.c
4017     +++ b/net/ipv4/ip_tunnel.c
4018     @@ -85,9 +85,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
4019     __be32 remote, __be32 local,
4020     __be32 key)
4021     {
4022     - unsigned int hash;
4023     struct ip_tunnel *t, *cand = NULL;
4024     struct hlist_head *head;
4025     + struct net_device *ndev;
4026     + unsigned int hash;
4027    
4028     hash = ip_tunnel_hash(key, remote);
4029     head = &itn->tunnels[hash];
4030     @@ -162,8 +163,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
4031     if (t && t->dev->flags & IFF_UP)
4032     return t;
4033    
4034     - if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
4035     - return netdev_priv(itn->fb_tunnel_dev);
4036     + ndev = READ_ONCE(itn->fb_tunnel_dev);
4037     + if (ndev && ndev->flags & IFF_UP)
4038     + return netdev_priv(ndev);
4039    
4040     return NULL;
4041     }
4042     @@ -1245,9 +1247,9 @@ void ip_tunnel_uninit(struct net_device *dev)
4043     struct ip_tunnel_net *itn;
4044    
4045     itn = net_generic(net, tunnel->ip_tnl_net_id);
4046     - /* fb_tunnel_dev will be unregisted in net-exit call. */
4047     - if (itn->fb_tunnel_dev != dev)
4048     - ip_tunnel_del(itn, netdev_priv(dev));
4049     + ip_tunnel_del(itn, netdev_priv(dev));
4050     + if (itn->fb_tunnel_dev == dev)
4051     + WRITE_ONCE(itn->fb_tunnel_dev, NULL);
4052    
4053     dst_cache_reset(&tunnel->dst_cache);
4054     }
4055     diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
4056     index 1b3d032a4df2..ee6c38a73325 100644
4057     --- a/net/ipv4/tcp_cubic.c
4058     +++ b/net/ipv4/tcp_cubic.c
4059     @@ -404,6 +404,8 @@ static void hystart_update(struct sock *sk, u32 delay)
4060    
4061     if (hystart_detect & HYSTART_DELAY) {
4062     /* obtain the minimum delay of more than sampling packets */
4063     + if (ca->curr_rtt > delay)
4064     + ca->curr_rtt = delay;
4065     if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
4066     if (ca->curr_rtt == 0 || ca->curr_rtt > delay)
4067     ca->curr_rtt = delay;
4068     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
4069     index 677facbeed26..3e63dc9c3eba 100644
4070     --- a/net/ipv4/tcp_input.c
4071     +++ b/net/ipv4/tcp_input.c
4072     @@ -260,7 +260,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
4073     * cwnd may be very low (even just 1 packet), so we should ACK
4074     * immediately.
4075     */
4076     - inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
4077     + if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
4078     + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
4079     }
4080     }
4081    
4082     @@ -3682,6 +3683,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
4083     tcp_in_ack_event(sk, ack_ev_flags);
4084     }
4085    
4086     + /* This is a deviation from RFC3168 since it states that:
4087     + * "When the TCP data sender is ready to set the CWR bit after reducing
4088     + * the congestion window, it SHOULD set the CWR bit only on the first
4089     + * new data packet that it transmits."
4090     + * We accept CWR on pure ACKs to be more robust
4091     + * with widely-deployed TCP implementations that do this.
4092     + */
4093     + tcp_ecn_accept_cwr(sk, skb);
4094     +
4095     /* We passed data and got it acked, remove any soft error
4096     * log. Something worked...
4097     */
4098     @@ -4587,7 +4597,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4099     if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
4100     skb, &fragstolen)) {
4101     coalesce_done:
4102     - tcp_grow_window(sk, skb);
4103     + /* For non sack flows, do not grow window to force DUPACK
4104     + * and trigger fast retransmit.
4105     + */
4106     + if (tcp_is_sack(tp))
4107     + tcp_grow_window(sk, skb);
4108     kfree_skb_partial(skb, fragstolen);
4109     skb = NULL;
4110     goto add_sack;
4111     @@ -4671,7 +4685,11 @@ add_sack:
4112     tcp_sack_new_ofo_skb(sk, seq, end_seq);
4113     end:
4114     if (skb) {
4115     - tcp_grow_window(sk, skb);
4116     + /* For non sack flows, do not grow window to force DUPACK
4117     + * and trigger fast retransmit.
4118     + */
4119     + if (tcp_is_sack(tp))
4120     + tcp_grow_window(sk, skb);
4121     skb_condense(skb);
4122     skb_set_owner_r(skb, sk);
4123     }
4124     @@ -4771,8 +4789,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4125     skb_dst_drop(skb);
4126     __skb_pull(skb, tcp_hdr(skb)->doff * 4);
4127    
4128     - tcp_ecn_accept_cwr(sk, skb);
4129     -
4130     tp->rx_opt.dsack = 0;
4131    
4132     /* Queue data for delivery to the user.
4133     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4134     index 9ec05a1df5e1..04d76f043e18 100644
4135     --- a/net/ipv6/ip6_gre.c
4136     +++ b/net/ipv6/ip6_gre.c
4137     @@ -127,6 +127,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
4138     gre_proto == htons(ETH_P_ERSPAN2)) ?
4139     ARPHRD_ETHER : ARPHRD_IP6GRE;
4140     int score, cand_score = 4;
4141     + struct net_device *ndev;
4142    
4143     for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
4144     if (!ipv6_addr_equal(local, &t->parms.laddr) ||
4145     @@ -238,9 +239,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
4146     if (t && t->dev->flags & IFF_UP)
4147     return t;
4148    
4149     - dev = ign->fb_tunnel_dev;
4150     - if (dev && dev->flags & IFF_UP)
4151     - return netdev_priv(dev);
4152     + ndev = READ_ONCE(ign->fb_tunnel_dev);
4153     + if (ndev && ndev->flags & IFF_UP)
4154     + return netdev_priv(ndev);
4155    
4156     return NULL;
4157     }
4158     @@ -413,6 +414,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
4159    
4160     ip6gre_tunnel_unlink_md(ign, t);
4161     ip6gre_tunnel_unlink(ign, t);
4162     + if (ign->fb_tunnel_dev == dev)
4163     + WRITE_ONCE(ign->fb_tunnel_dev, NULL);
4164     dst_cache_reset(&t->dst_cache);
4165     dev_put(dev);
4166     }
4167     diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
4168     index eaa4c2cc2fbb..c875c9b6edbe 100644
4169     --- a/net/ipv6/mcast.c
4170     +++ b/net/ipv6/mcast.c
4171     @@ -2618,6 +2618,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
4172     idev->mc_list = i->next;
4173    
4174     write_unlock_bh(&idev->lock);
4175     + ip6_mc_clear_src(i);
4176     ma_put(i);
4177     write_lock_bh(&idev->lock);
4178     }
4179     diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
4180     index 75da200aa5d8..133a3f1b6f56 100644
4181     --- a/net/netfilter/ipset/ip_set_core.c
4182     +++ b/net/netfilter/ipset/ip_set_core.c
4183     @@ -382,6 +382,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
4184     for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
4185     if (!add_extension(id, cadt_flags, tb))
4186     continue;
4187     + if (align < ip_set_extensions[id].align)
4188     + align = ip_set_extensions[id].align;
4189     len = ALIGN(len, ip_set_extensions[id].align);
4190     set->offset[id] = len;
4191     set->extensions |= ip_set_extensions[id].type;
4192     diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
4193     index 99352f09deaa..3d96dab10449 100644
4194     --- a/net/openvswitch/actions.c
4195     +++ b/net/openvswitch/actions.c
4196     @@ -1146,9 +1146,10 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
4197     struct sw_flow_key *key,
4198     const struct nlattr *attr, bool last)
4199     {
4200     + struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
4201     const struct nlattr *actions, *cpl_arg;
4202     + int len, max_len, rem = nla_len(attr);
4203     const struct check_pkt_len_arg *arg;
4204     - int rem = nla_len(attr);
4205     bool clone_flow_key;
4206    
4207     /* The first netlink attribute in 'attr' is always
4208     @@ -1157,7 +1158,11 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
4209     cpl_arg = nla_data(attr);
4210     arg = nla_data(cpl_arg);
4211    
4212     - if (skb->len <= arg->pkt_len) {
4213     + len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
4214     + max_len = arg->pkt_len;
4215     +
4216     + if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
4217     + len <= max_len) {
4218     /* Second netlink attribute in 'attr' is always
4219     * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
4220     */
4221     diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
4222     index b7611cc159e5..032ed76c0166 100644
4223     --- a/net/rxrpc/call_accept.c
4224     +++ b/net/rxrpc/call_accept.c
4225     @@ -22,6 +22,11 @@
4226     #include <net/ip.h>
4227     #include "ar-internal.h"
4228    
4229     +static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
4230     + unsigned long user_call_ID)
4231     +{
4232     +}
4233     +
4234     /*
4235     * Preallocate a single service call, connection and peer and, if possible,
4236     * give them a user ID and attach the user's side of the ID to them.
4237     @@ -228,6 +233,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
4238     if (rx->discard_new_call) {
4239     _debug("discard %lx", call->user_call_ID);
4240     rx->discard_new_call(call, call->user_call_ID);
4241     + if (call->notify_rx)
4242     + call->notify_rx = rxrpc_dummy_notify;
4243     rxrpc_put_call(call, rxrpc_call_put_kernel);
4244     }
4245     rxrpc_call_completed(call);
4246     diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
4247     index 3be4177baf70..22dec6049e1b 100644
4248     --- a/net/rxrpc/input.c
4249     +++ b/net/rxrpc/input.c
4250     @@ -723,13 +723,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
4251     ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
4252     rwind, ntohl(ackinfo->jumbo_max));
4253    
4254     + if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
4255     + rwind = RXRPC_RXTX_BUFF_SIZE - 1;
4256     if (call->tx_winsize != rwind) {
4257     - if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
4258     - rwind = RXRPC_RXTX_BUFF_SIZE - 1;
4259     if (rwind > call->tx_winsize)
4260     wake = true;
4261     - trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
4262     - ntohl(ackinfo->rwind), wake);
4263     + trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
4264     call->tx_winsize = rwind;
4265     }
4266    
4267     diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
4268     index 2277369feae5..5d605bab9afc 100644
4269     --- a/net/sched/sch_cake.c
4270     +++ b/net/sched/sch_cake.c
4271     @@ -1515,32 +1515,51 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
4272     return idx + (tin << 16);
4273     }
4274    
4275     -static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
4276     +static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
4277     {
4278     - int wlen = skb_network_offset(skb);
4279     + const int offset = skb_network_offset(skb);
4280     + u16 *buf, buf_;
4281     u8 dscp;
4282    
4283     switch (tc_skb_protocol(skb)) {
4284     case htons(ETH_P_IP):
4285     - wlen += sizeof(struct iphdr);
4286     - if (!pskb_may_pull(skb, wlen) ||
4287     - skb_try_make_writable(skb, wlen))
4288     + buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
4289     + if (unlikely(!buf))
4290     return 0;
4291    
4292     - dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
4293     - if (wash && dscp)
4294     + /* ToS is in the second byte of iphdr */
4295     + dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
4296     +
4297     + if (wash && dscp) {
4298     + const int wlen = offset + sizeof(struct iphdr);
4299     +
4300     + if (!pskb_may_pull(skb, wlen) ||
4301     + skb_try_make_writable(skb, wlen))
4302     + return 0;
4303     +
4304     ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
4305     + }
4306     +
4307     return dscp;
4308    
4309     case htons(ETH_P_IPV6):
4310     - wlen += sizeof(struct ipv6hdr);
4311     - if (!pskb_may_pull(skb, wlen) ||
4312     - skb_try_make_writable(skb, wlen))
4313     + buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
4314     + if (unlikely(!buf))
4315     return 0;
4316    
4317     - dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
4318     - if (wash && dscp)
4319     + /* Traffic class is in the first and second bytes of ipv6hdr */
4320     + dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
4321     +
4322     + if (wash && dscp) {
4323     + const int wlen = offset + sizeof(struct ipv6hdr);
4324     +
4325     + if (!pskb_may_pull(skb, wlen) ||
4326     + skb_try_make_writable(skb, wlen))
4327     + return 0;
4328     +
4329     ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
4330     + }
4331     +
4332     return dscp;
4333    
4334     case htons(ETH_P_ARP):
4335     @@ -1557,14 +1576,17 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
4336     {
4337     struct cake_sched_data *q = qdisc_priv(sch);
4338     u32 tin, mark;
4339     + bool wash;
4340     u8 dscp;
4341    
4342     /* Tin selection: Default to diffserv-based selection, allow overriding
4343     - * using firewall marks or skb->priority.
4344     + * using firewall marks or skb->priority. Call DSCP parsing early if
4345     + * wash is enabled, otherwise defer to below to skip unneeded parsing.
4346     */
4347     - dscp = cake_handle_diffserv(skb,
4348     - q->rate_flags & CAKE_FLAG_WASH);
4349     mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
4350     + wash = !!(q->rate_flags & CAKE_FLAG_WASH);
4351     + if (wash)
4352     + dscp = cake_handle_diffserv(skb, wash);
4353    
4354     if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
4355     tin = 0;
4356     @@ -1578,6 +1600,8 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
4357     tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
4358    
4359     else {
4360     + if (!wash)
4361     + dscp = cake_handle_diffserv(skb, wash);
4362     tin = q->tin_index[dscp];
4363    
4364     if (unlikely(tin >= q->tin_cnt))
4365     @@ -2679,7 +2703,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
4366     qdisc_watchdog_init(&q->watchdog, sch);
4367    
4368     if (opt) {
4369     - int err = cake_change(sch, opt, extack);
4370     + err = cake_change(sch, opt, extack);
4371    
4372     if (err)
4373     return err;
4374     @@ -2996,7 +3020,7 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
4375     PUT_STAT_S32(BLUE_TIMER_US,
4376     ktime_to_us(
4377     ktime_sub(now,
4378     - flow->cvars.blue_timer)));
4379     + flow->cvars.blue_timer)));
4380     }
4381     if (flow->cvars.dropping) {
4382     PUT_STAT_S32(DROP_NEXT_US,
4383     diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
4384     index 7c3c5fdb82a9..896c9037155a 100644
4385     --- a/net/sched/sch_generic.c
4386     +++ b/net/sched/sch_generic.c
4387     @@ -469,6 +469,7 @@ void __netdev_watchdog_up(struct net_device *dev)
4388     dev_hold(dev);
4389     }
4390     }
4391     +EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
4392    
4393     static void dev_watchdog_up(struct net_device *dev)
4394     {
4395     diff --git a/net/sctp/associola.c b/net/sctp/associola.c
4396     index 41839b85c268..fb6f62264e87 100644
4397     --- a/net/sctp/associola.c
4398     +++ b/net/sctp/associola.c
4399     @@ -1569,12 +1569,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
4400     int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
4401     enum sctp_scope scope, gfp_t gfp)
4402     {
4403     + struct sock *sk = asoc->base.sk;
4404     int flags;
4405    
4406     /* Use scoping rules to determine the subset of addresses from
4407     * the endpoint.
4408     */
4409     - flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
4410     + flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
4411     + if (!inet_v6_ipv6only(sk))
4412     + flags |= SCTP_ADDR4_ALLOWED;
4413     if (asoc->peer.ipv4_address)
4414     flags |= SCTP_ADDR4_PEERSUPP;
4415     if (asoc->peer.ipv6_address)
4416     diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
4417     index 53bc61537f44..701c5a4e441d 100644
4418     --- a/net/sctp/bind_addr.c
4419     +++ b/net/sctp/bind_addr.c
4420     @@ -461,6 +461,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
4421     * well as the remote peer.
4422     */
4423     if ((((AF_INET == addr->sa.sa_family) &&
4424     + (flags & SCTP_ADDR4_ALLOWED) &&
4425     (flags & SCTP_ADDR4_PEERSUPP))) ||
4426     (((AF_INET6 == addr->sa.sa_family) &&
4427     (flags & SCTP_ADDR6_ALLOWED) &&
4428     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
4429     index 237c88eeb538..981c7cbca46a 100644
4430     --- a/net/sctp/protocol.c
4431     +++ b/net/sctp/protocol.c
4432     @@ -148,7 +148,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
4433     * sock as well as the remote peer.
4434     */
4435     if (addr->a.sa.sa_family == AF_INET &&
4436     - !(copy_flags & SCTP_ADDR4_PEERSUPP))
4437     + (!(copy_flags & SCTP_ADDR4_ALLOWED) ||
4438     + !(copy_flags & SCTP_ADDR4_PEERSUPP)))
4439     continue;
4440     if (addr->a.sa.sa_family == AF_INET6 &&
4441     (!(copy_flags & SCTP_ADDR6_ALLOWED) ||
4442     diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
4443     index b71a39ded930..37792675ed57 100644
4444     --- a/net/sunrpc/rpc_pipe.c
4445     +++ b/net/sunrpc/rpc_pipe.c
4446     @@ -1317,6 +1317,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
4447     q.len = strlen(gssd_dummy_clnt_dir[0].name);
4448     clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
4449     if (!clnt_dentry) {
4450     + __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
4451     pipe_dentry = ERR_PTR(-ENOENT);
4452     goto out;
4453     }
4454     diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
4455     index 451ca7ec321c..7ef37054071f 100644
4456     --- a/net/sunrpc/xdr.c
4457     +++ b/net/sunrpc/xdr.c
4458     @@ -1118,6 +1118,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
4459     base = 0;
4460     } else {
4461     base -= buf->head[0].iov_len;
4462     + subbuf->head[0].iov_base = buf->head[0].iov_base;
4463     subbuf->head[0].iov_len = 0;
4464     }
4465    
4466     @@ -1130,6 +1131,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
4467     base = 0;
4468     } else {
4469     base -= buf->page_len;
4470     + subbuf->pages = buf->pages;
4471     + subbuf->page_base = 0;
4472     subbuf->page_len = 0;
4473     }
4474    
4475     @@ -1141,6 +1144,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
4476     base = 0;
4477     } else {
4478     base -= buf->tail[0].iov_len;
4479     + subbuf->tail[0].iov_base = buf->tail[0].iov_base;
4480     subbuf->tail[0].iov_len = 0;
4481     }
4482    
4483     diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
4484     index ef5102b60589..c56e6cfc4a62 100644
4485     --- a/net/sunrpc/xprtrdma/rpc_rdma.c
4486     +++ b/net/sunrpc/xprtrdma/rpc_rdma.c
4487     @@ -1246,8 +1246,7 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
4488     be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
4489     }
4490    
4491     - r_xprt->rx_stats.bad_reply_count++;
4492     - return -EREMOTEIO;
4493     + return -EIO;
4494     }
4495    
4496     /* Perform XID lookup, reconstruction of the RPC reply, and
4497     @@ -1284,13 +1283,11 @@ out:
4498     spin_unlock(&xprt->queue_lock);
4499     return;
4500    
4501     -/* If the incoming reply terminated a pending RPC, the next
4502     - * RPC call will post a replacement receive buffer as it is
4503     - * being marshaled.
4504     - */
4505     out_badheader:
4506     trace_xprtrdma_reply_hdr(rep);
4507     r_xprt->rx_stats.bad_reply_count++;
4508     + rqst->rq_task->tk_status = status;
4509     + status = 0;
4510     goto out;
4511     }
4512    
4513     diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
4514     index c365b918be35..bb2292b5260c 100644
4515     --- a/net/xfrm/xfrm_device.c
4516     +++ b/net/xfrm/xfrm_device.c
4517     @@ -82,7 +82,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
4518     struct xfrm_offload *xo = xfrm_offload(skb);
4519     struct sec_path *sp;
4520    
4521     - if (!xo)
4522     + if (!xo || (xo->flags & XFRM_XMIT))
4523     return skb;
4524    
4525     if (!(features & NETIF_F_HW_ESP))
4526     @@ -103,6 +103,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
4527     return skb;
4528     }
4529    
4530     + xo->flags |= XFRM_XMIT;
4531     +
4532     if (skb_is_gso(skb)) {
4533     struct net_device *dev = skb->dev;
4534    
4535     diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
4536     index dd558cbb2309..ef53b93db573 100644
4537     --- a/samples/bpf/xdp_monitor_user.c
4538     +++ b/samples/bpf/xdp_monitor_user.c
4539     @@ -509,11 +509,8 @@ static void *alloc_rec_per_cpu(int record_size)
4540     {
4541     unsigned int nr_cpus = bpf_num_possible_cpus();
4542     void *array;
4543     - size_t size;
4544    
4545     - size = record_size * nr_cpus;
4546     - array = malloc(size);
4547     - memset(array, 0, size);
4548     + array = calloc(nr_cpus, record_size);
4549     if (!array) {
4550     fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
4551     exit(EXIT_FAIL_MEM);
4552     @@ -528,8 +525,7 @@ static struct stats_record *alloc_stats_record(void)
4553     int i;
4554    
4555     /* Alloc main stats_record structure */
4556     - rec = malloc(sizeof(*rec));
4557     - memset(rec, 0, sizeof(*rec));
4558     + rec = calloc(1, sizeof(*rec));
4559     if (!rec) {
4560     fprintf(stderr, "Mem alloc error\n");
4561     exit(EXIT_FAIL_MEM);
4562     diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
4563     index cfcc31e51197..d94a999b4b4b 100644
4564     --- a/samples/bpf/xdp_redirect_cpu_kern.c
4565     +++ b/samples/bpf/xdp_redirect_cpu_kern.c
4566     @@ -15,7 +15,7 @@
4567     #include "bpf_helpers.h"
4568     #include "hash_func01.h"
4569    
4570     -#define MAX_CPUS 64 /* WARNING - sync with _user.c */
4571     +#define MAX_CPUS NR_CPUS
4572    
4573     /* Special map type that can XDP_REDIRECT frames to another CPU */
4574     struct {
4575     diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
4576     index 8b862a7a6c6a..0a7672556822 100644
4577     --- a/samples/bpf/xdp_redirect_cpu_user.c
4578     +++ b/samples/bpf/xdp_redirect_cpu_user.c
4579     @@ -13,6 +13,7 @@ static const char *__doc__ =
4580     #include <unistd.h>
4581     #include <locale.h>
4582     #include <sys/resource.h>
4583     +#include <sys/sysinfo.h>
4584     #include <getopt.h>
4585     #include <net/if.h>
4586     #include <time.h>
4587     @@ -24,8 +25,6 @@ static const char *__doc__ =
4588     #include <arpa/inet.h>
4589     #include <linux/if_link.h>
4590    
4591     -#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
4592     -
4593     /* How many xdp_progs are defined in _kern.c */
4594     #define MAX_PROG 6
4595    
4596     @@ -40,6 +39,7 @@ static char *ifname;
4597     static __u32 prog_id;
4598    
4599     static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
4600     +static int n_cpus;
4601     static int cpu_map_fd;
4602     static int rx_cnt_map_fd;
4603     static int redirect_err_cnt_map_fd;
4604     @@ -170,7 +170,7 @@ struct stats_record {
4605     struct record redir_err;
4606     struct record kthread;
4607     struct record exception;
4608     - struct record enq[MAX_CPUS];
4609     + struct record enq[];
4610     };
4611    
4612     static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
4613     @@ -210,11 +210,8 @@ static struct datarec *alloc_record_per_cpu(void)
4614     {
4615     unsigned int nr_cpus = bpf_num_possible_cpus();
4616     struct datarec *array;
4617     - size_t size;
4618    
4619     - size = sizeof(struct datarec) * nr_cpus;
4620     - array = malloc(size);
4621     - memset(array, 0, size);
4622     + array = calloc(nr_cpus, sizeof(struct datarec));
4623     if (!array) {
4624     fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
4625     exit(EXIT_FAIL_MEM);
4626     @@ -225,19 +222,20 @@ static struct datarec *alloc_record_per_cpu(void)
4627     static struct stats_record *alloc_stats_record(void)
4628     {
4629     struct stats_record *rec;
4630     - int i;
4631     + int i, size;
4632    
4633     - rec = malloc(sizeof(*rec));
4634     - memset(rec, 0, sizeof(*rec));
4635     + size = sizeof(*rec) + n_cpus * sizeof(struct record);
4636     + rec = malloc(size);
4637     if (!rec) {
4638     fprintf(stderr, "Mem alloc error\n");
4639     exit(EXIT_FAIL_MEM);
4640     }
4641     + memset(rec, 0, size);
4642     rec->rx_cnt.cpu = alloc_record_per_cpu();
4643     rec->redir_err.cpu = alloc_record_per_cpu();
4644     rec->kthread.cpu = alloc_record_per_cpu();
4645     rec->exception.cpu = alloc_record_per_cpu();
4646     - for (i = 0; i < MAX_CPUS; i++)
4647     + for (i = 0; i < n_cpus; i++)
4648     rec->enq[i].cpu = alloc_record_per_cpu();
4649    
4650     return rec;
4651     @@ -247,7 +245,7 @@ static void free_stats_record(struct stats_record *r)
4652     {
4653     int i;
4654    
4655     - for (i = 0; i < MAX_CPUS; i++)
4656     + for (i = 0; i < n_cpus; i++)
4657     free(r->enq[i].cpu);
4658     free(r->exception.cpu);
4659     free(r->kthread.cpu);
4660     @@ -350,7 +348,7 @@ static void stats_print(struct stats_record *stats_rec,
4661     }
4662    
4663     /* cpumap enqueue stats */
4664     - for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
4665     + for (to_cpu = 0; to_cpu < n_cpus; to_cpu++) {
4666     char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
4667     char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
4668     char *errstr = "";
4669     @@ -475,7 +473,7 @@ static void stats_collect(struct stats_record *rec)
4670     map_collect_percpu(fd, 1, &rec->redir_err);
4671    
4672     fd = cpumap_enqueue_cnt_map_fd;
4673     - for (i = 0; i < MAX_CPUS; i++)
4674     + for (i = 0; i < n_cpus; i++)
4675     map_collect_percpu(fd, i, &rec->enq[i]);
4676    
4677     fd = cpumap_kthread_cnt_map_fd;
4678     @@ -549,10 +547,10 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
4679     */
4680     static void mark_cpus_unavailable(void)
4681     {
4682     - __u32 invalid_cpu = MAX_CPUS;
4683     + __u32 invalid_cpu = n_cpus;
4684     int ret, i;
4685    
4686     - for (i = 0; i < MAX_CPUS; i++) {
4687     + for (i = 0; i < n_cpus; i++) {
4688     ret = bpf_map_update_elem(cpus_available_map_fd, &i,
4689     &invalid_cpu, 0);
4690     if (ret) {
4691     @@ -688,6 +686,8 @@ int main(int argc, char **argv)
4692     int prog_fd;
4693     __u32 qsize;
4694    
4695     + n_cpus = get_nprocs_conf();
4696     +
4697     /* Notice: choosing he queue size is very important with the
4698     * ixgbe driver, because it's driver page recycling trick is
4699     * dependend on pages being returned quickly. The number of
4700     @@ -757,7 +757,7 @@ int main(int argc, char **argv)
4701     case 'c':
4702     /* Add multiple CPUs */
4703     add_cpu = strtoul(optarg, NULL, 0);
4704     - if (add_cpu >= MAX_CPUS) {
4705     + if (add_cpu >= n_cpus) {
4706     fprintf(stderr,
4707     "--cpu nr too large for cpumap err(%d):%s\n",
4708     errno, strerror(errno));
4709     diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
4710     index b88df17853b8..21d6e5067a83 100644
4711     --- a/samples/bpf/xdp_rxq_info_user.c
4712     +++ b/samples/bpf/xdp_rxq_info_user.c
4713     @@ -198,11 +198,8 @@ static struct datarec *alloc_record_per_cpu(void)
4714     {
4715     unsigned int nr_cpus = bpf_num_possible_cpus();
4716     struct datarec *array;
4717     - size_t size;
4718    
4719     - size = sizeof(struct datarec) * nr_cpus;
4720     - array = malloc(size);
4721     - memset(array, 0, size);
4722     + array = calloc(nr_cpus, sizeof(struct datarec));
4723     if (!array) {
4724     fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
4725     exit(EXIT_FAIL_MEM);
4726     @@ -214,11 +211,8 @@ static struct record *alloc_record_per_rxq(void)
4727     {
4728     unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
4729     struct record *array;
4730     - size_t size;
4731    
4732     - size = sizeof(struct record) * nr_rxqs;
4733     - array = malloc(size);
4734     - memset(array, 0, size);
4735     + array = calloc(nr_rxqs, sizeof(struct record));
4736     if (!array) {
4737     fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
4738     exit(EXIT_FAIL_MEM);
4739     @@ -232,8 +226,7 @@ static struct stats_record *alloc_stats_record(void)
4740     struct stats_record *rec;
4741     int i;
4742    
4743     - rec = malloc(sizeof(*rec));
4744     - memset(rec, 0, sizeof(*rec));
4745     + rec = calloc(1, sizeof(struct stats_record));
4746     if (!rec) {
4747     fprintf(stderr, "Mem alloc error\n");
4748     exit(EXIT_FAIL_MEM);
4749     diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
4750     index d1dd4a6b6adb..7da10afc92c6 100644
4751     --- a/scripts/Kbuild.include
4752     +++ b/scripts/Kbuild.include
4753     @@ -82,20 +82,21 @@ cc-cross-prefix = $(firstword $(foreach c, $(1), \
4754     $(if $(shell command -v -- $(c)gcc 2>/dev/null), $(c))))
4755    
4756     # output directory for tests below
4757     -TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
4758     +TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$
4759    
4760     # try-run
4761     # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
4762     # Exit code chooses option. "$$TMP" serves as a temporary file and is
4763     # automatically cleaned up.
4764     try-run = $(shell set -e; \
4765     - TMP="$(TMPOUT).$$$$.tmp"; \
4766     - TMPO="$(TMPOUT).$$$$.o"; \
4767     + TMP=$(TMPOUT)/tmp; \
4768     + TMPO=$(TMPOUT)/tmp.o; \
4769     + mkdir -p $(TMPOUT); \
4770     + trap "rm -rf $(TMPOUT)" EXIT; \
4771     if ($(1)) >/dev/null 2>&1; \
4772     then echo "$(2)"; \
4773     else echo "$(3)"; \
4774     - fi; \
4775     - rm -f "$$TMP" "$$TMPO")
4776     + fi)
4777    
4778     # as-option
4779     # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
4780     diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
4781     index 74eab03e31d4..f9b19524da11 100644
4782     --- a/scripts/recordmcount.h
4783     +++ b/scripts/recordmcount.h
4784     @@ -29,6 +29,11 @@
4785     #undef has_rel_mcount
4786     #undef tot_relsize
4787     #undef get_mcountsym
4788     +#undef find_symtab
4789     +#undef get_shnum
4790     +#undef set_shnum
4791     +#undef get_shstrndx
4792     +#undef get_symindex
4793     #undef get_sym_str_and_relp
4794     #undef do_func
4795     #undef Elf_Addr
4796     @@ -58,6 +63,11 @@
4797     # define __has_rel_mcount __has64_rel_mcount
4798     # define has_rel_mcount has64_rel_mcount
4799     # define tot_relsize tot64_relsize
4800     +# define find_symtab find_symtab64
4801     +# define get_shnum get_shnum64
4802     +# define set_shnum set_shnum64
4803     +# define get_shstrndx get_shstrndx64
4804     +# define get_symindex get_symindex64
4805     # define get_sym_str_and_relp get_sym_str_and_relp_64
4806     # define do_func do64
4807     # define get_mcountsym get_mcountsym_64
4808     @@ -91,6 +101,11 @@
4809     # define __has_rel_mcount __has32_rel_mcount
4810     # define has_rel_mcount has32_rel_mcount
4811     # define tot_relsize tot32_relsize
4812     +# define find_symtab find_symtab32
4813     +# define get_shnum get_shnum32
4814     +# define set_shnum set_shnum32
4815     +# define get_shstrndx get_shstrndx32
4816     +# define get_symindex get_symindex32
4817     # define get_sym_str_and_relp get_sym_str_and_relp_32
4818     # define do_func do32
4819     # define get_mcountsym get_mcountsym_32
4820     @@ -173,6 +188,67 @@ static int MIPS_is_fake_mcount(Elf_Rel const *rp)
4821     return is_fake;
4822     }
4823    
4824     +static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
4825     + Elf32_Word const *symtab_shndx)
4826     +{
4827     + unsigned long offset;
4828     + int index;
4829     +
4830     + if (sym->st_shndx != SHN_XINDEX)
4831     + return w2(sym->st_shndx);
4832     +
4833     + offset = (unsigned long)sym - (unsigned long)symtab;
4834     + index = offset / sizeof(*sym);
4835     +
4836     + return w(symtab_shndx[index]);
4837     +}
4838     +
4839     +static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
4840     +{
4841     + if (shdr0 && !ehdr->e_shnum)
4842     + return w(shdr0->sh_size);
4843     +
4844     + return w2(ehdr->e_shnum);
4845     +}
4846     +
4847     +static void set_shnum(Elf_Ehdr *ehdr, Elf_Shdr *shdr0, unsigned int new_shnum)
4848     +{
4849     + if (new_shnum >= SHN_LORESERVE) {
4850     + ehdr->e_shnum = 0;
4851     + shdr0->sh_size = w(new_shnum);
4852     + } else
4853     + ehdr->e_shnum = w2(new_shnum);
4854     +}
4855     +
4856     +static int get_shstrndx(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
4857     +{
4858     + if (ehdr->e_shstrndx != SHN_XINDEX)
4859     + return w2(ehdr->e_shstrndx);
4860     +
4861     + return w(shdr0->sh_link);
4862     +}
4863     +
4864     +static void find_symtab(Elf_Ehdr *const ehdr, Elf_Shdr const *shdr0,
4865     + unsigned const nhdr, Elf32_Word **symtab,
4866     + Elf32_Word **symtab_shndx)
4867     +{
4868     + Elf_Shdr const *relhdr;
4869     + unsigned k;
4870     +
4871     + *symtab = NULL;
4872     + *symtab_shndx = NULL;
4873     +
4874     + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
4875     + if (relhdr->sh_type == SHT_SYMTAB)
4876     + *symtab = (void *)ehdr + relhdr->sh_offset;
4877     + else if (relhdr->sh_type == SHT_SYMTAB_SHNDX)
4878     + *symtab_shndx = (void *)ehdr + relhdr->sh_offset;
4879     +
4880     + if (*symtab && *symtab_shndx)
4881     + break;
4882     + }
4883     +}
4884     +
4885     /* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */
4886     static int append_func(Elf_Ehdr *const ehdr,
4887     Elf_Shdr *const shstr,
4888     @@ -188,10 +264,12 @@ static int append_func(Elf_Ehdr *const ehdr,
4889     char const *mc_name = (sizeof(Elf_Rela) == rel_entsize)
4890     ? ".rela__mcount_loc"
4891     : ".rel__mcount_loc";
4892     - unsigned const old_shnum = w2(ehdr->e_shnum);
4893     uint_t const old_shoff = _w(ehdr->e_shoff);
4894     uint_t const old_shstr_sh_size = _w(shstr->sh_size);
4895     uint_t const old_shstr_sh_offset = _w(shstr->sh_offset);
4896     + Elf_Shdr *const shdr0 = (Elf_Shdr *)(old_shoff + (void *)ehdr);
4897     + unsigned int const old_shnum = get_shnum(ehdr, shdr0);
4898     + unsigned int const new_shnum = 2 + old_shnum; /* {.rel,}__mcount_loc */
4899     uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size);
4900     uint_t new_e_shoff;
4901    
4902     @@ -201,6 +279,8 @@ static int append_func(Elf_Ehdr *const ehdr,
4903     t += (_align & -t); /* word-byte align */
4904     new_e_shoff = t;
4905    
4906     + set_shnum(ehdr, shdr0, new_shnum);
4907     +
4908     /* body for new shstrtab */
4909     if (ulseek(sb.st_size, SEEK_SET) < 0)
4910     return -1;
4911     @@ -255,7 +335,6 @@ static int append_func(Elf_Ehdr *const ehdr,
4912     return -1;
4913    
4914     ehdr->e_shoff = _w(new_e_shoff);
4915     - ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum)); /* {.rel,}__mcount_loc */
4916     if (ulseek(0, SEEK_SET) < 0)
4917     return -1;
4918     if (uwrite(ehdr, sizeof(*ehdr)) < 0)
4919     @@ -434,6 +513,8 @@ static int find_secsym_ndx(unsigned const txtndx,
4920     uint_t *const recvalp,
4921     unsigned int *sym_index,
4922     Elf_Shdr const *const symhdr,
4923     + Elf32_Word const *symtab,
4924     + Elf32_Word const *symtab_shndx,
4925     Elf_Ehdr const *const ehdr)
4926     {
4927     Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset)
4928     @@ -445,7 +526,7 @@ static int find_secsym_ndx(unsigned const txtndx,
4929     for (symp = sym0, t = nsym; t; --t, ++symp) {
4930     unsigned int const st_bind = ELF_ST_BIND(symp->st_info);
4931    
4932     - if (txtndx == w2(symp->st_shndx)
4933     + if (txtndx == get_symindex(symp, symtab, symtab_shndx)
4934     /* avoid STB_WEAK */
4935     && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
4936     /* function symbols on ARM have quirks, avoid them */
4937     @@ -516,21 +597,23 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0,
4938     return totrelsz;
4939     }
4940    
4941     -
4942     /* Overall supervision for Elf32 ET_REL file. */
4943     static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
4944     unsigned const reltype)
4945     {
4946     Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff)
4947     + (void *)ehdr);
4948     - unsigned const nhdr = w2(ehdr->e_shnum);
4949     - Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)];
4950     + unsigned const nhdr = get_shnum(ehdr, shdr0);
4951     + Elf_Shdr *const shstr = &shdr0[get_shstrndx(ehdr, shdr0)];
4952     char const *const shstrtab = (char const *)(_w(shstr->sh_offset)
4953     + (void *)ehdr);
4954    
4955     Elf_Shdr const *relhdr;
4956     unsigned k;
4957    
4958     + Elf32_Word *symtab;
4959     + Elf32_Word *symtab_shndx;
4960     +
4961     /* Upper bound on space: assume all relevant relocs are for mcount. */
4962     unsigned totrelsz;
4963    
4964     @@ -561,6 +644,8 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
4965     return -1;
4966     }
4967    
4968     + find_symtab(ehdr, shdr0, nhdr, &symtab, &symtab_shndx);
4969     +
4970     for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
4971     char const *const txtname = has_rel_mcount(relhdr, shdr0,
4972     shstrtab, fname);
4973     @@ -577,6 +662,7 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
4974     result = find_secsym_ndx(w(relhdr->sh_info), txtname,
4975     &recval, &recsym,
4976     &shdr0[symsec_sh_link],
4977     + symtab, symtab_shndx,
4978     ehdr);
4979     if (result)
4980     goto out;
4981     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4982     index d41c91468ab3..e78c4367b6c8 100644
4983     --- a/sound/pci/hda/patch_hdmi.c
4984     +++ b/sound/pci/hda/patch_hdmi.c
4985     @@ -4146,6 +4146,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi),
4986     HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi),
4987     HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi),
4988     HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi),
4989     +HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi),
4990     +HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi),
4991     +HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi),
4992     +HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi),
4993     +HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi),
4994     HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
4995     HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
4996     HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
4997     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4998     index 459a7d61326e..34868459104d 100644
4999     --- a/sound/pci/hda/patch_realtek.c
5000     +++ b/sound/pci/hda/patch_realtek.c
5001     @@ -2460,6 +2460,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5002     SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
5003     SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
5004     SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
5005     + SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
5006     SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
5007     SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
5008     SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
5009     @@ -7435,6 +7436,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5010     SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5011     SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5012     SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5013     + SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
5014     + SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
5015     SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
5016     SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
5017     SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
5018     diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
5019     index 537dc69256f0..a4ebd6ddaba1 100644
5020     --- a/sound/soc/fsl/fsl_ssi.c
5021     +++ b/sound/soc/fsl/fsl_ssi.c
5022     @@ -678,8 +678,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
5023     struct regmap *regs = ssi->regs;
5024     u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
5025     unsigned long clkrate, baudrate, tmprate;
5026     - unsigned int slots = params_channels(hw_params);
5027     - unsigned int slot_width = 32;
5028     + unsigned int channels = params_channels(hw_params);
5029     + unsigned int slot_width = params_width(hw_params);
5030     + unsigned int slots = 2;
5031     u64 sub, savesub = 100000;
5032     unsigned int freq;
5033     bool baudclk_is_used;
5034     @@ -688,10 +689,14 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
5035     /* Override slots and slot_width if being specifically set... */
5036     if (ssi->slots)
5037     slots = ssi->slots;
5038     - /* ...but keep 32 bits if slots is 2 -- I2S Master mode */
5039     - if (ssi->slot_width && slots != 2)
5040     + if (ssi->slot_width)
5041     slot_width = ssi->slot_width;
5042    
5043     + /* ...but force 32 bits for stereo audio using I2S Master Mode */
5044     + if (channels == 2 &&
5045     + (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER)
5046     + slot_width = 32;
5047     +
5048     /* Generate bit clock based on the slot number and slot width */
5049     freq = slots * slot_width * params_rate(hw_params);
5050    
5051     diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
5052     index 6c20bdd850f3..8ada4ecba847 100644
5053     --- a/sound/soc/qcom/common.c
5054     +++ b/sound/soc/qcom/common.c
5055     @@ -4,6 +4,7 @@
5056    
5057     #include <linux/module.h>
5058     #include "common.h"
5059     +#include "qdsp6/q6afe.h"
5060    
5061     int qcom_snd_parse_of(struct snd_soc_card *card)
5062     {
5063     @@ -101,6 +102,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
5064     }
5065     link->no_pcm = 1;
5066     link->ignore_pmdown_time = 1;
5067     +
5068     + if (q6afe_is_rx_port(link->id)) {
5069     + link->dpcm_playback = 1;
5070     + link->dpcm_capture = 0;
5071     + } else {
5072     + link->dpcm_playback = 0;
5073     + link->dpcm_capture = 1;
5074     + }
5075     +
5076     } else {
5077     dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL);
5078     if (!dlc)
5079     @@ -113,12 +123,12 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
5080     link->codecs->dai_name = "snd-soc-dummy-dai";
5081     link->codecs->name = "snd-soc-dummy";
5082     link->dynamic = 1;
5083     + link->dpcm_playback = 1;
5084     + link->dpcm_capture = 1;
5085     }
5086    
5087     link->ignore_suspend = 1;
5088     link->nonatomic = 1;
5089     - link->dpcm_playback = 1;
5090     - link->dpcm_capture = 1;
5091     link->stream_name = link->name;
5092     link++;
5093    
5094     diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
5095     index e0945f7a58c8..0ce4eb60f984 100644
5096     --- a/sound/soc/qcom/qdsp6/q6afe.c
5097     +++ b/sound/soc/qcom/qdsp6/q6afe.c
5098     @@ -800,6 +800,14 @@ int q6afe_get_port_id(int index)
5099     }
5100     EXPORT_SYMBOL_GPL(q6afe_get_port_id);
5101    
5102     +int q6afe_is_rx_port(int index)
5103     +{
5104     + if (index < 0 || index >= AFE_PORT_MAX)
5105     + return -EINVAL;
5106     +
5107     + return port_maps[index].is_rx;
5108     +}
5109     +EXPORT_SYMBOL_GPL(q6afe_is_rx_port);
5110     static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
5111     struct q6afe_port *port)
5112     {
5113     diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h
5114     index c7ed5422baff..1a0f80a14afe 100644
5115     --- a/sound/soc/qcom/qdsp6/q6afe.h
5116     +++ b/sound/soc/qcom/qdsp6/q6afe.h
5117     @@ -198,6 +198,7 @@ int q6afe_port_start(struct q6afe_port *port);
5118     int q6afe_port_stop(struct q6afe_port *port);
5119     void q6afe_port_put(struct q6afe_port *port);
5120     int q6afe_get_port_id(int index);
5121     +int q6afe_is_rx_port(int index);
5122     void q6afe_hdmi_port_prepare(struct q6afe_port *port,
5123     struct q6afe_hdmi_cfg *cfg);
5124     void q6afe_slim_port_prepare(struct q6afe_port *port,
5125     diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
5126     index e8141a33a55e..835ac98a789c 100644
5127     --- a/sound/soc/qcom/qdsp6/q6asm.c
5128     +++ b/sound/soc/qcom/qdsp6/q6asm.c
5129     @@ -25,6 +25,7 @@
5130     #define ASM_STREAM_CMD_FLUSH 0x00010BCE
5131     #define ASM_SESSION_CMD_PAUSE 0x00010BD3
5132     #define ASM_DATA_CMD_EOS 0x00010BDB
5133     +#define ASM_DATA_EVENT_RENDERED_EOS 0x00010C1C
5134     #define ASM_NULL_POPP_TOPOLOGY 0x00010C68
5135     #define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09
5136     #define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
5137     @@ -546,9 +547,6 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
5138     case ASM_SESSION_CMD_SUSPEND:
5139     client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
5140     break;
5141     - case ASM_DATA_CMD_EOS:
5142     - client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
5143     - break;
5144     case ASM_STREAM_CMD_FLUSH:
5145     client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
5146     break;
5147     @@ -651,6 +649,9 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
5148     spin_unlock_irqrestore(&ac->lock, flags);
5149     }
5150    
5151     + break;
5152     + case ASM_DATA_EVENT_RENDERED_EOS:
5153     + client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
5154     break;
5155     }
5156    
5157     diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
5158     index 7cd42fcfcf38..1707414cfa92 100644
5159     --- a/sound/soc/rockchip/rockchip_pdm.c
5160     +++ b/sound/soc/rockchip/rockchip_pdm.c
5161     @@ -590,8 +590,10 @@ static int rockchip_pdm_resume(struct device *dev)
5162     int ret;
5163    
5164     ret = pm_runtime_get_sync(dev);
5165     - if (ret < 0)
5166     + if (ret < 0) {
5167     + pm_runtime_put(dev);
5168     return ret;
5169     + }
5170    
5171     ret = regcache_sync(pdm->regmap);
5172    
5173     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
5174     index f55afe3a98e3..9079c380228f 100644
5175     --- a/sound/usb/mixer.c
5176     +++ b/sound/usb/mixer.c
5177     @@ -576,8 +576,9 @@ static int check_matrix_bitmap(unsigned char *bmap,
5178     * if failed, give up and free the control instance.
5179     */
5180    
5181     -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
5182     - struct snd_kcontrol *kctl)
5183     +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
5184     + struct snd_kcontrol *kctl,
5185     + bool is_std_info)
5186     {
5187     struct usb_mixer_interface *mixer = list->mixer;
5188     int err;
5189     @@ -591,6 +592,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
5190     return err;
5191     }
5192     list->kctl = kctl;
5193     + list->is_std_info = is_std_info;
5194     list->next_id_elem = mixer->id_elems[list->id];
5195     mixer->id_elems[list->id] = list;
5196     return 0;
5197     @@ -3213,8 +3215,11 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
5198     unitid = delegate_notify(mixer, unitid, NULL, NULL);
5199    
5200     for_each_mixer_elem(list, mixer, unitid) {
5201     - struct usb_mixer_elem_info *info =
5202     - mixer_elem_list_to_info(list);
5203     + struct usb_mixer_elem_info *info;
5204     +
5205     + if (!list->is_std_info)
5206     + continue;
5207     + info = mixer_elem_list_to_info(list);
5208     /* invalidate cache, so the value is read from the device */
5209     info->cached = 0;
5210     snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
5211     @@ -3294,6 +3299,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
5212    
5213     if (!list->kctl)
5214     continue;
5215     + if (!list->is_std_info)
5216     + continue;
5217    
5218     info = mixer_elem_list_to_info(list);
5219     if (count > 1 && info->control != control)
5220     diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
5221     index 8e0fb7fdf1a0..01b5e5cc2221 100644
5222     --- a/sound/usb/mixer.h
5223     +++ b/sound/usb/mixer.h
5224     @@ -66,6 +66,7 @@ struct usb_mixer_elem_list {
5225     struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */
5226     struct snd_kcontrol *kctl;
5227     unsigned int id;
5228     + bool is_std_info;
5229     usb_mixer_elem_dump_func_t dump;
5230     usb_mixer_elem_resume_func_t resume;
5231     };
5232     @@ -103,8 +104,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid);
5233     int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
5234     int request, int validx, int value_set);
5235    
5236     -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
5237     - struct snd_kcontrol *kctl);
5238     +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
5239     + struct snd_kcontrol *kctl,
5240     + bool is_std_info);
5241     +
5242     +#define snd_usb_mixer_add_control(list, kctl) \
5243     + snd_usb_mixer_add_list(list, kctl, true)
5244    
5245     void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
5246     struct usb_mixer_interface *mixer,
5247     diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
5248     index dc181066c799..d39bf5b648d1 100644
5249     --- a/sound/usb/mixer_quirks.c
5250     +++ b/sound/usb/mixer_quirks.c
5251     @@ -157,7 +157,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
5252     return -ENOMEM;
5253     }
5254     kctl->private_free = snd_usb_mixer_elem_free;
5255     - return snd_usb_mixer_add_control(list, kctl);
5256     + /* don't use snd_usb_mixer_add_control() here, this is a special list element */
5257     + return snd_usb_mixer_add_list(list, kctl, false);
5258     }
5259    
5260     /*
5261     diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
5262     index 6c391e5fad2a..b971d9aaa64a 100644
5263     --- a/sound/usb/pcm.c
5264     +++ b/sound/usb/pcm.c
5265     @@ -349,6 +349,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
5266     ifnum = 0;
5267     goto add_sync_ep_from_ifnum;
5268     case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
5269     + case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
5270     ep = 0x81;
5271     ifnum = 2;
5272     goto add_sync_ep_from_ifnum;
5273     @@ -1778,6 +1779,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
5274     return 0;
5275     case SNDRV_PCM_TRIGGER_STOP:
5276     stop_endpoints(subs, false);
5277     + subs->data_endpoint->retire_data_urb = NULL;
5278     subs->running = 0;
5279     return 0;
5280     case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
5281     diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
5282     index 092720ce2c55..bf5083a20b6d 100644
5283     --- a/sound/usb/quirks.c
5284     +++ b/sound/usb/quirks.c
5285     @@ -1461,6 +1461,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
5286     static bool is_itf_usb_dsd_dac(unsigned int id)
5287     {
5288     switch (id) {
5289     + case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */
5290     case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
5291     case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
5292     case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
5293     @@ -1602,6 +1603,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
5294     chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
5295     (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
5296     usleep_range(1000, 2000);
5297     +
5298     + /*
5299     + * Samsung USBC Headset (AKG) need a tiny delay after each
5300     + * class compliant request. (Model number: AAM625R or AAM627R)
5301     + */
5302     + if (chip->usb_id == USB_ID(0x04e8, 0xa051) &&
5303     + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
5304     + usleep_range(5000, 6000);
5305     }
5306    
5307     /*
5308     diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
5309     index 383bac05ac32..ceaad78e9667 100644
5310     --- a/tools/testing/selftests/net/so_txtime.c
5311     +++ b/tools/testing/selftests/net/so_txtime.c
5312     @@ -15,8 +15,9 @@
5313     #include <inttypes.h>
5314     #include <linux/net_tstamp.h>
5315     #include <linux/errqueue.h>
5316     +#include <linux/if_ether.h>
5317     #include <linux/ipv6.h>
5318     -#include <linux/tcp.h>
5319     +#include <linux/udp.h>
5320     #include <stdbool.h>
5321     #include <stdlib.h>
5322     #include <stdio.h>
5323     @@ -140,8 +141,8 @@ static void do_recv_errqueue_timeout(int fdt)
5324     {
5325     char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
5326     CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
5327     - char data[sizeof(struct ipv6hdr) +
5328     - sizeof(struct tcphdr) + 1];
5329     + char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
5330     + sizeof(struct udphdr) + 1];
5331     struct sock_extended_err *err;
5332     struct msghdr msg = {0};
5333     struct iovec iov = {0};
5334     @@ -159,6 +160,8 @@ static void do_recv_errqueue_timeout(int fdt)
5335     msg.msg_controllen = sizeof(control);
5336    
5337     while (1) {
5338     + const char *reason;
5339     +
5340     ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
5341     if (ret == -1 && errno == EAGAIN)
5342     break;
5343     @@ -176,14 +179,30 @@ static void do_recv_errqueue_timeout(int fdt)
5344     err = (struct sock_extended_err *)CMSG_DATA(cm);
5345     if (err->ee_origin != SO_EE_ORIGIN_TXTIME)
5346     error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin);
5347     - if (err->ee_code != ECANCELED)
5348     - error(1, 0, "errqueue: code 0x%x\n", err->ee_code);
5349     +
5350     + switch (err->ee_errno) {
5351     + case ECANCELED:
5352     + if (err->ee_code != SO_EE_CODE_TXTIME_MISSED)
5353     + error(1, 0, "errqueue: unknown ECANCELED %u\n",
5354     + err->ee_code);
5355     + reason = "missed txtime";
5356     + break;
5357     + case EINVAL:
5358     + if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM)
5359     + error(1, 0, "errqueue: unknown EINVAL %u\n",
5360     + err->ee_code);
5361     + reason = "invalid txtime";
5362     + break;
5363     + default:
5364     + error(1, 0, "errqueue: errno %u code %u\n",
5365     + err->ee_errno, err->ee_code);
5366     + };
5367    
5368     tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info;
5369     tstamp -= (int64_t) glob_tstart;
5370     tstamp /= 1000 * 1000;
5371     - fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n",
5372     - data[ret - 1], tstamp);
5373     + fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n",
5374     + data[ret - 1], tstamp, reason);
5375    
5376     msg.msg_flags = 0;
5377     msg.msg_controllen = sizeof(control);