Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0140-5.4.41-all-fixes.patch

Revision 3521
Thu Jun 25 11:14:53 2020 UTC by niro
File size: 95835 byte(s)
-linux-5.4.41
diff --git a/Makefile b/Makefile
index 6d4fca82529a..a8c772b299aa 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 40
+SUBLEVEL = 41
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c
index ae5aefc44a4d..ffa8d73fe722 100644
--- a/arch/arm/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm/crypto/nhpoly1305-neon-glue.c
@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
 return crypto_nhpoly1305_update(desc, src, srclen);

 do {
- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+ unsigned int n = min_t(unsigned int, srclen, SZ_4K);

 kernel_neon_begin();
 crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c
index 895d3727c1fb..c5405e6a6db7 100644
--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c
@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
 return crypto_nhpoly1305_update(desc, src, srclen);

 do {
- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+ unsigned int n = min_t(unsigned int, srclen, SZ_4K);

 kernel_neon_begin();
 crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index dfd626447482..5271ab366bee 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -202,6 +202,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 }

 memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+
+ if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+ int i;
+
+ for (i = 0; i < 16; i++)
+ *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
+ }
 out:
 return err;
 }
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index bbeb6a5a6ba6..0be3355e3499 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -230,6 +230,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 ptep = (pte_t *)pudp;
 } else if (sz == (CONT_PTE_SIZE)) {
 pmdp = pmd_alloc(mm, pudp, addr);
+ if (!pmdp)
+ return NULL;

 WARN_ON(addr & (sz - 1));
 /*
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index f5d813c1304d..319812923012 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -116,7 +116,8 @@ void __init setup_bootmem(void)
 memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

 set_max_mapnr(PFN_DOWN(mem_size));
- max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_low_pfn = max_pfn;

 #ifdef CONFIG_BLK_DEV_INITRD
 setup_initrd();
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index ed52ffa8d5d4..560310e29e27 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -626,10 +626,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
 * available for the guest are AQIC and TAPQ with the t bit set
 * since we do not set IC.3 (FIII) we currently will only intercept
 * the AQIC function code.
+ * Note: running nested under z/VM can result in intercepts for other
+ * function codes, e.g. PQAP(QCI). We do not support this and bail out.
 */
 reg0 = vcpu->run->s.regs.gprs[0];
 fc = (reg0 >> 24) & 0xff;
- if (WARN_ON_ONCE(fc != 0x03))
+ if (fc != 0x03)
 return -EOPNOTSUPP;

 /* PQAP instruction is allowed for guest kernel only */
diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c
index f7567cbd35b6..80fcb85736e1 100644
--- a/arch/x86/crypto/nhpoly1305-avx2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c
@@ -29,7 +29,7 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc,
 return crypto_nhpoly1305_update(desc, src, srclen);

 do {
- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+ unsigned int n = min_t(unsigned int, srclen, SZ_4K);

 kernel_fpu_begin();
 crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2);
diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c
index a661ede3b5cf..cc6b7c1a2705 100644
--- a/arch/x86/crypto/nhpoly1305-sse2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c
@@ -29,7 +29,7 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc,
 return crypto_nhpoly1305_update(desc, src, srclen);

 do {
- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+ unsigned int n = min_t(unsigned int, srclen, SZ_4K);

 kernel_fpu_begin();
 crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2);
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 515c0ceeb4a3..b3f121478738 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with
 #define SIZEOF_PTREGS 21*8

 .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
- /*
- * Push registers and sanitize registers of values that a
- * speculation attack might otherwise want to exploit. The
- * lower registers are likely clobbered well before they
- * could be put to use in a speculative execution gadget.
- * Interleave XOR with PUSH for better uop scheduling:
- */
 .if \save_ret
 pushq %rsi /* pt_regs->si */
 movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
@@ -114,34 +107,43 @@ For 32-bit we have the following conventions - kernel is built with
 pushq %rsi /* pt_regs->si */
 .endif
 pushq \rdx /* pt_regs->dx */
- xorl %edx, %edx /* nospec dx */
 pushq %rcx /* pt_regs->cx */
- xorl %ecx, %ecx /* nospec cx */
 pushq \rax /* pt_regs->ax */
 pushq %r8 /* pt_regs->r8 */
- xorl %r8d, %r8d /* nospec r8 */
 pushq %r9 /* pt_regs->r9 */
- xorl %r9d, %r9d /* nospec r9 */
 pushq %r10 /* pt_regs->r10 */
- xorl %r10d, %r10d /* nospec r10 */
 pushq %r11 /* pt_regs->r11 */
- xorl %r11d, %r11d /* nospec r11*/
 pushq %rbx /* pt_regs->rbx */
- xorl %ebx, %ebx /* nospec rbx*/
 pushq %rbp /* pt_regs->rbp */
- xorl %ebp, %ebp /* nospec rbp*/
 pushq %r12 /* pt_regs->r12 */
- xorl %r12d, %r12d /* nospec r12*/
 pushq %r13 /* pt_regs->r13 */
- xorl %r13d, %r13d /* nospec r13*/
 pushq %r14 /* pt_regs->r14 */
- xorl %r14d, %r14d /* nospec r14*/
 pushq %r15 /* pt_regs->r15 */
- xorl %r15d, %r15d /* nospec r15*/
 UNWIND_HINT_REGS
+
 .if \save_ret
 pushq %rsi /* return address on top of stack */
 .endif
+
+ /*
+ * Sanitize registers of values that a speculation attack might
+ * otherwise want to exploit. The lower registers are likely clobbered
+ * well before they could be put to use in a speculative execution
+ * gadget.
+ */
+ xorl %edx, %edx /* nospec dx */
+ xorl %ecx, %ecx /* nospec cx */
+ xorl %r8d, %r8d /* nospec r8 */
+ xorl %r9d, %r9d /* nospec r9 */
+ xorl %r10d, %r10d /* nospec r10 */
+ xorl %r11d, %r11d /* nospec r11 */
+ xorl %ebx, %ebx /* nospec rbx */
+ xorl %ebp, %ebp /* nospec rbp */
+ xorl %r12d, %r12d /* nospec r12 */
+ xorl %r13d, %r13d /* nospec r13 */
+ xorl %r14d, %r14d /* nospec r14 */
+ xorl %r15d, %r15d /* nospec r15 */
+
 .endm

 .macro POP_REGS pop_rdi=1 skip_r11rcx=0
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index b7c3ea4cb19d..2ba3d53ac5b1 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -249,7 +249,6 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 */
 syscall_return_via_sysret:
 /* rcx and r11 are already restored (see code above) */
- UNWIND_HINT_EMPTY
 POP_REGS pop_rdi=0 skip_r11rcx=1

 /*
@@ -258,6 +257,7 @@ syscall_return_via_sysret:
 */
 movq %rsp, %rdi
 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+ UNWIND_HINT_EMPTY

 pushq RSP-RDI(%rdi) /* RSP */
 pushq (%rdi) /* RDI */
@@ -512,7 +512,7 @@ END(spurious_entries_start)
 * +----------------------------------------------------+
 */
 ENTRY(interrupt_entry)
- UNWIND_HINT_FUNC
+ UNWIND_HINT_IRET_REGS offset=16
 ASM_CLAC
 cld

@@ -544,9 +544,9 @@ ENTRY(interrupt_entry)
 pushq 5*8(%rdi) /* regs->eflags */
 pushq 4*8(%rdi) /* regs->cs */
 pushq 3*8(%rdi) /* regs->ip */
+ UNWIND_HINT_IRET_REGS
 pushq 2*8(%rdi) /* regs->orig_ax */
 pushq 8(%rdi) /* return address */
- UNWIND_HINT_FUNC

 movq (%rdi), %rdi
 jmp 2f
@@ -637,6 +637,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
 */
 movq %rsp, %rdi
 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+ UNWIND_HINT_EMPTY

 /* Copy the IRET frame to the trampoline stack. */
 pushq 6*8(%rdi) /* SS */
@@ -1739,7 +1740,7 @@ ENTRY(rewind_stack_do_exit)

 movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
 leaq -PTREGS_SIZE(%rax), %rsp
- UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
+ UNWIND_HINT_REGS

 call do_exit
 END(rewind_stack_do_exit)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 380cee9bc175..f5341edbfa16 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1608,8 +1608,8 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
 static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
 {
 /* We can only post Fixed and LowPrio IRQs */
- return (irq->delivery_mode == dest_Fixed ||
- irq->delivery_mode == dest_LowestPrio);
+ return (irq->delivery_mode == APIC_DM_FIXED ||
+ irq->delivery_mode == APIC_DM_LOWEST);
 }

 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index 499578f7e6d7..70fc159ebe69 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -19,7 +19,7 @@ struct unwind_state {
 #if defined(CONFIG_UNWINDER_ORC)
 bool signal, full_regs;
 unsigned long sp, bp, ip;
- struct pt_regs *regs;
+ struct pt_regs *regs, *prev_regs;
 #elif defined(CONFIG_UNWINDER_FRAME_POINTER)
 bool got_irq;
 unsigned long *bp, *orig_sp, ip;
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 332ae6530fa8..fb37221a1532 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -142,9 +142,6 @@ static struct orc_entry *orc_find(unsigned long ip)
 {
 static struct orc_entry *orc;

- if (!orc_init)
- return NULL;
-
 if (ip == 0)
 return &null_orc_entry;

@@ -378,9 +375,38 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
 return true;
 }

+/*
+ * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
+ * value from state->regs.
+ *
+ * Otherwise, if state->regs just points to IRET regs, and the previous frame
+ * had full regs, it's safe to get the value from the previous regs. This can
+ * happen when early/late IRQ entry code gets interrupted by an NMI.
+ */
+static bool get_reg(struct unwind_state *state, unsigned int reg_off,
+ unsigned long *val)
+{
+ unsigned int reg = reg_off/8;
+
+ if (!state->regs)
+ return false;
+
+ if (state->full_regs) {
+ *val = ((unsigned long *)state->regs)[reg];
+ return true;
+ }
+
+ if (state->prev_regs) {
+ *val = ((unsigned long *)state->prev_regs)[reg];
+ return true;
+ }
+
+ return false;
+}
+
 bool unwind_next_frame(struct unwind_state *state)
 {
- unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
+ unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
 enum stack_type prev_type = state->stack_info.type;
 struct orc_entry *orc;
 bool indirect = false;
@@ -442,39 +468,35 @@ bool unwind_next_frame(struct unwind_state *state)
 break;

 case ORC_REG_R10:
- if (!state->regs || !state->full_regs) {
+ if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
 orc_warn("missing regs for base reg R10 at ip %pB\n",
 (void *)state->ip);
 goto err;
 }
- sp = state->regs->r10;
 break;

 case ORC_REG_R13:
- if (!state->regs || !state->full_regs) {
+ if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
 orc_warn("missing regs for base reg R13 at ip %pB\n",
 (void *)state->ip);
 goto err;
 }
- sp = state->regs->r13;
 break;

 case ORC_REG_DI:
- if (!state->regs || !state->full_regs) {
+ if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
 orc_warn("missing regs for base reg DI at ip %pB\n",
 (void *)state->ip);
 goto err;
 }
- sp = state->regs->di;
 break;

 case ORC_REG_DX:
- if (!state->regs || !state->full_regs) {
+ if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
 orc_warn("missing regs for base reg DX at ip %pB\n",
 (void *)state->ip);
 goto err;
 }
- sp = state->regs->dx;
 break;

 default:
@@ -501,6 +523,7 @@ bool unwind_next_frame(struct unwind_state *state)

 state->sp = sp;
 state->regs = NULL;
+ state->prev_regs = NULL;
 state->signal = false;
 break;

@@ -512,6 +535,7 @@ bool unwind_next_frame(struct unwind_state *state)
 }

 state->regs = (struct pt_regs *)sp;
+ state->prev_regs = NULL;
 state->full_regs = true;
 state->signal = true;
 break;
@@ -523,6 +547,8 @@ bool unwind_next_frame(struct unwind_state *state)
 goto err;
 }

+ if (state->full_regs)
+ state->prev_regs = state->regs;
 state->regs = (void *)sp - IRET_FRAME_OFFSET;
 state->full_regs = false;
 state->signal = true;
@@ -531,14 +557,14 @@ bool unwind_next_frame(struct unwind_state *state)
 default:
 orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
 orc->type, (void *)orig_ip);
- break;
+ goto err;
 }

 /* Find BP: */
 switch (orc->bp_reg) {
 case ORC_REG_UNDEFINED:
- if (state->regs && state->full_regs)
- state->bp = state->regs->bp;
+ if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
+ state->bp = tmp;
 break;

 case ORC_REG_PREV_SP:
@@ -582,6 +608,9 @@ EXPORT_SYMBOL_GPL(unwind_next_frame);
 void __unwind_start(struct unwind_state *state, struct task_struct *task,
 struct pt_regs *regs, unsigned long *first_frame)
 {
+ if (!orc_init)
+ goto done;
+
 memset(state, 0, sizeof(*state));
 state->task = task;

@@ -648,7 +677,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 /* Otherwise, skip ahead to the user-specified starting frame: */
 while (!unwind_done(state) &&
 (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
- state->sp <= (unsigned long)first_frame))
+ state->sp < (unsigned long)first_frame))
 unwind_next_frame(state);

 return;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 51ff6b342279..fda2126f9a97 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1861,7 +1861,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 return NULL;

 /* Pin the user virtual address. */
- npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
+ npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
 if (npinned != npages) {
 pr_err("SEV: Failure locking %lu pages.\n", npages);
 goto err;
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index c7012f6c971c..ca4252f81bf8 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -86,6 +86,9 @@ ENTRY(vmx_vmexit)
 /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE

+ /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
+ or $1, %_ASM_AX
+
 pop %_ASM_AX
 .Lvmexit_skip_rsb:
 #endif
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 2dc5dc54e257..d083f7704082 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -469,7 +469,7 @@ struct ioc_gq {
 */
 atomic64_t vtime;
 atomic64_t done_vtime;
- atomic64_t abs_vdebt;
+ u64 abs_vdebt;
 u64 last_vtime;

 /*
@@ -1145,7 +1145,7 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
 struct iocg_wake_ctx ctx = { .iocg = iocg };
 u64 margin_ns = (u64)(ioc->period_us *
 WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC;
- u64 abs_vdebt, vdebt, vshortage, expires, oexpires;
+ u64 vdebt, vshortage, expires, oexpires;
 s64 vbudget;
 u32 hw_inuse;

@@ -1155,18 +1155,15 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
 vbudget = now->vnow - atomic64_read(&iocg->vtime);

 /* pay off debt */
- abs_vdebt = atomic64_read(&iocg->abs_vdebt);
- vdebt = abs_cost_to_cost(abs_vdebt, hw_inuse);
+ vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
 if (vdebt && vbudget > 0) {
 u64 delta = min_t(u64, vbudget, vdebt);
 u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse),
- abs_vdebt);
+ iocg->abs_vdebt);

 atomic64_add(delta, &iocg->vtime);
 atomic64_add(delta, &iocg->done_vtime);
- atomic64_sub(abs_delta, &iocg->abs_vdebt);
- if (WARN_ON_ONCE(atomic64_read(&iocg->abs_vdebt) < 0))
- atomic64_set(&iocg->abs_vdebt, 0);
+ iocg->abs_vdebt -= abs_delta;
 }

 /*
@@ -1222,12 +1219,18 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 u64 expires, oexpires;
 u32 hw_inuse;

+ lockdep_assert_held(&iocg->waitq.lock);
+
 /* debt-adjust vtime */
 current_hweight(iocg, NULL, &hw_inuse);
- vtime += abs_cost_to_cost(atomic64_read(&iocg->abs_vdebt), hw_inuse);
+ vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);

- /* clear or maintain depending on the overage */
- if (time_before_eq64(vtime, now->vnow)) {
+ /*
+ * Clear or maintain depending on the overage. Non-zero vdebt is what
+ * guarantees that @iocg is online and future iocg_kick_delay() will
+ * clear use_delay. Don't leave it on when there's no vdebt.
+ */
+ if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) {
 blkcg_clear_delay(blkg);
 return false;
 }
@@ -1261,9 +1264,12 @@ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
 {
 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer);
 struct ioc_now now;
+ unsigned long flags;

+ spin_lock_irqsave(&iocg->waitq.lock, flags);
 ioc_now(iocg->ioc, &now);
 iocg_kick_delay(iocg, &now, 0);
+ spin_unlock_irqrestore(&iocg->waitq.lock, flags);

 return HRTIMER_NORESTART;
 }
@@ -1371,14 +1377,13 @@ static void ioc_timer_fn(struct timer_list *timer)
 * should have woken up in the last period and expire idle iocgs.
 */
 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
- if (!waitqueue_active(&iocg->waitq) &&
- !atomic64_read(&iocg->abs_vdebt) && !iocg_is_idle(iocg))
+ if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
+ !iocg_is_idle(iocg))
 continue;

 spin_lock(&iocg->waitq.lock);

- if (waitqueue_active(&iocg->waitq) ||
- atomic64_read(&iocg->abs_vdebt)) {
+ if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) {
 /* might be oversleeping vtime / hweight changes, kick */
 iocg_kick_waitq(iocg, &now);
 iocg_kick_delay(iocg, &now, 0);
@@ -1721,28 +1726,49 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 * tests are racy but the races aren't systemic - we only miss once
 * in a while which is fine.
 */
- if (!waitqueue_active(&iocg->waitq) &&
- !atomic64_read(&iocg->abs_vdebt) &&
+ if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
 time_before_eq64(vtime + cost, now.vnow)) {
 iocg_commit_bio(iocg, bio, cost);
 return;
 }

 /*
- * We're over budget. If @bio has to be issued regardless,
- * remember the abs_cost instead of advancing vtime.
- * iocg_kick_waitq() will pay off the debt before waking more IOs.
+ * We activated above but w/o any synchronization. Deactivation is
+ * synchronized with waitq.lock and we won't get deactivated as long
+ * as we're waiting or has debt, so we're good if we're activated
+ * here. In the unlikely case that we aren't, just issue the IO.
+ */
+ spin_lock_irq(&iocg->waitq.lock);
+
+ if (unlikely(list_empty(&iocg->active_list))) {
+ spin_unlock_irq(&iocg->waitq.lock);
+ iocg_commit_bio(iocg, bio, cost);
+ return;
+ }
+
+ /*
+ * We're over budget. If @bio has to be issued regardless, remember
+ * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
+ * off the debt before waking more IOs.
+ *
 * This way, the debt is continuously paid off each period with the
- * actual budget available to the cgroup. If we just wound vtime,
- * we would incorrectly use the current hw_inuse for the entire
- * amount which, for example, can lead to the cgroup staying
- * blocked for a long time even with substantially raised hw_inuse.
+ * actual budget available to the cgroup. If we just wound vtime, we
+ * would incorrectly use the current hw_inuse for the entire amount
+ * which, for example, can lead to the cgroup staying blocked for a
+ * long time even with substantially raised hw_inuse.
+ *
+ * An iocg with vdebt should stay online so that the timer can keep
+ * deducting its vdebt and [de]activate use_delay mechanism
+ * accordingly. We don't want to race against the timer trying to
+ * clear them and leave @iocg inactive w/ dangling use_delay heavily
+ * penalizing the cgroup and its descendants.
 */
 if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
- atomic64_add(abs_cost, &iocg->abs_vdebt);
+ iocg->abs_vdebt += abs_cost;
 if (iocg_kick_delay(iocg, &now, cost))
 blkcg_schedule_throttle(rqos->q,
 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
+ spin_unlock_irq(&iocg->waitq.lock);
 return;
 }

@@ -1759,20 +1785,6 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 * All waiters are on iocg->waitq and the wait states are
 * synchronized using waitq.lock.
 */
- spin_lock_irq(&iocg->waitq.lock);
-
- /*
- * We activated above but w/o any synchronization. Deactivation is
- * synchronized with waitq.lock and we won't get deactivated as
- * long as we're waiting, so we're good if we're activated here.
- * In the unlikely case that we are deactivated, just issue the IO.
- */
- if (unlikely(list_empty(&iocg->active_list))) {
- spin_unlock_irq(&iocg->waitq.lock);
- iocg_commit_bio(iocg, bio, cost);
- return;
- }
-
 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
 wait.wait.private = current;
 wait.bio = bio;
@@ -1804,6 +1816,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
 struct ioc_now now;
 u32 hw_inuse;
 u64 abs_cost, cost;
+ unsigned long flags;

 /* bypass if disabled or for root cgroup */
 if (!ioc->enabled || !iocg->level)
@@ -1823,15 +1836,28 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
 iocg->cursor = bio_end;

 /*
- * Charge if there's enough vtime budget and the existing request
- * has cost assigned. Otherwise, account it as debt. See debt
- * handling in ioc_rqos_throttle() for details.
+ * Charge if there's enough vtime budget and the existing request has
+ * cost assigned.
 */
 if (rq->bio && rq->bio->bi_iocost_cost &&
- time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow))
+ time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
 iocg_commit_bio(iocg, bio, cost);
- else
- atomic64_add(abs_cost, &iocg->abs_vdebt);
+ return;
+ }
+
+ /*
+ * Otherwise, account it as debt if @iocg is online, which it should
+ * be for the vast majority of cases. See debt handling in
+ * ioc_rqos_throttle() for details.
+ */
+ spin_lock_irqsave(&iocg->waitq.lock, flags);
+ if (likely(!list_empty(&iocg->active_list))) {
+ iocg->abs_vdebt += abs_cost;
+ iocg_kick_delay(iocg, &now, cost);
+ } else {
+ iocg_commit_bio(iocg, bio, cost);
+ }
+ spin_unlock_irqrestore(&iocg->waitq.lock, flags);
 }

 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
@@ -2001,7 +2027,6 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
 iocg->ioc = ioc;
 atomic64_set(&iocg->vtime, now.vnow);
 atomic64_set(&iocg->done_vtime, now.vnow);
- atomic64_set(&iocg->abs_vdebt, 0);
 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
 INIT_LIST_HEAD(&iocg->active_list);
 iocg->hweight_active = HWEIGHT_WHOLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 630e8342d162..5e1dce424154 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3070,15 +3070,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 }
 }

- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
-
- amdgpu_amdkfd_suspend(adev);
-
 amdgpu_ras_suspend(adev);

 r = amdgpu_device_ip_suspend_phase1(adev);

+ amdgpu_amdkfd_suspend(adev);
+
 /* evict vram memory */
 amdgpu_bo_evict_vram(adev);

diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index f156f245fdec..9e95f6fd5406 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -824,6 +824,7 @@ static const struct of_device_id ingenic_drm_of_match[] = {
 { .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info },
 { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);

 static struct platform_driver ingenic_drm_driver = {
 .driver = {
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index c7bc9db5b192..17a638f15082 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid)
 struct usbhid_device *usbhid = hid->driver_data;
 int res;

+ mutex_lock(&usbhid->mutex);
+
 set_bit(HID_OPENED, &usbhid->iofl);

- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
- return 0;
+ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
+ res = 0;
+ goto Done;
+ }

 res = usb_autopm_get_interface(usbhid->intf);
 /* the device must be awake to reliably request remote wakeup */
 if (res < 0) {
 clear_bit(HID_OPENED, &usbhid->iofl);
- return -EIO;
+ res = -EIO;
+ goto Done;
 }

 usbhid->intf->needs_remote_wakeup = 1;
@@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid)
 msleep(50);

 clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
+
+ Done:
+ mutex_unlock(&usbhid->mutex);
 return res;
 }

@@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid)
 {
 struct usbhid_device *usbhid = hid->driver_data;

+ mutex_lock(&usbhid->mutex);
+
 /*
 * Make sure we don't restart data acquisition due to
 * a resumption we no longer care about by avoiding racing
@@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid)
 clear_bit(HID_IN_POLLING, &usbhid->iofl);
 spin_unlock_irq(&usbhid->lock);

- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
- return;
+ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
+ hid_cancel_delayed_stuff(usbhid);
+ usb_kill_urb(usbhid->urbin);
+ usbhid->intf->needs_remote_wakeup = 0;
+ }

- hid_cancel_delayed_stuff(usbhid);
- usb_kill_urb(usbhid->urbin);
- usbhid->intf->needs_remote_wakeup = 0;
+ mutex_unlock(&usbhid->mutex);
 }

 /*
@@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid)
 unsigned int n, insize = 0;
 int ret;

+ mutex_lock(&usbhid->mutex);
+
 clear_bit(HID_DISCONNECTED, &usbhid->iofl);

 usbhid->bufsize = HID_MIN_BUFFER_SIZE;
@@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid)
 usbhid_set_leds(hid);
 device_set_wakeup_enable(&dev->dev, 1);
 }
+
+ mutex_unlock(&usbhid->mutex);
 return 0;

 fail:
@@ -1187,6 +1202,7 @@ fail:
 usbhid->urbout = NULL;
 usbhid->urbctrl = NULL;
 hid_free_buffers(dev, hid);
+ mutex_unlock(&usbhid->mutex);
 return ret;
 }

@@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid)
 usbhid->intf->needs_remote_wakeup = 0;
 }

+ mutex_lock(&usbhid->mutex);
+
 clear_bit(HID_STARTED, &usbhid->iofl);
 spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
 set_bit(HID_DISCONNECTED, &usbhid->iofl);
@@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid)
 usbhid->urbout = NULL;

 hid_free_buffers(hid_to_usb_dev(hid), hid);
+
+ mutex_unlock(&usbhid->mutex);
 }

 static int usbhid_power(struct hid_device *hid, int lvl)
@@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
 INIT_WORK(&usbhid->reset_work, hid_reset);
 timer_setup(&usbhid->io_retry, hid_retry_timeout, 0);
 spin_lock_init(&usbhid->lock);
+ mutex_init(&usbhid->mutex);

 ret = hid_add_device(hid);
 if (ret) {
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 8620408bd7af..75fe85d3d27a 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -80,6 +80,7 @@ struct usbhid_device {
 dma_addr_t outbuf_dma; /* Output buffer dma */
 unsigned long last_out; /* record of last output for timeouts */

+ struct mutex mutex; /* start/stop/open/close */
 spinlock_t lock; /* fifo spinlock */
 unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
 struct timer_list io_retry; /* Retry timer */
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5ded94b7bf68..cd71e7133944 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -319,9 +319,11 @@ static void wacom_feature_mapping(struct hid_device *hdev,
 data[0] = field->report->id;
 ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
 data, n, WAC_CMD_RETRIES);
- if (ret == n) {
+ if (ret == n && features->type == HID_GENERIC) {
 ret = hid_report_raw_event(hdev,
 HID_FEATURE_REPORT, data, n, 0);
+ } else if (ret == 2 && features->type != HID_GENERIC) {
+ features->touch_max = data[1];
 } else {
 features->touch_max = 16;
 hid_warn(hdev, "wacom_feature_mapping: "
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index d99a9d407671..1c96809b51c9 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1427,11 +1427,13 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
 {
 struct input_dev *pad_input = wacom->pad_input;
 unsigned char *data = wacom->data;
+ int nbuttons = wacom->features.numbered_buttons;

- int buttons = data[282] | ((data[281] & 0x40) << 2);
+ int expresskeys = data[282];
+ int center = (data[281] & 0x40) >> 6;
 int ring = data[285] & 0x7F;
 bool ringstatus = data[285] & 0x80;
- bool prox = buttons || ringstatus;
+ bool prox = expresskeys || center || ringstatus;

 /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
 ring = 71 - ring;
@@ -1439,7 +1441,8 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
 if (ring > 71)
 ring -= 72;

- wacom_report_numbered_buttons(pad_input, 9, buttons);
+ wacom_report_numbered_buttons(pad_input, nbuttons,
+ expresskeys | (center << (nbuttons - 1)));

 input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);

@@ -2637,9 +2640,25 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
 case HID_DG_TIPSWITCH:
 hid_data->last_slot_field = equivalent_usage;
 break;
+ case HID_DG_CONTACTCOUNT:
+ hid_data->cc_report = report->id;
+ hid_data->cc_index = i;
+ hid_data->cc_value_index = j;
+ break;
 }
 }
 }
+
+ if (hid_data->cc_report != 0 &&
+ hid_data->cc_index >= 0) {
+ struct hid_field *field = report->field[hid_data->cc_index];
+ int value = field->value[hid_data->cc_value_index];
+ if (value)
+ hid_data->num_expected = value;
+ }
+ else {
+ hid_data->num_expected = wacom_wac->features.touch_max;
+ }
 }

 static void wacom_wac_finger_report(struct hid_device *hdev,
@@ -2649,7 +2668,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
 struct wacom_wac *wacom_wac = &wacom->wacom_wac;
 struct input_dev *input = wacom_wac->touch_input;
 unsigned touch_max = wacom_wac->features.touch_max;
- struct hid_data *hid_data = &wacom_wac->hid_data;

 /* If more packets of data are expected, give us a chance to
 * process them rather than immediately syncing a partial
@@ -2663,7 +2681,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,

 input_sync(input);
 wacom_wac->hid_data.num_received = 0;
- hid_data->num_expected = 0;

 /* keep touch state for pen event */
 wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac);
@@ -2738,73 +2755,12 @@ static void wacom_report_events(struct hid_device *hdev,
 }
 }

-static void wacom_set_num_expected(struct hid_device *hdev,
- struct hid_report *report,
- int collection_index,
- struct hid_field *field,
- int field_index)
-{
- struct wacom *wacom = hid_get_drvdata(hdev);
- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct hid_data *hid_data = &wacom_wac->hid_data;
- unsigned int original_collection_level =
- hdev->collection[collection_index].level;
- bool end_collection = false;
- int i;
-
- if (hid_data->num_expected)
- return;
-
- // find the contact count value for this segment
- for (i = field_index; i < report->maxfield && !end_collection; i++) {
- struct hid_field *field = report->field[i];
- unsigned int field_level =
- hdev->collection[field->usage[0].collection_index].level;
- unsigned int j;
-
- if (field_level != original_collection_level)
- continue;
-
- for (j = 0; j < field->maxusage; j++) {
- struct hid_usage *usage = &field->usage[j];
-
- if (usage->collection_index != collection_index) {
- end_collection = true;
- break;
- }
- if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) {
- hid_data->cc_report = report->id;
- hid_data->cc_index = i;
- hid_data->cc_value_index = j;
-
- if (hid_data->cc_report != 0 &&
- hid_data->cc_index >= 0) {
-
- struct hid_field *field =
- report->field[hid_data->cc_index];
- int value =
- field->value[hid_data->cc_value_index];
-
- if (value)
- hid_data->num_expected = value;
- }
- }
- }
- }
-
- if (hid_data->cc_report == 0 || hid_data->cc_index < 0)
- hid_data->num_expected = wacom_wac->features.touch_max;
-}
-
 static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report,
 int collection_index, struct hid_field *field,
 int field_index)
 {
 struct wacom *wacom = hid_get_drvdata(hdev);

- if (WACOM_FINGER_FIELD(field))
- wacom_set_num_expected(hdev, report, collection_index, field,
- field_index);
 wacom_report_events(hdev, report, collection_index, field_index);

 /*
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 6c340a4f4fd2..60e659a24f90 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -454,7 +454,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
 if (!region)
 return -ENOMEM;

- list_add(&vdev->resv_regions, &region->list);
+ list_add(&region->list, &vdev->resv_regions);
 return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 6862594b49ab..edf8452a2574 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6649,7 +6649,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 int rc;

 if (!mem_size)
- return 0;
+ return -EINVAL;

 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
@@ -9755,6 +9755,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 netdev_features_t features)
 {
 struct bnxt *bp = netdev_priv(dev);
+ netdev_features_t vlan_features;

 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
 features &= ~NETIF_F_NTUPLE;
@@ -9771,12 +9772,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 /* Both CTAG and STAG VLAN accelaration on the RX side have to be
 * turned on or off together.
 */
- if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
- (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+ vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX)) {
 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
 NETIF_F_HW_VLAN_STAG_RX);
- else
+ else if (vlan_features)
 features |= NETIF_F_HW_VLAN_CTAG_RX |
 NETIF_F_HW_VLAN_STAG_RX;
 }
@@ -12066,12 +12069,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 }
 }

- if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
- dev_close(netdev);
+ if (result != PCI_ERS_RESULT_RECOVERED) {
+ if (netif_running(netdev))
+ dev_close(netdev);
+ pci_disable_device(pdev);
+ }

 rtnl_unlock();

- return PCI_ERS_RESULT_RECOVERED;
+ return result;
 }

 /**
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index cda7ba31095a..a61a5873ab0a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1058,7 +1058,6 @@ struct bnxt_vf_info {
 #define BNXT_VF_LINK_FORCED 0x4
 #define BNXT_VF_LINK_UP 0x8
 #define BNXT_VF_TRUST 0x10
- u32 func_flags; /* func cfg flags */
 u32 min_tx_rate;
 u32 max_tx_rate;
 void *hwrm_cmd_req_addr;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 689c47ab2155..ba94edec9fb8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -39,7 +39,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
 #define NVM_OFF_DIS_GRE_VER_CHECK 171
 #define NVM_OFF_ENABLE_SRIOV 401

-#define BNXT_MSIX_VEC_MAX 1280
+#define BNXT_MSIX_VEC_MAX 512
 #define BNXT_MSIX_VEC_MIN_MAX 128

 enum bnxt_nvm_dir_type {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index f6f3454d6059..1046b22220a3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 if (old_setting == setting)
 return 0;

- func_flags = vf->func_flags;
 if (setting)
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
 else
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
 /*TODO: if the driver supports VLAN filter on guest VLAN,
 * the spoof check should also include vlan anti-spoofing
 */
@@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 req.flags = cpu_to_le32(func_flags);
 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 if (!rc) {
- vf->func_flags = func_flags;
 if (setting)
 vf->flags |= BNXT_VF_SPOOFCHK;
 else
@@ -230,7 +228,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 memcpy(vf->mac_addr, mac, ETH_ALEN);
 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
 memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -268,7 +265,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,

 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
 req.dflt_vlan = cpu_to_le16(vlan_tag);
 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -307,7 +303,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 return 0;
 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
 req.max_bw = cpu_to_le32(max_tx_rate);
 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
@@ -479,7 +474,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
 vf = &bp->pf.vf[vf_id];
 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);

 if (is_valid_ether_addr(vf->mac_addr)) {
 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 234c13ebbc41..1ec19d9fab00 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 int status;

 status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0)
+ if (status < 0) {
+ pm_runtime_put_noidle(&bp->pdev->dev);
 goto mdio_pm_exit;
+ }

 status = macb_mdio_wait_for_idle(bp);
 if (status < 0)
@@ -367,8 +369,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 int status;

 status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0)
+ if (status < 0) {
+ pm_runtime_put_noidle(&bp->pdev->dev);
 goto mdio_pm_exit;
+ }

 status = macb_mdio_wait_for_idle(bp);
 if (status < 0)
@@ -3691,8 +3695,10 @@ static int at91ether_open(struct net_device *dev)
 int ret;

 ret = pm_runtime_get_sync(&lp->pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&lp->pdev->dev);
 return ret;
+ }

 /* Clear internal statistics */
 ctl = macb_readl(lp, NCR);
@@ -4048,15 +4054,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,

 static int fu540_c000_init(struct platform_device *pdev)
 {
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
- return -ENODEV;
-
- mgmt->reg = ioremap(res->start, resource_size(res));
- if (!mgmt->reg)
- return -ENOMEM;
+ mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mgmt->reg))
+ return PTR_ERR(mgmt->reg);

 return macb_init(pdev);
 }
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 35478cba2aa5..4344a59c823f 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1422,6 +1422,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
 struct mvpp2_ethtool_fs *efs;
 int ret;

+ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+ return -EINVAL;
+
 efs = port->rfs_rules[info->fs.location];
 if (!efs)
 return -EINVAL;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ef44c6979a31..373b8c832850 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4319,6 +4319,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,

 if (!mvpp22_rss_is_supported())
 return -EOPNOTSUPP;
+ if (rss_context >= MVPP22_N_RSS_TABLES)
+ return -EINVAL;

 if (hfunc)
 *hfunc = ETH_RSS_HASH_CRC32;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d44ac666e730..87c2e8de6102 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)

 if (!err || err == -ENOSPC) {
 priv->def_counter[port] = idx;
+ err = 0;
 } else if (err == -ENOENT) {
 err = 0;
 continue;
@@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 if (!err)
 *idx = get_param_l(&out_param);
-
+ if (WARN_ON(err == -ENOSPC))
+ err = -EINVAL;
 return err;
 }
 return __mlx4_counter_alloc(dev, idx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index ea934cd02448..08048a2d7259 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work)
 }

 cmd->ent_arr[ent->idx] = ent;
- set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
 lay = get_inst(cmd, ent->idx);
 ent->lay = lay;
 memset(lay, 0, sizeof(*lay));
@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work)

 if (ent->callback)
 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

 /* Skip sending command to fw if internal error */
 if (pci_channel_offline(dev->pdev) ||
@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work)
 MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ /* no doorbell, no need to keep the entry */
+ free_ent(cmd, ent->idx);
+ if (ent->callback)
+ free_cmd(ent);
 return;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 095ec7b1399d..7c77378accf0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -689,6 +689,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq,
 pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
 }

+static void dr_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
+{
+ pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
+
 static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 struct mlx5_uars_page *uar,
 size_t ncqe)
@@ -750,6 +756,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);

 cq->mcq.event = dr_cq_event;
+ cq->mcq.comp = dr_cq_complete;

 err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
 kvfree(in);
@@ -761,7 +768,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
 cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
 *cq->mcq.set_ci_db = 0;
- *cq->mcq.arm_db = 0;
+
+ /* set no-zero value, in order to avoid the HW to run db-recovery on
+ * CQ that used in polling mode.
+ */
+ *cq->mcq.arm_db = cpu_to_be32(2 << 28);
+
 cq->mcq.vector = 0;
 cq->mcq.irqn = irqn;
 cq->mcq.uar = uar;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index e993159e8e4c..295b27112d36 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -986,8 +986,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
 unsigned int priority,
 struct mlxsw_afk_element_usage *elusage)
 {
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
 struct mlxsw_sp_acl_tcam_vregion *vregion;
- struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ struct list_head *pos;
 int err;

 if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
@@ -1025,7 +1026,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
 }

 mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
- list_add_tail(&vchunk->list, &vregion->vchunk_list);
+
+ /* Position the vchunk inside the list according to priority */
+ list_for_each(pos, &vregion->vchunk_list) {
+ vchunk2 = list_entry(pos, typeof(*vchunk2), list);
+ if (vchunk2->priority > priority)
+ break;
+ }
+ list_add_tail(&vchunk->list, pos);
 mutex_unlock(&vregion->lock);

 return vchunk;
1395     diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
1396     index 9183b3e85d21..354efffac0f9 100644
1397     --- a/drivers/net/ethernet/netronome/nfp/abm/main.c
1398     +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
1399     @@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
1400     if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
1401     nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
1402     eth_hw_addr_random(nn->dp.netdev);
1403     + nfp_nsp_close(nsp);
1404     return;
1405     }
1406    
1407     diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
1408     index 12466a72cefc..aab0cf57c654 100644
1409     --- a/drivers/net/ethernet/toshiba/tc35815.c
1410     +++ b/drivers/net/ethernet/toshiba/tc35815.c
1411     @@ -644,7 +644,7 @@ static int tc_mii_probe(struct net_device *dev)
1412     linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
1413     linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
1414     }
1415     - linkmode_and(phydev->supported, phydev->supported, mask);
1416     + linkmode_andnot(phydev->supported, phydev->supported, mask);
1417     linkmode_copy(phydev->advertising, phydev->supported);
1418    
1419     lp->link = 0;
1420     diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
1421     index a0abc729f0ca..4c86a73db475 100644
1422     --- a/drivers/net/macsec.c
1423     +++ b/drivers/net/macsec.c
1424     @@ -1309,7 +1309,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1425     struct crypto_aead *tfm;
1426     int ret;
1427    
1428     - tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
1429     + /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
1430     + tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
1431    
1432     if (IS_ERR(tfm))
1433     return tfm;
1434     diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
1435     index 8f241b57fcf6..1c75b2627ca8 100644
1436     --- a/drivers/net/phy/dp83640.c
1437     +++ b/drivers/net/phy/dp83640.c
1438     @@ -1119,7 +1119,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
1439     goto out;
1440     }
1441     dp83640_clock_init(clock, bus);
1442     - list_add_tail(&phyter_clocks, &clock->list);
1443     + list_add_tail(&clock->list, &phyter_clocks);
1444     out:
1445     mutex_unlock(&phyter_clocks_lock);
1446    
1447     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1448     index 6c738a271257..4bb8552a00d3 100644
1449     --- a/drivers/net/usb/qmi_wwan.c
1450     +++ b/drivers/net/usb/qmi_wwan.c
1451     @@ -1359,6 +1359,7 @@ static const struct usb_device_id products[] = {
1452     {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
1453     {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
1454     {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
1455     + {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
1456     {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
1457     {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
1458     {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
1459     diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1460     index 31b7dcd791c2..f0e0af3aa714 100644
1461     --- a/drivers/nvme/host/core.c
1462     +++ b/drivers/nvme/host/core.c
1463     @@ -1071,8 +1071,17 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1464    
1465     status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1466     NVME_IDENTIFY_DATA_SIZE);
1467     - if (status)
1468     + if (status) {
1469     + dev_warn(ctrl->device,
1470     + "Identify Descriptors failed (%d)\n", status);
1471     + /*
1472     + * Don't treat an error as fatal, as we potentially already
1473     + * have a NGUID or EUI-64.
1474     + */
1475     + if (status > 0 && !(status & NVME_SC_DNR))
1476     + status = 0;
1477     goto free_data;
1478     + }
1479    
1480     for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
1481     struct nvme_ns_id_desc *cur = data + pos;
1482     @@ -1730,26 +1739,15 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1483     static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
1484     struct nvme_id_ns *id, struct nvme_ns_ids *ids)
1485     {
1486     - int ret = 0;
1487     -
1488     memset(ids, 0, sizeof(*ids));
1489    
1490     if (ctrl->vs >= NVME_VS(1, 1, 0))
1491     memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
1492     if (ctrl->vs >= NVME_VS(1, 2, 0))
1493     memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
1494     - if (ctrl->vs >= NVME_VS(1, 3, 0)) {
1495     - /* Don't treat error as fatal we potentially
1496     - * already have a NGUID or EUI-64
1497     - */
1498     - ret = nvme_identify_ns_descs(ctrl, nsid, ids);
1499     - if (ret)
1500     - dev_warn(ctrl->device,
1501     - "Identify Descriptors failed (%d)\n", ret);
1502     - if (ret > 0)
1503     - ret = 0;
1504     - }
1505     - return ret;
1506     + if (ctrl->vs >= NVME_VS(1, 3, 0))
1507     + return nvme_identify_ns_descs(ctrl, nsid, ids);
1508     + return 0;
1509     }
1510    
1511     static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
1512     diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
1513     index 13179f063a61..6f9c0d18d9ce 100644
1514     --- a/drivers/staging/gasket/gasket_core.c
1515     +++ b/drivers/staging/gasket/gasket_core.c
1516     @@ -926,6 +926,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
1517     gasket_get_bar_index(gasket_dev,
1518     (vma->vm_pgoff << PAGE_SHIFT) +
1519     driver_desc->legacy_mmap_address_offset);
1520     +
1521     + if (bar_index < 0)
1522     + return DO_MAP_REGION_INVALID;
1523     +
1524     phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
1525     while (mapped_bytes < map_length) {
1526     /*
1527     diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
1528     index fe098cf14e6a..3cb9aacfe0b2 100644
1529     --- a/drivers/tty/serial/xilinx_uartps.c
1530     +++ b/drivers/tty/serial/xilinx_uartps.c
1531     @@ -1445,6 +1445,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
1532     cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
1533     #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
1534     cdns_uart_uart_driver.cons = &cdns_uart_console;
1535     + cdns_uart_console.index = id;
1536     #endif
1537    
1538     rc = uart_register_driver(&cdns_uart_uart_driver);
1539     diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1540     index 8b3ecef50394..fd0361d72738 100644
1541     --- a/drivers/tty/vt/vt.c
1542     +++ b/drivers/tty/vt/vt.c
1543     @@ -365,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
1544     return uniscr;
1545     }
1546    
1547     +static void vc_uniscr_free(struct uni_screen *uniscr)
1548     +{
1549     + vfree(uniscr);
1550     +}
1551     +
1552     static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
1553     {
1554     - vfree(vc->vc_uni_screen);
1555     + vc_uniscr_free(vc->vc_uni_screen);
1556     vc->vc_uni_screen = new_uniscr;
1557     }
1558    
1559     @@ -1230,7 +1235,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
1560     err = resize_screen(vc, new_cols, new_rows, user);
1561     if (err) {
1562     kfree(newscreen);
1563     - kfree(new_uniscr);
1564     + vc_uniscr_free(new_uniscr);
1565     return err;
1566     }
1567    
1568     diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
1569     index af648ba6544d..46105457e1ca 100644
1570     --- a/drivers/usb/chipidea/ci_hdrc_msm.c
1571     +++ b/drivers/usb/chipidea/ci_hdrc_msm.c
1572     @@ -114,7 +114,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
1573     hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
1574     HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0);
1575    
1576     - if (!IS_ERR(ci->platdata->vbus_extcon.edev)) {
1577     + if (!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) {
1578     hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
1579     HS_PHY_SESS_VLD_CTRL_EN,
1580     HS_PHY_SESS_VLD_CTRL_EN);
1581     diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
1582     index 633550ec3025..f29c3a936a08 100644
1583     --- a/drivers/usb/serial/garmin_gps.c
1584     +++ b/drivers/usb/serial/garmin_gps.c
1585     @@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
1586     send it directly to the tty port */
1587     if (garmin_data_p->flags & FLAGS_QUEUING) {
1588     pkt_add(garmin_data_p, data, data_length);
1589     - } else if (bulk_data ||
1590     - getLayerId(data) == GARMIN_LAYERID_APPL) {
1591     + } else if (bulk_data || (data_length >= sizeof(u32) &&
1592     + getLayerId(data) == GARMIN_LAYERID_APPL)) {
1593    
1594     spin_lock_irqsave(&garmin_data_p->lock, flags);
1595     garmin_data_p->flags |= APP_RESP_SEEN;
1596     diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1597     index 613f91add03d..ce0401d3137f 100644
1598     --- a/drivers/usb/serial/qcserial.c
1599     +++ b/drivers/usb/serial/qcserial.c
1600     @@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = {
1601     {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
1602     {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
1603     {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
1604     + {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
1605     {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
1606     {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
1607     {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
1608     diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
1609     index 1b23741036ee..37157ed9a881 100644
1610     --- a/drivers/usb/storage/unusual_uas.h
1611     +++ b/drivers/usb/storage/unusual_uas.h
1612     @@ -28,6 +28,13 @@
1613     * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
1614     */
1615    
1616     +/* Reported-by: Julian Groß <julian.g@posteo.de> */
1617     +UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
1618     + "LaCie",
1619     + "2Big Quadra USB3",
1620     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1621     + US_FL_NO_REPORT_OPCODES),
1622     +
1623     /*
1624     * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
1625     * commands in UAS mode. Observed with the 1.28 firmware; are there others?
1626     diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
1627     index 8c1f04c3a684..b79fe6549df6 100644
1628     --- a/fs/ceph/mds_client.c
1629     +++ b/fs/ceph/mds_client.c
1630     @@ -3072,8 +3072,7 @@ static void handle_session(struct ceph_mds_session *session,
1631     void *end = p + msg->front.iov_len;
1632     struct ceph_mds_session_head *h;
1633     u32 op;
1634     - u64 seq;
1635     - unsigned long features = 0;
1636     + u64 seq, features = 0;
1637     int wake = 0;
1638     bool blacklisted = false;
1639    
1640     @@ -3092,9 +3091,8 @@ static void handle_session(struct ceph_mds_session *session,
1641     goto bad;
1642     /* version >= 3, feature bits */
1643     ceph_decode_32_safe(&p, end, len, bad);
1644     - ceph_decode_need(&p, end, len, bad);
1645     - memcpy(&features, p, min_t(size_t, len, sizeof(features)));
1646     - p += len;
1647     + ceph_decode_64_safe(&p, end, features, bad);
1648     + p += len - sizeof(features);
1649     }
1650    
1651     mutex_lock(&mdsc->mutex);
1652     diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
1653     index de56dee60540..19507e2fdb57 100644
1654     --- a/fs/ceph/quota.c
1655     +++ b/fs/ceph/quota.c
1656     @@ -159,8 +159,8 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
1657     }
1658    
1659     if (IS_ERR(in)) {
1660     - pr_warn("Can't lookup inode %llx (err: %ld)\n",
1661     - realm->ino, PTR_ERR(in));
1662     + dout("Can't lookup inode %llx (err: %ld)\n",
1663     + realm->ino, PTR_ERR(in));
1664     qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
1665     } else {
1666     qri->timeout = 0;
1667     diff --git a/fs/coredump.c b/fs/coredump.c
1668     index d25bad2ed061..5c0375e7440f 100644
1669     --- a/fs/coredump.c
1670     +++ b/fs/coredump.c
1671     @@ -788,6 +788,14 @@ void do_coredump(const kernel_siginfo_t *siginfo)
1672     if (displaced)
1673     put_files_struct(displaced);
1674     if (!dump_interrupted()) {
1675     + /*
1676     + * If umh is disabled with CONFIG_STATIC_USERMODEHELPER_PATH="",
1677     + * cprm.file will be NULL here.
1678     + */
1679     + if (!cprm.file) {
1680     + pr_info("Core dump to |%s disabled\n", cn.corename);
1681     + goto close_fail;
1682     + }
1683     file_start_write(cprm.file);
1684     core_dumped = binfmt->core_dump(&cprm);
1685     file_end_write(cprm.file);
1686     diff --git a/fs/eventpoll.c b/fs/eventpoll.c
1687     index 33cff49769cc..6307c1d883e0 100644
1688     --- a/fs/eventpoll.c
1689     +++ b/fs/eventpoll.c
1690     @@ -1176,6 +1176,10 @@ static inline bool chain_epi_lockless(struct epitem *epi)
1691     {
1692     struct eventpoll *ep = epi->ep;
1693    
1694     + /* Fast preliminary check */
1695     + if (epi->next != EP_UNACTIVE_PTR)
1696     + return false;
1697     +
1698     /* Check that the same epi has not been just chained from another CPU */
1699     if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
1700     return false;
1701     @@ -1242,16 +1246,12 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
1702     * chained in ep->ovflist and requeued later on.
1703     */
1704     if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
1705     - if (epi->next == EP_UNACTIVE_PTR &&
1706     - chain_epi_lockless(epi))
1707     + if (chain_epi_lockless(epi))
1708     + ep_pm_stay_awake_rcu(epi);
1709     + } else if (!ep_is_linked(epi)) {
1710     + /* In the usual case, add event to ready list. */
1711     + if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
1712     ep_pm_stay_awake_rcu(epi);
1713     - goto out_unlock;
1714     - }
1715     -
1716     - /* If this file is already in the ready list we exit soon */
1717     - if (!ep_is_linked(epi) &&
1718     - list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) {
1719     - ep_pm_stay_awake_rcu(epi);
1720     }
1721    
1722     /*
1723     @@ -1827,7 +1827,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1724     {
1725     int res = 0, eavail, timed_out = 0;
1726     u64 slack = 0;
1727     - bool waiter = false;
1728     wait_queue_entry_t wait;
1729     ktime_t expires, *to = NULL;
1730    
1731     @@ -1872,21 +1871,23 @@ fetch_events:
1732     */
1733     ep_reset_busy_poll_napi_id(ep);
1734    
1735     - /*
1736     - * We don't have any available event to return to the caller. We need
1737     - * to sleep here, and we will be woken by ep_poll_callback() when events
1738     - * become available.
1739     - */
1740     - if (!waiter) {
1741     - waiter = true;
1742     - init_waitqueue_entry(&wait, current);
1743     -
1744     + do {
1745     + /*
1746     + * Internally init_wait() uses autoremove_wake_function(),
1747     + * so the wait entry is removed from the wait queue on each
1748     + * wakeup. Why is that important? With several waiters, each
1749     + * new wakeup hits the next waiter, giving it the chance to
1750     + * harvest a new event. Otherwise a wakeup can be lost. This
1751     + * is also good performance-wise, because on the normal
1752     + * wakeup path there is no need to call __remove_wait_queue()
1753     + * explicitly, so ep->lock is not taken, which would stall
1754     + * event delivery.
1755     + */
1756     + init_wait(&wait);
1757     write_lock_irq(&ep->lock);
1758     __add_wait_queue_exclusive(&ep->wq, &wait);
1759     write_unlock_irq(&ep->lock);
1760     - }
1761    
1762     - for (;;) {
1763     /*
1764     * We don't want to sleep if the ep_poll_callback() sends us
1765     * a wakeup in between. That's why we set the task state
1766     @@ -1916,10 +1917,20 @@ fetch_events:
1767     timed_out = 1;
1768     break;
1769     }
1770     - }
1771     +
1772     + /* We were woken up, thus go and try to harvest some events */
1773     + eavail = 1;
1774     +
1775     + } while (0);
1776    
1777     __set_current_state(TASK_RUNNING);
1778    
1779     + if (!list_empty_careful(&wait.entry)) {
1780     + write_lock_irq(&ep->lock);
1781     + __remove_wait_queue(&ep->wq, &wait);
1782     + write_unlock_irq(&ep->lock);
1783     + }
1784     +
1785     send_events:
1786     /*
1787     * Try to transfer events to user space. In case we get 0 events and
1788     @@ -1930,12 +1941,6 @@ send_events:
1789     !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
1790     goto fetch_events;
1791    
1792     - if (waiter) {
1793     - write_lock_irq(&ep->lock);
1794     - __remove_wait_queue(&ep->wq, &wait);
1795     - write_unlock_irq(&ep->lock);
1796     - }
1797     -
1798     return res;
1799     }
1800    
1801     diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
1802     index 5778d1347b35..f5d30573f4a9 100644
1803     --- a/fs/notify/fanotify/fanotify.c
1804     +++ b/fs/notify/fanotify/fanotify.c
1805     @@ -26,7 +26,7 @@ static bool should_merge(struct fsnotify_event *old_fsn,
1806     old = FANOTIFY_E(old_fsn);
1807     new = FANOTIFY_E(new_fsn);
1808    
1809     - if (old_fsn->inode != new_fsn->inode || old->pid != new->pid ||
1810     + if (old_fsn->objectid != new_fsn->objectid || old->pid != new->pid ||
1811     old->fh_type != new->fh_type || old->fh_len != new->fh_len)
1812     return false;
1813    
1814     @@ -314,7 +314,12 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
1815     if (!event)
1816     goto out;
1817     init: __maybe_unused
1818     - fsnotify_init_event(&event->fse, inode);
1819     + /*
1820     + * Use the victim inode instead of the watching inode as the id for the
1821     + * event queue, so that an event reported on a parent is merged with an
1822     + * event reported on a child when both directory and child watches exist.
1823     + */
1824     + fsnotify_init_event(&event->fse, (unsigned long)id);
1825     event->mask = mask;
1826     if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
1827     event->pid = get_pid(task_pid(current));
1828     diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
1829     index d510223d302c..589dee962993 100644
1830     --- a/fs/notify/inotify/inotify_fsnotify.c
1831     +++ b/fs/notify/inotify/inotify_fsnotify.c
1832     @@ -39,7 +39,7 @@ static bool event_compare(struct fsnotify_event *old_fsn,
1833     if (old->mask & FS_IN_IGNORED)
1834     return false;
1835     if ((old->mask == new->mask) &&
1836     - (old_fsn->inode == new_fsn->inode) &&
1837     + (old_fsn->objectid == new_fsn->objectid) &&
1838     (old->name_len == new->name_len) &&
1839     (!old->name_len || !strcmp(old->name, new->name)))
1840     return true;
1841     @@ -118,7 +118,7 @@ int inotify_handle_event(struct fsnotify_group *group,
1842     mask &= ~IN_ISDIR;
1843    
1844     fsn_event = &event->fse;
1845     - fsnotify_init_event(fsn_event, inode);
1846     + fsnotify_init_event(fsn_event, (unsigned long)inode);
1847     event->mask = mask;
1848     event->wd = i_mark->wd;
1849     event->sync_cookie = cookie;
1850     diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
1851     index 107537a543fd..81ffc8629fc4 100644
1852     --- a/fs/notify/inotify/inotify_user.c
1853     +++ b/fs/notify/inotify/inotify_user.c
1854     @@ -635,7 +635,7 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
1855     return ERR_PTR(-ENOMEM);
1856     }
1857     group->overflow_event = &oevent->fse;
1858     - fsnotify_init_event(group->overflow_event, NULL);
1859     + fsnotify_init_event(group->overflow_event, 0);
1860     oevent->mask = FS_Q_OVERFLOW;
1861     oevent->wd = -1;
1862     oevent->sync_cookie = 0;
1863     diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
1864     index 4fc87dee005a..2849bdbb3acb 100644
1865     --- a/include/linux/backing-dev-defs.h
1866     +++ b/include/linux/backing-dev-defs.h
1867     @@ -220,6 +220,7 @@ struct backing_dev_info {
1868     wait_queue_head_t wb_waitq;
1869    
1870     struct device *dev;
1871     + char dev_name[64];
1872     struct device *owner;
1873    
1874     struct timer_list laptop_mode_wb_timer;
1875     diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
1876     index f88197c1ffc2..c9ad5c3b7b4b 100644
1877     --- a/include/linux/backing-dev.h
1878     +++ b/include/linux/backing-dev.h
1879     @@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
1880     (1 << WB_async_congested));
1881     }
1882    
1883     -extern const char *bdi_unknown_name;
1884     -
1885     -static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
1886     -{
1887     - if (!bdi || !bdi->dev)
1888     - return bdi_unknown_name;
1889     - return dev_name(bdi->dev);
1890     -}
1891     +const char *bdi_dev_name(struct backing_dev_info *bdi);
1892    
1893     #endif /* _LINUX_BACKING_DEV_H */
1894     diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
1895     index 1915bdba2fad..64cfb5446f4d 100644
1896     --- a/include/linux/fsnotify_backend.h
1897     +++ b/include/linux/fsnotify_backend.h
1898     @@ -133,8 +133,7 @@ struct fsnotify_ops {
1899     */
1900     struct fsnotify_event {
1901     struct list_head list;
1902     - /* inode may ONLY be dereferenced during handle_event(). */
1903     - struct inode *inode; /* either the inode the event happened to or its parent */
1904     + unsigned long objectid; /* identifier for queue merges */
1905     };
1906    
1907     /*
1908     @@ -500,10 +499,10 @@ extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
1909     extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
1910    
1911     static inline void fsnotify_init_event(struct fsnotify_event *event,
1912     - struct inode *inode)
1913     + unsigned long objectid)
1914     {
1915     INIT_LIST_HEAD(&event->list);
1916     - event->inode = inode;
1917     + event->objectid = objectid;
1918     }
1919    
1920     #else
1921     diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
1922     index 0d1fe9297ac6..6f6ade63b04c 100644
1923     --- a/include/linux/virtio_net.h
1924     +++ b/include/linux/virtio_net.h
1925     @@ -3,6 +3,8 @@
1926     #define _LINUX_VIRTIO_NET_H
1927    
1928     #include <linux/if_vlan.h>
1929     +#include <uapi/linux/tcp.h>
1930     +#include <uapi/linux/udp.h>
1931     #include <uapi/linux/virtio_net.h>
1932    
1933     static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
1934     @@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
1935     bool little_endian)
1936     {
1937     unsigned int gso_type = 0;
1938     + unsigned int thlen = 0;
1939     + unsigned int ip_proto;
1940    
1941     if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1942     switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1943     case VIRTIO_NET_HDR_GSO_TCPV4:
1944     gso_type = SKB_GSO_TCPV4;
1945     + ip_proto = IPPROTO_TCP;
1946     + thlen = sizeof(struct tcphdr);
1947     break;
1948     case VIRTIO_NET_HDR_GSO_TCPV6:
1949     gso_type = SKB_GSO_TCPV6;
1950     + ip_proto = IPPROTO_TCP;
1951     + thlen = sizeof(struct tcphdr);
1952     break;
1953     case VIRTIO_NET_HDR_GSO_UDP:
1954     gso_type = SKB_GSO_UDP;
1955     + ip_proto = IPPROTO_UDP;
1956     + thlen = sizeof(struct udphdr);
1957     break;
1958     default:
1959     return -EINVAL;
1960     @@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
1961    
1962     if (!skb_partial_csum_set(skb, start, off))
1963     return -EINVAL;
1964     +
1965     + if (skb_transport_offset(skb) + thlen > skb_headlen(skb))
1966     + return -EINVAL;
1967     } else {
1968     /* gso packets without NEEDS_CSUM do not set transport_offset.
1969     * probe and drop if does not match one of the above types.
1970     */
1971     if (gso_type && skb->network_header) {
1972     + struct flow_keys_basic keys;
1973     +
1974     if (!skb->protocol)
1975     virtio_net_hdr_set_proto(skb, hdr);
1976     retry:
1977     - skb_probe_transport_header(skb);
1978     - if (!skb_transport_header_was_set(skb)) {
1979     + if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
1980     + NULL, 0, 0, 0,
1981     + 0)) {
1982     /* UFO does not specify ipv4 or 6: try both */
1983     if (gso_type & SKB_GSO_UDP &&
1984     skb->protocol == htons(ETH_P_IP)) {
1985     @@ -75,6 +91,12 @@ retry:
1986     }
1987     return -EINVAL;
1988     }
1989     +
1990     + if (keys.control.thoff + thlen > skb_headlen(skb) ||
1991     + keys.basic.ip_proto != ip_proto)
1992     + return -EINVAL;
1993     +
1994     + skb_set_transport_header(skb, keys.control.thoff);
1995     }
1996     }
1997    
1998     diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
1999     index c8e2bebd8d93..0f0d1efe06dd 100644
2000     --- a/include/net/inet_ecn.h
2001     +++ b/include/net/inet_ecn.h
2002     @@ -99,6 +99,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph)
2003     return 1;
2004     }
2005    
2006     +static inline int IP_ECN_set_ect1(struct iphdr *iph)
2007     +{
2008     + u32 check = (__force u32)iph->check;
2009     +
2010     + if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
2011     + return 0;
2012     +
2013     + check += (__force u16)htons(0x100);
2014     +
2015     + iph->check = (__force __sum16)(check + (check>=0xFFFF));
2016     + iph->tos ^= INET_ECN_MASK;
2017     + return 1;
2018     +}
2019     +
2020     static inline void IP_ECN_clear(struct iphdr *iph)
2021     {
2022     iph->tos &= ~INET_ECN_MASK;
2023     @@ -134,6 +148,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
2024     return 1;
2025     }
2026    
2027     +static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
2028     +{
2029     + __be32 from, to;
2030     +
2031     + if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0)
2032     + return 0;
2033     +
2034     + from = *(__be32 *)iph;
2035     + to = from ^ htonl(INET_ECN_MASK << 20);
2036     + *(__be32 *)iph = to;
2037     + if (skb->ip_summed == CHECKSUM_COMPLETE)
2038     + skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
2039     + (__force __wsum)to);
2040     + return 1;
2041     +}
2042     +
2043     static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
2044     {
2045     dscp &= ~INET_ECN_MASK;
2046     @@ -159,6 +189,25 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
2047     return 0;
2048     }
2049    
2050     +static inline int INET_ECN_set_ect1(struct sk_buff *skb)
2051     +{
2052     + switch (skb->protocol) {
2053     + case cpu_to_be16(ETH_P_IP):
2054     + if (skb_network_header(skb) + sizeof(struct iphdr) <=
2055     + skb_tail_pointer(skb))
2056     + return IP_ECN_set_ect1(ip_hdr(skb));
2057     + break;
2058     +
2059     + case cpu_to_be16(ETH_P_IPV6):
2060     + if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
2061     + skb_tail_pointer(skb))
2062     + return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
2063     + break;
2064     + }
2065     +
2066     + return 0;
2067     +}
2068     +
2069     /*
2070     * RFC 6040 4.2
2071     * To decapsulate the inner header at the tunnel egress, a compliant
2072     @@ -208,8 +257,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb,
2073     int rc;
2074    
2075     rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
2076     - if (!rc && set_ce)
2077     - INET_ECN_set_ce(skb);
2078     + if (!rc) {
2079     + if (set_ce)
2080     + INET_ECN_set_ce(skb);
2081     + else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1)
2082     + INET_ECN_set_ect1(skb);
2083     + }
2084    
2085     return rc;
2086     }
2087     diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
2088     index 4b5656c71abc..bd0f1595bdc7 100644
2089     --- a/include/net/ip6_fib.h
2090     +++ b/include/net/ip6_fib.h
2091     @@ -177,6 +177,7 @@ struct fib6_info {
2092     struct rt6_info {
2093     struct dst_entry dst;
2094     struct fib6_info __rcu *from;
2095     + int sernum;
2096    
2097     struct rt6key rt6i_dst;
2098     struct rt6key rt6i_src;
2099     @@ -260,6 +261,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
2100     struct fib6_info *from;
2101     u32 cookie = 0;
2102    
2103     + if (rt->sernum)
2104     + return rt->sernum;
2105     +
2106     rcu_read_lock();
2107    
2108     from = rcu_dereference(rt->from);
2109     diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
2110     index c7e15a213ef2..0fca98a3d2d3 100644
2111     --- a/include/net/net_namespace.h
2112     +++ b/include/net/net_namespace.h
2113     @@ -428,6 +428,13 @@ static inline int rt_genid_ipv4(struct net *net)
2114     return atomic_read(&net->ipv4.rt_genid);
2115     }
2116    
2117     +#if IS_ENABLED(CONFIG_IPV6)
2118     +static inline int rt_genid_ipv6(const struct net *net)
2119     +{
2120     + return atomic_read(&net->ipv6.fib6_sernum);
2121     +}
2122     +#endif
2123     +
2124     static inline void rt_genid_bump_ipv4(struct net *net)
2125     {
2126     atomic_inc(&net->ipv4.rt_genid);
2127     diff --git a/ipc/mqueue.c b/ipc/mqueue.c
2128     index 3d920ff15c80..2ea0c08188e6 100644
2129     --- a/ipc/mqueue.c
2130     +++ b/ipc/mqueue.c
2131     @@ -82,6 +82,7 @@ struct mqueue_inode_info {
2132    
2133     struct sigevent notify;
2134     struct pid *notify_owner;
2135     + u32 notify_self_exec_id;
2136     struct user_namespace *notify_user_ns;
2137     struct user_struct *user; /* user who created, for accounting */
2138     struct sock *notify_sock;
2139     @@ -709,28 +710,44 @@ static void __do_notify(struct mqueue_inode_info *info)
2140     * synchronously. */
2141     if (info->notify_owner &&
2142     info->attr.mq_curmsgs == 1) {
2143     - struct kernel_siginfo sig_i;
2144     switch (info->notify.sigev_notify) {
2145     case SIGEV_NONE:
2146     break;
2147     - case SIGEV_SIGNAL:
2148     - /* sends signal */
2149     + case SIGEV_SIGNAL: {
2150     + struct kernel_siginfo sig_i;
2151     + struct task_struct *task;
2152     +
2153     + /* do_mq_notify() accepts sigev_signo == 0, why?? */
2154     + if (!info->notify.sigev_signo)
2155     + break;
2156    
2157     clear_siginfo(&sig_i);
2158     sig_i.si_signo = info->notify.sigev_signo;
2159     sig_i.si_errno = 0;
2160     sig_i.si_code = SI_MESGQ;
2161     sig_i.si_value = info->notify.sigev_value;
2162     - /* map current pid/uid into info->owner's namespaces */
2163     rcu_read_lock();
2164     + /* map current pid/uid into info->owner's namespaces */
2165     sig_i.si_pid = task_tgid_nr_ns(current,
2166     ns_of_pid(info->notify_owner));
2167     - sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
2168     + sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
2169     + current_uid());
2170     + /*
2171     + * We can't use kill_pid_info(): this signal should
2172     + * bypass check_kill_permission(). It comes from the
2173     + * kernel, but si_fromuser() can't know that.
2174     + * We do check the self_exec_id, to avoid sending
2175     + * signals to programs that don't expect them.
2176     + */
2177     + task = pid_task(info->notify_owner, PIDTYPE_TGID);
2178     + if (task && task->self_exec_id ==
2179     + info->notify_self_exec_id) {
2180     + do_send_sig_info(info->notify.sigev_signo,
2181     + &sig_i, task, PIDTYPE_TGID);
2182     + }
2183     rcu_read_unlock();
2184     -
2185     - kill_pid_info(info->notify.sigev_signo,
2186     - &sig_i, info->notify_owner);
2187     break;
2188     + }
2189     case SIGEV_THREAD:
2190     set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
2191     netlink_sendskb(info->notify_sock, info->notify_cookie);
2192     @@ -1315,6 +1332,7 @@ retry:
2193     info->notify.sigev_signo = notification->sigev_signo;
2194     info->notify.sigev_value = notification->sigev_value;
2195     info->notify.sigev_notify = SIGEV_SIGNAL;
2196     + info->notify_self_exec_id = current->self_exec_id;
2197     break;
2198     }
2199    
2200     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2201     index c6ccaf6c62f7..721947b9962d 100644
2202     --- a/kernel/trace/trace.c
2203     +++ b/kernel/trace/trace.c
2204     @@ -8318,6 +8318,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
2205     */
2206     allocate_snapshot = false;
2207     #endif
2208     +
2209     + /*
2210     + * Because of some magic with the way alloc_percpu() works on
2211     + * x86_64, we need to synchronize the pgd of all the tables,
2212     + * otherwise the trace events that happen in x86_64 page fault
2213     + * handlers can't cope with accessing the chance that a
2214     + * alloc_percpu()'d memory might be touched in the page fault trace
2215     + * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
2216     + * calls in tracing, because something might get triggered within a
2217     + * page fault trace event!
2218     + */
2219     + vmalloc_sync_mappings();
2220     +
2221     return 0;
2222     }
2223    
2224     diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
2225     index 2f0f7fcee73e..fba4b48451f6 100644
2226     --- a/kernel/trace/trace_kprobe.c
2227     +++ b/kernel/trace/trace_kprobe.c
2228     @@ -454,7 +454,7 @@ static bool __within_notrace_func(unsigned long addr)
2229    
2230     static bool within_notrace_func(struct trace_kprobe *tk)
2231     {
2232     - unsigned long addr = addr = trace_kprobe_address(tk);
2233     + unsigned long addr = trace_kprobe_address(tk);
2234     char symname[KSYM_NAME_LEN], *p;
2235    
2236     if (!__within_notrace_func(addr))
2237     diff --git a/kernel/umh.c b/kernel/umh.c
2238     index 7f255b5a8845..11bf5eea474c 100644
2239     --- a/kernel/umh.c
2240     +++ b/kernel/umh.c
2241     @@ -544,6 +544,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob);
2242     * Runs a user-space application. The application is started
2243     * asynchronously if wait is not set, and runs as a child of system workqueues.
2244     * (ie. it runs with full root capabilities and optimized affinity).
2245     + *
2246     + * Note: a successful return value does not guarantee that the helper
2247     + * was called at all. You can't rely on sub_info->{init,cleanup} being
2248     + * called even for UMH_WAIT_* wait modes, as STATIC_USERMODEHELPER_PATH=""
2249     + * turns all helpers into a successful no-op.
2250     */
2251     int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
2252     {
2253     diff --git a/mm/backing-dev.c b/mm/backing-dev.c
2254     index 62f05f605fb5..3f2480e4c5af 100644
2255     --- a/mm/backing-dev.c
2256     +++ b/mm/backing-dev.c
2257     @@ -21,7 +21,7 @@ struct backing_dev_info noop_backing_dev_info = {
2258     EXPORT_SYMBOL_GPL(noop_backing_dev_info);
2259    
2260     static struct class *bdi_class;
2261     -const char *bdi_unknown_name = "(unknown)";
2262     +static const char *bdi_unknown_name = "(unknown)";
2263    
2264     /*
2265     * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
2266     @@ -938,7 +938,8 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
2267     if (bdi->dev) /* The driver needs to use separate queues per device */
2268     return 0;
2269    
2270     - dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
2271     + vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
2272     + dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
2273     if (IS_ERR(dev))
2274     return PTR_ERR(dev);
2275    
2276     @@ -1043,6 +1044,14 @@ void bdi_put(struct backing_dev_info *bdi)
2277     }
2278     EXPORT_SYMBOL(bdi_put);
2279    
2280     +const char *bdi_dev_name(struct backing_dev_info *bdi)
2281     +{
2282     + if (!bdi || !bdi->dev)
2283     + return bdi_unknown_name;
2284     + return bdi->dev_name;
2285     +}
2286     +EXPORT_SYMBOL_GPL(bdi_dev_name);
2287     +
2288     static wait_queue_head_t congestion_wqh[2] = {
2289     __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
2290     __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
2291     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2292     index 8159000781be..0d6f3ea86738 100644
2293     --- a/mm/memcontrol.c
2294     +++ b/mm/memcontrol.c
2295     @@ -5101,19 +5101,22 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
2296     unsigned int size;
2297     int node;
2298     int __maybe_unused i;
2299     + long error = -ENOMEM;
2300    
2301     size = sizeof(struct mem_cgroup);
2302     size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
2303    
2304     memcg = kzalloc(size, GFP_KERNEL);
2305     if (!memcg)
2306     - return NULL;
2307     + return ERR_PTR(error);
2308    
2309     memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
2310     1, MEM_CGROUP_ID_MAX,
2311     GFP_KERNEL);
2312     - if (memcg->id.id < 0)
2313     + if (memcg->id.id < 0) {
2314     + error = memcg->id.id;
2315     goto fail;
2316     + }
2317    
2318     memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
2319     if (!memcg->vmstats_local)
2320     @@ -5158,7 +5161,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
2321     fail:
2322     mem_cgroup_id_remove(memcg);
2323     __mem_cgroup_free(memcg);
2324     - return NULL;
2325     + return ERR_PTR(error);
2326     }
2327    
2328     static struct cgroup_subsys_state * __ref
2329     @@ -5169,8 +5172,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
2330     long error = -ENOMEM;
2331    
2332     memcg = mem_cgroup_alloc();
2333     - if (!memcg)
2334     - return ERR_PTR(error);
2335     + if (IS_ERR(memcg))
2336     + return ERR_CAST(memcg);
2337    
2338     memcg->high = PAGE_COUNTER_MAX;
2339     memcg->soft_limit = PAGE_COUNTER_MAX;
2340     @@ -5220,7 +5223,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
2341     fail:
2342     mem_cgroup_id_remove(memcg);
2343     mem_cgroup_free(memcg);
2344     - return ERR_PTR(-ENOMEM);
2345     + return ERR_PTR(error);
2346     }
2347    
2348     static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
2349     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2350     index d387ca74cb5a..98d5c940facd 100644
2351     --- a/mm/page_alloc.c
2352     +++ b/mm/page_alloc.c
2353     @@ -1555,6 +1555,7 @@ void set_zone_contiguous(struct zone *zone)
2354     if (!__pageblock_pfn_to_page(block_start_pfn,
2355     block_end_pfn, zone))
2356     return;
2357     + cond_resched();
2358     }
2359    
2360     /* We confirm that there is no hole */
2361     @@ -2350,6 +2351,14 @@ static inline void boost_watermark(struct zone *zone)
2362    
2363     if (!watermark_boost_factor)
2364     return;
2365     + /*
2366     + * Don't bother in zones that are unlikely to produce results.
2367     + * On small machines, including kdump capture kernels running
2368     + * in a small area, boosting the watermark can cause an out of
2369     + * memory situation immediately.
2370     + */
2371     + if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2372     + return;
2373    
2374     max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2375     watermark_boost_factor, 10000);
2376     diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
2377     index 8033f24f506c..a9e7540c5691 100644
2378     --- a/net/batman-adv/bat_v_ogm.c
2379     +++ b/net/batman-adv/bat_v_ogm.c
2380     @@ -897,7 +897,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
2381    
2382     orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
2383     if (!orig_node)
2384     - return;
2385     + goto out;
2386    
2387     neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
2388     ethhdr->h_source);
2389     diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
2390     index 580609389f0f..70e3b161c663 100644
2391     --- a/net/batman-adv/network-coding.c
2392     +++ b/net/batman-adv/network-coding.c
2393     @@ -1009,15 +1009,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
2394     */
2395     static u8 batadv_nc_random_weight_tq(u8 tq)
2396     {
2397     - u8 rand_val, rand_tq;
2398     -
2399     - get_random_bytes(&rand_val, sizeof(rand_val));
2400     -
2401     /* randomize the estimated packet loss (max TQ - estimated TQ) */
2402     - rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq);
2403     -
2404     - /* normalize the randomized packet loss */
2405     - rand_tq /= BATADV_TQ_MAX_VALUE;
2406     + u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
2407    
2408     /* convert to (randomized) estimated tq again */
2409     return BATADV_TQ_MAX_VALUE - rand_tq;
2410     diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
2411     index e5bbc28ed12c..079a13493880 100644
2412     --- a/net/batman-adv/sysfs.c
2413     +++ b/net/batman-adv/sysfs.c
2414     @@ -1150,7 +1150,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
2415     ret = batadv_parse_throughput(net_dev, buff, "throughput_override",
2416     &tp_override);
2417     if (!ret)
2418     - return count;
2419     + goto out;
2420    
2421     old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
2422     if (old_tp_override == tp_override)
2423     @@ -1190,6 +1190,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj,
2424    
2425     tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
2426    
2427     + batadv_hardif_put(hard_iface);
2428     return sprintf(buff, "%u.%u MBit\n", tp_override / 10,
2429     tp_override % 10);
2430     }
2431     diff --git a/net/core/devlink.c b/net/core/devlink.c
2432     index 4c25f1aa2d37..5667cae57072 100644
2433     --- a/net/core/devlink.c
2434     +++ b/net/core/devlink.c
2435     @@ -3907,6 +3907,11 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
2436     end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
2437     end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
2438     dump = false;
2439     +
2440     + if (start_offset == end_offset) {
2441     + err = 0;
2442     + goto nla_put_failure;
2443     + }
2444     }
2445    
2446     err = devlink_nl_region_read_snapshot_fill(skb, devlink,
2447     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2448     index 920784a9b7ff..7b40d12f0c22 100644
2449     --- a/net/core/neighbour.c
2450     +++ b/net/core/neighbour.c
2451     @@ -1954,6 +1954,9 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
2452     NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
2453     }
2454    
2455     + if (protocol)
2456     + neigh->protocol = protocol;
2457     +
2458     if (ndm->ndm_flags & NTF_EXT_LEARNED)
2459     flags |= NEIGH_UPDATE_F_EXT_LEARNED;
2460    
2461     @@ -1967,9 +1970,6 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
2462     err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2463     NETLINK_CB(skb).portid, extack);
2464    
2465     - if (protocol)
2466     - neigh->protocol = protocol;
2467     -
2468     neigh_release(neigh);
2469    
2470     out:
2471     diff --git a/net/dsa/master.c b/net/dsa/master.c
2472     index 3255dfc97f86..be0b4ed3b7d8 100644
2473     --- a/net/dsa/master.c
2474     +++ b/net/dsa/master.c
2475     @@ -259,7 +259,8 @@ static void dsa_master_ndo_teardown(struct net_device *dev)
2476     {
2477     struct dsa_port *cpu_dp = dev->dsa_ptr;
2478    
2479     - dev->netdev_ops = cpu_dp->orig_ndo_ops;
2480     + if (cpu_dp->orig_ndo_ops)
2481     + dev->netdev_ops = cpu_dp->orig_ndo_ops;
2482     cpu_dp->orig_ndo_ops = NULL;
2483     }
2484    
2485     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2486     index 894c7370c1bd..c81d8e9e5169 100644
2487     --- a/net/ipv6/route.c
2488     +++ b/net/ipv6/route.c
2489     @@ -1388,9 +1388,18 @@ static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
2490     }
2491     ip6_rt_copy_init(pcpu_rt, res);
2492     pcpu_rt->rt6i_flags |= RTF_PCPU;
2493     +
2494     + if (f6i->nh)
2495     + pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
2496     +
2497     return pcpu_rt;
2498     }
2499    
2500     +static bool rt6_is_valid(const struct rt6_info *rt6)
2501     +{
2502     + return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
2503     +}
2504     +
2505     /* It should be called with rcu_read_lock() acquired */
2506     static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
2507     {
2508     @@ -1398,6 +1407,19 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
2509    
2510     pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
2511    
2512     + if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
2513     + struct rt6_info *prev, **p;
2514     +
2515     + p = this_cpu_ptr(res->nh->rt6i_pcpu);
2516     + prev = xchg(p, NULL);
2517     + if (prev) {
2518     + dst_dev_put(&prev->dst);
2519     + dst_release(&prev->dst);
2520     + }
2521     +
2522     + pcpu_rt = NULL;
2523     + }
2524     +
2525     return pcpu_rt;
2526     }
2527    
2528     @@ -2599,6 +2621,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
2529    
2530     rt = container_of(dst, struct rt6_info, dst);
2531    
2532     + if (rt->sernum)
2533     + return rt6_is_valid(rt) ? dst : NULL;
2534     +
2535     rcu_read_lock();
2536    
2537     /* All IPV6 dsts are created with ->obsolete set to the value
2538     diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
2539     index 3d816a1e5442..59151dc07fdc 100644
2540     --- a/net/netfilter/nf_nat_proto.c
2541     +++ b/net/netfilter/nf_nat_proto.c
2542     @@ -68,15 +68,13 @@ static bool udp_manip_pkt(struct sk_buff *skb,
2543     enum nf_nat_manip_type maniptype)
2544     {
2545     struct udphdr *hdr;
2546     - bool do_csum;
2547    
2548     if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
2549     return false;
2550    
2551     hdr = (struct udphdr *)(skb->data + hdroff);
2552     - do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
2553     + __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check);
2554    
2555     - __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum);
2556     return true;
2557     }
2558    
2559     diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
2560     index 9f5dea0064ea..916a3c7f9eaf 100644
2561     --- a/net/netfilter/nfnetlink_osf.c
2562     +++ b/net/netfilter/nfnetlink_osf.c
2563     @@ -165,12 +165,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
2564     static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
2565     const struct sk_buff *skb,
2566     const struct iphdr *ip,
2567     - unsigned char *opts)
2568     + unsigned char *opts,
2569     + struct tcphdr *_tcph)
2570     {
2571     const struct tcphdr *tcp;
2572     - struct tcphdr _tcph;
2573    
2574     - tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph);
2575     + tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph);
2576     if (!tcp)
2577     return NULL;
2578    
2579     @@ -205,10 +205,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family,
2580     int fmatch = FMATCH_WRONG;
2581     struct nf_osf_hdr_ctx ctx;
2582     const struct tcphdr *tcp;
2583     + struct tcphdr _tcph;
2584    
2585     memset(&ctx, 0, sizeof(ctx));
2586    
2587     - tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
2588     + tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
2589     if (!tcp)
2590     return false;
2591    
2592     @@ -265,10 +266,11 @@ bool nf_osf_find(const struct sk_buff *skb,
2593     const struct nf_osf_finger *kf;
2594     struct nf_osf_hdr_ctx ctx;
2595     const struct tcphdr *tcp;
2596     + struct tcphdr _tcph;
2597    
2598     memset(&ctx, 0, sizeof(ctx));
2599    
2600     - tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
2601     + tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
2602     if (!tcp)
2603     return false;
2604    
2605     diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
2606     index dba70377bbd9..4021f726b58f 100644
2607     --- a/net/sched/sch_choke.c
2608     +++ b/net/sched/sch_choke.c
2609     @@ -323,7 +323,8 @@ static void choke_reset(struct Qdisc *sch)
2610    
2611     sch->q.qlen = 0;
2612     sch->qstats.backlog = 0;
2613     - memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
2614     + if (q->tab)
2615     + memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
2616     q->head = q->tail = 0;
2617     red_restart(&q->vars);
2618     }
2619     diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
2620     index c261c0a18868..76d72c3f52ed 100644
2621     --- a/net/sched/sch_fq_codel.c
2622     +++ b/net/sched/sch_fq_codel.c
2623     @@ -417,7 +417,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
2624     q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
2625    
2626     if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
2627     - q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
2628     + q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
2629    
2630     if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
2631     q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
2632     diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
2633     index c787d4d46017..5a6def5e4e6d 100644
2634     --- a/net/sched/sch_sfq.c
2635     +++ b/net/sched/sch_sfq.c
2636     @@ -637,6 +637,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
2637     if (ctl->divisor &&
2638     (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
2639     return -EINVAL;
2640     +
2641     + /* slot->allot is a short; make sure quantum is not too big. */
2642     + if (ctl->quantum) {
2643     + unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
2644     +
2645     + if (scaled <= 0 || scaled > SHRT_MAX)
2646     + return -EINVAL;
2647     + }
2648     +
2649     if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
2650     ctl_v1->Wlog))
2651     return -EINVAL;
2652     diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
2653     index 0fb10abf7579..7a5e4c454715 100644
2654     --- a/net/sched/sch_skbprio.c
2655     +++ b/net/sched/sch_skbprio.c
2656     @@ -169,6 +169,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
2657     {
2658     struct tc_skbprio_qopt *ctl = nla_data(opt);
2659    
2660     + if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
2661     + return -EINVAL;
2662     +
2663     sch->limit = ctl->limit;
2664     return 0;
2665     }
2666     diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
2667     index c6d83a64eac3..5625a9500f21 100644
2668     --- a/net/sctp/sm_statefuns.c
2669     +++ b/net/sctp/sm_statefuns.c
2670     @@ -1865,7 +1865,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
2671     */
2672     sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
2673     return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
2674     - SCTP_ST_CHUNK(0), NULL,
2675     + SCTP_ST_CHUNK(0), repl,
2676     commands);
2677     } else {
2678     sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
2679     @@ -5470,7 +5470,7 @@ enum sctp_disposition sctp_sf_do_9_2_start_shutdown(
2680     * in the Cumulative TSN Ack field the last sequential TSN it
2681     * has received from the peer.
2682     */
2683     - reply = sctp_make_shutdown(asoc, NULL);
2684     + reply = sctp_make_shutdown(asoc, arg);
2685     if (!reply)
2686     goto nomem;
2687    
2688     @@ -6068,7 +6068,7 @@ enum sctp_disposition sctp_sf_autoclose_timer_expire(
2689     disposition = SCTP_DISPOSITION_CONSUME;
2690     if (sctp_outq_is_empty(&asoc->outqueue)) {
2691     disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
2692     - arg, commands);
2693     + NULL, commands);
2694     }
2695    
2696     return disposition;
2697     diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
2698     index 3a12fc18239b..73dbed0c4b6b 100644
2699     --- a/net/tipc/topsrv.c
2700     +++ b/net/tipc/topsrv.c
2701     @@ -402,10 +402,11 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
2702     read_lock_bh(&sk->sk_callback_lock);
2703     ret = tipc_conn_rcv_sub(srv, con, &s);
2704     read_unlock_bh(&sk->sk_callback_lock);
2705     + if (!ret)
2706     + return 0;
2707     }
2708     - if (ret < 0)
2709     - tipc_conn_close(con);
2710    
2711     + tipc_conn_close(con);
2712     return ret;
2713     }
2714    
2715     diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
2716     index 41e9c2932b34..5513a08a4308 100644
2717     --- a/net/tls/tls_sw.c
2718     +++ b/net/tls/tls_sw.c
2719     @@ -797,6 +797,8 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
2720     *copied -= sk_msg_free(sk, msg);
2721     tls_free_open_rec(sk);
2722     }
2723     + if (psock)
2724     + sk_psock_put(sk, psock);
2725     return err;
2726     }
2727     more_data:
2728     @@ -2076,8 +2078,9 @@ static void tls_data_ready(struct sock *sk)
2729     strp_data_ready(&ctx->strp);
2730    
2731     psock = sk_psock_get(sk);
2732     - if (psock && !list_empty(&psock->ingress_msg)) {
2733     - ctx->saved_data_ready(sk);
2734     + if (psock) {
2735     + if (!list_empty(&psock->ingress_msg))
2736     + ctx->saved_data_ready(sk);
2737     sk_psock_put(sk, psock);
2738     }
2739     }
2740     diff --git a/scripts/decodecode b/scripts/decodecode
2741     index ba8b8d5834e6..fbdb325cdf4f 100755
2742     --- a/scripts/decodecode
2743     +++ b/scripts/decodecode
2744     @@ -126,7 +126,7 @@ faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \
2745     faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
2746     faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
2747    
2748     -cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
2749     +cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
2750     echo
2751     cat $T.aa
2752     cleanup
2753     diff --git a/tools/cgroup/iocost_monitor.py b/tools/cgroup/iocost_monitor.py
2754     index f79b23582a1d..7e344a78a627 100644
2755     --- a/tools/cgroup/iocost_monitor.py
2756     +++ b/tools/cgroup/iocost_monitor.py
2757     @@ -159,7 +159,12 @@ class IocgStat:
2758     else:
2759     self.inflight_pct = 0
2760    
2761     - self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000
2762     + # vdebt used to be an atomic64_t and is now u64, support both
2763     + try:
2764     + self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000
2765     + except:
2766     + self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000
2767     +
2768     self.use_delay = blkg.use_delay.counter.value_()
2769     self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
2770    
2771     diff --git a/tools/objtool/check.c b/tools/objtool/check.c
2772     index d6a971326f87..fcc6cd404f56 100644
2773     --- a/tools/objtool/check.c
2774     +++ b/tools/objtool/check.c
2775     @@ -1402,7 +1402,7 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
2776     struct cfi_reg *cfa = &state->cfa;
2777     struct stack_op *op = &insn->stack_op;
2778    
2779     - if (cfa->base != CFI_SP)
2780     + if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2781     return 0;
2782    
2783     /* push */
2784     diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c
2785     index d31f267961e7..25c0e47d57cb 100644
2786     --- a/virt/kvm/arm/hyp/aarch32.c
2787     +++ b/virt/kvm/arm/hyp/aarch32.c
2788     @@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
2789     */
2790     void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
2791     {
2792     + u32 pc = *vcpu_pc(vcpu);
2793     bool is_thumb;
2794    
2795     is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
2796     if (is_thumb && !is_wide_instr)
2797     - *vcpu_pc(vcpu) += 2;
2798     + pc += 2;
2799     else
2800     - *vcpu_pc(vcpu) += 4;
2801     + pc += 4;
2802     +
2803     + *vcpu_pc(vcpu) = pc;
2804     +
2805     kvm_adjust_itstate(vcpu);
2806     }
2807     diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
2808     index 0d090482720d..7eacf00e5abe 100644
2809     --- a/virt/kvm/arm/vgic/vgic-mmio.c
2810     +++ b/virt/kvm/arm/vgic/vgic-mmio.c
2811     @@ -389,7 +389,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
2812     static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
2813     {
2814     if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
2815     - intid > VGIC_NR_PRIVATE_IRQS)
2816     + intid >= VGIC_NR_PRIVATE_IRQS)
2817     kvm_arm_halt_guest(vcpu->kvm);
2818     }
2819    
2820     @@ -397,7 +397,7 @@ static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
2821     static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
2822     {
2823     if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
2824     - intid > VGIC_NR_PRIVATE_IRQS)
2825     + intid >= VGIC_NR_PRIVATE_IRQS)
2826     kvm_arm_resume_guest(vcpu->kvm);
2827     }
2828