Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.20/0105-4.20.6-all-fixes.patch

Parent Directory | Revision Log


Revision 3282 - (hide annotations) (download)
Mon Mar 4 10:35:53 2019 UTC (5 years, 2 months ago) by niro
File size: 165492 byte(s)
linux-4.20.6
1 niro 3282 diff --git a/Makefile b/Makefile
2     index 690f6a9d9f1b..523922ea9c97 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 20
9     -SUBLEVEL = 5
10     +SUBLEVEL = 6
11     EXTRAVERSION =
12     NAME = Shy Crocodile
13    
14     diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
15     index 9185541035cc..6958545390f0 100644
16     --- a/arch/arc/include/asm/perf_event.h
17     +++ b/arch/arc/include/asm/perf_event.h
18     @@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
19    
20     /* counts condition */
21     [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
22     - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
23     + /* All jump instructions that are taken */
24     + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
25     [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
26     #ifdef CONFIG_ISA_ARCV2
27     [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
28     diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
29     index 62ad4bcb841a..f230bb7092fd 100644
30     --- a/arch/arc/lib/memset-archs.S
31     +++ b/arch/arc/lib/memset-archs.S
32     @@ -7,11 +7,39 @@
33     */
34    
35     #include <linux/linkage.h>
36     +#include <asm/cache.h>
37    
38     -#undef PREALLOC_NOT_AVAIL
39     +/*
40     + * The memset implementation below is optimized to use prefetchw and prealloc
41     + * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
42     + * If you want to implement optimized memset for other possible L1 data cache
43     + * line lengths (32B and 128B) you should rewrite code carefully checking
44     + * we don't call any prefetchw/prealloc instruction for L1 cache lines which
45     + * don't belongs to memset area.
46     + */
47     +
48     +#if L1_CACHE_SHIFT == 6
49     +
50     +.macro PREALLOC_INSTR reg, off
51     + prealloc [\reg, \off]
52     +.endm
53     +
54     +.macro PREFETCHW_INSTR reg, off
55     + prefetchw [\reg, \off]
56     +.endm
57     +
58     +#else
59     +
60     +.macro PREALLOC_INSTR
61     +.endm
62     +
63     +.macro PREFETCHW_INSTR
64     +.endm
65     +
66     +#endif
67    
68     ENTRY_CFI(memset)
69     - prefetchw [r0] ; Prefetch the write location
70     + PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
71     mov.f 0, r2
72     ;;; if size is zero
73     jz.d [blink]
74     @@ -48,11 +76,8 @@ ENTRY_CFI(memset)
75    
76     lpnz @.Lset64bytes
77     ;; LOOP START
78     -#ifdef PREALLOC_NOT_AVAIL
79     - prefetchw [r3, 64] ;Prefetch the next write location
80     -#else
81     - prealloc [r3, 64]
82     -#endif
83     + PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
84     +
85     #ifdef CONFIG_ARC_HAS_LL64
86     std.ab r4, [r3, 8]
87     std.ab r4, [r3, 8]
88     @@ -85,7 +110,6 @@ ENTRY_CFI(memset)
89     lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
90     lpnz .Lset32bytes
91     ;; LOOP START
92     - prefetchw [r3, 32] ;Prefetch the next write location
93     #ifdef CONFIG_ARC_HAS_LL64
94     std.ab r4, [r3, 8]
95     std.ab r4, [r3, 8]
96     diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
97     index f8fe5668b30f..a56e6a8ed259 100644
98     --- a/arch/arc/mm/init.c
99     +++ b/arch/arc/mm/init.c
100     @@ -137,7 +137,8 @@ void __init setup_arch_memory(void)
101     */
102    
103     memblock_add_node(low_mem_start, low_mem_sz, 0);
104     - memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
105     + memblock_reserve(CONFIG_LINUX_LINK_BASE,
106     + __pa(_end) - CONFIG_LINUX_LINK_BASE);
107    
108     #ifdef CONFIG_BLK_DEV_INITRD
109     if (initrd_start)
110     diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
111     index 19516fbc2c55..5461d589a1e2 100644
112     --- a/arch/arm/mm/proc-macros.S
113     +++ b/arch/arm/mm/proc-macros.S
114     @@ -278,7 +278,7 @@
115     * If we are building for big.Little with branch predictor hardening,
116     * we need the processor function tables to remain available after boot.
117     */
118     -#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
119     +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
120     .section ".rodata"
121     #endif
122     .type \name\()_processor_functions, #object
123     @@ -316,7 +316,7 @@ ENTRY(\name\()_processor_functions)
124     .endif
125    
126     .size \name\()_processor_functions, . - \name\()_processor_functions
127     -#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
128     +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
129     .previous
130     #endif
131     .endm
132     diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
133     index ccbb53e22024..8d04e6f3f796 100644
134     --- a/arch/s390/include/asm/mmu_context.h
135     +++ b/arch/s390/include/asm/mmu_context.h
136     @@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
137     atomic_set(&mm->context.flush_count, 0);
138     mm->context.gmap_asce = 0;
139     mm->context.flush_mm = 0;
140     - mm->context.compat_mm = 0;
141     + mm->context.compat_mm = test_thread_flag(TIF_31BIT);
142     #ifdef CONFIG_PGSTE
143     mm->context.alloc_pgste = page_table_allocate_pgste ||
144     test_thread_flag(TIF_PGSTE) ||
145     @@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
146     {
147     int cpu = smp_processor_id();
148    
149     - if (prev == next)
150     - return;
151     S390_lowcore.user_asce = next->context.asce;
152     cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
153     /* Clear previous user-ASCE from CR1 and CR7 */
154     @@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
155     __ctl_load(S390_lowcore.vdso_asce, 7, 7);
156     clear_cpu_flag(CIF_ASCE_SECONDARY);
157     }
158     - cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
159     + if (prev != next)
160     + cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
161     }
162    
163     #define finish_arch_post_lock_switch finish_arch_post_lock_switch
164     diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
165     index af5c2b3f7065..a8c7789b246b 100644
166     --- a/arch/s390/kernel/early.c
167     +++ b/arch/s390/kernel/early.c
168     @@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void)
169     if (stsi(vmms, 3, 2, 2) || !vmms->count)
170     return;
171    
172     - /* Running under KVM? If not we assume z/VM */
173     + /* Detect known hypervisors */
174     if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
175     S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
176     - else
177     + else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
178     S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
179     }
180    
181     diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
182     index 72dd23ef771b..7ed90a759135 100644
183     --- a/arch/s390/kernel/setup.c
184     +++ b/arch/s390/kernel/setup.c
185     @@ -1006,6 +1006,8 @@ void __init setup_arch(char **cmdline_p)
186     pr_info("Linux is running under KVM in 64-bit mode\n");
187     else if (MACHINE_IS_LPAR)
188     pr_info("Linux is running natively in 64-bit mode\n");
189     + else
190     + pr_info("Linux is running as a guest in 64-bit mode\n");
191    
192     /* Have one command line that is parsed and saved in /proc/cmdline */
193     /* boot_command_line has been already set up in early.c */
194     diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
195     index f82b3d3c36e2..b198ece2aad6 100644
196     --- a/arch/s390/kernel/smp.c
197     +++ b/arch/s390/kernel/smp.c
198     @@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
199     */
200     void smp_call_ipl_cpu(void (*func)(void *), void *data)
201     {
202     + struct lowcore *lc = pcpu_devices->lowcore;
203     +
204     + if (pcpu_devices[0].address == stap())
205     + lc = &S390_lowcore;
206     +
207     pcpu_delegate(&pcpu_devices[0], func, data,
208     - pcpu_devices->lowcore->nodat_stack);
209     + lc->nodat_stack);
210     }
211    
212     int smp_find_processor_id(u16 address)
213     @@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev,
214     {
215     int rc;
216    
217     + rc = lock_device_hotplug_sysfs();
218     + if (rc)
219     + return rc;
220     rc = smp_rescan_cpus();
221     + unlock_device_hotplug();
222     return rc ? rc : count;
223     }
224     static DEVICE_ATTR_WO(rescan);
225     diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
226     index ebe748a9f472..4ff354887db4 100644
227     --- a/arch/s390/kernel/vdso.c
228     +++ b/arch/s390/kernel/vdso.c
229     @@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
230    
231     vdso_pages = vdso64_pages;
232     #ifdef CONFIG_COMPAT
233     - if (is_compat_task()) {
234     + mm->context.compat_mm = is_compat_task();
235     + if (mm->context.compat_mm)
236     vdso_pages = vdso32_pages;
237     - mm->context.compat_mm = 1;
238     - }
239     #endif
240     /*
241     * vDSO has a problem and was disabled, just don't "enable" it for
242     diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
243     index 8eaf8952c408..39913770a44d 100644
244     --- a/arch/x86/entry/entry_64_compat.S
245     +++ b/arch/x86/entry/entry_64_compat.S
246     @@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat)
247    
248     /* Need to switch before accessing the thread stack. */
249     SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
250     - movq %rsp, %rdi
251     + /* In the Xen PV case we already run on the thread stack. */
252     + ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
253     movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
254    
255     pushq 6*8(%rdi) /* regs->ss */
256     @@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat)
257     pushq 3*8(%rdi) /* regs->cs */
258     pushq 2*8(%rdi) /* regs->ip */
259     pushq 1*8(%rdi) /* regs->orig_ax */
260     -
261     pushq (%rdi) /* pt_regs->di */
262     +.Lint80_keep_stack:
263     +
264     pushq %rsi /* pt_regs->si */
265     xorl %esi, %esi /* nospec si */
266     pushq %rdx /* pt_regs->dx */
267     diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
268     index 0ca50611e8ce..19d18fae6ec6 100644
269     --- a/arch/x86/include/asm/mmu_context.h
270     +++ b/arch/x86/include/asm/mmu_context.h
271     @@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
272    
273     void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
274    
275     +/*
276     + * Init a new mm. Used on mm copies, like at fork()
277     + * and on mm's that are brand-new, like at execve().
278     + */
279     static inline int init_new_context(struct task_struct *tsk,
280     struct mm_struct *mm)
281     {
282     @@ -228,8 +232,22 @@ do { \
283     } while (0)
284     #endif
285    
286     +static inline void arch_dup_pkeys(struct mm_struct *oldmm,
287     + struct mm_struct *mm)
288     +{
289     +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
290     + if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
291     + return;
292     +
293     + /* Duplicate the oldmm pkey state in mm: */
294     + mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
295     + mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
296     +#endif
297     +}
298     +
299     static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
300     {
301     + arch_dup_pkeys(oldmm, mm);
302     paravirt_arch_dup_mmap(oldmm, mm);
303     return ldt_dup_context(oldmm, mm);
304     }
305     diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
306     index ba4bfb7f6a36..5c93a65ee1e5 100644
307     --- a/arch/x86/kernel/kvm.c
308     +++ b/arch/x86/kernel/kvm.c
309     @@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
310     #else
311     u64 ipi_bitmap = 0;
312     #endif
313     + long ret;
314    
315     if (cpumask_empty(mask))
316     return;
317     @@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
318     } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
319     max = apic_id < max ? max : apic_id;
320     } else {
321     - kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
322     + ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
323     (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
324     + WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
325     min = max = apic_id;
326     ipi_bitmap = 0;
327     }
328     @@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
329     }
330    
331     if (ipi_bitmap) {
332     - kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
333     + ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
334     (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
335     + WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
336     }
337    
338     local_irq_restore(flags);
339     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
340     index 95784bc4a53c..5a2c87552122 100644
341     --- a/arch/x86/kvm/vmx.c
342     +++ b/arch/x86/kvm/vmx.c
343     @@ -8315,11 +8315,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
344     if (r < 0)
345     goto out_vmcs02;
346    
347     - vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
348     + vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
349     if (!vmx->nested.cached_vmcs12)
350     goto out_cached_vmcs12;
351    
352     - vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
353     + vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
354     if (!vmx->nested.cached_shadow_vmcs12)
355     goto out_cached_shadow_vmcs12;
356    
357     @@ -14853,13 +14853,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
358     copy_shadow_to_vmcs12(vmx);
359     }
360    
361     - if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
362     + /*
363     + * Copy over the full allocated size of vmcs12 rather than just the size
364     + * of the struct.
365     + */
366     + if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
367     return -EFAULT;
368    
369     if (nested_cpu_has_shadow_vmcs(vmcs12) &&
370     vmcs12->vmcs_link_pointer != -1ull) {
371     if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
372     - get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
373     + get_shadow_vmcs12(vcpu), VMCS12_SIZE))
374     return -EFAULT;
375     }
376    
377     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
378     index f049ecfac7bb..4247cb230bd3 100644
379     --- a/arch/x86/kvm/x86.c
380     +++ b/arch/x86/kvm/x86.c
381     @@ -6407,8 +6407,7 @@ restart:
382     toggle_interruptibility(vcpu, ctxt->interruptibility);
383     vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
384     kvm_rip_write(vcpu, ctxt->eip);
385     - if (r == EMULATE_DONE &&
386     - (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
387     + if (r == EMULATE_DONE && ctxt->tf)
388     kvm_vcpu_do_singlestep(vcpu, &r);
389     if (!ctxt->have_exception ||
390     exception_type(ctxt->exception.vector) == EXCPT_TRAP)
391     @@ -6998,10 +6997,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
392     case KVM_HC_CLOCK_PAIRING:
393     ret = kvm_pv_clock_pairing(vcpu, a0, a1);
394     break;
395     +#endif
396     case KVM_HC_SEND_IPI:
397     ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
398     break;
399     -#endif
400     default:
401     ret = -KVM_ENOSYS;
402     break;
403     diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
404     index 79778ab200e4..a53665116458 100644
405     --- a/arch/x86/lib/kaslr.c
406     +++ b/arch/x86/lib/kaslr.c
407     @@ -36,8 +36,8 @@ static inline u16 i8254(void)
408     u16 status, timer;
409    
410     do {
411     - outb(I8254_PORT_CONTROL,
412     - I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
413     + outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
414     + I8254_PORT_CONTROL);
415     status = inb(I8254_PORT_COUNTER0);
416     timer = inb(I8254_PORT_COUNTER0);
417     timer |= inb(I8254_PORT_COUNTER0) << 8;
418     diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
419     index 5912d30020c7..8535e7999769 100644
420     --- a/drivers/acpi/nfit/core.c
421     +++ b/drivers/acpi/nfit/core.c
422     @@ -394,6 +394,32 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func)
423     return id;
424     }
425    
426     +static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
427     + struct nd_cmd_pkg *call_pkg)
428     +{
429     + if (call_pkg) {
430     + int i;
431     +
432     + if (nfit_mem->family != call_pkg->nd_family)
433     + return -ENOTTY;
434     +
435     + for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
436     + if (call_pkg->nd_reserved2[i])
437     + return -EINVAL;
438     + return call_pkg->nd_command;
439     + }
440     +
441     + /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
442     + if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
443     + return cmd;
444     +
445     + /*
446     + * Force function number validation to fail since 0 is never
447     + * published as a valid function in dsm_mask.
448     + */
449     + return 0;
450     +}
451     +
452     int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
453     unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
454     {
455     @@ -407,30 +433,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
456     unsigned long cmd_mask, dsm_mask;
457     u32 offset, fw_status = 0;
458     acpi_handle handle;
459     - unsigned int func;
460     const guid_t *guid;
461     - int rc, i;
462     + int func, rc, i;
463    
464     if (cmd_rc)
465     *cmd_rc = -EINVAL;
466     - func = cmd;
467     - if (cmd == ND_CMD_CALL) {
468     - call_pkg = buf;
469     - func = call_pkg->nd_command;
470     -
471     - for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
472     - if (call_pkg->nd_reserved2[i])
473     - return -EINVAL;
474     - }
475    
476     if (nvdimm) {
477     struct acpi_device *adev = nfit_mem->adev;
478    
479     if (!adev)
480     return -ENOTTY;
481     - if (call_pkg && nfit_mem->family != call_pkg->nd_family)
482     - return -ENOTTY;
483    
484     + if (cmd == ND_CMD_CALL)
485     + call_pkg = buf;
486     + func = cmd_to_func(nfit_mem, cmd, call_pkg);
487     + if (func < 0)
488     + return func;
489     dimm_name = nvdimm_name(nvdimm);
490     cmd_name = nvdimm_cmd_name(cmd);
491     cmd_mask = nvdimm_cmd_mask(nvdimm);
492     @@ -441,6 +460,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
493     } else {
494     struct acpi_device *adev = to_acpi_dev(acpi_desc);
495    
496     + func = cmd;
497     cmd_name = nvdimm_bus_cmd_name(cmd);
498     cmd_mask = nd_desc->cmd_mask;
499     dsm_mask = cmd_mask;
500     @@ -455,7 +475,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
501     if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
502     return -ENOTTY;
503    
504     - if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
505     + /*
506     + * Check for a valid command. For ND_CMD_CALL, we also have to
507     + * make sure that the DSM function is supported.
508     + */
509     + if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
510     + return -ENOTTY;
511     + else if (!test_bit(cmd, &cmd_mask))
512     return -ENOTTY;
513    
514     in_obj.type = ACPI_TYPE_PACKAGE;
515     @@ -1844,6 +1870,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
516     return 0;
517     }
518    
519     + /*
520     + * Function 0 is the command interrogation function, don't
521     + * export it to potential userspace use, and enable it to be
522     + * used as an error value in acpi_nfit_ctl().
523     + */
524     + dsm_mask &= ~1UL;
525     +
526     guid = to_nfit_uuid(nfit_mem->family);
527     for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
528     if (acpi_check_dsm(adev_dimm->handle, guid,
529     diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
530     index b5e3103c1175..e43c876a9223 100644
531     --- a/drivers/char/mwave/mwavedd.c
532     +++ b/drivers/char/mwave/mwavedd.c
533     @@ -59,6 +59,7 @@
534     #include <linux/mutex.h>
535     #include <linux/delay.h>
536     #include <linux/serial_8250.h>
537     +#include <linux/nospec.h>
538     #include "smapi.h"
539     #include "mwavedd.h"
540     #include "3780i.h"
541     @@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
542     ipcnum);
543     return -EINVAL;
544     }
545     + ipcnum = array_index_nospec(ipcnum,
546     + ARRAY_SIZE(pDrvData->IPCs));
547     PRINTK_3(TRACE_MWAVE,
548     "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
549     " ipcnum %x entry usIntCount %x\n",
550     @@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
551     " Invalid ipcnum %x\n", ipcnum);
552     return -EINVAL;
553     }
554     + ipcnum = array_index_nospec(ipcnum,
555     + ARRAY_SIZE(pDrvData->IPCs));
556     PRINTK_3(TRACE_MWAVE,
557     "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
558     " ipcnum %x, usIntCount %x\n",
559     @@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
560     ipcnum);
561     return -EINVAL;
562     }
563     + ipcnum = array_index_nospec(ipcnum,
564     + ARRAY_SIZE(pDrvData->IPCs));
565     mutex_lock(&mwave_mutex);
566     if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
567     pDrvData->IPCs[ipcnum].bIsEnabled = false;
568     diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
569     index 2d5d8b43727e..c4d0b6f6abf2 100644
570     --- a/drivers/clk/socfpga/clk-pll-s10.c
571     +++ b/drivers/clk/socfpga/clk-pll-s10.c
572     @@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
573     /* Read mdiv and fdiv from the fdbck register */
574     reg = readl(socfpgaclk->hw.reg + 0x4);
575     mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
576     - vco_freq = (unsigned long long)parent_rate * (mdiv + 6);
577     + vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
578    
579     return (unsigned long)vco_freq;
580     }
581     diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
582     index 5b238fc314ac..8281dfbf38c2 100644
583     --- a/drivers/clk/socfpga/clk-s10.c
584     +++ b/drivers/clk/socfpga/clk-s10.c
585     @@ -12,17 +12,17 @@
586    
587     #include "stratix10-clk.h"
588    
589     -static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
590     - "f2s_free_clk",};
591     +static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
592     + "f2s-free-clk",};
593     static const char * const cntr_mux[] = { "main_pll", "periph_pll",
594     - "osc1", "cb_intosc_hs_div2_clk",
595     - "f2s_free_clk"};
596     -static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
597     + "osc1", "cb-intosc-hs-div2-clk",
598     + "f2s-free-clk"};
599     +static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
600    
601     static const char * const noc_free_mux[] = {"main_noc_base_clk",
602     "peri_noc_base_clk",
603     - "osc1", "cb_intosc_hs_div2_clk",
604     - "f2s_free_clk"};
605     + "osc1", "cb-intosc-hs-div2-clk",
606     + "f2s-free-clk"};
607    
608     static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
609     static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
610     @@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"
611     static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
612     static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
613    
614     -static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
615     +static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
616     static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
617     static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
618    
619     static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
620     "peri_mpu_base_clk",
621     - "osc1", "cb_intosc_hs_div2_clk",
622     - "f2s_free_clk"};
623     + "osc1", "cb-intosc-hs-div2-clk",
624     + "f2s-free-clk"};
625    
626     /* clocks in AO (always on) controller */
627     static const struct stratix10_pll_clock s10_pll_clks[] = {
628     diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
629     index f65cc0ff76ab..b0908ec62f73 100644
630     --- a/drivers/clk/zynqmp/clkc.c
631     +++ b/drivers/clk/zynqmp/clkc.c
632     @@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np)
633     if (ret)
634     return ret;
635    
636     - zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) *
637     - clock_max_idx, GFP_KERNEL);
638     + zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx),
639     + GFP_KERNEL);
640     if (!zynqmp_data)
641     return -ENOMEM;
642    
643     diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
644     index 4213cb0bb2a7..f8664bac9fa8 100644
645     --- a/drivers/edac/altera_edac.h
646     +++ b/drivers/edac/altera_edac.h
647     @@ -295,8 +295,8 @@ struct altr_sdram_mc_data {
648     #define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
649    
650     /* Sticky registers for Uncorrected Errors */
651     -#define S10_SYSMGR_UE_VAL_OFST 0x120
652     -#define S10_SYSMGR_UE_ADDR_OFST 0x124
653     +#define S10_SYSMGR_UE_VAL_OFST 0x220
654     +#define S10_SYSMGR_UE_ADDR_OFST 0x224
655    
656     #define S10_DDR0_IRQ_MASK BIT(16)
657    
658     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
659     index a028661d9e20..92b11de19581 100644
660     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
661     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
662     @@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
663     { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
664     { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
665     { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
666     + { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
667     { 0, 0, 0, 0, 0 },
668     };
669    
670     diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
671     index 191b314f9e9e..709475d5cc30 100644
672     --- a/drivers/gpu/drm/meson/meson_crtc.c
673     +++ b/drivers/gpu/drm/meson/meson_crtc.c
674     @@ -45,7 +45,6 @@ struct meson_crtc {
675     struct drm_crtc base;
676     struct drm_pending_vblank_event *event;
677     struct meson_drm *priv;
678     - bool enabled;
679     };
680     #define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
681    
682     @@ -81,7 +80,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
683    
684     };
685    
686     -static void meson_crtc_enable(struct drm_crtc *crtc)
687     +static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
688     + struct drm_crtc_state *old_state)
689     {
690     struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
691     struct drm_crtc_state *crtc_state = crtc->state;
692     @@ -103,20 +103,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
693    
694     drm_crtc_vblank_on(crtc);
695    
696     - meson_crtc->enabled = true;
697     -}
698     -
699     -static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
700     - struct drm_crtc_state *old_state)
701     -{
702     - struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
703     - struct meson_drm *priv = meson_crtc->priv;
704     -
705     - DRM_DEBUG_DRIVER("\n");
706     -
707     - if (!meson_crtc->enabled)
708     - meson_crtc_enable(crtc);
709     -
710     priv->viu.osd1_enabled = true;
711     }
712    
713     @@ -142,8 +128,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
714    
715     crtc->state->event = NULL;
716     }
717     -
718     - meson_crtc->enabled = false;
719     }
720    
721     static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
722     @@ -152,9 +136,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
723     struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
724     unsigned long flags;
725    
726     - if (crtc->state->enable && !meson_crtc->enabled)
727     - meson_crtc_enable(crtc);
728     -
729     if (crtc->state->event) {
730     WARN_ON(drm_crtc_vblank_get(crtc) != 0);
731    
732     diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
733     index d3443125e661..bf5f294f172f 100644
734     --- a/drivers/gpu/drm/meson/meson_drv.c
735     +++ b/drivers/gpu/drm/meson/meson_drv.c
736     @@ -82,6 +82,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
737     .fb_create = drm_gem_fb_create,
738     };
739    
740     +static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
741     + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
742     +};
743     +
744     static irqreturn_t meson_irq(int irq, void *arg)
745     {
746     struct drm_device *dev = arg;
747     @@ -246,6 +250,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
748     drm->mode_config.max_width = 3840;
749     drm->mode_config.max_height = 2160;
750     drm->mode_config.funcs = &meson_mode_config_funcs;
751     + drm->mode_config.helper_private = &meson_mode_config_helpers;
752    
753     /* Hardware Initialization */
754    
755     diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
756     index fe00b12e4417..bea4c9850247 100644
757     --- a/drivers/hv/channel.c
758     +++ b/drivers/hv/channel.c
759     @@ -701,20 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
760     int vmbus_disconnect_ring(struct vmbus_channel *channel)
761     {
762     struct vmbus_channel *cur_channel, *tmp;
763     - unsigned long flags;
764     - LIST_HEAD(list);
765     int ret;
766    
767     if (channel->primary_channel != NULL)
768     return -EINVAL;
769    
770     - /* Snapshot the list of subchannels */
771     - spin_lock_irqsave(&channel->lock, flags);
772     - list_splice_init(&channel->sc_list, &list);
773     - channel->num_sc = 0;
774     - spin_unlock_irqrestore(&channel->lock, flags);
775     -
776     - list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
777     + list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
778     if (cur_channel->rescind)
779     wait_for_completion(&cur_channel->rescind_event);
780    
781     diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
782     index edd34c167a9b..d01689079e9b 100644
783     --- a/drivers/hv/channel_mgmt.c
784     +++ b/drivers/hv/channel_mgmt.c
785     @@ -405,7 +405,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
786     primary_channel = channel->primary_channel;
787     spin_lock_irqsave(&primary_channel->lock, flags);
788     list_del(&channel->sc_list);
789     - primary_channel->num_sc--;
790     spin_unlock_irqrestore(&primary_channel->lock, flags);
791     }
792    
793     @@ -1302,49 +1301,6 @@ cleanup:
794     return ret;
795     }
796    
797     -/*
798     - * Retrieve the (sub) channel on which to send an outgoing request.
799     - * When a primary channel has multiple sub-channels, we try to
800     - * distribute the load equally amongst all available channels.
801     - */
802     -struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
803     -{
804     - struct list_head *cur, *tmp;
805     - int cur_cpu;
806     - struct vmbus_channel *cur_channel;
807     - struct vmbus_channel *outgoing_channel = primary;
808     - int next_channel;
809     - int i = 1;
810     -
811     - if (list_empty(&primary->sc_list))
812     - return outgoing_channel;
813     -
814     - next_channel = primary->next_oc++;
815     -
816     - if (next_channel > (primary->num_sc)) {
817     - primary->next_oc = 0;
818     - return outgoing_channel;
819     - }
820     -
821     - cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id());
822     - list_for_each_safe(cur, tmp, &primary->sc_list) {
823     - cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
824     - if (cur_channel->state != CHANNEL_OPENED_STATE)
825     - continue;
826     -
827     - if (cur_channel->target_vp == cur_cpu)
828     - return cur_channel;
829     -
830     - if (i == next_channel)
831     - return cur_channel;
832     -
833     - i++;
834     - }
835     -
836     - return outgoing_channel;
837     -}
838     -EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
839     -
840     static void invoke_sc_cb(struct vmbus_channel *primary_channel)
841     {
842     struct list_head *cur, *tmp;
843     diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
844     index 41631512ae97..7b9fbd84d6df 100644
845     --- a/drivers/hv/hv_balloon.c
846     +++ b/drivers/hv/hv_balloon.c
847     @@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
848     pfn_cnt -= pgs_ol;
849     /*
850     * Check if the corresponding memory block is already
851     - * online by checking its last previously backed page.
852     - * In case it is we need to bring rest (which was not
853     - * backed previously) online too.
854     + * online. It is possible to observe struct pages still
855     + * being uninitialized here so check section instead.
856     + * In case the section is online we need to bring the
857     + * rest of pfns (which were not backed previously)
858     + * online too.
859     */
860     if (start_pfn > has->start_pfn &&
861     - !PageReserved(pfn_to_page(start_pfn - 1)))
862     + online_section_nr(pfn_to_section_nr(start_pfn)))
863     hv_bring_pgs_online(has, start_pfn, pgs_ol);
864    
865     }
866     diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
867     index 64d0c85d5161..1f1a55e07733 100644
868     --- a/drivers/hv/ring_buffer.c
869     +++ b/drivers/hv/ring_buffer.c
870     @@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
871     }
872    
873     /* Get various debug metrics for the specified ring buffer. */
874     -void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
875     - struct hv_ring_buffer_debug_info *debug_info)
876     +int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
877     + struct hv_ring_buffer_debug_info *debug_info)
878     {
879     u32 bytes_avail_towrite;
880     u32 bytes_avail_toread;
881    
882     - if (ring_info->ring_buffer) {
883     - hv_get_ringbuffer_availbytes(ring_info,
884     - &bytes_avail_toread,
885     - &bytes_avail_towrite);
886     -
887     - debug_info->bytes_avail_toread = bytes_avail_toread;
888     - debug_info->bytes_avail_towrite = bytes_avail_towrite;
889     - debug_info->current_read_index =
890     - ring_info->ring_buffer->read_index;
891     - debug_info->current_write_index =
892     - ring_info->ring_buffer->write_index;
893     - debug_info->current_interrupt_mask =
894     - ring_info->ring_buffer->interrupt_mask;
895     - }
896     + if (!ring_info->ring_buffer)
897     + return -EINVAL;
898     +
899     + hv_get_ringbuffer_availbytes(ring_info,
900     + &bytes_avail_toread,
901     + &bytes_avail_towrite);
902     + debug_info->bytes_avail_toread = bytes_avail_toread;
903     + debug_info->bytes_avail_towrite = bytes_avail_towrite;
904     + debug_info->current_read_index = ring_info->ring_buffer->read_index;
905     + debug_info->current_write_index = ring_info->ring_buffer->write_index;
906     + debug_info->current_interrupt_mask
907     + = ring_info->ring_buffer->interrupt_mask;
908     + return 0;
909     }
910     EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
911    
912     diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
913     index d0ff65675292..403fee01572c 100644
914     --- a/drivers/hv/vmbus_drv.c
915     +++ b/drivers/hv/vmbus_drv.c
916     @@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
917     {
918     struct hv_device *hv_dev = device_to_hv_device(dev);
919     struct hv_ring_buffer_debug_info outbound;
920     + int ret;
921    
922     if (!hv_dev->channel)
923     return -ENODEV;
924     - if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
925     - return -EINVAL;
926     - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
927     +
928     + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
929     + &outbound);
930     + if (ret < 0)
931     + return ret;
932     +
933     return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
934     }
935     static DEVICE_ATTR_RO(out_intr_mask);
936     @@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev,
937     {
938     struct hv_device *hv_dev = device_to_hv_device(dev);
939     struct hv_ring_buffer_debug_info outbound;
940     + int ret;
941    
942     if (!hv_dev->channel)
943     return -ENODEV;
944     - if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
945     - return -EINVAL;
946     - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
947     +
948     + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
949     + &outbound);
950     + if (ret < 0)
951     + return ret;
952     return sprintf(buf, "%d\n", outbound.current_read_index);
953     }
954     static DEVICE_ATTR_RO(out_read_index);
955     @@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev,
956     {
957     struct hv_device *hv_dev = device_to_hv_device(dev);
958     struct hv_ring_buffer_debug_info outbound;
959     + int ret;
960    
961     if (!hv_dev->channel)
962     return -ENODEV;
963     - if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
964     - return -EINVAL;
965     - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
966     +
967     + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
968     + &outbound);
969     + if (ret < 0)
970     + return ret;
971     return sprintf(buf, "%d\n", outbound.current_write_index);
972     }
973     static DEVICE_ATTR_RO(out_write_index);
974     @@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
975     {
976     struct hv_device *hv_dev = device_to_hv_device(dev);
977     struct hv_ring_buffer_debug_info outbound;
978     + int ret;
979    
980     if (!hv_dev->channel)
981     return -ENODEV;
982     - if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
983     - return -EINVAL;
984     - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
985     +
986     + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
987     + &outbound);
988     + if (ret < 0)
989     + return ret;
990     return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
991     }
992     static DEVICE_ATTR_RO(out_read_bytes_avail);
993     @@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
994     {
995     struct hv_device *hv_dev = device_to_hv_device(dev);
996     struct hv_ring_buffer_debug_info outbound;
997     + int ret;
998    
999     if (!hv_dev->channel)
1000     return -ENODEV;
1001     - if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1002     - return -EINVAL;
1003     - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
1004     +
1005     + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
1006     + &outbound);
1007     + if (ret < 0)
1008     + return ret;
1009     return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
1010     }
1011     static DEVICE_ATTR_RO(out_write_bytes_avail);
1012     @@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
1013     {
1014     struct hv_device *hv_dev = device_to_hv_device(dev);
1015     struct hv_ring_buffer_debug_info inbound;
1016     + int ret;
1017    
1018     if (!hv_dev->channel)
1019     return -ENODEV;
1020     - if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1021     - return -EINVAL;
1022     - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1023     +
1024     + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1025     + if (ret < 0)
1026     + return ret;
1027     +
1028     return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
1029     }
1030     static DEVICE_ATTR_RO(in_intr_mask);
1031     @@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev,
1032     {
1033     struct hv_device *hv_dev = device_to_hv_device(dev);
1034     struct hv_ring_buffer_debug_info inbound;
1035     + int ret;
1036    
1037     if (!hv_dev->channel)
1038     return -ENODEV;
1039     - if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1040     - return -EINVAL;
1041     - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1042     +
1043     + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1044     + if (ret < 0)
1045     + return ret;
1046     +
1047     return sprintf(buf, "%d\n", inbound.current_read_index);
1048     }
1049     static DEVICE_ATTR_RO(in_read_index);
1050     @@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev,
1051     {
1052     struct hv_device *hv_dev = device_to_hv_device(dev);
1053     struct hv_ring_buffer_debug_info inbound;
1054     + int ret;
1055    
1056     if (!hv_dev->channel)
1057     return -ENODEV;
1058     - if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1059     - return -EINVAL;
1060     - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1061     +
1062     + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1063     + if (ret < 0)
1064     + return ret;
1065     +
1066     return sprintf(buf, "%d\n", inbound.current_write_index);
1067     }
1068     static DEVICE_ATTR_RO(in_write_index);
1069     @@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
1070     {
1071     struct hv_device *hv_dev = device_to_hv_device(dev);
1072     struct hv_ring_buffer_debug_info inbound;
1073     + int ret;
1074    
1075     if (!hv_dev->channel)
1076     return -ENODEV;
1077     - if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1078     - return -EINVAL;
1079     - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1080     +
1081     + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1082     + if (ret < 0)
1083     + return ret;
1084     +
1085     return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
1086     }
1087     static DEVICE_ATTR_RO(in_read_bytes_avail);
1088     @@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
1089     {
1090     struct hv_device *hv_dev = device_to_hv_device(dev);
1091     struct hv_ring_buffer_debug_info inbound;
1092     + int ret;
1093    
1094     if (!hv_dev->channel)
1095     return -ENODEV;
1096     - if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
1097     - return -EINVAL;
1098     - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1099     +
1100     + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
1101     + if (ret < 0)
1102     + return ret;
1103     +
1104     return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
1105     }
1106     static DEVICE_ATTR_RO(in_write_bytes_avail);
1107     diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
1108     index 4c8c7a620d08..a5dc13576394 100644
1109     --- a/drivers/ide/ide-proc.c
1110     +++ b/drivers/ide/ide-proc.c
1111     @@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
1112     drive->proc = proc_mkdir(drive->name, parent);
1113     if (drive->proc) {
1114     ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
1115     - proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR,
1116     + proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
1117     drive->proc, &ide_settings_proc_fops,
1118     drive);
1119     }
1120     diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
1121     index cfc8b94527b9..aa4e431cbcd3 100644
1122     --- a/drivers/input/joystick/xpad.c
1123     +++ b/drivers/input/joystick/xpad.c
1124     @@ -252,6 +252,8 @@ static const struct xpad_device {
1125     { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
1126     { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
1127     { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
1128     + { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
1129     + { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
1130     { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
1131     { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
1132     { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
1133     @@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = {
1134     XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
1135     XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
1136     XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
1137     + XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
1138     XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
1139     XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
1140     XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
1141     diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
1142     index 8ec483e8688b..26ec603fe220 100644
1143     --- a/drivers/input/misc/uinput.c
1144     +++ b/drivers/input/misc/uinput.c
1145     @@ -39,6 +39,7 @@
1146     #include <linux/init.h>
1147     #include <linux/fs.h>
1148     #include <linux/miscdevice.h>
1149     +#include <linux/overflow.h>
1150     #include <linux/input/mt.h>
1151     #include "../input-compat.h"
1152    
1153     @@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file)
1154     static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
1155     const struct input_absinfo *abs)
1156     {
1157     - int min, max;
1158     + int min, max, range;
1159    
1160     min = abs->minimum;
1161     max = abs->maximum;
1162     @@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
1163     return -EINVAL;
1164     }
1165    
1166     - if (abs->flat > max - min) {
1167     + if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
1168     printk(KERN_DEBUG
1169     "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
1170     UINPUT_NAME, code, abs->flat, min, max);
1171     diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
1172     index db20e992a40f..7f2a45445b00 100644
1173     --- a/drivers/irqchip/irq-gic-v3-its.c
1174     +++ b/drivers/irqchip/irq-gic-v3-its.c
1175     @@ -2399,13 +2399,14 @@ static void its_free_device(struct its_device *its_dev)
1176     kfree(its_dev);
1177     }
1178    
1179     -static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
1180     +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
1181     {
1182     int idx;
1183    
1184     - idx = find_first_zero_bit(dev->event_map.lpi_map,
1185     - dev->event_map.nr_lpis);
1186     - if (idx == dev->event_map.nr_lpis)
1187     + idx = bitmap_find_free_region(dev->event_map.lpi_map,
1188     + dev->event_map.nr_lpis,
1189     + get_count_order(nvecs));
1190     + if (idx < 0)
1191     return -ENOSPC;
1192    
1193     *hwirq = dev->event_map.lpi_base + idx;
1194     @@ -2501,21 +2502,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1195     int err;
1196     int i;
1197    
1198     - for (i = 0; i < nr_irqs; i++) {
1199     - err = its_alloc_device_irq(its_dev, &hwirq);
1200     - if (err)
1201     - return err;
1202     + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
1203     + if (err)
1204     + return err;
1205    
1206     - err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
1207     + for (i = 0; i < nr_irqs; i++) {
1208     + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
1209     if (err)
1210     return err;
1211    
1212     irq_domain_set_hwirq_and_chip(domain, virq + i,
1213     - hwirq, &its_irq_chip, its_dev);
1214     + hwirq + i, &its_irq_chip, its_dev);
1215     irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
1216     pr_debug("ID:%d pID:%d vID:%d\n",
1217     - (int)(hwirq - its_dev->event_map.lpi_base),
1218     - (int) hwirq, virq + i);
1219     + (int)(hwirq + i - its_dev->event_map.lpi_base),
1220     + (int)(hwirq + i), virq + i);
1221     }
1222    
1223     return 0;
1224     diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1225     index fc7d8b8a654f..1ef828575fae 100644
1226     --- a/drivers/md/dm-crypt.c
1227     +++ b/drivers/md/dm-crypt.c
1228     @@ -2405,9 +2405,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
1229     * capi:cipher_api_spec-iv:ivopts
1230     */
1231     tmp = &cipher_in[strlen("capi:")];
1232     - cipher_api = strsep(&tmp, "-");
1233     - *ivmode = strsep(&tmp, ":");
1234     - *ivopts = tmp;
1235     +
1236     + /* Separate IV options if present, it can contain another '-' in hash name */
1237     + *ivopts = strrchr(tmp, ':');
1238     + if (*ivopts) {
1239     + **ivopts = '\0';
1240     + (*ivopts)++;
1241     + }
1242     + /* Parse IV mode */
1243     + *ivmode = strrchr(tmp, '-');
1244     + if (*ivmode) {
1245     + **ivmode = '\0';
1246     + (*ivmode)++;
1247     + }
1248     + /* The rest is crypto API spec */
1249     + cipher_api = tmp;
1250    
1251     if (*ivmode && !strcmp(*ivmode, "lmk"))
1252     cc->tfms_count = 64;
1253     @@ -2477,11 +2489,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
1254     goto bad_mem;
1255    
1256     chainmode = strsep(&tmp, "-");
1257     - *ivopts = strsep(&tmp, "-");
1258     - *ivmode = strsep(&*ivopts, ":");
1259     -
1260     - if (tmp)
1261     - DMWARN("Ignoring unexpected additional cipher options");
1262     + *ivmode = strsep(&tmp, ":");
1263     + *ivopts = tmp;
1264    
1265     /*
1266     * For compatibility with the original dm-crypt mapping format, if
1267     diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
1268     index 20b0776e39ef..ed3caceaed07 100644
1269     --- a/drivers/md/dm-thin-metadata.c
1270     +++ b/drivers/md/dm-thin-metadata.c
1271     @@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
1272     return r;
1273     }
1274    
1275     -int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1276     +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1277     {
1278     int r;
1279     uint32_t ref_count;
1280     @@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
1281     down_read(&pmd->root_lock);
1282     r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
1283     if (!r)
1284     - *result = (ref_count != 0);
1285     + *result = (ref_count > 1);
1286     up_read(&pmd->root_lock);
1287    
1288     return r;
1289     diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
1290     index 35e954ea20a9..f6be0d733c20 100644
1291     --- a/drivers/md/dm-thin-metadata.h
1292     +++ b/drivers/md/dm-thin-metadata.h
1293     @@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
1294    
1295     int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
1296    
1297     -int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
1298     +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
1299    
1300     int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
1301     int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
1302     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1303     index dadd9696340c..ca8af21bf644 100644
1304     --- a/drivers/md/dm-thin.c
1305     +++ b/drivers/md/dm-thin.c
1306     @@ -1048,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1307     * passdown we have to check that these blocks are now unused.
1308     */
1309     int r = 0;
1310     - bool used = true;
1311     + bool shared = true;
1312     struct thin_c *tc = m->tc;
1313     struct pool *pool = tc->pool;
1314     dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
1315     @@ -1058,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1316     while (b != end) {
1317     /* find start of unmapped run */
1318     for (; b < end; b++) {
1319     - r = dm_pool_block_is_used(pool->pmd, b, &used);
1320     + r = dm_pool_block_is_shared(pool->pmd, b, &shared);
1321     if (r)
1322     goto out;
1323    
1324     - if (!used)
1325     + if (!shared)
1326     break;
1327     }
1328    
1329     @@ -1071,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1330    
1331     /* find end of run */
1332     for (e = b + 1; e != end; e++) {
1333     - r = dm_pool_block_is_used(pool->pmd, e, &used);
1334     + r = dm_pool_block_is_shared(pool->pmd, e, &shared);
1335     if (r)
1336     goto out;
1337    
1338     - if (used)
1339     + if (shared)
1340     break;
1341     }
1342    
1343     diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
1344     index b8aaa684c397..2ed23c99f59f 100644
1345     --- a/drivers/misc/ibmvmc.c
1346     +++ b/drivers/misc/ibmvmc.c
1347     @@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
1348     *
1349     * Return:
1350     * 0 - Success
1351     + * Non-zero - Failure
1352     */
1353     static int ibmvmc_open(struct inode *inode, struct file *file)
1354     {
1355     struct ibmvmc_file_session *session;
1356     - int rc = 0;
1357    
1358     pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
1359     (unsigned long)inode, (unsigned long)file,
1360     ibmvmc.state);
1361    
1362     session = kzalloc(sizeof(*session), GFP_KERNEL);
1363     + if (!session)
1364     + return -ENOMEM;
1365     +
1366     session->file = file;
1367     file->private_data = session;
1368    
1369     - return rc;
1370     + return 0;
1371     }
1372    
1373     /**
1374     diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
1375     index e4b10b2d1a08..23739a60517f 100644
1376     --- a/drivers/misc/mei/hw-me-regs.h
1377     +++ b/drivers/misc/mei/hw-me-regs.h
1378     @@ -127,6 +127,8 @@
1379     #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
1380     #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
1381    
1382     +#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */
1383     +
1384     #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
1385    
1386     #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
1387     diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
1388     index ea4e152270a3..c8e21c894a5f 100644
1389     --- a/drivers/misc/mei/pci-me.c
1390     +++ b/drivers/misc/mei/pci-me.c
1391     @@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
1392     {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
1393     {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
1394     {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
1395     - {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
1396     + {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
1397    
1398     {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
1399     {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
1400    
1401     + {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
1402     +
1403     {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
1404    
1405     {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
1406     diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
1407     index 54c3fbb4a391..db56d4f58aaa 100644
1408     --- a/drivers/mmc/host/dw_mmc-bluefield.c
1409     +++ b/drivers/mmc/host/dw_mmc-bluefield.c
1410     @@ -1,11 +1,6 @@
1411     // SPDX-License-Identifier: GPL-2.0
1412     /*
1413     * Copyright (C) 2018 Mellanox Technologies.
1414     - *
1415     - * This program is free software; you can redistribute it and/or modify
1416     - * it under the terms of the GNU General Public License as published by
1417     - * the Free Software Foundation; either version 2 of the License, or
1418     - * (at your option) any later version.
1419     */
1420    
1421     #include <linux/bitfield.h>
1422     diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
1423     index c201c378537e..ef9deaa361c7 100644
1424     --- a/drivers/mmc/host/meson-gx-mmc.c
1425     +++ b/drivers/mmc/host/meson-gx-mmc.c
1426     @@ -174,6 +174,8 @@ struct meson_host {
1427     struct sd_emmc_desc *descs;
1428     dma_addr_t descs_dma_addr;
1429    
1430     + int irq;
1431     +
1432     bool vqmmc_enabled;
1433     };
1434    
1435     @@ -1181,7 +1183,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
1436     struct resource *res;
1437     struct meson_host *host;
1438     struct mmc_host *mmc;
1439     - int ret, irq;
1440     + int ret;
1441    
1442     mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
1443     if (!mmc)
1444     @@ -1228,8 +1230,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1445     goto free_host;
1446     }
1447    
1448     - irq = platform_get_irq(pdev, 0);
1449     - if (irq <= 0) {
1450     + host->irq = platform_get_irq(pdev, 0);
1451     + if (host->irq <= 0) {
1452     dev_err(&pdev->dev, "failed to get interrupt resource.\n");
1453     ret = -EINVAL;
1454     goto free_host;
1455     @@ -1283,9 +1285,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1456     writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
1457     host->regs + SD_EMMC_IRQ_EN);
1458    
1459     - ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
1460     - meson_mmc_irq_thread, IRQF_SHARED,
1461     - NULL, host);
1462     + ret = request_threaded_irq(host->irq, meson_mmc_irq,
1463     + meson_mmc_irq_thread, IRQF_SHARED, NULL, host);
1464     if (ret)
1465     goto err_init_clk;
1466    
1467     @@ -1303,7 +1304,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
1468     if (host->bounce_buf == NULL) {
1469     dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
1470     ret = -ENOMEM;
1471     - goto err_init_clk;
1472     + goto err_free_irq;
1473     }
1474    
1475     host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
1476     @@ -1322,6 +1323,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1477     err_bounce_buf:
1478     dma_free_coherent(host->dev, host->bounce_buf_size,
1479     host->bounce_buf, host->bounce_dma_addr);
1480     +err_free_irq:
1481     + free_irq(host->irq, host);
1482     err_init_clk:
1483     clk_disable_unprepare(host->mmc_clk);
1484     err_core_clk:
1485     @@ -1339,6 +1342,7 @@ static int meson_mmc_remove(struct platform_device *pdev)
1486    
1487     /* disable interrupts */
1488     writel(0, host->regs + SD_EMMC_IRQ_EN);
1489     + free_irq(host->irq, host);
1490    
1491     dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
1492     host->descs, host->descs_dma_addr);
1493     diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
1494     index 0db99057c44f..9d12c06c7fd6 100644
1495     --- a/drivers/mmc/host/sdhci-iproc.c
1496     +++ b/drivers/mmc/host/sdhci-iproc.c
1497     @@ -296,7 +296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
1498    
1499     iproc_host->data = iproc_data;
1500    
1501     - mmc_of_parse(host->mmc);
1502     + ret = mmc_of_parse(host->mmc);
1503     + if (ret)
1504     + goto err;
1505     +
1506     sdhci_get_property(pdev);
1507    
1508     host->mmc->caps |= iproc_host->data->mmc_caps;
1509     diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1510     index 3b3f88ffab53..c05e4d50d43d 100644
1511     --- a/drivers/net/can/dev.c
1512     +++ b/drivers/net/can/dev.c
1513     @@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
1514     struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
1515     {
1516     struct can_priv *priv = netdev_priv(dev);
1517     - struct sk_buff *skb = priv->echo_skb[idx];
1518     - struct canfd_frame *cf;
1519    
1520     if (idx >= priv->echo_skb_max) {
1521     netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
1522     @@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
1523     return NULL;
1524     }
1525    
1526     - if (!skb) {
1527     - netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
1528     - __func__, idx);
1529     - return NULL;
1530     - }
1531     + if (priv->echo_skb[idx]) {
1532     + /* Using "struct canfd_frame::len" for the frame
1533     + * length is supported on both CAN and CANFD frames.
1534     + */
1535     + struct sk_buff *skb = priv->echo_skb[idx];
1536     + struct canfd_frame *cf = (struct canfd_frame *)skb->data;
1537     + u8 len = cf->len;
1538    
1539     - /* Using "struct canfd_frame::len" for the frame
1540     - * length is supported on both CAN and CANFD frames.
1541     - */
1542     - cf = (struct canfd_frame *)skb->data;
1543     - *len_ptr = cf->len;
1544     - priv->echo_skb[idx] = NULL;
1545     + *len_ptr = len;
1546     + priv->echo_skb[idx] = NULL;
1547    
1548     - return skb;
1549     + return skb;
1550     + }
1551     +
1552     + return NULL;
1553     }
1554    
1555     /*
1556     diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
1557     index 75ce11395ee8..ae219b8a7754 100644
1558     --- a/drivers/net/can/flexcan.c
1559     +++ b/drivers/net/can/flexcan.c
1560     @@ -1004,7 +1004,7 @@ static int flexcan_chip_start(struct net_device *dev)
1561     }
1562     } else {
1563     /* clear and invalidate unused mailboxes first */
1564     - for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
1565     + for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) {
1566     priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
1567     &regs->mb[i].can_ctrl);
1568     }
1569     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
1570     index d272dc6984ac..b40d4377cc71 100644
1571     --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
1572     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
1573     @@ -431,8 +431,6 @@
1574     #define MAC_MDIOSCAR_PA_WIDTH 5
1575     #define MAC_MDIOSCAR_RA_INDEX 0
1576     #define MAC_MDIOSCAR_RA_WIDTH 16
1577     -#define MAC_MDIOSCAR_REG_INDEX 0
1578     -#define MAC_MDIOSCAR_REG_WIDTH 21
1579     #define MAC_MDIOSCCDR_BUSY_INDEX 22
1580     #define MAC_MDIOSCCDR_BUSY_WIDTH 1
1581     #define MAC_MDIOSCCDR_CMD_INDEX 16
1582     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1583     index 1e929a1e4ca7..4666084eda16 100644
1584     --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1585     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1586     @@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1587     }
1588     }
1589    
1590     +static unsigned int xgbe_create_mdio_sca(int port, int reg)
1591     +{
1592     + unsigned int mdio_sca, da;
1593     +
1594     + da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
1595     +
1596     + mdio_sca = 0;
1597     + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
1598     + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
1599     + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
1600     +
1601     + return mdio_sca;
1602     +}
1603     +
1604     static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1605     int reg, u16 val)
1606     {
1607     @@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1608    
1609     reinit_completion(&pdata->mdio_complete);
1610    
1611     - mdio_sca = 0;
1612     - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1613     - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1614     + mdio_sca = xgbe_create_mdio_sca(addr, reg);
1615     XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1616    
1617     mdio_sccd = 0;
1618     @@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1619    
1620     reinit_completion(&pdata->mdio_complete);
1621    
1622     - mdio_sca = 0;
1623     - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1624     - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1625     + mdio_sca = xgbe_create_mdio_sca(addr, reg);
1626     XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1627    
1628     mdio_sccd = 0;
1629     diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
1630     index 5890fdfd62c3..c7901a3f2a79 100644
1631     --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
1632     +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
1633     @@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
1634     u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
1635     u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
1636     u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
1637     + char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
1638     +
1639     + memcpy(ncqe, cqe, q->elem_size);
1640     + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
1641    
1642     if (sendq) {
1643     struct mlxsw_pci_queue *sdq;
1644    
1645     sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
1646     mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
1647     - wqe_counter, cqe);
1648     + wqe_counter, ncqe);
1649     q->u.cq.comp_sdq_count++;
1650     } else {
1651     struct mlxsw_pci_queue *rdq;
1652    
1653     rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
1654     mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
1655     - wqe_counter, q->u.cq.v, cqe);
1656     + wqe_counter, q->u.cq.v, ncqe);
1657     q->u.cq.comp_rdq_count++;
1658     }
1659     if (++items == credits)
1660     break;
1661     }
1662     - if (items) {
1663     - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
1664     + if (items)
1665     mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
1666     - }
1667     }
1668    
1669     static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
1670     diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1671     index bb99f6d41fe0..ffee38e36ce8 100644
1672     --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1673     +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1674     @@ -27,7 +27,7 @@
1675    
1676     #define MLXSW_PCI_SW_RESET 0xF0010
1677     #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
1678     -#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
1679     +#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
1680     #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
1681     #define MLXSW_PCI_FW_READY 0xA1844
1682     #define MLXSW_PCI_FW_READY_MASK 0xFFFF
1683     @@ -53,6 +53,7 @@
1684     #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
1685     #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
1686     #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
1687     +#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE
1688     #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
1689     #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
1690     #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
1691     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
1692     index a3db033d7399..b490589ef25c 100644
1693     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
1694     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
1695     @@ -882,8 +882,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
1696     static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
1697     .type = MLXSW_SP_FID_TYPE_DUMMY,
1698     .fid_size = sizeof(struct mlxsw_sp_fid),
1699     - .start_index = MLXSW_SP_RFID_BASE - 1,
1700     - .end_index = MLXSW_SP_RFID_BASE - 1,
1701     + .start_index = VLAN_N_VID - 1,
1702     + .end_index = VLAN_N_VID - 1,
1703     .ops = &mlxsw_sp_fid_dummy_ops,
1704     };
1705    
1706     diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
1707     index 9020b084b953..7ec4eb74fe21 100644
1708     --- a/drivers/net/ethernet/sun/cassini.c
1709     +++ b/drivers/net/ethernet/sun/cassini.c
1710     @@ -1,22 +1,9 @@
1711     -// SPDX-License-Identifier: GPL-2.0
1712     +// SPDX-License-Identifier: GPL-2.0+
1713     /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
1714     *
1715     * Copyright (C) 2004 Sun Microsystems Inc.
1716     * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
1717     *
1718     - * This program is free software; you can redistribute it and/or
1719     - * modify it under the terms of the GNU General Public License as
1720     - * published by the Free Software Foundation; either version 2 of the
1721     - * License, or (at your option) any later version.
1722     - *
1723     - * This program is distributed in the hope that it will be useful,
1724     - * but WITHOUT ANY WARRANTY; without even the implied warranty of
1725     - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1726     - * GNU General Public License for more details.
1727     - *
1728     - * You should have received a copy of the GNU General Public License
1729     - * along with this program; if not, see <http://www.gnu.org/licenses/>.
1730     - *
1731     * This driver uses the sungem driver (c) David Miller
1732     * (davem@redhat.com) as its basis.
1733     *
1734     diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
1735     index 13f3860496a8..ae5f05f03f88 100644
1736     --- a/drivers/net/ethernet/sun/cassini.h
1737     +++ b/drivers/net/ethernet/sun/cassini.h
1738     @@ -1,23 +1,10 @@
1739     -/* SPDX-License-Identifier: GPL-2.0 */
1740     +/* SPDX-License-Identifier: GPL-2.0+ */
1741     /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
1742     * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
1743     *
1744     * Copyright (C) 2004 Sun Microsystems Inc.
1745     * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
1746     *
1747     - * This program is free software; you can redistribute it and/or
1748     - * modify it under the terms of the GNU General Public License as
1749     - * published by the Free Software Foundation; either version 2 of the
1750     - * License, or (at your option) any later version.
1751     - *
1752     - * This program is distributed in the hope that it will be useful,
1753     - * but WITHOUT ANY WARRANTY; without even the implied warranty of
1754     - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1755     - * GNU General Public License for more details.
1756     - *
1757     - * You should have received a copy of the GNU General Public License
1758     - * along with this program; if not, see <http://www.gnu.org/licenses/>.
1759     - *
1760     * vendor id: 0x108E (Sun Microsystems, Inc.)
1761     * device id: 0xabba (Cassini)
1762     * revision ids: 0x01 = Cassini
1763     diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
1764     index cbec296107bd..f46da6262abe 100644
1765     --- a/drivers/net/phy/marvell.c
1766     +++ b/drivers/net/phy/marvell.c
1767     @@ -1042,6 +1042,39 @@ static int m88e1145_config_init(struct phy_device *phydev)
1768     return 0;
1769     }
1770    
1771     +/* The VOD can be out of specification on link up. Poke an
1772     + * undocumented register, in an undocumented page, with a magic value
1773     + * to fix this.
1774     + */
1775     +static int m88e6390_errata(struct phy_device *phydev)
1776     +{
1777     + int err;
1778     +
1779     + err = phy_write(phydev, MII_BMCR,
1780     + BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
1781     + if (err)
1782     + return err;
1783     +
1784     + usleep_range(300, 400);
1785     +
1786     + err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
1787     + if (err)
1788     + return err;
1789     +
1790     + return genphy_soft_reset(phydev);
1791     +}
1792     +
1793     +static int m88e6390_config_aneg(struct phy_device *phydev)
1794     +{
1795     + int err;
1796     +
1797     + err = m88e6390_errata(phydev);
1798     + if (err)
1799     + return err;
1800     +
1801     + return m88e1510_config_aneg(phydev);
1802     +}
1803     +
1804     /**
1805     * fiber_lpa_to_ethtool_lpa_t
1806     * @lpa: value of the MII_LPA register for fiber link
1807     @@ -1397,7 +1430,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
1808     * before enabling it if !phy_interrupt_is_valid()
1809     */
1810     if (!phy_interrupt_is_valid(phydev))
1811     - phy_read(phydev, MII_M1011_IEVENT);
1812     + __phy_read(phydev, MII_M1011_IEVENT);
1813    
1814     /* Enable the WOL interrupt */
1815     err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
1816     @@ -2292,7 +2325,7 @@ static struct phy_driver marvell_drivers[] = {
1817     .flags = PHY_HAS_INTERRUPT,
1818     .probe = m88e6390_probe,
1819     .config_init = &marvell_config_init,
1820     - .config_aneg = &m88e1510_config_aneg,
1821     + .config_aneg = &m88e6390_config_aneg,
1822     .read_status = &marvell_read_status,
1823     .ack_interrupt = &marvell_ack_interrupt,
1824     .config_intr = &marvell_config_intr,
1825     diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
1826     index 2e59a8419b17..66b9cfe692fc 100644
1827     --- a/drivers/net/phy/mdio_bus.c
1828     +++ b/drivers/net/phy/mdio_bus.c
1829     @@ -390,6 +390,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
1830     if (IS_ERR(gpiod)) {
1831     dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
1832     bus->id);
1833     + device_del(&bus->dev);
1834     return PTR_ERR(gpiod);
1835     } else if (gpiod) {
1836     bus->reset_gpiod = gpiod;
1837     diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1838     index fd051ae787cb..5dd661fb662f 100644
1839     --- a/drivers/net/phy/phy_device.c
1840     +++ b/drivers/net/phy/phy_device.c
1841     @@ -2196,6 +2196,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
1842     {
1843     int retval;
1844    
1845     + if (WARN_ON(!new_driver->features)) {
1846     + pr_err("%s: Driver features are missing\n", new_driver->name);
1847     + return -EINVAL;
1848     + }
1849     +
1850     new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
1851     new_driver->mdiodrv.driver.name = new_driver->name;
1852     new_driver->mdiodrv.driver.bus = &mdio_bus_type;
1853     diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
1854     index 62dc564b251d..f22639f0116a 100644
1855     --- a/drivers/net/ppp/pppoe.c
1856     +++ b/drivers/net/ppp/pppoe.c
1857     @@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
1858     if (pskb_trim_rcsum(skb, len))
1859     goto drop;
1860    
1861     + ph = pppoe_hdr(skb);
1862     pn = pppoe_pernet(dev_net(dev));
1863    
1864     /* Note that get_item does a sock_hold(), so sk_pppox(po)
1865     diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
1866     index 3bfa7f5e3513..2e5bcb3fdff7 100644
1867     --- a/drivers/net/wireless/mediatek/mt76/mt76.h
1868     +++ b/drivers/net/wireless/mediatek/mt76/mt76.h
1869     @@ -1,3 +1,4 @@
1870     +
1871     /*
1872     * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
1873     *
1874     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
1875     index 9273d2d2764a..732f4b87fdcb 100644
1876     --- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
1877     +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
1878     @@ -116,9 +116,6 @@ void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
1879     MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
1880     }
1881    
1882     - if (changed & BSS_CHANGED_ASSOC)
1883     - mt76x0_phy_recalibrate_after_assoc(dev);
1884     -
1885     mutex_unlock(&dev->mt76.mutex);
1886     }
1887     EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed);
1888     @@ -138,6 +135,12 @@ void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
1889     struct mt76x02_dev *dev = hw->priv;
1890    
1891     clear_bit(MT76_SCANNING, &dev->mt76.state);
1892     +
1893     + if (dev->cal.gain_init_done) {
1894     + /* Restore AGC gain and resume calibration after scanning. */
1895     + dev->cal.low_gain = -1;
1896     + ieee80211_queue_delayed_work(hw, &dev->cal_work, 0);
1897     + }
1898     }
1899     EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);
1900    
1901     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
1902     index 2187bafaf2e9..0057f69d0c36 100644
1903     --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
1904     +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
1905     @@ -41,6 +41,11 @@ static inline bool is_mt7610e(struct mt76x02_dev *dev)
1906    
1907     void mt76x0_init_debugfs(struct mt76x02_dev *dev);
1908    
1909     +static inline bool is_mt7630(struct mt76x02_dev *dev)
1910     +{
1911     + return mt76_chip(&dev->mt76) == 0x7630;
1912     +}
1913     +
1914     /* Init */
1915     struct mt76x02_dev *
1916     mt76x0_alloc_device(struct device *pdev,
1917     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
1918     index cf024950e0ed..c34abd1c6030 100644
1919     --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
1920     +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
1921     @@ -215,62 +215,6 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
1922     return 0;
1923     }
1924    
1925     -static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
1926     -{
1927     - u8 val;
1928     -
1929     - val = rf_rr(dev, MT_RF(0, 4));
1930     - if ((val & 0x70) != 0x30)
1931     - return;
1932     -
1933     - /*
1934     - * Calibration Mode - Open loop, closed loop, and amplitude:
1935     - * B0.R06.[0]: 1
1936     - * B0.R06.[3:1] bp_close_code: 100
1937     - * B0.R05.[7:0] bp_open_code: 0x0
1938     - * B0.R04.[2:0] cal_bits: 000
1939     - * B0.R03.[2:0] startup_time: 011
1940     - * B0.R03.[6:4] settle_time:
1941     - * 80MHz channel: 110
1942     - * 40MHz channel: 101
1943     - * 20MHz channel: 100
1944     - */
1945     - val = rf_rr(dev, MT_RF(0, 6));
1946     - val &= ~0xf;
1947     - val |= 0x09;
1948     - rf_wr(dev, MT_RF(0, 6), val);
1949     -
1950     - val = rf_rr(dev, MT_RF(0, 5));
1951     - if (val != 0)
1952     - rf_wr(dev, MT_RF(0, 5), 0x0);
1953     -
1954     - val = rf_rr(dev, MT_RF(0, 4));
1955     - val &= ~0x07;
1956     - rf_wr(dev, MT_RF(0, 4), val);
1957     -
1958     - val = rf_rr(dev, MT_RF(0, 3));
1959     - val &= ~0x77;
1960     - if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) {
1961     - val |= 0x63;
1962     - } else if (channel == 3 || channel == 4 || channel == 10) {
1963     - val |= 0x53;
1964     - } else if (channel == 2 || channel == 5 || channel == 6 ||
1965     - channel == 8 || channel == 11 || channel == 12) {
1966     - val |= 0x43;
1967     - } else {
1968     - WARN(1, "Unknown channel %u\n", channel);
1969     - return;
1970     - }
1971     - rf_wr(dev, MT_RF(0, 3), val);
1972     -
1973     - /* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */
1974     - val = rf_rr(dev, MT_RF(0, 4));
1975     - val = ((val & ~(0x80)) | 0x80);
1976     - rf_wr(dev, MT_RF(0, 4), val);
1977     -
1978     - msleep(2);
1979     -}
1980     -
1981     static void
1982     mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
1983     {
1984     @@ -518,21 +462,47 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
1985    
1986     static void mt76x0_ant_select(struct mt76x02_dev *dev)
1987     {
1988     - struct ieee80211_channel *chan = dev->mt76.chandef.chan;
1989     -
1990     - /* single antenna mode */
1991     - if (chan->band == NL80211_BAND_2GHZ) {
1992     - mt76_rmw(dev, MT_COEXCFG3,
1993     - BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
1994     - mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
1995     + u16 ee_ant = mt76x02_eeprom_get(dev, MT_EE_ANTENNA);
1996     + u16 nic_conf2 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
1997     + u32 wlan, coex3, cmb;
1998     + bool ant_div;
1999     +
2000     + wlan = mt76_rr(dev, MT_WLAN_FUN_CTRL);
2001     + cmb = mt76_rr(dev, MT_CMB_CTRL);
2002     + coex3 = mt76_rr(dev, MT_COEXCFG3);
2003     +
2004     + cmb &= ~(BIT(14) | BIT(12));
2005     + wlan &= ~(BIT(6) | BIT(5));
2006     + coex3 &= ~GENMASK(5, 2);
2007     +
2008     + if (ee_ant & MT_EE_ANTENNA_DUAL) {
2009     + /* dual antenna mode */
2010     + ant_div = !(nic_conf2 & MT_EE_NIC_CONF_2_ANT_OPT) &&
2011     + (nic_conf2 & MT_EE_NIC_CONF_2_ANT_DIV);
2012     + if (ant_div)
2013     + cmb |= BIT(12);
2014     + else
2015     + coex3 |= BIT(4);
2016     + coex3 |= BIT(3);
2017     + if (dev->mt76.cap.has_2ghz)
2018     + wlan |= BIT(6);
2019     } else {
2020     - mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(2),
2021     - BIT(4) | BIT(3));
2022     - mt76_clear(dev, MT_WLAN_FUN_CTRL,
2023     - BIT(6) | BIT(5));
2024     + /* sigle antenna mode */
2025     + if (dev->mt76.cap.has_5ghz) {
2026     + coex3 |= BIT(3) | BIT(4);
2027     + } else {
2028     + wlan |= BIT(6);
2029     + coex3 |= BIT(1);
2030     + }
2031     }
2032     - mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
2033     +
2034     + if (is_mt7630(dev))
2035     + cmb |= BIT(14) | BIT(11);
2036     +
2037     + mt76_wr(dev, MT_WLAN_FUN_CTRL, wlan);
2038     + mt76_wr(dev, MT_CMB_CTRL, cmb);
2039     mt76_clear(dev, MT_COEXCFG0, BIT(2));
2040     + mt76_wr(dev, MT_COEXCFG3, coex3);
2041     }
2042    
2043     static void
2044     @@ -585,8 +555,12 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
2045     void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
2046     {
2047     struct ieee80211_channel *chan = dev->mt76.chandef.chan;
2048     + int is_5ghz = (chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
2049     u32 val, tx_alc, reg_val;
2050    
2051     + if (is_mt7630(dev))
2052     + return;
2053     +
2054     if (power_on) {
2055     mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
2056     mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value,
2057     @@ -602,7 +576,7 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
2058     reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
2059     mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
2060    
2061     - if (chan->band == NL80211_BAND_5GHZ) {
2062     + if (is_5ghz) {
2063     if (chan->hw_value < 100)
2064     val = 0x701;
2065     else if (chan->hw_value < 140)
2066     @@ -615,7 +589,7 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
2067    
2068     mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false);
2069     msleep(350);
2070     - mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false);
2071     + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
2072     usleep_range(15000, 20000);
2073    
2074     mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
2075     @@ -696,7 +670,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
2076     mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
2077     mt76x02_phy_set_band(dev, chandef->chan->band,
2078     ch_group_index & 1);
2079     - mt76x0_ant_select(dev);
2080    
2081     mt76_rmw(dev, MT_EXT_CCA_CFG,
2082     (MT_EXT_CCA_CFG_CCA0 |
2083     @@ -719,20 +692,16 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
2084    
2085     mt76x0_read_rx_gain(dev);
2086     mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
2087     - mt76x02_init_agc_gain(dev);
2088    
2089     - if (mt76_is_usb(dev)) {
2090     - mt76x0_vco_cal(dev, channel);
2091     - } else {
2092     - /* enable vco */
2093     - rf_set(dev, MT_RF(0, 4), BIT(7));
2094     - }
2095     + /* enable vco */
2096     + rf_set(dev, MT_RF(0, 4), BIT(7));
2097    
2098     if (scan)
2099     return 0;
2100    
2101     - if (mt76_is_mmio(dev))
2102     - mt76x0_phy_calibrate(dev, false);
2103     + mt76x0_phy_calibrate(dev, false);
2104     + mt76x02_init_agc_gain(dev);
2105     +
2106     mt76x0_phy_set_txpower(dev);
2107    
2108     ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
2109     @@ -741,39 +710,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
2110     return 0;
2111     }
2112    
2113     -void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
2114     -{
2115     - u32 tx_alc, reg_val;
2116     - u8 channel = dev->mt76.chandef.chan->hw_value;
2117     - int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
2118     -
2119     - mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
2120     -
2121     - mt76x0_vco_cal(dev, channel);
2122     -
2123     - tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
2124     - mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
2125     - usleep_range(500, 700);
2126     -
2127     - reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
2128     - mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
2129     -
2130     - mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false);
2131     -
2132     - mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
2133     - mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false);
2134     - mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
2135     - mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false);
2136     - mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false);
2137     - mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false);
2138     -
2139     - mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
2140     - mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
2141     - msleep(100);
2142     -
2143     - mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
2144     -}
2145     -
2146     static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
2147     {
2148     u8 rf_b7_73, rf_b0_66, rf_b0_67;
2149     @@ -817,10 +753,8 @@ done:
2150     static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
2151     {
2152     u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
2153     - u32 val = 0x122c << 16 | 0xf2;
2154    
2155     - mt76_wr(dev, MT_BBP(AGC, 8),
2156     - val | FIELD_PREP(MT_BBP_AGC_GAIN, gain));
2157     + mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, gain);
2158     }
2159    
2160     static void
2161     @@ -835,7 +769,8 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
2162     low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
2163     (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
2164    
2165     - gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
2166     + gain_change = dev->cal.low_gain < 0 ||
2167     + (dev->cal.low_gain & 2) ^ (low_gain & 2);
2168     dev->cal.low_gain = low_gain;
2169    
2170     if (!gain_change) {
2171     @@ -924,6 +859,7 @@ void mt76x0_phy_init(struct mt76x02_dev *dev)
2172     {
2173     INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work);
2174    
2175     + mt76x0_ant_select(dev);
2176     mt76x0_rf_init(dev);
2177     mt76x02_phy_set_rxpath(dev);
2178     mt76x02_phy_set_txdac(dev);
2179     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2180     index a7fd36c2f633..ea517864186b 100644
2181     --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2182     +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2183     @@ -117,6 +117,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
2184     if (ret)
2185     goto out;
2186    
2187     + mt76x0_phy_calibrate(dev, true);
2188     ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
2189     MT_CALIBRATE_INTERVAL);
2190     ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
2191     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
2192     index 7806963b1905..9a5ae5c06840 100644
2193     --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
2194     +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
2195     @@ -63,6 +63,7 @@ struct mt76x02_calibration {
2196     bool tssi_comp_pending;
2197     bool dpd_cal_done;
2198     bool channel_cal_done;
2199     + bool gain_init_done;
2200     };
2201    
2202     struct mt76x02_dev {
2203     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
2204     index b3ec74835d10..1de041590050 100644
2205     --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
2206     +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
2207     @@ -25,6 +25,7 @@ enum mt76x02_eeprom_field {
2208     MT_EE_VERSION = 0x002,
2209     MT_EE_MAC_ADDR = 0x004,
2210     MT_EE_PCI_ID = 0x00A,
2211     + MT_EE_ANTENNA = 0x022,
2212     MT_EE_NIC_CONF_0 = 0x034,
2213     MT_EE_NIC_CONF_1 = 0x036,
2214     MT_EE_COUNTRY_REGION_5GHZ = 0x038,
2215     @@ -104,6 +105,8 @@ enum mt76x02_eeprom_field {
2216     __MT_EE_MAX
2217     };
2218    
2219     +#define MT_EE_ANTENNA_DUAL BIT(15)
2220     +
2221     #define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
2222     #define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
2223     #define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
2224     @@ -118,12 +121,9 @@ enum mt76x02_eeprom_field {
2225     #define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
2226     #define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
2227    
2228     -#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
2229     -#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
2230     -#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
2231     +#define MT_EE_NIC_CONF_2_ANT_OPT BIT(3)
2232     +#define MT_EE_NIC_CONF_2_ANT_DIV BIT(4)
2233     #define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
2234     -#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
2235     -#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
2236    
2237     #define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
2238     MT_EE_USAGE_MAP_START + 1)
2239     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
2240     index 0f1d7b5c9f68..977a8e7e26df 100644
2241     --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
2242     +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
2243     @@ -254,5 +254,6 @@ void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
2244     memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
2245     sizeof(dev->cal.agc_gain_cur));
2246     dev->cal.low_gain = -1;
2247     + dev->cal.gain_init_done = true;
2248     }
2249     EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
2250     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
2251     index 1971a1b00038..9471b44ce558 100644
2252     --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
2253     +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
2254     @@ -156,6 +156,9 @@ mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
2255     struct mt76x02_dev *dev = hw->priv;
2256    
2257     clear_bit(MT76_SCANNING, &dev->mt76.state);
2258     +
2259     + if (dev->cal.gain_init_done)
2260     + ieee80211_queue_delayed_work(hw, &dev->cal_work, 0);
2261     }
2262    
2263     const struct ieee80211_ops mt76x2u_ops = {
2264     diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
2265     index 583086dd9cb9..bfc5ef6d85b7 100644
2266     --- a/drivers/nvme/target/rdma.c
2267     +++ b/drivers/nvme/target/rdma.c
2268     @@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
2269     static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
2270     static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
2271     static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
2272     +static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
2273     + struct nvmet_rdma_rsp *r);
2274     +static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
2275     + struct nvmet_rdma_rsp *r);
2276    
2277     static const struct nvmet_fabrics_ops nvmet_rdma_ops;
2278    
2279     @@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
2280     spin_unlock_irqrestore(&queue->rsps_lock, flags);
2281    
2282     if (unlikely(!rsp)) {
2283     - rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
2284     + int ret;
2285     +
2286     + rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
2287     if (unlikely(!rsp))
2288     return NULL;
2289     + ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
2290     + if (unlikely(ret)) {
2291     + kfree(rsp);
2292     + return NULL;
2293     + }
2294     +
2295     rsp->allocated = true;
2296     }
2297    
2298     @@ -196,7 +208,8 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
2299     {
2300     unsigned long flags;
2301    
2302     - if (rsp->allocated) {
2303     + if (unlikely(rsp->allocated)) {
2304     + nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
2305     kfree(rsp);
2306     return;
2307     }
2308     diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
2309     index 194ffd5c8580..039b2074db7e 100644
2310     --- a/drivers/s390/char/sclp_config.c
2311     +++ b/drivers/s390/char/sclp_config.c
2312     @@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
2313    
2314     static void __ref sclp_cpu_change_notify(struct work_struct *work)
2315     {
2316     + lock_device_hotplug();
2317     smp_rescan_cpus();
2318     + unlock_device_hotplug();
2319     }
2320    
2321     static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
2322     diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2323     index f1c57cd33b5b..1cb35ab8a4ec 100644
2324     --- a/drivers/scsi/ufs/ufshcd.c
2325     +++ b/drivers/scsi/ufs/ufshcd.c
2326     @@ -110,13 +110,19 @@
2327     int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
2328     const char *prefix)
2329     {
2330     - u8 *regs;
2331     + u32 *regs;
2332     + size_t pos;
2333     +
2334     + if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
2335     + return -EINVAL;
2336    
2337     regs = kzalloc(len, GFP_KERNEL);
2338     if (!regs)
2339     return -ENOMEM;
2340    
2341     - memcpy_fromio(regs, hba->mmio_base + offset, len);
2342     + for (pos = 0; pos < len; pos += 4)
2343     + regs[pos / 4] = ufshcd_readl(hba, offset + pos);
2344     +
2345     ufshcd_hex_dump(prefix, regs, len);
2346     kfree(regs);
2347    
2348     diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
2349     index 28cbd6b3d26c..dfee6985efa6 100644
2350     --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
2351     +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
2352     @@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
2353     {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
2354     {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
2355     {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
2356     + {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
2357     {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
2358     {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
2359     {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
2360     diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
2361     index dabb391909aa..bb63519db7ae 100644
2362     --- a/drivers/tty/n_hdlc.c
2363     +++ b/drivers/tty/n_hdlc.c
2364     @@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
2365     /* too large for caller's buffer */
2366     ret = -EOVERFLOW;
2367     } else {
2368     + __set_current_state(TASK_RUNNING);
2369     if (copy_to_user(buf, rbuf->buf, rbuf->count))
2370     ret = -EFAULT;
2371     else
2372     diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
2373     index d4cca5bdaf1c..5c01bb6d1c24 100644
2374     --- a/drivers/tty/serial/serial_core.c
2375     +++ b/drivers/tty/serial/serial_core.c
2376     @@ -550,10 +550,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
2377     int ret = 0;
2378    
2379     circ = &state->xmit;
2380     - if (!circ->buf)
2381     + port = uart_port_lock(state, flags);
2382     + if (!circ->buf) {
2383     + uart_port_unlock(port, flags);
2384     return 0;
2385     + }
2386    
2387     - port = uart_port_lock(state, flags);
2388     if (port && uart_circ_chars_free(circ) != 0) {
2389     circ->buf[circ->head] = c;
2390     circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
2391     @@ -586,11 +588,13 @@ static int uart_write(struct tty_struct *tty,
2392     return -EL3HLT;
2393     }
2394    
2395     + port = uart_port_lock(state, flags);
2396     circ = &state->xmit;
2397     - if (!circ->buf)
2398     + if (!circ->buf) {
2399     + uart_port_unlock(port, flags);
2400     return 0;
2401     + }
2402    
2403     - port = uart_port_lock(state, flags);
2404     while (port) {
2405     c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
2406     if (count < c)
2407     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2408     index 23c6fd238422..21ffcce16927 100644
2409     --- a/drivers/tty/tty_io.c
2410     +++ b/drivers/tty/tty_io.c
2411     @@ -2189,7 +2189,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
2412     ld = tty_ldisc_ref_wait(tty);
2413     if (!ld)
2414     return -EIO;
2415     - ld->ops->receive_buf(tty, &ch, &mbz, 1);
2416     + if (ld->ops->receive_buf)
2417     + ld->ops->receive_buf(tty, &ch, &mbz, 1);
2418     tty_ldisc_deref(ld);
2419     return 0;
2420     }
2421     diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2422     index 41ec8e5010f3..bba75560d11e 100644
2423     --- a/drivers/tty/vt/vt.c
2424     +++ b/drivers/tty/vt/vt.c
2425     @@ -1272,6 +1272,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
2426     if (con_is_visible(vc))
2427     update_screen(vc);
2428     vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
2429     + notify_update(vc);
2430     return err;
2431     }
2432    
2433     @@ -2764,8 +2765,8 @@ rescan_last_byte:
2434     con_flush(vc, draw_from, draw_to, &draw_x);
2435     vc_uniscr_debug_check(vc);
2436     console_conditional_schedule();
2437     - console_unlock();
2438     notify_update(vc);
2439     + console_unlock();
2440     return n;
2441     }
2442    
2443     @@ -2884,8 +2885,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2444     unsigned char c;
2445     static DEFINE_SPINLOCK(printing_lock);
2446     const ushort *start;
2447     - ushort cnt = 0;
2448     - ushort myx;
2449     + ushort start_x, cnt;
2450     int kmsg_console;
2451    
2452     /* console busy or not yet initialized */
2453     @@ -2898,10 +2898,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2454     if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
2455     vc = vc_cons[kmsg_console - 1].d;
2456    
2457     - /* read `x' only after setting currcons properly (otherwise
2458     - the `x' macro will read the x of the foreground console). */
2459     - myx = vc->vc_x;
2460     -
2461     if (!vc_cons_allocated(fg_console)) {
2462     /* impossible */
2463     /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
2464     @@ -2916,53 +2912,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2465     hide_cursor(vc);
2466    
2467     start = (ushort *)vc->vc_pos;
2468     -
2469     - /* Contrived structure to try to emulate original need_wrap behaviour
2470     - * Problems caused when we have need_wrap set on '\n' character */
2471     + start_x = vc->vc_x;
2472     + cnt = 0;
2473     while (count--) {
2474     c = *b++;
2475     if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
2476     - if (cnt > 0) {
2477     - if (con_is_visible(vc))
2478     - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
2479     - vc->vc_x += cnt;
2480     - if (vc->vc_need_wrap)
2481     - vc->vc_x--;
2482     - cnt = 0;
2483     - }
2484     + if (cnt && con_is_visible(vc))
2485     + vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
2486     + cnt = 0;
2487     if (c == 8) { /* backspace */
2488     bs(vc);
2489     start = (ushort *)vc->vc_pos;
2490     - myx = vc->vc_x;
2491     + start_x = vc->vc_x;
2492     continue;
2493     }
2494     if (c != 13)
2495     lf(vc);
2496     cr(vc);
2497     start = (ushort *)vc->vc_pos;
2498     - myx = vc->vc_x;
2499     + start_x = vc->vc_x;
2500     if (c == 10 || c == 13)
2501     continue;
2502     }
2503     + vc_uniscr_putc(vc, c);
2504     scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
2505     notify_write(vc, c);
2506     cnt++;
2507     - if (myx == vc->vc_cols - 1) {
2508     - vc->vc_need_wrap = 1;
2509     - continue;
2510     - }
2511     - vc->vc_pos += 2;
2512     - myx++;
2513     - }
2514     - if (cnt > 0) {
2515     - if (con_is_visible(vc))
2516     - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
2517     - vc->vc_x += cnt;
2518     - if (vc->vc_x == vc->vc_cols) {
2519     - vc->vc_x--;
2520     + if (vc->vc_x == vc->vc_cols - 1) {
2521     vc->vc_need_wrap = 1;
2522     + } else {
2523     + vc->vc_pos += 2;
2524     + vc->vc_x++;
2525     }
2526     }
2527     + if (cnt && con_is_visible(vc))
2528     + vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
2529     set_cursor(vc);
2530     notify_update(vc);
2531    
2532     diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
2533     index dc7f7fd71684..c12ac56606c3 100644
2534     --- a/drivers/usb/core/ledtrig-usbport.c
2535     +++ b/drivers/usb/core/ledtrig-usbport.c
2536     @@ -119,11 +119,6 @@ static const struct attribute_group ports_group = {
2537     .attrs = ports_attrs,
2538     };
2539    
2540     -static const struct attribute_group *ports_groups[] = {
2541     - &ports_group,
2542     - NULL
2543     -};
2544     -
2545     /***************************************
2546     * Adding & removing ports
2547     ***************************************/
2548     @@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
2549     static int usbport_trig_activate(struct led_classdev *led_cdev)
2550     {
2551     struct usbport_trig_data *usbport_data;
2552     + int err;
2553    
2554     usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
2555     if (!usbport_data)
2556     @@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
2557    
2558     /* List of ports */
2559     INIT_LIST_HEAD(&usbport_data->ports);
2560     + err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
2561     + if (err)
2562     + goto err_free;
2563     usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
2564     usbport_trig_update_count(usbport_data);
2565    
2566     @@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
2567     usbport_data->nb.notifier_call = usbport_trig_notify;
2568     led_set_trigger_data(led_cdev, usbport_data);
2569     usb_register_notify(&usbport_data->nb);
2570     -
2571     return 0;
2572     +
2573     +err_free:
2574     + kfree(usbport_data);
2575     + return err;
2576     }
2577    
2578     static void usbport_trig_deactivate(struct led_classdev *led_cdev)
2579     @@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev)
2580     usbport_trig_remove_port(usbport_data, port);
2581     }
2582    
2583     + sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
2584     +
2585     usb_unregister_notify(&usbport_data->nb);
2586    
2587     kfree(usbport_data);
2588     @@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = {
2589     .name = "usbport",
2590     .activate = usbport_trig_activate,
2591     .deactivate = usbport_trig_deactivate,
2592     - .groups = ports_groups,
2593     };
2594    
2595     static int __init usbport_trig_init(void)
2596     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2597     index 9f92ee03dde7..2a4ea9a1b1e3 100644
2598     --- a/drivers/usb/dwc3/gadget.c
2599     +++ b/drivers/usb/dwc3/gadget.c
2600     @@ -177,6 +177,8 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
2601     req->started = false;
2602     list_del(&req->list);
2603     req->remaining = 0;
2604     + req->unaligned = false;
2605     + req->zero = false;
2606    
2607     if (req->request.status == -EINPROGRESS)
2608     req->request.status = status;
2609     diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
2610     index f26109eafdbf..66ec1fdf9fe7 100644
2611     --- a/drivers/usb/host/ehci-mv.c
2612     +++ b/drivers/usb/host/ehci-mv.c
2613     @@ -302,3 +302,4 @@ MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2614     MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>");
2615     MODULE_ALIAS("mv-ehci");
2616     MODULE_LICENSE("GPL");
2617     +MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids);
2618     diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2619     index 609198d9594c..f459c1a18156 100644
2620     --- a/drivers/usb/serial/ftdi_sio.c
2621     +++ b/drivers/usb/serial/ftdi_sio.c
2622     @@ -1783,6 +1783,10 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
2623     int result;
2624     u16 val;
2625    
2626     + result = usb_autopm_get_interface(serial->interface);
2627     + if (result)
2628     + return result;
2629     +
2630     val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
2631     result = usb_control_msg(serial->dev,
2632     usb_sndctrlpipe(serial->dev, 0),
2633     @@ -1795,6 +1799,8 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
2634     val, result);
2635     }
2636    
2637     + usb_autopm_put_interface(serial->interface);
2638     +
2639     return result;
2640     }
2641    
2642     @@ -1846,9 +1852,15 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
2643     unsigned char *buf;
2644     int result;
2645    
2646     + result = usb_autopm_get_interface(serial->interface);
2647     + if (result)
2648     + return result;
2649     +
2650     buf = kmalloc(1, GFP_KERNEL);
2651     - if (!buf)
2652     + if (!buf) {
2653     + usb_autopm_put_interface(serial->interface);
2654     return -ENOMEM;
2655     + }
2656    
2657     result = usb_control_msg(serial->dev,
2658     usb_rcvctrlpipe(serial->dev, 0),
2659     @@ -1863,6 +1875,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
2660     }
2661    
2662     kfree(buf);
2663     + usb_autopm_put_interface(serial->interface);
2664    
2665     return result;
2666     }
2667     diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
2668     index 98e7a5df0f6d..bb3f9aa4a909 100644
2669     --- a/drivers/usb/serial/pl2303.c
2670     +++ b/drivers/usb/serial/pl2303.c
2671     @@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = {
2672     { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
2673     { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
2674     { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
2675     + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
2676     { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
2677     { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
2678     { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
2679     diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
2680     index 4e2554d55362..559941ca884d 100644
2681     --- a/drivers/usb/serial/pl2303.h
2682     +++ b/drivers/usb/serial/pl2303.h
2683     @@ -8,6 +8,7 @@
2684    
2685     #define PL2303_VENDOR_ID 0x067b
2686     #define PL2303_PRODUCT_ID 0x2303
2687     +#define PL2303_PRODUCT_ID_TB 0x2304
2688     #define PL2303_PRODUCT_ID_RSAQ2 0x04bb
2689     #define PL2303_PRODUCT_ID_DCU11 0x1234
2690     #define PL2303_PRODUCT_ID_PHAROS 0xaaa0
2691     @@ -20,6 +21,7 @@
2692     #define PL2303_PRODUCT_ID_MOTOROLA 0x0307
2693     #define PL2303_PRODUCT_ID_ZTEK 0xe1f1
2694    
2695     +
2696     #define ATEN_VENDOR_ID 0x0557
2697     #define ATEN_VENDOR_ID2 0x0547
2698     #define ATEN_PRODUCT_ID 0x2008
2699     diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
2700     index 4d0273508043..edbbb13d6de6 100644
2701     --- a/drivers/usb/serial/usb-serial-simple.c
2702     +++ b/drivers/usb/serial/usb-serial-simple.c
2703     @@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS);
2704     /* Motorola Tetra driver */
2705     #define MOTOROLA_TETRA_IDS() \
2706     { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
2707     - { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
2708     + { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
2709     + { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
2710     DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
2711    
2712     /* Novatel Wireless GPS driver */
2713     diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
2714     index ad7a6f475a44..784df2b49628 100644
2715     --- a/drivers/vhost/net.c
2716     +++ b/drivers/vhost/net.c
2717     @@ -1192,7 +1192,8 @@ static void handle_rx(struct vhost_net *net)
2718     if (nvq->done_idx > VHOST_NET_BATCH)
2719     vhost_net_signal_used(nvq);
2720     if (unlikely(vq_log))
2721     - vhost_log_write(vq, vq_log, log, vhost_len);
2722     + vhost_log_write(vq, vq_log, log, vhost_len,
2723     + vq->iov, in);
2724     total_len += vhost_len;
2725     if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
2726     vhost_poll_queue(&vq->poll);
2727     diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
2728     index 55e5aa662ad5..c66fc8308b5e 100644
2729     --- a/drivers/vhost/vhost.c
2730     +++ b/drivers/vhost/vhost.c
2731     @@ -1733,13 +1733,87 @@ static int log_write(void __user *log_base,
2732     return r;
2733     }
2734    
2735     +static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
2736     +{
2737     + struct vhost_umem *umem = vq->umem;
2738     + struct vhost_umem_node *u;
2739     + u64 start, end, l, min;
2740     + int r;
2741     + bool hit = false;
2742     +
2743     + while (len) {
2744     + min = len;
2745     + /* More than one GPAs can be mapped into a single HVA. So
2746     + * iterate all possible umems here to be safe.
2747     + */
2748     + list_for_each_entry(u, &umem->umem_list, link) {
2749     + if (u->userspace_addr > hva - 1 + len ||
2750     + u->userspace_addr - 1 + u->size < hva)
2751     + continue;
2752     + start = max(u->userspace_addr, hva);
2753     + end = min(u->userspace_addr - 1 + u->size,
2754     + hva - 1 + len);
2755     + l = end - start + 1;
2756     + r = log_write(vq->log_base,
2757     + u->start + start - u->userspace_addr,
2758     + l);
2759     + if (r < 0)
2760     + return r;
2761     + hit = true;
2762     + min = min(l, min);
2763     + }
2764     +
2765     + if (!hit)
2766     + return -EFAULT;
2767     +
2768     + len -= min;
2769     + hva += min;
2770     + }
2771     +
2772     + return 0;
2773     +}
2774     +
2775     +static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
2776     +{
2777     + struct iovec iov[64];
2778     + int i, ret;
2779     +
2780     + if (!vq->iotlb)
2781     + return log_write(vq->log_base, vq->log_addr + used_offset, len);
2782     +
2783     + ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
2784     + len, iov, 64, VHOST_ACCESS_WO);
2785     + if (ret)
2786     + return ret;
2787     +
2788     + for (i = 0; i < ret; i++) {
2789     + ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
2790     + iov[i].iov_len);
2791     + if (ret)
2792     + return ret;
2793     + }
2794     +
2795     + return 0;
2796     +}
2797     +
2798     int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
2799     - unsigned int log_num, u64 len)
2800     + unsigned int log_num, u64 len, struct iovec *iov, int count)
2801     {
2802     int i, r;
2803    
2804     /* Make sure data written is seen before log. */
2805     smp_wmb();
2806     +
2807     + if (vq->iotlb) {
2808     + for (i = 0; i < count; i++) {
2809     + r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
2810     + iov[i].iov_len);
2811     + if (r < 0)
2812     + return r;
2813     + }
2814     + return 0;
2815     + }
2816     +
2817     for (i = 0; i < log_num; ++i) {
2818     u64 l = min(log[i].len, len);
2819     r = log_write(vq->log_base, log[i].addr, l);
2820     @@ -1769,9 +1843,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
2821     smp_wmb();
2822     /* Log used flag write. */
2823     used = &vq->used->flags;
2824     - log_write(vq->log_base, vq->log_addr +
2825     - (used - (void __user *)vq->used),
2826     - sizeof vq->used->flags);
2827     + log_used(vq, (used - (void __user *)vq->used),
2828     + sizeof vq->used->flags);
2829     if (vq->log_ctx)
2830     eventfd_signal(vq->log_ctx, 1);
2831     }
2832     @@ -1789,9 +1862,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
2833     smp_wmb();
2834     /* Log avail event write */
2835     used = vhost_avail_event(vq);
2836     - log_write(vq->log_base, vq->log_addr +
2837     - (used - (void __user *)vq->used),
2838     - sizeof *vhost_avail_event(vq));
2839     + log_used(vq, (used - (void __user *)vq->used),
2840     + sizeof *vhost_avail_event(vq));
2841     if (vq->log_ctx)
2842     eventfd_signal(vq->log_ctx, 1);
2843     }
2844     @@ -2191,10 +2263,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2845     /* Make sure data is seen before log. */
2846     smp_wmb();
2847     /* Log used ring entry write. */
2848     - log_write(vq->log_base,
2849     - vq->log_addr +
2850     - ((void __user *)used - (void __user *)vq->used),
2851     - count * sizeof *used);
2852     + log_used(vq, ((void __user *)used - (void __user *)vq->used),
2853     + count * sizeof *used);
2854     }
2855     old = vq->last_used_idx;
2856     new = (vq->last_used_idx += count);
2857     @@ -2236,9 +2306,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2858     /* Make sure used idx is seen before log. */
2859     smp_wmb();
2860     /* Log used index update. */
2861     - log_write(vq->log_base,
2862     - vq->log_addr + offsetof(struct vring_used, idx),
2863     - sizeof vq->used->idx);
2864     + log_used(vq, offsetof(struct vring_used, idx),
2865     + sizeof vq->used->idx);
2866     if (vq->log_ctx)
2867     eventfd_signal(vq->log_ctx, 1);
2868     }
2869     diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
2870     index 466ef7542291..1b675dad5e05 100644
2871     --- a/drivers/vhost/vhost.h
2872     +++ b/drivers/vhost/vhost.h
2873     @@ -205,7 +205,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
2874     bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
2875    
2876     int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
2877     - unsigned int log_num, u64 len);
2878     + unsigned int log_num, u64 len,
2879     + struct iovec *iov, int count);
2880     int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
2881    
2882     struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
2883     diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
2884     index 09731b2f6815..c6b3bdbbdbc9 100644
2885     --- a/drivers/video/console/vgacon.c
2886     +++ b/drivers/video/console/vgacon.c
2887     @@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
2888    
2889     static void vgacon_restore_screen(struct vc_data *c)
2890     {
2891     + c->vc_origin = c->vc_visible_origin;
2892     vgacon_scrollback_cur->save = 0;
2893    
2894     if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
2895     @@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
2896     int start, end, count, soff;
2897    
2898     if (!lines) {
2899     - c->vc_visible_origin = c->vc_origin;
2900     - vga_set_mem_top(c);
2901     + vgacon_restore_screen(c);
2902     return;
2903     }
2904    
2905     @@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
2906     if (!vgacon_scrollback_cur->save) {
2907     vgacon_cursor(c, CM_ERASE);
2908     vgacon_save_screen(c);
2909     + c->vc_origin = (unsigned long)c->vc_screenbuf;
2910     vgacon_scrollback_cur->save = 1;
2911     }
2912    
2913     @@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
2914     int copysize;
2915    
2916     int diff = c->vc_rows - count;
2917     - void *d = (void *) c->vc_origin;
2918     + void *d = (void *) c->vc_visible_origin;
2919     void *s = (void *) c->vc_screenbuf;
2920    
2921     count *= c->vc_size_row;
2922     diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
2923     index a58666a3f8dd..08aaf580fa1c 100644
2924     --- a/fs/ceph/caps.c
2925     +++ b/fs/ceph/caps.c
2926     @@ -1032,6 +1032,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci)
2927     list_del_init(&ci->i_snap_realm_item);
2928     ci->i_snap_realm_counter++;
2929     ci->i_snap_realm = NULL;
2930     + if (realm->ino == ci->i_vino.ino)
2931     + realm->inode = NULL;
2932     spin_unlock(&realm->inodes_with_caps_lock);
2933     ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
2934     realm);
2935     diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2936     index f82fd342bca5..fce610f6cd24 100644
2937     --- a/fs/cifs/cifssmb.c
2938     +++ b/fs/cifs/cifssmb.c
2939     @@ -1458,18 +1458,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
2940     }
2941    
2942     static int
2943     -cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2944     +__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
2945     + bool malformed)
2946     {
2947     int length;
2948     - struct cifs_readdata *rdata = mid->callback_data;
2949    
2950     length = cifs_discard_remaining_data(server);
2951     - dequeue_mid(mid, rdata->result);
2952     + dequeue_mid(mid, malformed);
2953     mid->resp_buf = server->smallbuf;
2954     server->smallbuf = NULL;
2955     return length;
2956     }
2957    
2958     +static int
2959     +cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2960     +{
2961     + struct cifs_readdata *rdata = mid->callback_data;
2962     +
2963     + return __cifs_readv_discard(server, mid, rdata->result);
2964     +}
2965     +
2966     int
2967     cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2968     {
2969     @@ -1511,12 +1519,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2970     return -1;
2971     }
2972    
2973     + /* set up first two iov for signature check and to get credits */
2974     + rdata->iov[0].iov_base = buf;
2975     + rdata->iov[0].iov_len = 4;
2976     + rdata->iov[1].iov_base = buf + 4;
2977     + rdata->iov[1].iov_len = server->total_read - 4;
2978     + cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
2979     + rdata->iov[0].iov_base, rdata->iov[0].iov_len);
2980     + cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
2981     + rdata->iov[1].iov_base, rdata->iov[1].iov_len);
2982     +
2983     /* Was the SMB read successful? */
2984     rdata->result = server->ops->map_error(buf, false);
2985     if (rdata->result != 0) {
2986     cifs_dbg(FYI, "%s: server returned error %d\n",
2987     __func__, rdata->result);
2988     - return cifs_readv_discard(server, mid);
2989     + /* normal error on read response */
2990     + return __cifs_readv_discard(server, mid, false);
2991     }
2992    
2993     /* Is there enough to get to the rest of the READ_RSP header? */
2994     @@ -1560,14 +1579,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2995     server->total_read += length;
2996     }
2997    
2998     - /* set up first iov for signature check */
2999     - rdata->iov[0].iov_base = buf;
3000     - rdata->iov[0].iov_len = 4;
3001     - rdata->iov[1].iov_base = buf + 4;
3002     - rdata->iov[1].iov_len = server->total_read - 4;
3003     - cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
3004     - rdata->iov[0].iov_base, server->total_read);
3005     -
3006     /* how much data is in the response? */
3007     #ifdef CONFIG_CIFS_SMB_DIRECT
3008     use_rdma_mr = rdata->mr;
3009     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3010     index 6f24f129a751..b83ab72cf855 100644
3011     --- a/fs/cifs/connect.c
3012     +++ b/fs/cifs/connect.c
3013     @@ -534,6 +534,21 @@ server_unresponsive(struct TCP_Server_Info *server)
3014     return false;
3015     }
3016    
3017     +static inline bool
3018     +zero_credits(struct TCP_Server_Info *server)
3019     +{
3020     + int val;
3021     +
3022     + spin_lock(&server->req_lock);
3023     + val = server->credits + server->echo_credits + server->oplock_credits;
3024     + if (server->in_flight == 0 && val == 0) {
3025     + spin_unlock(&server->req_lock);
3026     + return true;
3027     + }
3028     + spin_unlock(&server->req_lock);
3029     + return false;
3030     +}
3031     +
3032     static int
3033     cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
3034     {
3035     @@ -546,6 +561,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
3036     for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
3037     try_to_freeze();
3038    
3039     + /* reconnect if no credits and no requests in flight */
3040     + if (zero_credits(server)) {
3041     + cifs_reconnect(server);
3042     + return -ECONNABORTED;
3043     + }
3044     +
3045     if (server_unresponsive(server))
3046     return -ECONNABORTED;
3047     if (cifs_rdma_enabled(server) && server->smbd_conn)
3048     diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
3049     index 6a9c47541c53..7b8b58fb4d3f 100644
3050     --- a/fs/cifs/smb2misc.c
3051     +++ b/fs/cifs/smb2misc.c
3052     @@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3053     if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
3054     return false;
3055    
3056     + if (rsp->sync_hdr.CreditRequest) {
3057     + spin_lock(&server->req_lock);
3058     + server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest);
3059     + spin_unlock(&server->req_lock);
3060     + wake_up(&server->request_q);
3061     + }
3062     +
3063     if (rsp->StructureSize !=
3064     smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
3065     if (le16_to_cpu(rsp->StructureSize) == 44)
3066     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3067     index 391b40e91910..d7dd7d38fad6 100644
3068     --- a/fs/cifs/smb2ops.c
3069     +++ b/fs/cifs/smb2ops.c
3070     @@ -34,6 +34,7 @@
3071     #include "cifs_ioctl.h"
3072     #include "smbdirect.h"
3073    
3074     +/* Change credits for different ops and return the total number of credits */
3075     static int
3076     change_conf(struct TCP_Server_Info *server)
3077     {
3078     @@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server)
3079     server->oplock_credits = server->echo_credits = 0;
3080     switch (server->credits) {
3081     case 0:
3082     - return -1;
3083     + return 0;
3084     case 1:
3085     server->echoes = false;
3086     server->oplocks = false;
3087     - cifs_dbg(VFS, "disabling echoes and oplocks\n");
3088     break;
3089     case 2:
3090     server->echoes = true;
3091     server->oplocks = false;
3092     server->echo_credits = 1;
3093     - cifs_dbg(FYI, "disabling oplocks\n");
3094     break;
3095     default:
3096     server->echoes = true;
3097     @@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server)
3098     server->echo_credits = 1;
3099     }
3100     server->credits -= server->echo_credits + server->oplock_credits;
3101     - return 0;
3102     + return server->credits + server->echo_credits + server->oplock_credits;
3103     }
3104    
3105     static void
3106     smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
3107     const int optype)
3108     {
3109     - int *val, rc = 0;
3110     + int *val, rc = -1;
3111     +
3112     spin_lock(&server->req_lock);
3113     val = server->ops->get_credits_field(server, optype);
3114    
3115     @@ -101,8 +101,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
3116     }
3117     spin_unlock(&server->req_lock);
3118     wake_up(&server->request_q);
3119     - if (rc)
3120     - cifs_reconnect(server);
3121     +
3122     + if (server->tcpStatus == CifsNeedReconnect)
3123     + return;
3124     +
3125     + switch (rc) {
3126     + case -1:
3127     + /* change_conf hasn't been executed */
3128     + break;
3129     + case 0:
3130     + cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
3131     + break;
3132     + case 1:
3133     + cifs_dbg(VFS, "disabling echoes and oplocks\n");
3134     + break;
3135     + case 2:
3136     + cifs_dbg(FYI, "disabling oplocks\n");
3137     + break;
3138     + default:
3139     + cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
3140     + }
3141     }
3142    
3143     static void
3144     @@ -165,14 +183,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
3145    
3146     scredits = server->credits;
3147     /* can deadlock with reopen */
3148     - if (scredits == 1) {
3149     + if (scredits <= 8) {
3150     *num = SMB2_MAX_BUFFER_SIZE;
3151     *credits = 0;
3152     break;
3153     }
3154    
3155     - /* leave one credit for a possible reopen */
3156     - scredits--;
3157     + /* leave some credits for reopen and other ops */
3158     + scredits -= 8;
3159     *num = min_t(unsigned int, size,
3160     scredits * SMB2_MAX_BUFFER_SIZE);
3161    
3162     @@ -3101,11 +3119,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3163     server->ops->is_status_pending(buf, server, 0))
3164     return -1;
3165    
3166     - rdata->result = server->ops->map_error(buf, false);
3167     + /* set up first two iov to get credits */
3168     + rdata->iov[0].iov_base = buf;
3169     + rdata->iov[0].iov_len = 4;
3170     + rdata->iov[1].iov_base = buf + 4;
3171     + rdata->iov[1].iov_len =
3172     + min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
3173     + cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3174     + rdata->iov[0].iov_base, rdata->iov[0].iov_len);
3175     + cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
3176     + rdata->iov[1].iov_base, rdata->iov[1].iov_len);
3177     +
3178     + rdata->result = server->ops->map_error(buf, true);
3179     if (rdata->result != 0) {
3180     cifs_dbg(FYI, "%s: server returned error %d\n",
3181     __func__, rdata->result);
3182     - dequeue_mid(mid, rdata->result);
3183     + /* normal error on read response */
3184     + dequeue_mid(mid, false);
3185     return 0;
3186     }
3187    
3188     @@ -3178,14 +3208,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3189     return 0;
3190     }
3191    
3192     - /* set up first iov for signature check */
3193     - rdata->iov[0].iov_base = buf;
3194     - rdata->iov[0].iov_len = 4;
3195     - rdata->iov[1].iov_base = buf + 4;
3196     - rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
3197     - cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3198     - rdata->iov[0].iov_base, server->vals->read_rsp_size);
3199     -
3200     length = rdata->copy_into_pages(server, rdata, &iter);
3201    
3202     kfree(bvec);
3203     diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
3204     index 105576daca4a..798f1253141a 100644
3205     --- a/fs/notify/inotify/inotify_user.c
3206     +++ b/fs/notify/inotify/inotify_user.c
3207     @@ -724,8 +724,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
3208     return -EBADF;
3209    
3210     /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
3211     - if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE)))
3212     - return -EINVAL;
3213     + if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
3214     + ret = -EINVAL;
3215     + goto fput_and_out;
3216     + }
3217    
3218     /* verify that this is indeed an inotify instance */
3219     if (unlikely(f.file->f_op != &inotify_fops)) {
3220     diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
3221     index d93e89761a8b..a6349a29748c 100644
3222     --- a/include/linux/bpf_verifier.h
3223     +++ b/include/linux/bpf_verifier.h
3224     @@ -147,6 +147,7 @@ struct bpf_verifier_state {
3225     /* call stack tracking */
3226     struct bpf_func_state *frame[MAX_CALL_FRAMES];
3227     u32 curframe;
3228     + bool speculative;
3229     };
3230    
3231     #define bpf_get_spilled_reg(slot, frame) \
3232     @@ -166,15 +167,25 @@ struct bpf_verifier_state_list {
3233     struct bpf_verifier_state_list *next;
3234     };
3235    
3236     +/* Possible states for alu_state member. */
3237     +#define BPF_ALU_SANITIZE_SRC 1U
3238     +#define BPF_ALU_SANITIZE_DST 2U
3239     +#define BPF_ALU_NEG_VALUE (1U << 2)
3240     +#define BPF_ALU_NON_POINTER (1U << 3)
3241     +#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
3242     + BPF_ALU_SANITIZE_DST)
3243     +
3244     struct bpf_insn_aux_data {
3245     union {
3246     enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
3247     unsigned long map_state; /* pointer/poison value for maps */
3248     s32 call_imm; /* saved imm field of call insn */
3249     + u32 alu_limit; /* limit for add/sub register with pointer */
3250     };
3251     int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
3252     int sanitize_stack_off; /* stack slot to be cleared */
3253     bool seen; /* this insn was processed by the verifier */
3254     + u8 alu_state; /* used in combination with alu_limit */
3255     };
3256    
3257     #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
3258     @@ -210,6 +221,8 @@ struct bpf_subprog_info {
3259     * one verifier_env per bpf_check() call
3260     */
3261     struct bpf_verifier_env {
3262     + u32 insn_idx;
3263     + u32 prev_insn_idx;
3264     struct bpf_prog *prog; /* eBPF program being verified */
3265     const struct bpf_verifier_ops *ops;
3266     struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
3267     diff --git a/include/linux/filter.h b/include/linux/filter.h
3268     index 25a556589ae8..b776626aeb84 100644
3269     --- a/include/linux/filter.h
3270     +++ b/include/linux/filter.h
3271     @@ -53,14 +53,10 @@ struct sock_reuseport;
3272     #define BPF_REG_D BPF_REG_8 /* data, callee-saved */
3273     #define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */
3274    
3275     -/* Kernel hidden auxiliary/helper register for hardening step.
3276     - * Only used by eBPF JITs. It's nothing more than a temporary
3277     - * register that JITs use internally, only that here it's part
3278     - * of eBPF instructions that have been rewritten for blinding
3279     - * constants. See JIT pre-step in bpf_jit_blind_constants().
3280     - */
3281     +/* Kernel hidden auxiliary/helper register. */
3282     #define BPF_REG_AX MAX_BPF_REG
3283     -#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
3284     +#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
3285     +#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
3286    
3287     /* unused opcode to mark special call to bpf_tail_call() helper */
3288     #define BPF_TAIL_CALL 0xf0
3289     diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
3290     index 14131b6fae68..dcb6977afce9 100644
3291     --- a/include/linux/hyperv.h
3292     +++ b/include/linux/hyperv.h
3293     @@ -830,15 +830,6 @@ struct vmbus_channel {
3294     * All Sub-channels of a primary channel are linked here.
3295     */
3296     struct list_head sc_list;
3297     - /*
3298     - * Current number of sub-channels.
3299     - */
3300     - int num_sc;
3301     - /*
3302     - * Number of a sub-channel (position within sc_list) which is supposed
3303     - * to be used as the next outgoing channel.
3304     - */
3305     - int next_oc;
3306     /*
3307     * The primary channel this sub-channel belongs to.
3308     * This will be NULL for the primary channel.
3309     @@ -972,14 +963,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
3310     void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
3311     void (*chn_rescind_cb)(struct vmbus_channel *));
3312    
3313     -/*
3314     - * Retrieve the (sub) channel on which to send an outgoing request.
3315     - * When a primary channel has multiple sub-channels, we choose a
3316     - * channel whose VCPU binding is closest to the VCPU on which
3317     - * this call is being made.
3318     - */
3319     -struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
3320     -
3321     /*
3322     * Check if sub-channels have already been offerred. This API will be useful
3323     * when the driver is unloaded after establishing sub-channels. In this case,
3324     @@ -1176,8 +1159,9 @@ struct hv_ring_buffer_debug_info {
3325     u32 bytes_avail_towrite;
3326     };
3327    
3328     -void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
3329     - struct hv_ring_buffer_debug_info *debug_info);
3330     +
3331     +int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
3332     + struct hv_ring_buffer_debug_info *debug_info);
3333    
3334     /* Vmbus interface */
3335     #define vmbus_driver_register(driver) \
3336     diff --git a/include/linux/phy.h b/include/linux/phy.h
3337     index 306630d13523..f5d4235e3844 100644
3338     --- a/include/linux/phy.h
3339     +++ b/include/linux/phy.h
3340     @@ -502,8 +502,8 @@ struct phy_device {
3341     * only works for PHYs with IDs which match this field
3342     * name: The friendly name of this PHY type
3343     * phy_id_mask: Defines the important bits of the phy_id
3344     - * features: A list of features (speed, duplex, etc) supported
3345     - * by this PHY
3346     + * features: A mandatory list of features (speed, duplex, etc)
3347     + * supported by this PHY
3348     * flags: A bitfield defining certain other features this PHY
3349     * supports (like interrupts)
3350     *
3351     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3352     index 0d1b2c3f127b..a6d820ad17f0 100644
3353     --- a/include/linux/skbuff.h
3354     +++ b/include/linux/skbuff.h
3355     @@ -3204,6 +3204,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
3356     *
3357     * This is exactly the same as pskb_trim except that it ensures the
3358     * checksum of received packets are still valid after the operation.
3359     + * It can change skb pointers.
3360     */
3361    
3362     static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3363     diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
3364     index c5969762a8f4..9c8214d2116d 100644
3365     --- a/include/net/ip_fib.h
3366     +++ b/include/net/ip_fib.h
3367     @@ -241,7 +241,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
3368     struct netlink_ext_ack *extack);
3369     int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
3370     struct netlink_callback *cb, struct fib_dump_filter *filter);
3371     -int fib_table_flush(struct net *net, struct fib_table *table);
3372     +int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
3373     struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
3374     void fib_table_flush_external(struct fib_table *table);
3375     void fib_free_table(struct fib_table *tb);
3376     diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
3377     index fb78f6f500f3..f056b2a00d5c 100644
3378     --- a/include/uapi/linux/input.h
3379     +++ b/include/uapi/linux/input.h
3380     @@ -26,13 +26,17 @@
3381     */
3382    
3383     struct input_event {
3384     -#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL)
3385     +#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
3386     struct timeval time;
3387     #define input_event_sec time.tv_sec
3388     #define input_event_usec time.tv_usec
3389     #else
3390     __kernel_ulong_t __sec;
3391     +#if defined(__sparc__) && defined(__arch64__)
3392     + unsigned int __usec;
3393     +#else
3394     __kernel_ulong_t __usec;
3395     +#endif
3396     #define input_event_sec __sec
3397     #define input_event_usec __usec
3398     #endif
3399     diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
3400     index b2890c268cb3..ac44653025ad 100644
3401     --- a/kernel/bpf/core.c
3402     +++ b/kernel/bpf/core.c
3403     @@ -52,6 +52,7 @@
3404     #define DST regs[insn->dst_reg]
3405     #define SRC regs[insn->src_reg]
3406     #define FP regs[BPF_REG_FP]
3407     +#define AX regs[BPF_REG_AX]
3408     #define ARG1 regs[BPF_REG_ARG1]
3409     #define CTX regs[BPF_REG_CTX]
3410     #define IMM insn->imm
3411     @@ -726,6 +727,26 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
3412     BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
3413     BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
3414    
3415     + /* Constraints on AX register:
3416     + *
3417     + * AX register is inaccessible from user space. It is mapped in
3418     + * all JITs, and used here for constant blinding rewrites. It is
3419     + * typically "stateless" meaning its contents are only valid within
3420     + * the executed instruction, but not across several instructions.
3421     + * There are a few exceptions however which are further detailed
3422     + * below.
3423     + *
3424     + * Constant blinding is only used by JITs, not in the interpreter.
3425     + * The interpreter uses AX in some occasions as a local temporary
3426     + * register e.g. in DIV or MOD instructions.
3427     + *
3428     + * In restricted circumstances, the verifier can also use the AX
3429     + * register for rewrites as long as they do not interfere with
3430     + * the above cases!
3431     + */
3432     + if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
3433     + goto out;
3434     +
3435     if (from->imm == 0 &&
3436     (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
3437     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
3438     @@ -1055,7 +1076,6 @@ bool bpf_opcode_in_insntable(u8 code)
3439     */
3440     static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
3441     {
3442     - u64 tmp;
3443     #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
3444     #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
3445     static const void *jumptable[256] = {
3446     @@ -1129,36 +1149,36 @@ select_insn:
3447     (*(s64 *) &DST) >>= IMM;
3448     CONT;
3449     ALU64_MOD_X:
3450     - div64_u64_rem(DST, SRC, &tmp);
3451     - DST = tmp;
3452     + div64_u64_rem(DST, SRC, &AX);
3453     + DST = AX;
3454     CONT;
3455     ALU_MOD_X:
3456     - tmp = (u32) DST;
3457     - DST = do_div(tmp, (u32) SRC);
3458     + AX = (u32) DST;
3459     + DST = do_div(AX, (u32) SRC);
3460     CONT;
3461     ALU64_MOD_K:
3462     - div64_u64_rem(DST, IMM, &tmp);
3463     - DST = tmp;
3464     + div64_u64_rem(DST, IMM, &AX);
3465     + DST = AX;
3466     CONT;
3467     ALU_MOD_K:
3468     - tmp = (u32) DST;
3469     - DST = do_div(tmp, (u32) IMM);
3470     + AX = (u32) DST;
3471     + DST = do_div(AX, (u32) IMM);
3472     CONT;
3473     ALU64_DIV_X:
3474     DST = div64_u64(DST, SRC);
3475     CONT;
3476     ALU_DIV_X:
3477     - tmp = (u32) DST;
3478     - do_div(tmp, (u32) SRC);
3479     - DST = (u32) tmp;
3480     + AX = (u32) DST;
3481     + do_div(AX, (u32) SRC);
3482     + DST = (u32) AX;
3483     CONT;
3484     ALU64_DIV_K:
3485     DST = div64_u64(DST, IMM);
3486     CONT;
3487     ALU_DIV_K:
3488     - tmp = (u32) DST;
3489     - do_div(tmp, (u32) IMM);
3490     - DST = (u32) tmp;
3491     + AX = (u32) DST;
3492     + do_div(AX, (u32) IMM);
3493     + DST = (u32) AX;
3494     CONT;
3495     ALU_END_TO_BE:
3496     switch (IMM) {
3497     @@ -1414,7 +1434,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
3498     static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
3499     { \
3500     u64 stack[stack_size / sizeof(u64)]; \
3501     - u64 regs[MAX_BPF_REG]; \
3502     + u64 regs[MAX_BPF_EXT_REG]; \
3503     \
3504     FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
3505     ARG1 = (u64) (unsigned long) ctx; \
3506     @@ -1427,7 +1447,7 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
3507     const struct bpf_insn *insn) \
3508     { \
3509     u64 stack[stack_size / sizeof(u64)]; \
3510     - u64 regs[MAX_BPF_REG]; \
3511     + u64 regs[MAX_BPF_EXT_REG]; \
3512     \
3513     FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
3514     BPF_R1 = r1; \
3515     diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
3516     index 99d243e1ad6e..52378d3e34b3 100644
3517     --- a/kernel/bpf/map_in_map.c
3518     +++ b/kernel/bpf/map_in_map.c
3519     @@ -12,6 +12,7 @@
3520     struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
3521     {
3522     struct bpf_map *inner_map, *inner_map_meta;
3523     + u32 inner_map_meta_size;
3524     struct fd f;
3525    
3526     f = fdget(inner_map_ufd);
3527     @@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
3528     return ERR_PTR(-EINVAL);
3529     }
3530    
3531     - inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
3532     + inner_map_meta_size = sizeof(*inner_map_meta);
3533     + /* In some cases verifier needs to access beyond just base map. */
3534     + if (inner_map->ops == &array_map_ops)
3535     + inner_map_meta_size = sizeof(struct bpf_array);
3536     +
3537     + inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
3538     if (!inner_map_meta) {
3539     fdput(f);
3540     return ERR_PTR(-ENOMEM);
3541     @@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
3542     inner_map_meta->key_size = inner_map->key_size;
3543     inner_map_meta->value_size = inner_map->value_size;
3544     inner_map_meta->map_flags = inner_map->map_flags;
3545     - inner_map_meta->ops = inner_map->ops;
3546     inner_map_meta->max_entries = inner_map->max_entries;
3547    
3548     + /* Misc members not needed in bpf_map_meta_equal() check. */
3549     + inner_map_meta->ops = inner_map->ops;
3550     + if (inner_map->ops == &array_map_ops) {
3551     + inner_map_meta->unpriv_array = inner_map->unpriv_array;
3552     + container_of(inner_map_meta, struct bpf_array, map)->index_mask =
3553     + container_of(inner_map, struct bpf_array, map)->index_mask;
3554     + }
3555     +
3556     fdput(f);
3557     return inner_map_meta;
3558     }
3559     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3560     index eedc7bd4185d..e4c826229152 100644
3561     --- a/kernel/bpf/verifier.c
3562     +++ b/kernel/bpf/verifier.c
3563     @@ -648,6 +648,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
3564     free_func_state(dst_state->frame[i]);
3565     dst_state->frame[i] = NULL;
3566     }
3567     + dst_state->speculative = src->speculative;
3568     dst_state->curframe = src->curframe;
3569     for (i = 0; i <= src->curframe; i++) {
3570     dst = dst_state->frame[i];
3571     @@ -692,7 +693,8 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
3572     }
3573    
3574     static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
3575     - int insn_idx, int prev_insn_idx)
3576     + int insn_idx, int prev_insn_idx,
3577     + bool speculative)
3578     {
3579     struct bpf_verifier_state *cur = env->cur_state;
3580     struct bpf_verifier_stack_elem *elem;
3581     @@ -710,6 +712,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
3582     err = copy_verifier_state(&elem->st, cur);
3583     if (err)
3584     goto err;
3585     + elem->st.speculative |= speculative;
3586     if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
3587     verbose(env, "BPF program is too complex\n");
3588     goto err;
3589     @@ -1314,6 +1317,31 @@ static int check_stack_read(struct bpf_verifier_env *env,
3590     }
3591     }
3592    
3593     +static int check_stack_access(struct bpf_verifier_env *env,
3594     + const struct bpf_reg_state *reg,
3595     + int off, int size)
3596     +{
3597     + /* Stack accesses must be at a fixed offset, so that we
3598     + * can determine what type of data were returned. See
3599     + * check_stack_read().
3600     + */
3601     + if (!tnum_is_const(reg->var_off)) {
3602     + char tn_buf[48];
3603     +
3604     + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3605     + verbose(env, "variable stack access var_off=%s off=%d size=%d",
3606     + tn_buf, off, size);
3607     + return -EACCES;
3608     + }
3609     +
3610     + if (off >= 0 || off < -MAX_BPF_STACK) {
3611     + verbose(env, "invalid stack off=%d size=%d\n", off, size);
3612     + return -EACCES;
3613     + }
3614     +
3615     + return 0;
3616     +}
3617     +
3618     /* check read/write into map element returned by bpf_map_lookup_elem() */
3619     static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
3620     int size, bool zero_size_allowed)
3621     @@ -1345,13 +1373,17 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
3622     */
3623     if (env->log.level)
3624     print_verifier_state(env, state);
3625     +
3626     /* The minimum value is only important with signed
3627     * comparisons where we can't assume the floor of a
3628     * value is 0. If we are using signed variables for our
3629     * index'es we need to make sure that whatever we use
3630     * will have a set floor within our range.
3631     */
3632     - if (reg->smin_value < 0) {
3633     + if (reg->smin_value < 0 &&
3634     + (reg->smin_value == S64_MIN ||
3635     + (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
3636     + reg->smin_value + off < 0)) {
3637     verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3638     regno);
3639     return -EACCES;
3640     @@ -1870,24 +1902,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
3641     }
3642    
3643     } else if (reg->type == PTR_TO_STACK) {
3644     - /* stack accesses must be at a fixed offset, so that we can
3645     - * determine what type of data were returned.
3646     - * See check_stack_read().
3647     - */
3648     - if (!tnum_is_const(reg->var_off)) {
3649     - char tn_buf[48];
3650     -
3651     - tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3652     - verbose(env, "variable stack access var_off=%s off=%d size=%d",
3653     - tn_buf, off, size);
3654     - return -EACCES;
3655     - }
3656     off += reg->var_off.value;
3657     - if (off >= 0 || off < -MAX_BPF_STACK) {
3658     - verbose(env, "invalid stack off=%d size=%d\n", off,
3659     - size);
3660     - return -EACCES;
3661     - }
3662     + err = check_stack_access(env, reg, off, size);
3663     + if (err)
3664     + return err;
3665    
3666     state = func(env, reg);
3667     err = update_stack_depth(env, state, off);
3668     @@ -2968,6 +2986,125 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
3669     return true;
3670     }
3671    
3672     +static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
3673     +{
3674     + return &env->insn_aux_data[env->insn_idx];
3675     +}
3676     +
3677     +static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
3678     + u32 *ptr_limit, u8 opcode, bool off_is_neg)
3679     +{
3680     + bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
3681     + (opcode == BPF_SUB && !off_is_neg);
3682     + u32 off;
3683     +
3684     + switch (ptr_reg->type) {
3685     + case PTR_TO_STACK:
3686     + off = ptr_reg->off + ptr_reg->var_off.value;
3687     + if (mask_to_left)
3688     + *ptr_limit = MAX_BPF_STACK + off;
3689     + else
3690     + *ptr_limit = -off;
3691     + return 0;
3692     + case PTR_TO_MAP_VALUE:
3693     + if (mask_to_left) {
3694     + *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
3695     + } else {
3696     + off = ptr_reg->smin_value + ptr_reg->off;
3697     + *ptr_limit = ptr_reg->map_ptr->value_size - off;
3698     + }
3699     + return 0;
3700     + default:
3701     + return -EINVAL;
3702     + }
3703     +}
3704     +
3705     +static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
3706     + const struct bpf_insn *insn)
3707     +{
3708     + return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
3709     +}
3710     +
3711     +static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
3712     + u32 alu_state, u32 alu_limit)
3713     +{
3714     + /* If we arrived here from different branches with different
3715     + * state or limits to sanitize, then this won't work.
3716     + */
3717     + if (aux->alu_state &&
3718     + (aux->alu_state != alu_state ||
3719     + aux->alu_limit != alu_limit))
3720     + return -EACCES;
3721     +
3722     + /* Corresponding fixup done in fixup_bpf_calls(). */
3723     + aux->alu_state = alu_state;
3724     + aux->alu_limit = alu_limit;
3725     + return 0;
3726     +}
3727     +
3728     +static int sanitize_val_alu(struct bpf_verifier_env *env,
3729     + struct bpf_insn *insn)
3730     +{
3731     + struct bpf_insn_aux_data *aux = cur_aux(env);
3732     +
3733     + if (can_skip_alu_sanitation(env, insn))
3734     + return 0;
3735     +
3736     + return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
3737     +}
3738     +
3739     +static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3740     + struct bpf_insn *insn,
3741     + const struct bpf_reg_state *ptr_reg,
3742     + struct bpf_reg_state *dst_reg,
3743     + bool off_is_neg)
3744     +{
3745     + struct bpf_verifier_state *vstate = env->cur_state;
3746     + struct bpf_insn_aux_data *aux = cur_aux(env);
3747     + bool ptr_is_dst_reg = ptr_reg == dst_reg;
3748     + u8 opcode = BPF_OP(insn->code);
3749     + u32 alu_state, alu_limit;
3750     + struct bpf_reg_state tmp;
3751     + bool ret;
3752     +
3753     + if (can_skip_alu_sanitation(env, insn))
3754     + return 0;
3755     +
3756     + /* We already marked aux for masking from non-speculative
3757     + * paths, thus we got here in the first place. We only care
3758     + * to explore bad access from here.
3759     + */
3760     + if (vstate->speculative)
3761     + goto do_sim;
3762     +
3763     + alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
3764     + alu_state |= ptr_is_dst_reg ?
3765     + BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
3766     +
3767     + if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
3768     + return 0;
3769     + if (update_alu_sanitation_state(aux, alu_state, alu_limit))
3770     + return -EACCES;
3771     +do_sim:
3772     + /* Simulate and find potential out-of-bounds access under
3773     + * speculative execution from truncation as a result of
3774     + * masking when off was not within expected range. If off
3775     + * sits in dst, then we temporarily need to move ptr there
3776     + * to simulate dst (== 0) +/-= ptr. Needed, for example,
3777     + * for cases where we use K-based arithmetic in one direction
3778     + * and truncated reg-based in the other in order to explore
3779     + * bad access.
3780     + */
3781     + if (!ptr_is_dst_reg) {
3782     + tmp = *dst_reg;
3783     + *dst_reg = *ptr_reg;
3784     + }
3785     + ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3786     + if (!ptr_is_dst_reg)
3787     + *dst_reg = tmp;
3788     + return !ret ? -EFAULT : 0;
3789     +}
3790     +
3791     /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
3792     * Caller should also handle BPF_MOV case separately.
3793     * If we return -EACCES, caller may want to try again treating pointer as a
3794     @@ -2986,8 +3123,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3795     smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
3796     u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
3797     umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
3798     + u32 dst = insn->dst_reg, src = insn->src_reg;
3799     u8 opcode = BPF_OP(insn->code);
3800     - u32 dst = insn->dst_reg;
3801     + int ret;
3802    
3803     dst_reg = &regs[dst];
3804    
3805     @@ -3020,6 +3158,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3806     verbose(env, "R%d pointer arithmetic on %s prohibited\n",
3807     dst, reg_type_str[ptr_reg->type]);
3808     return -EACCES;
3809     + case PTR_TO_MAP_VALUE:
3810     + if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
3811     + verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
3812     + off_reg == dst_reg ? dst : src);
3813     + return -EACCES;
3814     + }
3815     + /* fall-through */
3816     default:
3817     break;
3818     }
3819     @@ -3036,6 +3181,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3820    
3821     switch (opcode) {
3822     case BPF_ADD:
3823     + ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3824     + if (ret < 0) {
3825     + verbose(env, "R%d tried to add from different maps or paths\n", dst);
3826     + return ret;
3827     + }
3828     /* We can take a fixed offset as long as it doesn't overflow
3829     * the s32 'off' field
3830     */
3831     @@ -3086,6 +3236,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3832     }
3833     break;
3834     case BPF_SUB:
3835     + ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3836     + if (ret < 0) {
3837     + verbose(env, "R%d tried to sub from different maps or paths\n", dst);
3838     + return ret;
3839     + }
3840     if (dst_reg == off_reg) {
3841     /* scalar -= pointer. Creates an unknown scalar */
3842     verbose(env, "R%d tried to subtract pointer from scalar\n",
3843     @@ -3165,6 +3320,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3844     __update_reg_bounds(dst_reg);
3845     __reg_deduce_bounds(dst_reg);
3846     __reg_bound_offset(dst_reg);
3847     +
3848     + /* For unprivileged we require that resulting offset must be in bounds
3849     + * in order to be able to sanitize access later on.
3850     + */
3851     + if (!env->allow_ptr_leaks) {
3852     + if (dst_reg->type == PTR_TO_MAP_VALUE &&
3853     + check_map_access(env, dst, dst_reg->off, 1, false)) {
3854     + verbose(env, "R%d pointer arithmetic of map value goes out of range, "
3855     + "prohibited for !root\n", dst);
3856     + return -EACCES;
3857     + } else if (dst_reg->type == PTR_TO_STACK &&
3858     + check_stack_access(env, dst_reg, dst_reg->off +
3859     + dst_reg->var_off.value, 1)) {
3860     + verbose(env, "R%d stack pointer arithmetic goes out of range, "
3861     + "prohibited for !root\n", dst);
3862     + return -EACCES;
3863     + }
3864     + }
3865     +
3866     return 0;
3867     }
3868    
3869     @@ -3183,6 +3357,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3870     s64 smin_val, smax_val;
3871     u64 umin_val, umax_val;
3872     u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
3873     + u32 dst = insn->dst_reg;
3874     + int ret;
3875    
3876     if (insn_bitness == 32) {
3877     /* Relevant for 32-bit RSH: Information can propagate towards
3878     @@ -3217,6 +3393,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3879    
3880     switch (opcode) {
3881     case BPF_ADD:
3882     + ret = sanitize_val_alu(env, insn);
3883     + if (ret < 0) {
3884     + verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
3885     + return ret;
3886     + }
3887     if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
3888     signed_add_overflows(dst_reg->smax_value, smax_val)) {
3889     dst_reg->smin_value = S64_MIN;
3890     @@ -3236,6 +3417,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3891     dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
3892     break;
3893     case BPF_SUB:
3894     + ret = sanitize_val_alu(env, insn);
3895     + if (ret < 0) {
3896     + verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
3897     + return ret;
3898     + }
3899     if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
3900     signed_sub_overflows(dst_reg->smax_value, smin_val)) {
3901     /* Overflow possible, we know nothing */
3902     @@ -4249,7 +4435,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
3903     }
3904     }
3905    
3906     - other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
3907     + other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
3908     + false);
3909     if (!other_branch)
3910     return -EFAULT;
3911     other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
3912     @@ -4990,6 +5177,12 @@ static bool states_equal(struct bpf_verifier_env *env,
3913     if (old->curframe != cur->curframe)
3914     return false;
3915    
3916     + /* Verification state from speculative execution simulation
3917     + * must never prune a non-speculative execution one.
3918     + */
3919     + if (old->speculative && !cur->speculative)
3920     + return false;
3921     +
3922     /* for states to be equal callsites have to be the same
3923     * and all frame states need to be equivalent
3924     */
3925     @@ -5180,7 +5373,6 @@ static int do_check(struct bpf_verifier_env *env)
3926     struct bpf_insn *insns = env->prog->insnsi;
3927     struct bpf_reg_state *regs;
3928     int insn_cnt = env->prog->len, i;
3929     - int insn_idx, prev_insn_idx = 0;
3930     int insn_processed = 0;
3931     bool do_print_state = false;
3932    
3933     @@ -5188,6 +5380,7 @@ static int do_check(struct bpf_verifier_env *env)
3934     if (!state)
3935     return -ENOMEM;
3936     state->curframe = 0;
3937     + state->speculative = false;
3938     state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
3939     if (!state->frame[0]) {
3940     kfree(state);
3941     @@ -5198,19 +5391,19 @@ static int do_check(struct bpf_verifier_env *env)
3942     BPF_MAIN_FUNC /* callsite */,
3943     0 /* frameno */,
3944     0 /* subprogno, zero == main subprog */);
3945     - insn_idx = 0;
3946     +
3947     for (;;) {
3948     struct bpf_insn *insn;
3949     u8 class;
3950     int err;
3951    
3952     - if (insn_idx >= insn_cnt) {
3953     + if (env->insn_idx >= insn_cnt) {
3954     verbose(env, "invalid insn idx %d insn_cnt %d\n",
3955     - insn_idx, insn_cnt);
3956     + env->insn_idx, insn_cnt);
3957     return -EFAULT;
3958     }
3959    
3960     - insn = &insns[insn_idx];
3961     + insn = &insns[env->insn_idx];
3962     class = BPF_CLASS(insn->code);
3963    
3964     if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
3965     @@ -5220,17 +5413,19 @@ static int do_check(struct bpf_verifier_env *env)
3966     return -E2BIG;
3967     }
3968    
3969     - err = is_state_visited(env, insn_idx);
3970     + err = is_state_visited(env, env->insn_idx);
3971     if (err < 0)
3972     return err;
3973     if (err == 1) {
3974     /* found equivalent state, can prune the search */
3975     if (env->log.level) {
3976     if (do_print_state)
3977     - verbose(env, "\nfrom %d to %d: safe\n",
3978     - prev_insn_idx, insn_idx);
3979     + verbose(env, "\nfrom %d to %d%s: safe\n",
3980     + env->prev_insn_idx, env->insn_idx,
3981     + env->cur_state->speculative ?
3982     + " (speculative execution)" : "");
3983     else
3984     - verbose(env, "%d: safe\n", insn_idx);
3985     + verbose(env, "%d: safe\n", env->insn_idx);
3986     }
3987     goto process_bpf_exit;
3988     }
3989     @@ -5243,10 +5438,12 @@ static int do_check(struct bpf_verifier_env *env)
3990    
3991     if (env->log.level > 1 || (env->log.level && do_print_state)) {
3992     if (env->log.level > 1)
3993     - verbose(env, "%d:", insn_idx);
3994     + verbose(env, "%d:", env->insn_idx);
3995     else
3996     - verbose(env, "\nfrom %d to %d:",
3997     - prev_insn_idx, insn_idx);
3998     + verbose(env, "\nfrom %d to %d%s:",
3999     + env->prev_insn_idx, env->insn_idx,
4000     + env->cur_state->speculative ?
4001     + " (speculative execution)" : "");
4002     print_verifier_state(env, state->frame[state->curframe]);
4003     do_print_state = false;
4004     }
4005     @@ -5257,19 +5454,19 @@ static int do_check(struct bpf_verifier_env *env)
4006     .private_data = env,
4007     };
4008    
4009     - verbose(env, "%d: ", insn_idx);
4010     + verbose(env, "%d: ", env->insn_idx);
4011     print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
4012     }
4013    
4014     if (bpf_prog_is_dev_bound(env->prog->aux)) {
4015     - err = bpf_prog_offload_verify_insn(env, insn_idx,
4016     - prev_insn_idx);
4017     + err = bpf_prog_offload_verify_insn(env, env->insn_idx,
4018     + env->prev_insn_idx);
4019     if (err)
4020     return err;
4021     }
4022    
4023     regs = cur_regs(env);
4024     - env->insn_aux_data[insn_idx].seen = true;
4025     + env->insn_aux_data[env->insn_idx].seen = true;
4026    
4027     if (class == BPF_ALU || class == BPF_ALU64) {
4028     err = check_alu_op(env, insn);
4029     @@ -5295,13 +5492,13 @@ static int do_check(struct bpf_verifier_env *env)
4030     /* check that memory (src_reg + off) is readable,
4031     * the state of dst_reg will be updated by this func
4032     */
4033     - err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
4034     - BPF_SIZE(insn->code), BPF_READ,
4035     - insn->dst_reg, false);
4036     + err = check_mem_access(env, env->insn_idx, insn->src_reg,
4037     + insn->off, BPF_SIZE(insn->code),
4038     + BPF_READ, insn->dst_reg, false);
4039     if (err)
4040     return err;
4041    
4042     - prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
4043     + prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
4044    
4045     if (*prev_src_type == NOT_INIT) {
4046     /* saw a valid insn
4047     @@ -5326,10 +5523,10 @@ static int do_check(struct bpf_verifier_env *env)
4048     enum bpf_reg_type *prev_dst_type, dst_reg_type;
4049    
4050     if (BPF_MODE(insn->code) == BPF_XADD) {
4051     - err = check_xadd(env, insn_idx, insn);
4052     + err = check_xadd(env, env->insn_idx, insn);
4053     if (err)
4054     return err;
4055     - insn_idx++;
4056     + env->insn_idx++;
4057     continue;
4058     }
4059    
4060     @@ -5345,13 +5542,13 @@ static int do_check(struct bpf_verifier_env *env)
4061     dst_reg_type = regs[insn->dst_reg].type;
4062    
4063     /* check that memory (dst_reg + off) is writeable */
4064     - err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4065     - BPF_SIZE(insn->code), BPF_WRITE,
4066     - insn->src_reg, false);
4067     + err = check_mem_access(env, env->insn_idx, insn->dst_reg,
4068     + insn->off, BPF_SIZE(insn->code),
4069     + BPF_WRITE, insn->src_reg, false);
4070     if (err)
4071     return err;
4072    
4073     - prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
4074     + prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
4075    
4076     if (*prev_dst_type == NOT_INIT) {
4077     *prev_dst_type = dst_reg_type;
4078     @@ -5379,9 +5576,9 @@ static int do_check(struct bpf_verifier_env *env)
4079     }
4080    
4081     /* check that memory (dst_reg + off) is writeable */
4082     - err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4083     - BPF_SIZE(insn->code), BPF_WRITE,
4084     - -1, false);
4085     + err = check_mem_access(env, env->insn_idx, insn->dst_reg,
4086     + insn->off, BPF_SIZE(insn->code),
4087     + BPF_WRITE, -1, false);
4088     if (err)
4089     return err;
4090    
4091     @@ -5399,9 +5596,9 @@ static int do_check(struct bpf_verifier_env *env)
4092     }
4093    
4094     if (insn->src_reg == BPF_PSEUDO_CALL)
4095     - err = check_func_call(env, insn, &insn_idx);
4096     + err = check_func_call(env, insn, &env->insn_idx);
4097     else
4098     - err = check_helper_call(env, insn->imm, insn_idx);
4099     + err = check_helper_call(env, insn->imm, env->insn_idx);
4100     if (err)
4101     return err;
4102    
4103     @@ -5414,7 +5611,7 @@ static int do_check(struct bpf_verifier_env *env)
4104     return -EINVAL;
4105     }
4106    
4107     - insn_idx += insn->off + 1;
4108     + env->insn_idx += insn->off + 1;
4109     continue;
4110    
4111     } else if (opcode == BPF_EXIT) {
4112     @@ -5428,8 +5625,8 @@ static int do_check(struct bpf_verifier_env *env)
4113    
4114     if (state->curframe) {
4115     /* exit from nested function */
4116     - prev_insn_idx = insn_idx;
4117     - err = prepare_func_exit(env, &insn_idx);
4118     + env->prev_insn_idx = env->insn_idx;
4119     + err = prepare_func_exit(env, &env->insn_idx);
4120     if (err)
4121     return err;
4122     do_print_state = true;
4123     @@ -5459,7 +5656,8 @@ static int do_check(struct bpf_verifier_env *env)
4124     if (err)
4125     return err;
4126     process_bpf_exit:
4127     - err = pop_stack(env, &prev_insn_idx, &insn_idx);
4128     + err = pop_stack(env, &env->prev_insn_idx,
4129     + &env->insn_idx);
4130     if (err < 0) {
4131     if (err != -ENOENT)
4132     return err;
4133     @@ -5469,7 +5667,7 @@ process_bpf_exit:
4134     continue;
4135     }
4136     } else {
4137     - err = check_cond_jmp_op(env, insn, &insn_idx);
4138     + err = check_cond_jmp_op(env, insn, &env->insn_idx);
4139     if (err)
4140     return err;
4141     }
4142     @@ -5486,8 +5684,8 @@ process_bpf_exit:
4143     if (err)
4144     return err;
4145    
4146     - insn_idx++;
4147     - env->insn_aux_data[insn_idx].seen = true;
4148     + env->insn_idx++;
4149     + env->insn_aux_data[env->insn_idx].seen = true;
4150     } else {
4151     verbose(env, "invalid BPF_LD mode\n");
4152     return -EINVAL;
4153     @@ -5497,7 +5695,7 @@ process_bpf_exit:
4154     return -EINVAL;
4155     }
4156    
4157     - insn_idx++;
4158     + env->insn_idx++;
4159     }
4160    
4161     verbose(env, "processed %d insns (limit %d), stack depth ",
4162     @@ -6220,6 +6418,57 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
4163     continue;
4164     }
4165    
4166     + if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
4167     + insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
4168     + const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
4169     + const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
4170     + struct bpf_insn insn_buf[16];
4171     + struct bpf_insn *patch = &insn_buf[0];
4172     + bool issrc, isneg;
4173     + u32 off_reg;
4174     +
4175     + aux = &env->insn_aux_data[i + delta];
4176     + if (!aux->alu_state)
4177     + continue;
4178     +
4179     + isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
4180     + issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
4181     + BPF_ALU_SANITIZE_SRC;
4182     +
4183     + off_reg = issrc ? insn->src_reg : insn->dst_reg;
4184     + if (isneg)
4185     + *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
4186     + *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
4187     + *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
4188     + *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
4189     + *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
4190     + *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
4191     + if (issrc) {
4192     + *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
4193     + off_reg);
4194     + insn->src_reg = BPF_REG_AX;
4195     + } else {
4196     + *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
4197     + BPF_REG_AX);
4198     + }
4199     + if (isneg)
4200     + insn->code = insn->code == code_add ?
4201     + code_sub : code_add;
4202     + *patch++ = *insn;
4203     + if (issrc && isneg)
4204     + *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
4205     + cnt = patch - insn_buf;
4206     +
4207     + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4208     + if (!new_prog)
4209     + return -ENOMEM;
4210     +
4211     + delta += cnt - 1;
4212     + env->prog = prog = new_prog;
4213     + insn = new_prog->insnsi + i + delta;
4214     + continue;
4215     + }
4216     +
4217     if (insn->code != (BPF_JMP | BPF_CALL))
4218     continue;
4219     if (insn->src_reg == BPF_PSEUDO_CALL)
4220     diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
4221     index 8f0644af40be..80f955210861 100644
4222     --- a/kernel/time/posix-cpu-timers.c
4223     +++ b/kernel/time/posix-cpu-timers.c
4224     @@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
4225     * set up the signal and overrun bookkeeping.
4226     */
4227     timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
4228     + timer->it_interval = ns_to_ktime(timer->it.cpu.incr);
4229    
4230     /*
4231     * This acts as a modification timestamp for the timer,
4232     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4233     index e95b5b7c9c3d..995d1079f958 100644
4234     --- a/mm/page_alloc.c
4235     +++ b/mm/page_alloc.c
4236     @@ -5542,18 +5542,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4237     cond_resched();
4238     }
4239     }
4240     -#ifdef CONFIG_SPARSEMEM
4241     - /*
4242     - * If the zone does not span the rest of the section then
4243     - * we should at least initialize those pages. Otherwise we
4244     - * could blow up on a poisoned page in some paths which depend
4245     - * on full sections being initialized (e.g. memory hotplug).
4246     - */
4247     - while (end_pfn % PAGES_PER_SECTION) {
4248     - __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
4249     - end_pfn++;
4250     - }
4251     -#endif
4252     }
4253    
4254     #ifdef CONFIG_ZONE_DEVICE
4255     diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
4256     index 2cb8da465b98..48ddc60b4fbd 100644
4257     --- a/net/bridge/br_forward.c
4258     +++ b/net/bridge/br_forward.c
4259     @@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
4260    
4261     int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
4262     {
4263     + skb_push(skb, ETH_HLEN);
4264     if (!is_skb_forwardable(skb->dev, skb))
4265     goto drop;
4266    
4267     - skb_push(skb, ETH_HLEN);
4268     br_drop_fake_rtable(skb);
4269    
4270     if (skb->ip_summed == CHECKSUM_PARTIAL &&
4271     @@ -98,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to,
4272     net = dev_net(indev);
4273     } else {
4274     if (unlikely(netpoll_tx_running(to->br->dev))) {
4275     - if (!is_skb_forwardable(skb->dev, skb)) {
4276     + skb_push(skb, ETH_HLEN);
4277     + if (!is_skb_forwardable(skb->dev, skb))
4278     kfree_skb(skb);
4279     - } else {
4280     - skb_push(skb, ETH_HLEN);
4281     + else
4282     br_netpoll_send_skb(to, skb);
4283     - }
4284     return;
4285     }
4286     br_hook = NF_BR_LOCAL_OUT;
4287     diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
4288     index 96c072e71ea2..5811208863b7 100644
4289     --- a/net/bridge/br_netfilter_ipv6.c
4290     +++ b/net/bridge/br_netfilter_ipv6.c
4291     @@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
4292     IPSTATS_MIB_INDISCARDS);
4293     goto drop;
4294     }
4295     + hdr = ipv6_hdr(skb);
4296     }
4297     if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
4298     goto drop;
4299     diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
4300     index 08cbed7d940e..419e8edf23ba 100644
4301     --- a/net/bridge/netfilter/nft_reject_bridge.c
4302     +++ b/net/bridge/netfilter/nft_reject_bridge.c
4303     @@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
4304     pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
4305     return false;
4306    
4307     + ip6h = ipv6_hdr(skb);
4308     thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
4309     if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
4310     return false;
4311     diff --git a/net/can/bcm.c b/net/can/bcm.c
4312     index 0af8f0db892a..79bb8afa9c0c 100644
4313     --- a/net/can/bcm.c
4314     +++ b/net/can/bcm.c
4315     @@ -67,6 +67,9 @@
4316     */
4317     #define MAX_NFRAMES 256
4318    
4319     +/* limit timers to 400 days for sending/timeouts */
4320     +#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
4321     +
4322     /* use of last_frames[index].flags */
4323     #define RX_RECV 0x40 /* received data for this element */
4324     #define RX_THR 0x80 /* element not been sent due to throttle feature */
4325     @@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
4326     return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
4327     }
4328    
4329     +/* check limitations for timeval provided by user */
4330     +static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
4331     +{
4332     + if ((msg_head->ival1.tv_sec < 0) ||
4333     + (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
4334     + (msg_head->ival1.tv_usec < 0) ||
4335     + (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
4336     + (msg_head->ival2.tv_sec < 0) ||
4337     + (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
4338     + (msg_head->ival2.tv_usec < 0) ||
4339     + (msg_head->ival2.tv_usec >= USEC_PER_SEC))
4340     + return true;
4341     +
4342     + return false;
4343     +}
4344     +
4345     #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
4346     #define OPSIZ sizeof(struct bcm_op)
4347     #define MHSIZ sizeof(struct bcm_msg_head)
4348     @@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
4349     if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
4350     return -EINVAL;
4351    
4352     + /* check timeval limitations */
4353     + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
4354     + return -EINVAL;
4355     +
4356     /* check the given can_id */
4357     op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
4358     if (op) {
4359     @@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
4360     (!(msg_head->can_id & CAN_RTR_FLAG))))
4361     return -EINVAL;
4362    
4363     + /* check timeval limitations */
4364     + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
4365     + return -EINVAL;
4366     +
4367     /* check the given can_id */
4368     op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
4369     if (op) {
4370     diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
4371     index 6df95be96311..fe4f6a624238 100644
4372     --- a/net/ipv4/fib_frontend.c
4373     +++ b/net/ipv4/fib_frontend.c
4374     @@ -203,7 +203,7 @@ static void fib_flush(struct net *net)
4375     struct fib_table *tb;
4376    
4377     hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
4378     - flushed += fib_table_flush(net, tb);
4379     + flushed += fib_table_flush(net, tb, false);
4380     }
4381    
4382     if (flushed)
4383     @@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net)
4384    
4385     hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
4386     hlist_del(&tb->tb_hlist);
4387     - fib_table_flush(net, tb);
4388     + fib_table_flush(net, tb, true);
4389     fib_free_table(tb);
4390     }
4391     }
4392     diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
4393     index 237c9f72b265..a573e37e0615 100644
4394     --- a/net/ipv4/fib_trie.c
4395     +++ b/net/ipv4/fib_trie.c
4396     @@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb)
4397     }
4398    
4399     /* Caller must hold RTNL. */
4400     -int fib_table_flush(struct net *net, struct fib_table *tb)
4401     +int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
4402     {
4403     struct trie *t = (struct trie *)tb->tb_data;
4404     struct key_vector *pn = t->kv;
4405     @@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
4406     hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
4407     struct fib_info *fi = fa->fa_info;
4408    
4409     - if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
4410     - tb->tb_id != fa->tb_id) {
4411     + if (!fi || tb->tb_id != fa->tb_id ||
4412     + (!(fi->fib_flags & RTNH_F_DEAD) &&
4413     + !fib_props[fa->fa_type].error)) {
4414     + slen = fa->fa_slen;
4415     + continue;
4416     + }
4417     +
4418     + /* Do not flush error routes if network namespace is
4419     + * not being dismantled
4420     + */
4421     + if (!flush_all && fib_props[fa->fa_type].error) {
4422     slen = fa->fa_slen;
4423     continue;
4424     }
4425     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
4426     index 0fe9419bd12b..3407a82d4549 100644
4427     --- a/net/ipv4/ip_gre.c
4428     +++ b/net/ipv4/ip_gre.c
4429     @@ -567,8 +567,7 @@ err_free_skb:
4430     dev->stats.tx_dropped++;
4431     }
4432    
4433     -static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
4434     - __be16 proto)
4435     +static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
4436     {
4437     struct ip_tunnel *tunnel = netdev_priv(dev);
4438     struct ip_tunnel_info *tun_info;
4439     @@ -576,10 +575,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
4440     struct erspan_metadata *md;
4441     struct rtable *rt = NULL;
4442     bool truncate = false;
4443     + __be16 df, proto;
4444     struct flowi4 fl;
4445     int tunnel_hlen;
4446     int version;
4447     - __be16 df;
4448     int nhoff;
4449     int thoff;
4450    
4451     @@ -624,18 +623,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
4452     if (version == 1) {
4453     erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
4454     ntohl(md->u.index), truncate, true);
4455     + proto = htons(ETH_P_ERSPAN);
4456     } else if (version == 2) {
4457     erspan_build_header_v2(skb,
4458     ntohl(tunnel_id_to_key32(key->tun_id)),
4459     md->u.md2.dir,
4460     get_hwid(&md->u.md2),
4461     truncate, true);
4462     + proto = htons(ETH_P_ERSPAN2);
4463     } else {
4464     goto err_free_rt;
4465     }
4466    
4467     gre_build_header(skb, 8, TUNNEL_SEQ,
4468     - htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
4469     + proto, 0, htonl(tunnel->o_seqno++));
4470    
4471     df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
4472    
4473     @@ -719,12 +720,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
4474     {
4475     struct ip_tunnel *tunnel = netdev_priv(dev);
4476     bool truncate = false;
4477     + __be16 proto;
4478    
4479     if (!pskb_inet_may_pull(skb))
4480     goto free_skb;
4481    
4482     if (tunnel->collect_md) {
4483     - erspan_fb_xmit(skb, dev, skb->protocol);
4484     + erspan_fb_xmit(skb, dev);
4485     return NETDEV_TX_OK;
4486     }
4487    
4488     @@ -740,19 +742,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
4489     }
4490    
4491     /* Push ERSPAN header */
4492     - if (tunnel->erspan_ver == 1)
4493     + if (tunnel->erspan_ver == 1) {
4494     erspan_build_header(skb, ntohl(tunnel->parms.o_key),
4495     tunnel->index,
4496     truncate, true);
4497     - else if (tunnel->erspan_ver == 2)
4498     + proto = htons(ETH_P_ERSPAN);
4499     + } else if (tunnel->erspan_ver == 2) {
4500     erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
4501     tunnel->dir, tunnel->hwid,
4502     truncate, true);
4503     - else
4504     + proto = htons(ETH_P_ERSPAN2);
4505     + } else {
4506     goto free_skb;
4507     + }
4508    
4509     tunnel->parms.o_flags &= ~TUNNEL_KEY;
4510     - __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
4511     + __gre_xmit(skb, dev, &tunnel->parms.iph, proto);
4512     return NETDEV_TX_OK;
4513    
4514     free_skb:
4515     diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
4516     index e609b08c9df4..3163428219cd 100644
4517     --- a/net/ipv4/ip_input.c
4518     +++ b/net/ipv4/ip_input.c
4519     @@ -489,6 +489,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
4520     goto drop;
4521     }
4522    
4523     + iph = ip_hdr(skb);
4524     skb->transport_header = skb->network_header + iph->ihl*4;
4525    
4526     /* Remove any debris in the socket control block */
4527     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4528     index 9e6bc4d6daa7..40cbe5609663 100644
4529     --- a/net/ipv4/tcp.c
4530     +++ b/net/ipv4/tcp.c
4531     @@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
4532     flags = msg->msg_flags;
4533    
4534     if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
4535     - if (sk->sk_state != TCP_ESTABLISHED) {
4536     + if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
4537     err = -EINVAL;
4538     goto out_err;
4539     }
4540     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
4541     index 1976fddb9e00..ce125f4dc810 100644
4542     --- a/net/ipv4/udp.c
4543     +++ b/net/ipv4/udp.c
4544     @@ -785,15 +785,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
4545     const int hlen = skb_network_header_len(skb) +
4546     sizeof(struct udphdr);
4547    
4548     - if (hlen + cork->gso_size > cork->fragsize)
4549     + if (hlen + cork->gso_size > cork->fragsize) {
4550     + kfree_skb(skb);
4551     return -EINVAL;
4552     - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
4553     + }
4554     + if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
4555     + kfree_skb(skb);
4556     return -EINVAL;
4557     - if (sk->sk_no_check_tx)
4558     + }
4559     + if (sk->sk_no_check_tx) {
4560     + kfree_skb(skb);
4561     return -EINVAL;
4562     + }
4563     if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
4564     - dst_xfrm(skb_dst(skb)))
4565     + dst_xfrm(skb_dst(skb))) {
4566     + kfree_skb(skb);
4567     return -EIO;
4568     + }
4569    
4570     skb_shinfo(skb)->gso_size = cork->gso_size;
4571     skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
4572     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4573     index 0f7d434c1eed..b529a79ac222 100644
4574     --- a/net/ipv6/ip6_gre.c
4575     +++ b/net/ipv6/ip6_gre.c
4576     @@ -920,6 +920,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
4577     __u8 dsfield = false;
4578     struct flowi6 fl6;
4579     int err = -EINVAL;
4580     + __be16 proto;
4581     __u32 mtu;
4582     int nhoff;
4583     int thoff;
4584     @@ -1033,8 +1034,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
4585     }
4586    
4587     /* Push GRE header. */
4588     - gre_build_header(skb, 8, TUNNEL_SEQ,
4589     - htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
4590     + proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
4591     + : htons(ETH_P_ERSPAN2);
4592     + gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
4593    
4594     /* TooBig packet may have updated dst->dev's mtu */
4595     if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
4596     @@ -1167,6 +1169,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
4597     t->parms.i_flags = p->i_flags;
4598     t->parms.o_flags = p->o_flags;
4599     t->parms.fwmark = p->fwmark;
4600     + t->parms.erspan_ver = p->erspan_ver;
4601     + t->parms.index = p->index;
4602     + t->parms.dir = p->dir;
4603     + t->parms.hwid = p->hwid;
4604     dst_cache_reset(&t->dst_cache);
4605     }
4606    
4607     @@ -2029,9 +2035,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
4608     struct nlattr *data[],
4609     struct netlink_ext_ack *extack)
4610     {
4611     - struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
4612     + struct ip6_tnl *t = netdev_priv(dev);
4613     + struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
4614     struct __ip6_tnl_parm p;
4615     - struct ip6_tnl *t;
4616    
4617     t = ip6gre_changelink_common(dev, tb, data, &p, extack);
4618     if (IS_ERR(t))
4619     diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
4620     index d2d97d07ef27..d01ec252cb81 100644
4621     --- a/net/ipv6/udp.c
4622     +++ b/net/ipv6/udp.c
4623     @@ -1056,15 +1056,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
4624     const int hlen = skb_network_header_len(skb) +
4625     sizeof(struct udphdr);
4626    
4627     - if (hlen + cork->gso_size > cork->fragsize)
4628     + if (hlen + cork->gso_size > cork->fragsize) {
4629     + kfree_skb(skb);
4630     return -EINVAL;
4631     - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
4632     + }
4633     + if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
4634     + kfree_skb(skb);
4635     return -EINVAL;
4636     - if (udp_sk(sk)->no_check6_tx)
4637     + }
4638     + if (udp_sk(sk)->no_check6_tx) {
4639     + kfree_skb(skb);
4640     return -EINVAL;
4641     + }
4642     if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
4643     - dst_xfrm(skb_dst(skb)))
4644     + dst_xfrm(skb_dst(skb))) {
4645     + kfree_skb(skb);
4646     return -EIO;
4647     + }
4648    
4649     skb_shinfo(skb)->gso_size = cork->gso_size;
4650     skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
4651     diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
4652     index 865ecef68196..c7b6010b2c09 100644
4653     --- a/net/openvswitch/flow_netlink.c
4654     +++ b/net/openvswitch/flow_netlink.c
4655     @@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
4656     return -EINVAL;
4657     }
4658    
4659     - if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
4660     + if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
4661     attrs |= 1 << type;
4662     a[type] = nla;
4663     }
4664     diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
4665     index 4cca8f274662..904730b8ce8f 100644
4666     --- a/net/sched/act_tunnel_key.c
4667     +++ b/net/sched/act_tunnel_key.c
4668     @@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
4669     [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
4670     };
4671    
4672     +static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
4673     +{
4674     + if (!p)
4675     + return;
4676     + if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
4677     + dst_release(&p->tcft_enc_metadata->dst);
4678     + kfree_rcu(p, rcu);
4679     +}
4680     +
4681     static int tunnel_key_init(struct net *net, struct nlattr *nla,
4682     struct nlattr *est, struct tc_action **a,
4683     int ovr, int bind, bool rtnl_held,
4684     @@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
4685     rcu_swap_protected(t->params, params_new,
4686     lockdep_is_held(&t->tcf_lock));
4687     spin_unlock_bh(&t->tcf_lock);
4688     - if (params_new)
4689     - kfree_rcu(params_new, rcu);
4690     + tunnel_key_release_params(params_new);
4691    
4692     if (ret == ACT_P_CREATED)
4693     tcf_idr_insert(tn, *a);
4694     @@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a)
4695     struct tcf_tunnel_key_params *params;
4696    
4697     params = rcu_dereference_protected(t->params, 1);
4698     - if (params) {
4699     - if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
4700     - dst_release(&params->tcft_enc_metadata->dst);
4701     -
4702     - kfree_rcu(params, rcu);
4703     - }
4704     + tunnel_key_release_params(params);
4705     }
4706    
4707     static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
4708     diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
4709     index f427a1e00e7e..1c4436523aa5 100644
4710     --- a/net/sched/cls_api.c
4711     +++ b/net/sched/cls_api.c
4712     @@ -1053,7 +1053,6 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
4713     int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
4714     struct tcf_result *res, bool compat_mode)
4715     {
4716     - __be16 protocol = tc_skb_protocol(skb);
4717     #ifdef CONFIG_NET_CLS_ACT
4718     const int max_reclassify_loop = 4;
4719     const struct tcf_proto *orig_tp = tp;
4720     @@ -1063,6 +1062,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
4721     reclassify:
4722     #endif
4723     for (; tp; tp = rcu_dereference_bh(tp->next)) {
4724     + __be16 protocol = tc_skb_protocol(skb);
4725     int err;
4726    
4727     if (tp->protocol != protocol &&
4728     @@ -1095,7 +1095,6 @@ reset:
4729     }
4730    
4731     tp = first_tp;
4732     - protocol = tc_skb_protocol(skb);
4733     goto reclassify;
4734     #endif
4735     }
4736     diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
4737     index 208d940464d7..45bc2b72dc1c 100644
4738     --- a/net/sched/cls_flower.c
4739     +++ b/net/sched/cls_flower.c
4740     @@ -1176,17 +1176,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
4741     struct cls_fl_head *head = rtnl_dereference(tp->root);
4742     struct cls_fl_filter *fold = *arg;
4743     struct cls_fl_filter *fnew;
4744     + struct fl_flow_mask *mask;
4745     struct nlattr **tb;
4746     - struct fl_flow_mask mask = {};
4747     int err;
4748    
4749     if (!tca[TCA_OPTIONS])
4750     return -EINVAL;
4751    
4752     - tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
4753     - if (!tb)
4754     + mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
4755     + if (!mask)
4756     return -ENOBUFS;
4757    
4758     + tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
4759     + if (!tb) {
4760     + err = -ENOBUFS;
4761     + goto errout_mask_alloc;
4762     + }
4763     +
4764     err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
4765     fl_policy, NULL);
4766     if (err < 0)
4767     @@ -1229,12 +1235,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
4768     }
4769     }
4770    
4771     - err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
4772     + err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
4773     tp->chain->tmplt_priv, extack);
4774     if (err)
4775     goto errout_idr;
4776    
4777     - err = fl_check_assign_mask(head, fnew, fold, &mask);
4778     + err = fl_check_assign_mask(head, fnew, fold, mask);
4779     if (err)
4780     goto errout_idr;
4781    
4782     @@ -1278,6 +1284,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
4783     }
4784    
4785     kfree(tb);
4786     + kfree(mask);
4787     return 0;
4788    
4789     errout_mask:
4790     @@ -1291,6 +1298,8 @@ errout:
4791     kfree(fnew);
4792     errout_tb:
4793     kfree(tb);
4794     +errout_mask_alloc:
4795     + kfree(mask);
4796     return err;
4797     }
4798    
4799     diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
4800     index 73547d17d3c6..943f08be7c38 100644
4801     --- a/net/sunrpc/xprt.c
4802     +++ b/net/sunrpc/xprt.c
4803     @@ -1177,7 +1177,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
4804     INIT_LIST_HEAD(&req->rq_xmit2);
4805     goto out;
4806     }
4807     - } else {
4808     + } else if (!req->rq_seqno) {
4809     list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
4810     if (pos->rq_task->tk_owner != task->tk_owner)
4811     continue;
4812     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
4813     index 51cc6589443f..152f54137082 100644
4814     --- a/sound/pci/hda/patch_conexant.c
4815     +++ b/sound/pci/hda/patch_conexant.c
4816     @@ -931,6 +931,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
4817     SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
4818     SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
4819     SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
4820     + SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
4821     SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
4822     SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
4823     SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
4824     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4825     index 0d95316d6dbd..8ddd016c04d0 100644
4826     --- a/sound/pci/hda/patch_realtek.c
4827     +++ b/sound/pci/hda/patch_realtek.c
4828     @@ -6842,7 +6842,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4829     {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
4830     {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
4831     {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
4832     - {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"},
4833     + {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
4834     {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
4835     {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
4836     {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
4837     diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
4838     index 4d46f4567c3a..bec2eefa8b0f 100644
4839     --- a/sound/soc/codecs/rt5514-spi.c
4840     +++ b/sound/soc/codecs/rt5514-spi.c
4841     @@ -280,6 +280,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
4842    
4843     rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp),
4844     GFP_KERNEL);
4845     + if (!rt5514_dsp)
4846     + return -ENOMEM;
4847    
4848     rt5514_dsp->dev = &rt5514_spi->dev;
4849     mutex_init(&rt5514_dsp->dma_lock);
4850     diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
4851     index e2b5a11b16d1..f03195d2ab2e 100644
4852     --- a/sound/soc/codecs/tlv320aic32x4.c
4853     +++ b/sound/soc/codecs/tlv320aic32x4.c
4854     @@ -822,6 +822,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component,
4855     case SND_SOC_BIAS_PREPARE:
4856     break;
4857     case SND_SOC_BIAS_STANDBY:
4858     + /* Initial cold start */
4859     + if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
4860     + break;
4861     +
4862     /* Switch off BCLK_N Divider */
4863     snd_soc_component_update_bits(component, AIC32X4_BCLKN,
4864     AIC32X4_BCLKEN, 0);
4865     diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4866     index afc559866095..91a2436ce952 100644
4867     --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4868     +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4869     @@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
4870     struct snd_pcm_hw_params *params,
4871     struct snd_soc_dai *dai)
4872     {
4873     - snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
4874     + int ret;
4875     +
4876     + ret =
4877     + snd_pcm_lib_malloc_pages(substream,
4878     + params_buffer_bytes(params));
4879     + if (ret)
4880     + return ret;
4881     memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
4882     return 0;
4883     }
4884     diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
4885     index 460b4bdf4c1e..5d546dcdbc80 100644
4886     --- a/tools/testing/selftests/x86/protection_keys.c
4887     +++ b/tools/testing/selftests/x86/protection_keys.c
4888     @@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
4889     pkey_assert(err);
4890     }
4891    
4892     +void become_child(void)
4893     +{
4894     + pid_t forkret;
4895     +
4896     + forkret = fork();
4897     + pkey_assert(forkret >= 0);
4898     + dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
4899     +
4900     + if (!forkret) {
4901     + /* in the child */
4902     + return;
4903     + }
4904     + exit(0);
4905     +}
4906     +
4907     /* Assumes that all pkeys other than 'pkey' are unallocated */
4908     void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
4909     {
4910     @@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
4911     int nr_allocated_pkeys = 0;
4912     int i;
4913    
4914     - for (i = 0; i < NR_PKEYS*2; i++) {
4915     + for (i = 0; i < NR_PKEYS*3; i++) {
4916     int new_pkey;
4917     dprintf1("%s() alloc loop: %d\n", __func__, i);
4918     new_pkey = alloc_pkey();
4919     @@ -1152,20 +1167,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
4920     if ((new_pkey == -1) && (errno == ENOSPC)) {
4921     dprintf2("%s() failed to allocate pkey after %d tries\n",
4922     __func__, nr_allocated_pkeys);
4923     - break;
4924     + } else {
4925     + /*
4926     + * Ensure the number of successes never
4927     + * exceeds the number of keys supported
4928     + * in the hardware.
4929     + */
4930     + pkey_assert(nr_allocated_pkeys < NR_PKEYS);
4931     + allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
4932     }
4933     - pkey_assert(nr_allocated_pkeys < NR_PKEYS);
4934     - allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
4935     +
4936     + /*
4937     + * Make sure that allocation state is properly
4938     + * preserved across fork().
4939     + */
4940     + if (i == NR_PKEYS*2)
4941     + become_child();
4942     }
4943    
4944     dprintf3("%s()::%d\n", __func__, __LINE__);
4945    
4946     - /*
4947     - * ensure it did not reach the end of the loop without
4948     - * failure:
4949     - */
4950     - pkey_assert(i < NR_PKEYS*2);
4951     -
4952     /*
4953     * There are 16 pkeys supported in hardware. Three are
4954     * allocated by the time we get here: