Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0138-4.9.39-all-fixes.patch



Revision 2956 - Mon Jul 24 12:03:46 2017 UTC, committed by niro
File size: 85828 bytes
Log message: -added patches-4.9
diff --git a/Makefile b/Makefile
index ad0c045d36cd..a872ece51ee5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 38
+SUBLEVEL = 39
EXTRAVERSION =
NAME = Roaring Lionus

diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index d2315ffd8f12..f13ae153fb24 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096

-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+/* This is the base location for PIE (ET_DYN with INTERP) loads. */
+#define ELF_ET_DYN_BASE 0x400000UL

/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
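
Aside: the ELF_ET_DYN_BASE rework above (repeated below for arm64, powerpc, s390 and x86) pins PIE executables (ET_DYN with INTERP) to a fixed base (0x400000 on 32-bit, 4 GB on 64-bit) while ELF interpreters keep landing in the randomized mmap region. A minimal userspace sketch, not part of the patch, to observe the resulting layout on a patched kernel; the file name pie-demo and the printed format are illustrative only:

/* Build as a PIE: gcc -fPIE -pie -o pie-demo pie-demo.c
 * Prints the address of main() and dumps /proc/self/maps so the base
 * chosen by load_elf_binary() can be compared against ELF_ET_DYN_BASE.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *maps = fopen("/proc/self/maps", "r");

	printf("&main = %p\n", (void *)main);
	if (maps) {
		while (fgets(line, sizeof(line), maps))
			fputs(line, stdout);
		fclose(maps);
	}
	return 0;
}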
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index e9bd58793464..49a5d8ccae27 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -75,14 +75,10 @@

timer {
compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13
- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
- <GIC_PPI 14
- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
- <GIC_PPI 11
- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
- <GIC_PPI 10
- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
};

soc {
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index a55384f4a5d7..afa23b057def 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -113,12 +113,11 @@
#define ELF_EXEC_PAGESIZE PAGE_SIZE

/*
- * This is the location that an ET_DYN program is loaded if exec'ed. Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader. We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
 */
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE 0x100000000UL

#ifndef __ASSEMBLY__

@@ -169,7 +168,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,

#ifdef CONFIG_COMPAT

-#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL

/* AArch32 registers. */
#define COMPAT_ELF_NGREG 18
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 16e024602737..cb7697dec294 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -20,6 +20,8 @@
** flush/purge and allocate "regular" cacheable pages for everything.
*/

+#define DMA_ERROR_CODE (~(dma_addr_t)0)
+
#ifdef CONFIG_PA11
extern struct dma_map_ops pcxl_dma_ops;
extern struct dma_map_ops pcx_dma_ops;
@@ -54,12 +56,13 @@ parisc_walk_tree(struct device *dev)
break;
}
}
- BUG_ON(!dev->platform_data);
return dev->platform_data;
}
-
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
-
+
+#define GET_IOC(dev) ({ \
+ void *__pdata = parisc_walk_tree(dev); \
+ __pdata ? HBA_DATA(__pdata)->iommu : NULL; \
+})

#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 59be25764433..a81226257878 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context)
mtctl(__space_to_prot(context), 8);
}

-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+ struct mm_struct *next, struct task_struct *tsk)
{
-
if (prev != next) {
mtctl(__pa(next->pgd), 25);
load_context(next->context);
}
}

+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next, struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm_irqs_off(prev, next, tsk);
+ local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
#define deactivate_mm(tsk,mm) do { } while (0)

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 3cfef1de8061..8ec2ff8fae0d 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -361,7 +361,7 @@
ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
ENTRY_SAME(add_key)
ENTRY_SAME(request_key) /* 265 */
- ENTRY_SAME(keyctl)
+ ENTRY_COMP(keyctl)
ENTRY_SAME(ioprio_set)
ENTRY_SAME(ioprio_get)
ENTRY_SAME(inotify_init)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 040c48fc5391..b6f3b5e98810 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -366,7 +366,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
case 15: /* Data TLB miss fault/Data page fault */
/* send SIGSEGV when outside of vma */
if (!vma ||
- address < vma->vm_start || address > vma->vm_end) {
+ address < vma->vm_start || address >= vma->vm_end) {
si.si_signo = SIGSEGV;
si.si_code = SEGV_MAPERR;
break;
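
Aside: the fault.c fix works because vm_start is inclusive while vm_end is exclusive (it points one byte past the mapping), so an address equal to vm_end already lies outside the VMA. A hedged restatement of the corrected predicate, not part of the patch; the helper name addr_in_vma is made up for illustration:

#include <linux/mm_types.h>

/* True iff addr falls inside the half-open interval [vm_start, vm_end). */
static inline bool addr_in_vma(const struct vm_area_struct *vma,
			       unsigned long addr)
{
	return addr >= vma->vm_start && addr < vma->vm_end;
}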
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index ee46ffef608e..743ad7a400d6 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -23,12 +23,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE

-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE 0x20000000
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \
+ 0x100000000UL)

#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)

diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1736c7d3c94c..8d665f1b29f8 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -158,14 +158,13 @@ extern unsigned int vdso_enabled;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096

-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. 64-bit
- tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_compat_task() ? \
- (STACK_TOP / 3 * 2) : \
- (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
+ 0x100000000UL)

/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index fc61739150e7..f960a043cdeb 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,

static bool avx2_usable(void)
{
- if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+ if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
&& boot_cpu_has(X86_FEATURE_BMI1)
&& boot_cpu_has(X86_FEATURE_BMI2))
return true;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 94aad6364b47..c152db2ab687 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -245,12 +245,13 @@ extern int force_personality32;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096

-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
+ 0x100000000UL)

/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 78f3760ca1f2..b601ddac5719 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -405,6 +405,8 @@
#define MSR_IA32_TSC_ADJUST 0x0000003b
#define MSR_IA32_BNDCFGS 0x00000d90

+#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc
+
#define MSR_IA32_XSS 0x00000da0

#define FEATURE_CONTROL_LOCKED (1<<0)
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 35058c2c0eea..9368fecca3ee 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -144,6 +144,14 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
return best && (best->ebx & bit(X86_FEATURE_RTM));
}

+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
+
static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 04e6bbbd8736..3dc6d8017ce9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2987,7 +2987,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_BNDCFGS:
- if (!kvm_mpx_supported())
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
@@ -3069,7 +3070,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_BNDCFGS:
- if (!kvm_mpx_supported())
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+ return 1;
+ if (is_noncanonical_address(data & PAGE_MASK) ||
+ (data & MSR_IA32_BNDCFGS_RSVD))
return 1;
vmcs_write64(GUEST_BNDCFGS, data);
break;
@@ -6474,7 +6479,6 @@ static __init int hardware_setup(void)
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);

memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
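
Aside: the vmx.c hunks reject a guest BNDCFGS value whose base address is non-canonical or whose reserved bits (bits 2-11, i.e. MSR_IA32_BNDCFGS_RSVD = 0x00000ffc) are set. A standalone sketch of the same validation, not part of the patch; the 48-bit canonical check is written out by hand here since the kernel's is_noncanonical_address() helper is not shown in this diff:

#include <stdbool.h>
#include <stdint.h>

#define BNDCFGS_RSVD	0x00000ffcULL	/* bits 2-11 must be zero */
#define PAGE_MASK_4K	(~0xfffULL)

/* Canonical for 4-level paging: bits 63:48 equal bit 47. */
static bool is_canonical48(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << 16) >> 16) == addr;
}

static bool bndcfgs_valid(uint64_t data)
{
	return is_canonical48(data & PAGE_MASK_4K) && !(data & BNDCFGS_RSVD);
}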
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a7b46798c81d..39efa7e6c0c0 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
value = PM_QOS_LATENCY_ANY;
+ else
+ return -EINVAL;
}
ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
return ret < 0 ? ret : n;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 62e4de2aa8d1..f98121f11f7c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -60,6 +60,8 @@ static LIST_HEAD(wakeup_sources);

static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);

+DEFINE_STATIC_SRCU(wakeup_srcu);
+
static struct wakeup_source deleted_ws = {
.name = "deleted",
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
@@ -198,7 +200,7 @@ void wakeup_source_remove(struct wakeup_source *ws)
spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
spin_unlock_irqrestore(&events_lock, flags);
- synchronize_rcu();
+ synchronize_srcu(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);

@@ -332,12 +334,12 @@ void device_wakeup_detach_irq(struct device *dev)
void device_wakeup_arm_wake_irqs(void)
{
struct wakeup_source *ws;
+ int srcuidx;

- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
dev_pm_arm_wake_irq(ws->wakeirq);
-
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}

/**
@@ -348,12 +350,12 @@ void device_wakeup_arm_wake_irqs(void)
void device_wakeup_disarm_wake_irqs(void)
{
struct wakeup_source *ws;
+ int srcuidx;

- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
dev_pm_disarm_wake_irq(ws->wakeirq);
-
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}

/**
@@ -805,10 +807,10 @@ EXPORT_SYMBOL_GPL(pm_wakeup_event);
void pm_print_active_wakeup_sources(void)
{
struct wakeup_source *ws;
- int active = 0;
+ int srcuidx, active = 0;
struct wakeup_source *last_activity_ws = NULL;

- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
pr_info("active wakeup source: %s\n", ws->name);
@@ -824,7 +826,7 @@ void pm_print_active_wakeup_sources(void)
if (!active && last_activity_ws)
pr_info("last active wakeup source: %s\n",
last_activity_ws->name);
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);

@@ -951,8 +953,9 @@ void pm_wakep_autosleep_enabled(bool set)
{
struct wakeup_source *ws;
ktime_t now = ktime_get();
+ int srcuidx;

- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {
@@ -966,7 +969,7 @@ void pm_wakep_autosleep_enabled(bool set)
}
spin_unlock_irq(&ws->lock);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */

@@ -1027,15 +1030,16 @@ static int print_wakeup_source_stats(struct seq_file *m,
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
struct wakeup_source *ws;
+ int srcuidx;

seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
"expire_count\tactive_since\ttotal_time\tmax_time\t"
"last_change\tprevent_suspend_time\n");

- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
print_wakeup_source_stats(m, ws);
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);

print_wakeup_source_stats(m, &deleted_ws);
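
Aside: the wakeup.c conversion swaps plain RCU for SRCU because these list walks may sleep (arming and disarming wake IRQs can block), which is forbidden inside rcu_read_lock() but allowed inside an SRCU read-side section. A minimal sketch of the reader/updater pattern used above, not part of the patch; the list and item names are placeholders:

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/srcu.h>

static LIST_HEAD(item_list);
DEFINE_STATIC_SRCU(item_srcu);

struct item {
	struct list_head entry;
};

/* Reader: may sleep inside the SRCU read-side section. */
static void visit_items(void (*fn)(struct item *))
{
	struct item *it;
	int idx;

	idx = srcu_read_lock(&item_srcu);
	list_for_each_entry_rcu(it, &item_list, entry)
		fn(it);			/* fn() is allowed to block */
	srcu_read_unlock(&item_srcu, idx);
}

/* Updater: wait for all SRCU readers before freeing the entry. */
static void remove_item(struct item *it)
{
	list_del_rcu(&it->entry);
	synchronize_srcu(&item_srcu);
	kfree(it);
}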
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 97e34799e077..6fcf25f795d4 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1000,7 +1000,9 @@ static int atmel_sha_finup(struct ahash_request *req)
ctx->flags |= SHA_FLAGS_FINUP;

err1 = atmel_sha_update(req);
- if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ if (err1 == -EINPROGRESS ||
+ (err1 == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
return err1;

/*
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 3bda6e5e2a45..0d743c634f25 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2014,10 +2014,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

+#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

@@ -2037,6 +2037,14 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
#endif

ablkcipher_unmap(jrdev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ ivsize, 0);
+
kfree(edesc);

ablkcipher_request_complete(req, err);
@@ -2047,10 +2055,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

+#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

@@ -2069,6 +2077,14 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
#endif

ablkcipher_unmap(jrdev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block.
+ */
+ scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+ ivsize, 0);
+
kfree(edesc);

ablkcipher_request_complete(req, err);
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 2474f1494955..631337c2e4a7 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -491,7 +491,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR,
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4ff9762..3ce1d5cdcbd2 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 0418a2f41dc0..571de2f284cf 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -816,7 +816,7 @@ static void talitos_unregister_rng(struct device *dev)
* HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
*/
#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
-#define TALITOS_MAX_KEY_SIZE 96
+#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
@@ -1495,6 +1495,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

+ if (keylen > TALITOS_MAX_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;

diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 19d642eae096..24d388d74011 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -646,6 +646,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
int enabled;
u64 val;

+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
if (gic_irq_in_rdist(d))
return -EINVAL;

diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 380a64115a98..258bc8deae3b 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -230,7 +230,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
/* Wait for 100ms as Octeon resets. */
mdelay(100);

- if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) {
+ if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
oct->octeon_id);
return 1;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index e779af88621b..cda32d5b9977 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -48,7 +48,7 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct)
/* Wait for 10ms as Octeon resets. */
mdelay(100);

- if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
+ if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
return 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ea582342dd8f..9d3722930c95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2671,8 +2671,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
- stats->tx_carrier_errors =
- PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
stats->rx_frame_errors;
stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 2115c8aacc5b..8beecd615a21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -67,6 +67,7 @@ enum {

enum {
MLX5_DROP_NEW_HEALTH_WORK,
+ MLX5_DROP_NEW_RECOVERY_WORK,
};

static u8 get_nic_state(struct mlx5_core_dev *dev)
@@ -193,7 +194,7 @@ static void health_care(struct work_struct *work)
mlx5_handle_bad_state(dev);

spin_lock(&health->wq_lock);
- if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
schedule_delayed_work(&health->recover_work, recover_delay);
else
dev_err(&dev->pdev->dev,
@@ -328,6 +329,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
init_timer(&health->timer);
health->sick = 0;
clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;

@@ -350,11 +352,22 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)

spin_lock(&health->wq_lock);
set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
spin_unlock(&health->wq_lock);
cancel_delayed_work_sync(&health->recover_work);
cancel_work_sync(&health->work);
}

+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ spin_lock(&health->wq_lock);
+ set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+ spin_unlock(&health->wq_lock);
+ cancel_delayed_work_sync(&dev->priv.health.recover_work);
+}
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5bea0bf2b484..b3309f2ed7dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1169,7 +1169,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
int err = 0;

if (cleanup)
- mlx5_drain_health_wq(dev);
+ mlx5_drain_health_recovery(dev);

mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 4ca461322d60..b1af7cd190a0 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1505,8 +1505,8 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
*index = entry->index;
resolved = false;
} else if (removing) {
- ofdpa_neigh_del(trans, found);
*index = found->index;
+ ofdpa_neigh_del(trans, found);
} else if (updating) {
ofdpa_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 00279da6a1e8..c4ada7227f40 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4399,12 +4399,9 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *uc;
- int addr_count;
unsigned int i;

- addr_count = netdev_uc_count(net_dev);
table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
- table->dev_uc_count = 1 + addr_count;
ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {
@@ -4415,6 +4412,8 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
i++;
}
+
+ table->dev_uc_count = i;
}

static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
@@ -4422,11 +4421,10 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *mc;
- unsigned int i, addr_count;
+ unsigned int i;

table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));

- addr_count = netdev_mc_count(net_dev);
i = 0;
netdev_for_each_mc_addr(mc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 4865221aa9ac..b88f7d65953d 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -908,7 +908,7 @@ static void decode_txts(struct dp83640_private *dp83640,
if (overflow) {
pr_debug("tx timestamp queue overflow, count %d\n", overflow);
while (skb) {
- skb_complete_tx_timestamp(skb, NULL);
+ kfree_skb(skb);
skb = skb_dequeue(&dp83640->tx_queue);
}
return;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index fab56c9350cf..222918828655 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -622,6 +622,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
if ((regval & 0xFF) == 0xFF) {
phy_init_hw(phydev);
phydev->link = 0;
+ if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+ phydev->drv->config_intr(phydev);
}

return 0;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 642df9391193..578bd5001d93 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -787,15 +787,10 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
static void vrf_dev_uninit(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- struct net_device *port_dev;
- struct list_head *iter;

vrf_rtable_release(dev, vrf);
vrf_rt6_release(dev, vrf);

- netdev_for_each_lower_dev(dev, port_dev, iter)
- vrf_del_slave(dev, port_dev);
-
free_percpu(dev->dstats);
dev->dstats = NULL;
}
@@ -1232,6 +1227,12 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
+ struct net_device *port_dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(dev, port_dev, iter)
+ vrf_del_slave(dev, port_dev);
+
unregister_netdevice_queue(dev, head);
}

diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 963e5339a4d7..983e941bdf29 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -227,15 +227,15 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
- struct vxlan_dev *vxlan;
+ struct vxlan_dev_node *node;

/* For flow based devices, map all packets to VNI 0 */
if (vs->flags & VXLAN_F_COLLECT_METADATA)
vni = 0;

- hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
- if (vxlan->default_dst.remote_vni == vni)
- return vxlan;
+ hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
+ if (node->vxlan->default_dst.remote_vni == vni)
+ return node->vxlan;
}

return NULL;
@@ -2309,17 +2309,22 @@ static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

spin_lock(&vn->sock_lock);
- hlist_del_init_rcu(&vxlan->hlist);
+ hlist_del_init_rcu(&vxlan->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ hlist_del_init_rcu(&vxlan->hlist6.hlist);
+#endif
spin_unlock(&vn->sock_lock);
}

-static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
+static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
+ struct vxlan_dev_node *node)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
__be32 vni = vxlan->default_dst.remote_vni;

+ node->vxlan = vxlan;
spin_lock(&vn->sock_lock);
- hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+ hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
spin_unlock(&vn->sock_lock);
}

@@ -2778,6 +2783,7 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs = NULL;
+ struct vxlan_dev_node *node;

if (!vxlan->cfg.no_share) {
spin_lock(&vn->sock_lock);
@@ -2795,12 +2801,16 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
if (IS_ERR(vs))
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
- if (ipv6)
+ if (ipv6) {
rcu_assign_pointer(vxlan->vn6_sock, vs);
- else
+ node = &vxlan->hlist6;
+ } else
#endif
+ {
rcu_assign_pointer(vxlan->vn4_sock, vs);
- vxlan_vs_add_dev(vs, vxlan);
+ node = &vxlan->hlist4;
+ }
+ vxlan_vs_add_dev(vs, vxlan, node);
return 0;
}

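Aside: the vxlan rework is needed because a single hlist_node can only sit on one hash chain at a time; to let one device appear under both its IPv4 and IPv6 sockets, each list membership gets its own embedded node carrying a back-pointer to the device. A hedged sketch of that data-structure pattern, not part of the patch (the real struct vxlan_dev_node lives in drivers/net/vxlan.h, which this diff does not show; the names here are shortened):

#include <linux/list.h>

struct my_dev;

struct dev_node {
	struct hlist_node hlist;	/* one hash-chain membership */
	struct my_dev *dev;		/* back-pointer to the owning device */
};

struct my_dev {
	struct dev_node node4;		/* membership under the IPv4 socket */
	struct dev_node node6;		/* membership under the IPv6 socket */
};

/* Walkers iterate dev_node entries and chase the back-pointer. */
static struct my_dev *first_dev(struct hlist_head *head)
{
	struct dev_node *n;

	hlist_for_each_entry(n, head, hlist)
		return n->dev;
	return NULL;
}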
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 72139b579b18..746f8c9a891d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -705,7 +705,7 @@ int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
struct sk_buff_head *pktq, uint totlen)
{
- struct sk_buff *glom_skb;
+ struct sk_buff *glom_skb = NULL;
struct sk_buff *skb;
u32 addr = sdiodev->sbwad;
int err = 0;
@@ -726,10 +726,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
return -ENOMEM;
err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
glom_skb);
- if (err) {
- brcmu_pkt_buf_free_skb(glom_skb);
+ if (err)
goto done;
- }

skb_queue_walk(pktq, skb) {
memcpy(skb->data, glom_skb->data, skb->len);
@@ -740,6 +738,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
pktq);

done:
+ brcmu_pkt_buf_free_skb(glom_skb);
return err;
}

diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 0f5dde1f2248..1d4352e1ac81 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4928,6 +4928,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
GFP_KERNEL);
} else if (ieee80211_is_action(mgmt->frame_control)) {
+ if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
+ brcmf_err("invalid action frame length\n");
+ err = -EINVAL;
+ goto exit;
+ }
af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
if (af_params == NULL) {
brcmf_err("unable to allocate frame\n");
@@ -6871,7 +6876,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info));
if (!wiphy) {
brcmf_err("Could not allocate wiphy device\n");
- return NULL;
+ goto ops_out;
}
memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN);
set_wiphy_dev(wiphy, busdev);
@@ -7005,6 +7010,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
ifp->vif = NULL;
wiphy_out:
brcmf_free_wiphy(wiphy);
+ops_out:
kfree(ops);
return NULL;
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 599cf5090030..cd442e46afb4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -281,6 +281,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
RING_IDX req_prod = queue->rx.req_prod_pvt;
int notify;
+ int err = 0;

if (unlikely(!netif_carrier_ok(queue->info->netdev)))
return;
@@ -295,8 +296,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
struct xen_netif_rx_request *req;

skb = xennet_alloc_one_rx_buffer(queue);
- if (!skb)
+ if (!skb) {
+ err = -ENOMEM;
break;
+ }

id = xennet_rxidx(req_prod);

@@ -320,8 +323,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)

queue->rx.req_prod_pvt = req_prod;

- /* Not enough requests? Try again later. */
- if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
+ /* Try again later if there are not enough requests or skb allocation
+ * failed.
+ * Enough requests is quantified as the sum of newly created slots and
+ * the unconsumed slots at the backend.
+ */
+ if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
+ unlikely(err)) {
mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
return;
}
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 965911d9b36a..1b4d93e9157e 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -488,21 +488,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)

rval = device_add(&nvmem->dev);
if (rval)
- goto out;
+ goto err_put_device;

if (config->compat) {
rval = nvmem_setup_compat(nvmem, config);
if (rval)
- goto out;
+ goto err_device_del;
}

if (config->cells)
nvmem_add_cells(nvmem, config);

return nvmem;
-out:
- ida_simple_remove(&nvmem_ida, nvmem->id);
- kfree(nvmem);
+
+err_device_del:
+ device_del(&nvmem->dev);
+err_put_device:
+ put_device(&nvmem->dev);
+
return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 3ed6238f8f6e..c4953eca907d 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,

BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc)
+ return DMA_ERROR_CODE;

BUG_ON(size <= 0);

@@ -814,6 +816,10 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,

BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }

DBG_RUN("%s() iovp 0x%lx/%x\n",
__func__, (long)iova, size);
@@ -918,6 +924,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,

BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;

DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

@@ -990,6 +998,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,

BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }

DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__func__, nents, sg_virt(sglist), sglist->length);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 1133b5cc88ca..5c63b920b471 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -154,7 +154,10 @@ struct dino_device
};

/* Looks nice and keeps the compiler happy */
-#define DINO_DEV(d) ((struct dino_device *) d)
+#define DINO_DEV(d) ({ \
+ void *__pdata = d; \
+ BUG_ON(!__pdata); \
+ (struct dino_device *)__pdata; })


/*
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 2ec2aef4d211..bc286cbbbc9b 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -111,8 +111,10 @@ static u32 lba_t32;


/* Looks nice and keeps the compiler happy */
-#define LBA_DEV(d) ((struct lba_device *) (d))
-
+#define LBA_DEV(d) ({ \
+ void *__pdata = d; \
+ BUG_ON(!__pdata); \
+ (struct lba_device *)__pdata; })

/*
** Only allow 8 subsidiary busses per LBA
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 151b86b6d2e2..56918d1c0ed3 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
return 0;

ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;

/*
* check if mask is >= than the current max IO Virt Address
@@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
int pide;

ioc = GET_IOC(dev);
+ if (!ioc)
+ return DMA_ERROR_CODE;

/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -813,6 +817,10 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
offset = iova & ~IOVP_MASK;
iova ^= offset; /* clear offset bits */
size += offset;
@@ -952,6 +960,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;

/* Fast path single entry scatterlists. */
if (nents == 1) {
@@ -1037,6 +1047,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
__func__, nents, sg_virt(sglist), sglist->length);

ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }

#ifdef SBA_COLLECT_STATS
ioc->usg_calls++;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8c3bf3d613c0..ce2c3c6349d4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -2711,13 +2711,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
* related to the kernel should not use this.
*/
data = vt_get_shift_state();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_GETMOUSEREPORTING:
console_lock(); /* May be overkill */
data = mouse_reporting();
console_unlock();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_SETVESABLANK:
console_lock();
@@ -2726,7 +2726,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
break;
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN)) {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index cfd724f98332..1fdf4e5bf8c6 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -911,17 +911,60 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

vaddr = elf_ppnt->p_vaddr;
+ /*
+ * If we are loading ET_EXEC or we have already performed
+ * the ET_DYN load_addr calculations, proceed normally.
+ */
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
- /* Try and get dynamic programs out of the way of the
- * default mmap base, as well as whatever program they
- * might try to exec. This is because the brk will
- * follow the loader, and is not movable. */
- load_bias = ELF_ET_DYN_BASE - vaddr;
- if (current->flags & PF_RANDOMIZE)
- load_bias += arch_mmap_rnd();
- load_bias = ELF_PAGESTART(load_bias);
+ /*
+ * This logic is run once for the first LOAD Program
+ * Header for ET_DYN binaries to calculate the
+ * randomization (load_bias) for all the LOAD
+ * Program Headers, and to calculate the entire
+ * size of the ELF mapping (total_size). (Note that
+ * load_addr_set is set to true later once the
+ * initial mapping is performed.)
+ *
+ * There are effectively two types of ET_DYN
+ * binaries: programs (i.e. PIE: ET_DYN with INTERP)
+ * and loaders (ET_DYN without INTERP, since they
+ * _are_ the ELF interpreter). The loaders must
+ * be loaded away from programs since the program
+ * may otherwise collide with the loader (especially
+ * for ET_EXEC which does not have a randomized
+ * position). For example to handle invocations of
+ * "./ld.so someprog" to test out a new version of
+ * the loader, the subsequent program that the
+ * loader loads must avoid the loader itself, so
+ * they cannot share the same load range. Sufficient
+ * room for the brk must be allocated with the
+ * loader as well, since brk must be available with
+ * the loader.
+ *
+ * Therefore, programs are loaded offset from
+ * ELF_ET_DYN_BASE and loaders are loaded into the
+ * independently randomized mmap region (0 load_bias
+ * without MAP_FIXED).
+ */
+ if (elf_interpreter) {
+ load_bias = ELF_ET_DYN_BASE;
+ if (current->flags & PF_RANDOMIZE)
+ load_bias += arch_mmap_rnd();
+ elf_flags |= MAP_FIXED;
+ } else
+ load_bias = 0;
+
+ /*
+ * Since load_bias is used for all subsequent loading
+ * calculations, we must lower it by the first vaddr
+ * so that the remaining calculations based on the
+ * ELF vaddrs will be correctly offset. The result
+ * is then page aligned.
+ */
+ load_bias = ELF_PAGESTART(load_bias - vaddr);
+
total_size = total_mapping_size(elf_phdata,
loc->elf_ex.e_phnum);
if (!total_size) {
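
Aside: a small worked example of the load_bias arithmetic in the hunk above, runnable in userspace and not part of the patch; the values (4 KiB pages, a first PT_LOAD p_vaddr of 0x1000, the 32-bit PIE base 0x400000) are illustrative:

#include <stdio.h>

#define PAGE_SIZE	0x1000UL
#define PAGESTART(a)	((a) & ~(PAGE_SIZE - 1))	/* mirrors ELF_PAGESTART() */

int main(void)
{
	unsigned long base = 0x400000UL;	/* ELF_ET_DYN_BASE for a 32-bit PIE */
	unsigned long vaddr = 0x1000UL;		/* p_vaddr of the first PT_LOAD */
	unsigned long load_bias = PAGESTART(base - vaddr);

	/* The first segment ends up mapped at load_bias + vaddr = 0x400000. */
	printf("load_bias = %#lx, vaddr %#lx maps at %#lx\n",
	       load_bias, vaddr, load_bias + vaddr);
	return 0;
}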
diff --git a/fs/dcache.c b/fs/dcache.c
index 4485a48f4091..1dbc6b560fef 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1133,11 +1133,12 @@ void shrink_dcache_sb(struct super_block *sb)
LIST_HEAD(dispose);

freed = list_lru_walk(&sb->s_dentry_lru,
- dentry_lru_isolate_shrink, &dispose, UINT_MAX);
+ dentry_lru_isolate_shrink, &dispose, 1024);

this_cpu_sub(nr_dentry_unused, freed);
shrink_dentry_list(&dispose);
- } while (freed > 0);
+ cond_resched();
+ } while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

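Aside: the dcache change caps each LRU walk at 1024 entries and yields between batches instead of isolating everything in one pass, avoiding soft lockups on very large dentry LRUs. A hedged sketch of the batch-and-resched loop shape, not part of the patch; list_lru_walk(), list_lru_count() and cond_resched() are real kernel APIs, while isolate_cb() and dispose_list() are placeholders:

#include <linux/list_lru.h>
#include <linux/sched.h>

/* Placeholder callback with the list_lru_walk_cb signature. */
extern enum lru_status isolate_cb(struct list_head *item,
				  struct list_lru_one *lru,
				  spinlock_t *lock, void *cb_arg);
extern void dispose_list(struct list_head *head);

static void drain_lru(struct list_lru *lru)
{
	do {
		LIST_HEAD(dispose);

		/* Isolate at most 1024 entries per pass... */
		list_lru_walk(lru, isolate_cb, &dispose, 1024);
		dispose_list(&dispose);
		/* ...then give other tasks a chance to run. */
		cond_resched();
	} while (list_lru_count(lru) > 0);
}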
diff --git a/fs/exec.c b/fs/exec.c
index 91441402d706..b8c43be24751 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -215,8 +215,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,

if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
- unsigned long ptr_size;
- struct rlimit *rlim;
+ unsigned long ptr_size, limit;

/*
* Since the stack will hold pointers to the strings, we
@@ -245,14 +244,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
return page;

/*
- * Limit to 1/4-th the stack size for the argv+env strings.
+ * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
+ * (whichever is smaller) for the argv+env strings.
* This ensures that:
* - the remaining binfmt code will not run out of stack space,
* - the program will have a reasonable amount of stack left
* to work from.
*/
- rlim = current->signal->rlim;
- if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+ limit = _STK_LIM / 4 * 3;
+ limit = min(limit, rlimit(RLIMIT_STACK) / 4);
+ if (size > limit)
goto fail;
}

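Aside: to make the new exec.c bound concrete: with the usual kernel default _STK_LIM of 8 MiB, the cap is min(6 MiB, RLIMIT_STACK / 4), so an unlimited stack rlimit can no longer inflate the argv+env area without bound. A tiny userspace check of the arithmetic, not part of the patch; the _STK_LIM value is an assumption stated here, and the rlimit is an example:

#include <stdio.h>

#define STK_LIM		(8UL * 1024 * 1024)	/* assumed kernel default _STK_LIM */
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long rlim_cur = 64UL * 1024 * 1024;	/* example RLIMIT_STACK */
	unsigned long limit = MIN(STK_LIM / 4 * 3, rlim_cur / 4);

	/* 8 MiB * 3/4 = 6 MiB; 64 MiB / 4 = 16 MiB; the 6 MiB cap wins. */
	printf("argv+env cap = %lu MiB\n", limit >> 20);
	return 0;
}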
1315     diff --git a/fs/mount.h b/fs/mount.h
1316     index d8295f273a2f..3603884a63dd 100644
1317     --- a/fs/mount.h
1318     +++ b/fs/mount.h
1319     @@ -58,6 +58,7 @@ struct mount {
1320     struct mnt_namespace *mnt_ns; /* containing namespace */
1321     struct mountpoint *mnt_mp; /* where is it mounted */
1322     struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
1323     + struct list_head mnt_umounting; /* list entry for umount propagation */
1324     #ifdef CONFIG_FSNOTIFY
1325     struct hlist_head mnt_fsnotify_marks;
1326     __u32 mnt_fsnotify_mask;
1327     diff --git a/fs/namespace.c b/fs/namespace.c
1328     index 5e35057f07ac..d7360f9897b4 100644
1329     --- a/fs/namespace.c
1330     +++ b/fs/namespace.c
1331     @@ -237,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name)
1332     INIT_LIST_HEAD(&mnt->mnt_slave_list);
1333     INIT_LIST_HEAD(&mnt->mnt_slave);
1334     INIT_HLIST_NODE(&mnt->mnt_mp_list);
1335     + INIT_LIST_HEAD(&mnt->mnt_umounting);
1336     #ifdef CONFIG_FSNOTIFY
1337     INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
1338     #endif
1339     diff --git a/fs/pnode.c b/fs/pnode.c
1340     index b394ca5307ec..d15c63e97ef1 100644
1341     --- a/fs/pnode.c
1342     +++ b/fs/pnode.c
1343     @@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p)
1344     return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
1345     }
1346    
1347     +static inline struct mount *last_slave(struct mount *p)
1348     +{
1349     + return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
1350     +}
1351     +
1352     static inline struct mount *next_slave(struct mount *p)
1353     {
1354     return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
1355     @@ -164,6 +169,19 @@ static struct mount *propagation_next(struct mount *m,
1356     }
1357     }
1358    
1359     +static struct mount *skip_propagation_subtree(struct mount *m,
1360     + struct mount *origin)
1361     +{
1362     + /*
1363     + * Advance m such that propagation_next will not return
1364     + * the slaves of m.
1365     + */
1366     + if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
1367     + m = last_slave(m);
1368     +
1369     + return m;
1370     +}
1371     +
1372     static struct mount *next_group(struct mount *m, struct mount *origin)
1373     {
1374     while (1) {
1375     @@ -415,65 +433,104 @@ void propagate_mount_unlock(struct mount *mnt)
1376     }
1377     }
1378    
1379     -/*
1380     - * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
1381     - */
1382     -static void mark_umount_candidates(struct mount *mnt)
1383     +static void umount_one(struct mount *mnt, struct list_head *to_umount)
1384     {
1385     - struct mount *parent = mnt->mnt_parent;
1386     - struct mount *m;
1387     -
1388     - BUG_ON(parent == mnt);
1389     -
1390     - for (m = propagation_next(parent, parent); m;
1391     - m = propagation_next(m, parent)) {
1392     - struct mount *child = __lookup_mnt(&m->mnt,
1393     - mnt->mnt_mountpoint);
1394     - if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
1395     - continue;
1396     - if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
1397     - SET_MNT_MARK(child);
1398     - }
1399     - }
1400     + CLEAR_MNT_MARK(mnt);
1401     + mnt->mnt.mnt_flags |= MNT_UMOUNT;
1402     + list_del_init(&mnt->mnt_child);
1403     + list_del_init(&mnt->mnt_umounting);
1404     + list_move_tail(&mnt->mnt_list, to_umount);
1405     }
1406    
1407     /*
1408     * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
1409     * parent propagates to.
1410     */
1411     -static void __propagate_umount(struct mount *mnt)
1412     +static bool __propagate_umount(struct mount *mnt,
1413     + struct list_head *to_umount,
1414     + struct list_head *to_restore)
1415     {
1416     - struct mount *parent = mnt->mnt_parent;
1417     - struct mount *m;
1418     + bool progress = false;
1419     + struct mount *child;
1420    
1421     - BUG_ON(parent == mnt);
1422     + /*
1423     + * The state of the parent won't change if this mount is
1424     + * already unmounted or marked as without children.
1425     + */
1426     + if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
1427     + goto out;
1428    
1429     - for (m = propagation_next(parent, parent); m;
1430     - m = propagation_next(m, parent)) {
1431     - struct mount *topper;
1432     - struct mount *child = __lookup_mnt(&m->mnt,
1433     - mnt->mnt_mountpoint);
1434     - /*
1435     - * umount the child only if the child has no children
1436     - * and the child is marked safe to unmount.
1437     - */
1438     - if (!child || !IS_MNT_MARKED(child))
1439     + /* Verify topper is the only grandchild that has not been
1440     + * speculatively unmounted.
1441     + */
1442     + list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
1443     + if (child->mnt_mountpoint == mnt->mnt.mnt_root)
1444     continue;
1445     - CLEAR_MNT_MARK(child);
1446     + if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
1447     + continue;
1448     + /* Found a mounted child */
1449     + goto children;
1450     + }
1451    
1452     - /* If there is exactly one mount covering all of child
1453     - * replace child with that mount.
1454     - */
1455     - topper = find_topper(child);
1456     - if (topper)
1457     - mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
1458     - topper);
1459     + /* Mark mounts that can be unmounted if not locked */
1460     + SET_MNT_MARK(mnt);
1461     + progress = true;
1462     +
1463     + /* If a mount is without children and not locked, umount it. */
1464     + if (!IS_MNT_LOCKED(mnt)) {
1465     + umount_one(mnt, to_umount);
1466     + } else {
1467     +children:
1468     + list_move_tail(&mnt->mnt_umounting, to_restore);
1469     + }
1470     +out:
1471     + return progress;
1472     +}
1473     +
1474     +static void umount_list(struct list_head *to_umount,
1475     + struct list_head *to_restore)
1476     +{
1477     + struct mount *mnt, *child, *tmp;
1478     + list_for_each_entry(mnt, to_umount, mnt_list) {
1479     + list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
1480     + /* topper? */
1481     + if (child->mnt_mountpoint == mnt->mnt.mnt_root)
1482     + list_move_tail(&child->mnt_umounting, to_restore);
1483     + else
1484     + umount_one(child, to_umount);
1485     + }
1486     + }
1487     +}
1488    
1489     - if (list_empty(&child->mnt_mounts)) {
1490     - list_del_init(&child->mnt_child);
1491     - child->mnt.mnt_flags |= MNT_UMOUNT;
1492     - list_move_tail(&child->mnt_list, &mnt->mnt_list);
1493     +static void restore_mounts(struct list_head *to_restore)
1494     +{
1495     + /* Restore mounts to a clean working state */
1496     + while (!list_empty(to_restore)) {
1497     + struct mount *mnt, *parent;
1498     + struct mountpoint *mp;
1499     +
1500     + mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
1501     + CLEAR_MNT_MARK(mnt);
1502     + list_del_init(&mnt->mnt_umounting);
1503     +
1504     + /* Should this mount be reparented? */
1505     + mp = mnt->mnt_mp;
1506     + parent = mnt->mnt_parent;
1507     + while (parent->mnt.mnt_flags & MNT_UMOUNT) {
1508     + mp = parent->mnt_mp;
1509     + parent = parent->mnt_parent;
1510     }
1511     + if (parent != mnt->mnt_parent)
1512     + mnt_change_mountpoint(parent, mp, mnt);
1513     + }
1514     +}
1515     +
1516     +static void cleanup_umount_visitations(struct list_head *visited)
1517     +{
1518     + while (!list_empty(visited)) {
1519     + struct mount *mnt =
1520     + list_first_entry(visited, struct mount, mnt_umounting);
1521     + list_del_init(&mnt->mnt_umounting);
1522     }
1523     }
1524    
1525     @@ -487,11 +544,68 @@ static void __propagate_umount(struct mount *mnt)
1526     int propagate_umount(struct list_head *list)
1527     {
1528     struct mount *mnt;
1529     + LIST_HEAD(to_restore);
1530     + LIST_HEAD(to_umount);
1531     + LIST_HEAD(visited);
1532     +
1533     + /* Find candidates for unmounting */
1534     + list_for_each_entry_reverse(mnt, list, mnt_list) {
1535     + struct mount *parent = mnt->mnt_parent;
1536     + struct mount *m;
1537     +
1538     + /*
1539     + * If this mount has already been visited, it is known that its
1540     + * entire peer group and all of their slaves in the propagation
1541     + * tree for the mountpoint have already been visited and there is
1542     + * no need to visit them again.
1543     + */
1544     + if (!list_empty(&mnt->mnt_umounting))
1545     + continue;
1546     +
1547     + list_add_tail(&mnt->mnt_umounting, &visited);
1548     + for (m = propagation_next(parent, parent); m;
1549     + m = propagation_next(m, parent)) {
1550     + struct mount *child = __lookup_mnt(&m->mnt,
1551     + mnt->mnt_mountpoint);
1552     + if (!child)
1553     + continue;
1554     +
1555     + if (!list_empty(&child->mnt_umounting)) {
1556     + /*
1557     + * If the child has already been visited, it is
1558     + * known that its entire peer group and all of
1559     + * their slaves in the propagation tree for the
1560     + * mountpoint have already been visited and there
1561     + * is no need to visit this subtree again.
1562     + */
1563     + m = skip_propagation_subtree(m, parent);
1564     + continue;
1565     + } else if (child->mnt.mnt_flags & MNT_UMOUNT) {
1566     + /*
1567     + * We have come across a partially unmounted
1568     + * mount in the list that has not been visited yet.
1569     + * Remember it has been visited and continue
1570     + * about our merry way.
1571     + */
1572     + list_add_tail(&child->mnt_umounting, &visited);
1573     + continue;
1574     + }
1575     +
1576     + /* Check the child and parents while progress is made */
1577     + while (__propagate_umount(child,
1578     + &to_umount, &to_restore)) {
1579     + /* Is the parent a umount candidate? */
1580     + child = child->mnt_parent;
1581     + if (list_empty(&child->mnt_umounting))
1582     + break;
1583     + }
1584     + }
1585     + }
1586    
1587     - list_for_each_entry_reverse(mnt, list, mnt_list)
1588     - mark_umount_candidates(mnt);
1589     + umount_list(&to_umount, &to_restore);
1590     + restore_mounts(&to_restore);
1591     + cleanup_umount_visitations(&visited);
1592     + list_splice_tail(&to_umount, list);
1593    
1594     - list_for_each_entry(mnt, list, mnt_list)
1595     - __propagate_umount(mnt);
1596     return 0;
1597     }
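The heart of the rewrite is that a self-linked (empty) mnt_umounting entry means "not yet visited", so list membership doubles as the visited flag and cleanup_umount_visitations() only has to empty one list. A freestanding sketch of that idiom, with simplified stand-ins for the kernel's list helpers:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void init_list_head(struct list_head *h) { h->next = h->prev = h; }
    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
    }

    struct mount { struct list_head mnt_umounting; const char *name; };

    int main(void)
    {
        struct list_head visited;
        struct mount m = { .name = "mnt" };

        init_list_head(&visited);
        init_list_head(&m.mnt_umounting);   /* self-linked == "not visited" */

        if (list_empty(&m.mnt_umounting)) { /* first encounter */
            list_add_tail(&m.mnt_umounting, &visited);
            printf("%s: first visit\n", m.name);
        }
        if (!list_empty(&m.mnt_umounting))  /* any later encounter */
            printf("%s: already visited, skip the subtree\n", m.name);
        return 0;
    }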
1598     diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
1599     index fa3b155ce7e1..2d65bbd6dbd1 100644
1600     --- a/include/linux/cpumask.h
1601     +++ b/include/linux/cpumask.h
1602     @@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
1603     (cpu) = cpumask_next_zero((cpu), (mask)), \
1604     (cpu) < nr_cpu_ids;)
1605    
1606     +extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
1607     +
1608     +/**
1609     + * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
1610     + * @cpu: the (optionally unsigned) integer iterator
1611     + * @mask: the cpumask pointer
1612     + * @start: the start location
1613     + *
1614     + * The implementation does not assume any bit in @mask is set (including @start).
1615     + *
1616     + * After the loop, cpu is >= nr_cpu_ids.
1617     + */
1618     +#define for_each_cpu_wrap(cpu, mask, start) \
1619     + for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
1620     + (cpu) < nr_cpumask_bits; \
1621     + (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
1622     +
1623     /**
1624     * for_each_cpu_and - iterate over every cpu in both masks
1625     * @cpu: the (optionally unsigned) integer iterator
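for_each_cpu_wrap() promises to visit every set CPU exactly once even when @start's own bit is clear. A userspace model of the wrap semantics, using a 64-bit word in place of a struct cpumask (illustrative names, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    #define NBITS 64

    static int next_bit(unsigned long mask, int n)
    {
        for (int i = n + 1; i < NBITS; i++)
            if (mask & (1UL << i))
                return i;
        return NBITS;
    }

    static int next_wrap(int n, unsigned long mask, int start, bool wrap)
    {
        int next;
    again:
        next = next_bit(mask, n);
        if (wrap && n < start && next >= start) {
            return NBITS;        /* crossed @start a second time: done */
        } else if (next >= NBITS) {
            wrap = true;         /* fell off the end: restart at bit 0 */
            n = -1;
            goto again;
        }
        return next;
    }

    int main(void)
    {
        unsigned long mask = (1UL << 1) | (1UL << 3) | (1UL << 6);

        /* start at CPU 4 (not set in the mask): expect 6, then 1, then 3 */
        for (int cpu = next_wrap(4 - 1, mask, 4, false);
             cpu < NBITS;
             cpu = next_wrap(cpu, mask, 4, true))
            printf("cpu %d\n", cpu);
        return 0;
    }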
1626     diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
1627     index cb0ba9f2a9a2..fa7fd03cb5f9 100644
1628     --- a/include/linux/list_lru.h
1629     +++ b/include/linux/list_lru.h
1630     @@ -44,6 +44,7 @@ struct list_lru_node {
1631     /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
1632     struct list_lru_memcg *memcg_lrus;
1633     #endif
1634     + long nr_items;
1635     } ____cacheline_aligned_in_smp;
1636    
1637     struct list_lru {
1638     diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
1639     index e1a903a5bb3e..6a620e01b040 100644
1640     --- a/include/linux/mlx5/driver.h
1641     +++ b/include/linux/mlx5/driver.h
1642     @@ -788,6 +788,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev);
1643     void mlx5_start_health_poll(struct mlx5_core_dev *dev);
1644     void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
1645     void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
1646     +void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
1647     int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
1648     struct mlx5_buf *buf, int node);
1649     int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
1650     diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
1651     index 2e347d4545cf..2c43993e079c 100644
1652     --- a/include/net/ip6_route.h
1653     +++ b/include/net/ip6_route.h
1654     @@ -22,6 +22,7 @@ struct route_info {
1655     #include <net/flow.h>
1656     #include <net/ip6_fib.h>
1657     #include <net/sock.h>
1658     +#include <net/lwtunnel.h>
1659     #include <linux/ip.h>
1660     #include <linux/ipv6.h>
1661     #include <linux/route.h>
1662     @@ -232,4 +233,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
1663     return daddr;
1664     }
1665    
1666     +static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
1667     +{
1668     + return a->dst.dev == b->dst.dev &&
1669     + a->rt6i_idev == b->rt6i_idev &&
1670     + ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
1671     + !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
1672     +}
1673     #endif
1674     diff --git a/include/net/vxlan.h b/include/net/vxlan.h
1675     index 308adc4154f4..9fce47e3e13e 100644
1676     --- a/include/net/vxlan.h
1677     +++ b/include/net/vxlan.h
1678     @@ -221,9 +221,17 @@ struct vxlan_config {
1679     bool no_share;
1680     };
1681    
1682     +struct vxlan_dev_node {
1683     + struct hlist_node hlist;
1684     + struct vxlan_dev *vxlan;
1685     +};
1686     +
1687     /* Pseudo network device */
1688     struct vxlan_dev {
1689     - struct hlist_node hlist; /* vni hash table */
1690     + struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */
1691     +#if IS_ENABLED(CONFIG_IPV6)
1692     + struct vxlan_dev_node hlist6; /* vni hash table for IPv6 socket */
1693     +#endif
1694     struct list_head next; /* vxlan's per namespace list */
1695     struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */
1696     #if IS_ENABLED(CONFIG_IPV6)
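Splitting the single hlist into hlist4/hlist6 lets one vxlan device sit in the VNI hash tables of its IPv4 and IPv6 sockets at the same time; each vxlan_dev_node carries an explicit back-pointer so a lookup on either table reaches the owning device. A small model of the two-membership pattern (hypothetical names, singly linked for brevity):

    #include <stdio.h>

    struct device;

    struct dev_node {               /* like struct vxlan_dev_node */
        struct dev_node *next;      /* hash-chain linkage */
        struct device *dev;         /* back-pointer to the owner */
    };

    struct device {
        struct dev_node n4, n6;     /* one node per address family */
        const char *name;
    };

    static void push(struct dev_node **head, struct dev_node *n, struct device *d)
    {
        n->dev = d;
        n->next = *head;
        *head = n;
    }

    int main(void)
    {
        struct dev_node *table4 = NULL, *table6 = NULL;
        struct device d = { .name = "vxlan0" };

        push(&table4, &d.n4, &d);   /* hashed under the IPv4 socket */
        push(&table6, &d.n6, &d);   /* and simultaneously under the IPv6 one */

        printf("v4 chain -> %s\n", table4->dev->name);
        printf("v6 chain -> %s\n", table6->dev->name);
        return 0;
    }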
1697     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
1698     index 44c17f47d94c..8ce679d36c58 100644
1699     --- a/kernel/bpf/verifier.c
1700     +++ b/kernel/bpf/verifier.c
1701     @@ -885,6 +885,11 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
1702     if (err)
1703     return err;
1704    
1705     + if (is_pointer_value(env, insn->src_reg)) {
1706     + verbose("R%d leaks addr into mem\n", insn->src_reg);
1707     + return -EACCES;
1708     + }
1709     +
1710     /* check whether atomic_add can read the memory */
1711     err = check_mem_access(env, insn->dst_reg, insn->off,
1712     BPF_SIZE(insn->code), BPF_READ, -1);
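For reference, this is the class of program the new check rejects: an atomic add whose source register holds a pointer, which could otherwise smuggle kernel addresses into map memory readable from userspace. An abridged sketch using the insn macros from include/linux/filter.h (assumes a map value pointer is already in r2; not a complete, loadable program):

    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),           /* r1 = frame pointer */
    BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_1, 0),  /* *(u64 *)(r2 + 0) += r1 */
    /* the verifier now fails this with "R1 leaks addr into mem" */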
1713     diff --git a/kernel/extable.c b/kernel/extable.c
1714     index e820ccee9846..4f06fc34313f 100644
1715     --- a/kernel/extable.c
1716     +++ b/kernel/extable.c
1717     @@ -66,7 +66,7 @@ static inline int init_kernel_text(unsigned long addr)
1718     return 0;
1719     }
1720    
1721     -int core_kernel_text(unsigned long addr)
1722     +int notrace core_kernel_text(unsigned long addr)
1723     {
1724     if (addr >= (unsigned long)_stext &&
1725     addr < (unsigned long)_etext)
1726     diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
1727     index 56583e764ebf..e3944c4b072d 100644
1728     --- a/kernel/rcu/tree_plugin.h
1729     +++ b/kernel/rcu/tree_plugin.h
1730     @@ -1767,6 +1767,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
1731     if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
1732     /* Prior smp_mb__after_atomic() orders against prior enqueue. */
1733     WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
1734     + smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
1735     swake_up(&rdp_leader->nocb_wq);
1736     }
1737     }
1738     @@ -2021,6 +2022,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
1739     * nocb_gp_head, where they await a grace period.
1740     */
1741     gotcbs = false;
1742     + smp_mb(); /* wakeup before ->nocb_head reads. */
1743     for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
1744     rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
1745     if (!rdp->nocb_gp_head)
1746     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1747     index 692c948ae333..d177b21d04ce 100644
1748     --- a/kernel/sched/core.c
1749     +++ b/kernel/sched/core.c
1750     @@ -6102,6 +6102,9 @@ enum s_alloc {
1751     * Build an iteration mask that can exclude certain CPUs from the upwards
1752     * domain traversal.
1753     *
1754     + * Only CPUs that can arrive at this group should be considered to continue
1755     + * balancing.
1756     + *
1757     * Asymmetric node setups can result in situations where the domain tree is of
1758     * unequal depth, make sure to skip domains that already cover the entire
1759     * range.
1760     @@ -6113,18 +6116,31 @@ enum s_alloc {
1761     */
1762     static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
1763     {
1764     - const struct cpumask *span = sched_domain_span(sd);
1765     + const struct cpumask *sg_span = sched_group_cpus(sg);
1766     struct sd_data *sdd = sd->private;
1767     struct sched_domain *sibling;
1768     int i;
1769    
1770     - for_each_cpu(i, span) {
1771     + for_each_cpu(i, sg_span) {
1772     sibling = *per_cpu_ptr(sdd->sd, i);
1773     - if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
1774     +
1775     + /*
1776     + * Can happen in the asymmetric case, where these siblings are
1777     + * unused. The mask will not be empty because those CPUs that
1778     + * do have the top domain _should_ span the domain.
1779     + */
1780     + if (!sibling->child)
1781     + continue;
1782     +
1783     + /* If we would not end up here, we can't continue from here */
1784     + if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
1785     continue;
1786    
1787     cpumask_set_cpu(i, sched_group_mask(sg));
1788     }
1789     +
1790     + /* We must not have empty masks here */
1791     + WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
1792     }
1793    
1794     /*
1795     @@ -6148,7 +6164,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
1796    
1797     cpumask_clear(covered);
1798    
1799     - for_each_cpu(i, span) {
1800     + for_each_cpu_wrap(i, span, cpu) {
1801     struct cpumask *sg_span;
1802    
1803     if (cpumask_test_cpu(i, covered))
1804     @@ -7422,22 +7438,6 @@ int sched_cpu_dying(unsigned int cpu)
1805     }
1806     #endif
1807    
1808     -#ifdef CONFIG_SCHED_SMT
1809     -DEFINE_STATIC_KEY_FALSE(sched_smt_present);
1810     -
1811     -static void sched_init_smt(void)
1812     -{
1813     - /*
1814     - * We've enumerated all CPUs and will assume that if any CPU
1815     - * has SMT siblings, CPU0 will too.
1816     - */
1817     - if (cpumask_weight(cpu_smt_mask(0)) > 1)
1818     - static_branch_enable(&sched_smt_present);
1819     -}
1820     -#else
1821     -static inline void sched_init_smt(void) { }
1822     -#endif
1823     -
1824     void __init sched_init_smp(void)
1825     {
1826     cpumask_var_t non_isolated_cpus;
1827     @@ -7467,9 +7467,6 @@ void __init sched_init_smp(void)
1828    
1829     init_sched_rt_class();
1830     init_sched_dl_class();
1831     -
1832     - sched_init_smt();
1833     -
1834     sched_smp_initialized = true;
1835     }
1836    
1837     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1838     index c242944f5cbd..7a68c631d5b5 100644
1839     --- a/kernel/sched/fair.c
1840     +++ b/kernel/sched/fair.c
1841     @@ -5310,43 +5310,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1842     return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
1843     }
1844    
1845     -/*
1846     - * Implement a for_each_cpu() variant that starts the scan at a given cpu
1847     - * (@start), and wraps around.
1848     - *
1849     - * This is used to scan for idle CPUs; such that not all CPUs looking for an
1850     - * idle CPU find the same CPU. The down-side is that tasks tend to cycle
1851     - * through the LLC domain.
1852     - *
1853     - * Especially tbench is found sensitive to this.
1854     - */
1855     -
1856     -static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
1857     -{
1858     - int next;
1859     -
1860     -again:
1861     - next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
1862     -
1863     - if (*wrapped) {
1864     - if (next >= start)
1865     - return nr_cpumask_bits;
1866     - } else {
1867     - if (next >= nr_cpumask_bits) {
1868     - *wrapped = 1;
1869     - n = -1;
1870     - goto again;
1871     - }
1872     - }
1873     -
1874     - return next;
1875     -}
1876     -
1877     -#define for_each_cpu_wrap(cpu, mask, start, wrap) \
1878     - for ((wrap) = 0, (cpu) = (start)-1; \
1879     - (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)), \
1880     - (cpu) < nr_cpumask_bits; )
1881     -
1882     #ifdef CONFIG_SCHED_SMT
1883    
1884     static inline void set_idle_cores(int cpu, int val)
1885     @@ -5376,7 +5339,7 @@ static inline bool test_idle_cores(int cpu, bool def)
1886     * Since SMT siblings share all cache levels, inspecting this limited remote
1887     * state should be fairly cheap.
1888     */
1889     -void __update_idle_core(struct rq *rq)
1890     +void update_idle_core(struct rq *rq)
1891     {
1892     int core = cpu_of(rq);
1893     int cpu;
1894     @@ -5406,17 +5369,14 @@ void __update_idle_core(struct rq *rq)
1895     static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
1896     {
1897     struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
1898     - int core, cpu, wrap;
1899     -
1900     - if (!static_branch_likely(&sched_smt_present))
1901     - return -1;
1902     + int core, cpu;
1903    
1904     if (!test_idle_cores(target, false))
1905     return -1;
1906    
1907     cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));
1908    
1909     - for_each_cpu_wrap(core, cpus, target, wrap) {
1910     + for_each_cpu_wrap(core, cpus, target) {
1911     bool idle = true;
1912    
1913     for_each_cpu(cpu, cpu_smt_mask(core)) {
1914     @@ -5444,9 +5404,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
1915     {
1916     int cpu;
1917    
1918     - if (!static_branch_likely(&sched_smt_present))
1919     - return -1;
1920     -
1921     for_each_cpu(cpu, cpu_smt_mask(target)) {
1922     if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1923     continue;
1924     @@ -5482,7 +5439,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
1925     u64 avg_cost, avg_idle = this_rq()->avg_idle;
1926     u64 time, cost;
1927     s64 delta;
1928     - int cpu, wrap;
1929     + int cpu;
1930    
1931     this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
1932     if (!this_sd)
1933     @@ -5499,7 +5456,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
1934    
1935     time = local_clock();
1936    
1937     - for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
1938     + for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
1939     if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1940     continue;
1941     if (idle_cpu(cpu))
1942     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
1943     index 055f935d4421..ad77d666583c 100644
1944     --- a/kernel/sched/sched.h
1945     +++ b/kernel/sched/sched.h
1946     @@ -43,6 +43,12 @@ extern void cpu_load_update_active(struct rq *this_rq);
1947     static inline void cpu_load_update_active(struct rq *this_rq) { }
1948     #endif
1949    
1950     +#ifdef CONFIG_SCHED_SMT
1951     +extern void update_idle_core(struct rq *rq);
1952     +#else
1953     +static inline void update_idle_core(struct rq *rq) { }
1954     +#endif
1955     +
1956     /*
1957     * Helpers for converting nanosecond timing to jiffy resolution
1958     */
1959     @@ -731,23 +737,6 @@ static inline int cpu_of(struct rq *rq)
1960     #endif
1961     }
1962    
1963     -
1964     -#ifdef CONFIG_SCHED_SMT
1965     -
1966     -extern struct static_key_false sched_smt_present;
1967     -
1968     -extern void __update_idle_core(struct rq *rq);
1969     -
1970     -static inline void update_idle_core(struct rq *rq)
1971     -{
1972     - if (static_branch_unlikely(&sched_smt_present))
1973     - __update_idle_core(rq);
1974     -}
1975     -
1976     -#else
1977     -static inline void update_idle_core(struct rq *rq) { }
1978     -#endif
1979     -
1980     DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1981    
1982     #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
1983     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1984     index 83c60f9013cb..52ee2c51f4b3 100644
1985     --- a/kernel/trace/trace.c
1986     +++ b/kernel/trace/trace.c
1987     @@ -1906,7 +1906,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1988     #endif
1989     ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
1990     ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1991     - ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1992     + ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
1993     (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1994     (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1995     }
1996     diff --git a/lib/cpumask.c b/lib/cpumask.c
1997     index 81dedaab36cc..4731a0895760 100644
1998     --- a/lib/cpumask.c
1999     +++ b/lib/cpumask.c
2000     @@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
2001     }
2002     EXPORT_SYMBOL(cpumask_any_but);
2003    
2004     +/**
2005     + * cpumask_next_wrap - helper to implement for_each_cpu_wrap
2006     + * @n: the cpu prior to the place to search
2007     + * @mask: the cpumask pointer
2008     + * @start: the start point of the iteration
2009     + * @wrap: assume @n crossing @start terminates the iteration
2010     + *
2011     + * Returns >= nr_cpu_ids on completion
2012     + *
2013     + * Note: the @wrap argument is required for the start condition when
2014     + * we cannot assume @start is set in @mask.
2015     + */
2016     +int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
2017     +{
2018     + int next;
2019     +
2020     +again:
2021     + next = cpumask_next(n, mask);
2022     +
2023     + if (wrap && n < start && next >= start) {
2024     + return nr_cpumask_bits;
2025     +
2026     + } else if (next >= nr_cpumask_bits) {
2027     + wrap = true;
2028     + n = -1;
2029     + goto again;
2030     + }
2031     +
2032     + return next;
2033     +}
2034     +EXPORT_SYMBOL(cpumask_next_wrap);
2035     +
2036     /* These are not inline because of header tangles. */
2037     #ifdef CONFIG_CPUMASK_OFFSTACK
2038     /**
2039     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2040     index e7d5db958538..8258e9eee806 100644
2041     --- a/mm/huge_memory.c
2042     +++ b/mm/huge_memory.c
2043     @@ -1373,8 +1373,8 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2044     get_page(page);
2045     spin_unlock(ptl);
2046     split_huge_page(page);
2047     - put_page(page);
2048     unlock_page(page);
2049     + put_page(page);
2050     goto out_unlocked;
2051     }
2052    
2053     diff --git a/mm/list_lru.c b/mm/list_lru.c
2054     index 234676e31edd..7a40fa2be858 100644
2055     --- a/mm/list_lru.c
2056     +++ b/mm/list_lru.c
2057     @@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
2058     l = list_lru_from_kmem(nlru, item);
2059     list_add_tail(item, &l->list);
2060     l->nr_items++;
2061     + nlru->nr_items++;
2062     spin_unlock(&nlru->lock);
2063     return true;
2064     }
2065     @@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
2066     l = list_lru_from_kmem(nlru, item);
2067     list_del_init(item);
2068     l->nr_items--;
2069     + nlru->nr_items--;
2070     spin_unlock(&nlru->lock);
2071     return true;
2072     }
2073     @@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);
2074    
2075     unsigned long list_lru_count_node(struct list_lru *lru, int nid)
2076     {
2077     - long count = 0;
2078     - int memcg_idx;
2079     + struct list_lru_node *nlru;
2080    
2081     - count += __list_lru_count_one(lru, nid, -1);
2082     - if (list_lru_memcg_aware(lru)) {
2083     - for_each_memcg_cache_index(memcg_idx)
2084     - count += __list_lru_count_one(lru, nid, memcg_idx);
2085     - }
2086     - return count;
2087     + nlru = &lru->node[nid];
2088     + return nlru->nr_items;
2089     }
2090     EXPORT_SYMBOL_GPL(list_lru_count_node);
2091    
2092     @@ -226,6 +223,7 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
2093     assert_spin_locked(&nlru->lock);
2094     case LRU_REMOVED:
2095     isolated++;
2096     + nlru->nr_items--;
2097     /*
2098     * If the lru lock has been dropped, our list
2099     * traversal is now invalid and so we have to
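The list_lru changes all serve one pattern: keep a per-node aggregate counter in step with every add, delete, and isolation under the same lock, so list_lru_count_node() becomes an O(1) read instead of a walk over every per-memcg list. A minimal model of that bookkeeping in plain C (stand-in structures, not the kernel's):

    #include <stdio.h>

    #define NR_BUCKETS 4            /* stands in for the per-memcg lists */

    struct node {
        long bucket_items[NR_BUCKETS];
        long nr_items;              /* aggregate, like nlru->nr_items */
    };

    static void lru_add(struct node *n, int bucket)
    {
        n->bucket_items[bucket]++;
        n->nr_items++;              /* updated under the same lock as the list */
    }

    static void lru_del(struct node *n, int bucket)
    {
        n->bucket_items[bucket]--;
        n->nr_items--;
    }

    static long count_node(const struct node *n)
    {
        return n->nr_items;         /* O(1): no loop over the buckets */
    }

    int main(void)
    {
        struct node n = { { 0 }, 0 };

        lru_add(&n, 0);
        lru_add(&n, 3);
        lru_del(&n, 0);
        printf("items: %ld\n", count_node(&n));  /* prints 1 */
        return 0;
    }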
2100     diff --git a/mm/mmap.c b/mm/mmap.c
2101     index 145d3d5253e8..75d263bd8739 100644
2102     --- a/mm/mmap.c
2103     +++ b/mm/mmap.c
2104     @@ -2228,7 +2228,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2105    
2106     /* Guard against exceeding limits of the address space. */
2107     address &= PAGE_MASK;
2108     - if (address >= TASK_SIZE)
2109     + if (address >= (TASK_SIZE & PAGE_MASK))
2110     return -ENOMEM;
2111     address += PAGE_SIZE;
2112    
2113     diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
2114     index 7dbc80d01eb0..6406010e155b 100644
2115     --- a/net/bridge/br_mdb.c
2116     +++ b/net/bridge/br_mdb.c
2117     @@ -323,7 +323,8 @@ static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
2118     __mdb_entry_to_br_ip(entry, &complete_info->ip);
2119     mdb.obj.complete_priv = complete_info;
2120     mdb.obj.complete = br_mdb_complete;
2121     - switchdev_port_obj_add(port_dev, &mdb.obj);
2122     + if (switchdev_port_obj_add(port_dev, &mdb.obj))
2123     + kfree(complete_info);
2124     }
2125     } else if (port_dev && type == RTM_DELMDB) {
2126     switchdev_port_obj_del(port_dev, &mdb.obj);
2127     diff --git a/net/core/dev.c b/net/core/dev.c
2128     index 97f806116ae9..c17952b6e0b6 100644
2129     --- a/net/core/dev.c
2130     +++ b/net/core/dev.c
2131     @@ -4641,6 +4641,12 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
2132     }
2133     EXPORT_SYMBOL(gro_find_complete_by_type);
2134    
2135     +static void napi_skb_free_stolen_head(struct sk_buff *skb)
2136     +{
2137     + skb_dst_drop(skb);
2138     + kmem_cache_free(skbuff_head_cache, skb);
2139     +}
2140     +
2141     static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
2142     {
2143     switch (ret) {
2144     @@ -4654,12 +4660,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
2145     break;
2146    
2147     case GRO_MERGED_FREE:
2148     - if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
2149     - skb_dst_drop(skb);
2150     - kmem_cache_free(skbuff_head_cache, skb);
2151     - } else {
2152     + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
2153     + napi_skb_free_stolen_head(skb);
2154     + else
2155     __kfree_skb(skb);
2156     - }
2157     break;
2158    
2159     case GRO_HELD:
2160     @@ -4729,10 +4733,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
2161     break;
2162    
2163     case GRO_DROP:
2164     - case GRO_MERGED_FREE:
2165     napi_reuse_skb(napi, skb);
2166     break;
2167    
2168     + case GRO_MERGED_FREE:
2169     + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
2170     + napi_skb_free_stolen_head(skb);
2171     + else
2172     + napi_reuse_skb(napi, skb);
2173     + break;
2174     +
2175     case GRO_MERGED:
2176     break;
2177     }
2178     @@ -7521,7 +7531,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
2179     {
2180     #if BITS_PER_LONG == 64
2181     BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
2182     - memcpy(stats64, netdev_stats, sizeof(*stats64));
2183     + memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
2184     /* zero out counters that only exist in rtnl_link_stats64 */
2185     memset((char *)stats64 + sizeof(*netdev_stats), 0,
2186     sizeof(*stats64) - sizeof(*netdev_stats));
2187     @@ -7563,9 +7573,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2188     } else {
2189     netdev_stats_to_stats64(storage, &dev->stats);
2190     }
2191     - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
2192     - storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
2193     - storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
2194     + storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
2195     + storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
2196     + storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
2197     return storage;
2198     }
2199     EXPORT_SYMBOL(dev_get_stats);
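The memcpy fix above matters because struct rtnl_link_stats64 is larger than struct net_device_stats: copying sizeof(*stats64) bytes read past the end of the source. The corrected idiom, which relies on the two structs sharing a common field layout for their prefix, modeled in plain C:

    #include <stdio.h>
    #include <string.h>

    struct small { unsigned long a, b; };        /* net_device_stats stand-in */
    struct big   { unsigned long a, b, c, d; };  /* rtnl_link_stats64 stand-in */

    static void small_to_big(struct big *dst, const struct small *src)
    {
        /* copy only what the source actually contains... */
        memcpy(dst, src, sizeof(*src));
        /* ...then zero the fields that exist only in the larger struct */
        memset((char *)dst + sizeof(*src), 0, sizeof(*dst) - sizeof(*src));
    }

    int main(void)
    {
        struct small s = { 1, 2 };
        struct big b;

        small_to_big(&b, &s);
        printf("%lu %lu %lu %lu\n", b.a, b.b, b.c, b.d);  /* 1 2 0 0 */
        return 0;
    }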
2200     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2201     index 86fbf0f3235e..1a4db27f5833 100644
2202     --- a/net/ipv4/tcp.c
2203     +++ b/net/ipv4/tcp.c
2204     @@ -2300,6 +2300,8 @@ int tcp_disconnect(struct sock *sk, int flags)
2205     tcp_init_send_head(sk);
2206     memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2207     __sk_dst_reset(sk);
2208     + dst_release(sk->sk_rx_dst);
2209     + sk->sk_rx_dst = NULL;
2210     tcp_saved_syn_free(tp);
2211    
2212     WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2213     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2214     index b6f4c42cc8ce..b2cabda72320 100644
2215     --- a/net/ipv6/addrconf.c
2216     +++ b/net/ipv6/addrconf.c
2217     @@ -1875,15 +1875,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2218     if (dad_failed)
2219     ifp->flags |= IFA_F_DADFAILED;
2220    
2221     - if (ifp->flags&IFA_F_PERMANENT) {
2222     - spin_lock_bh(&ifp->lock);
2223     - addrconf_del_dad_work(ifp);
2224     - ifp->flags |= IFA_F_TENTATIVE;
2225     - spin_unlock_bh(&ifp->lock);
2226     - if (dad_failed)
2227     - ipv6_ifa_notify(0, ifp);
2228     - in6_ifa_put(ifp);
2229     - } else if (ifp->flags&IFA_F_TEMPORARY) {
2230     + if (ifp->flags&IFA_F_TEMPORARY) {
2231     struct inet6_ifaddr *ifpub;
2232     spin_lock_bh(&ifp->lock);
2233     ifpub = ifp->ifpub;
2234     @@ -1896,6 +1888,14 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2235     spin_unlock_bh(&ifp->lock);
2236     }
2237     ipv6_del_addr(ifp);
2238     + } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
2239     + spin_lock_bh(&ifp->lock);
2240     + addrconf_del_dad_work(ifp);
2241     + ifp->flags |= IFA_F_TENTATIVE;
2242     + spin_unlock_bh(&ifp->lock);
2243     + if (dad_failed)
2244     + ipv6_ifa_notify(0, ifp);
2245     + in6_ifa_put(ifp);
2246     } else {
2247     ipv6_del_addr(ifp);
2248     }
2249     @@ -3316,6 +3316,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2250     struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2251     struct netdev_notifier_changeupper_info *info;
2252     struct inet6_dev *idev = __in6_dev_get(dev);
2253     + struct net *net = dev_net(dev);
2254     int run_pending = 0;
2255     int err;
2256    
2257     @@ -3331,7 +3332,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2258     case NETDEV_CHANGEMTU:
2259     /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
2260     if (dev->mtu < IPV6_MIN_MTU) {
2261     - addrconf_ifdown(dev, 1);
2262     + addrconf_ifdown(dev, dev != net->loopback_dev);
2263     break;
2264     }
2265    
2266     @@ -3447,7 +3448,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2267     * IPV6_MIN_MTU stop IPv6 on this interface.
2268     */
2269     if (dev->mtu < IPV6_MIN_MTU)
2270     - addrconf_ifdown(dev, 1);
2271     + addrconf_ifdown(dev, dev != net->loopback_dev);
2272     }
2273     break;
2274    
2275     diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2276     index 636d4d893085..4345ee39f180 100644
2277     --- a/net/ipv6/ip6_fib.c
2278     +++ b/net/ipv6/ip6_fib.c
2279     @@ -771,10 +771,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2280     goto next_iter;
2281     }
2282    
2283     - if (iter->dst.dev == rt->dst.dev &&
2284     - iter->rt6i_idev == rt->rt6i_idev &&
2285     - ipv6_addr_equal(&iter->rt6i_gateway,
2286     - &rt->rt6i_gateway)) {
2287     + if (rt6_duplicate_nexthop(iter, rt)) {
2288     if (rt->rt6i_nsiblings)
2289     rt->rt6i_nsiblings = 0;
2290     if (!(iter->rt6i_flags & RTF_EXPIRES))
2291     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2292     index b8b475389ae4..5764a84465f8 100644
2293     --- a/net/ipv6/route.c
2294     +++ b/net/ipv6/route.c
2295     @@ -2953,17 +2953,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list,
2296     struct rt6_info *rt, struct fib6_config *r_cfg)
2297     {
2298     struct rt6_nh *nh;
2299     - struct rt6_info *rtnh;
2300     int err = -EEXIST;
2301    
2302     list_for_each_entry(nh, rt6_nh_list, next) {
2303     /* check if rt6_info already exists */
2304     - rtnh = nh->rt6_info;
2305     -
2306     - if (rtnh->dst.dev == rt->dst.dev &&
2307     - rtnh->rt6i_idev == rt->rt6i_idev &&
2308     - ipv6_addr_equal(&rtnh->rt6i_gateway,
2309     - &rt->rt6i_gateway))
2310     + if (rt6_duplicate_nexthop(nh->rt6_info, rt))
2311     return err;
2312     }
2313    
2314     diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
2315     index e0b23fb5b8d5..525b624fec8b 100644
2316     --- a/net/rds/tcp_listen.c
2317     +++ b/net/rds/tcp_listen.c
2318     @@ -129,7 +129,7 @@ int rds_tcp_accept_one(struct socket *sock)
2319     if (!sock) /* module unload or netns delete in progress */
2320     return -ENETUNREACH;
2321    
2322     - ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
2323     + ret = sock_create_lite(sock->sk->sk_family,
2324     sock->sk->sk_type, sock->sk->sk_protocol,
2325     &new_sock);
2326     if (ret)
2327     diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
2328     index 206dc24add3a..ff27a85a71a9 100644
2329     --- a/net/sched/sch_api.c
2330     +++ b/net/sched/sch_api.c
2331     @@ -1008,6 +1008,9 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
2332    
2333     return sch;
2334     }
2335     + /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
2336     + if (ops->destroy)
2337     + ops->destroy(sch);
2338     err_out3:
2339     dev_put(dev);
2340     kfree((char *) sch - sch->padded);
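The sch_api hunk above and the hhf, mq, mqprio and sfq hunks below all enforce a single ownership rule: when ->init() fails, qdisc_create() calls ->destroy() exactly once, so init implementations must no longer clean up on their own error paths (doing both was a double free). A schematic of the contract (illustrative C, not the kernel API):

    #include <stdio.h>

    struct ops { int (*init)(void *); void (*destroy)(void *); };

    static int failing_init(void *p) { (void)p; return -1; }
    static void my_destroy(void *p)  { (void)p; puts("destroy called once"); }

    /* The creator owns error cleanup: init() just reports failure and must
     * NOT call destroy itself, or the qdisc would be torn down twice. */
    static void *create(const struct ops *o, void *priv)
    {
        if (o->init && o->init(priv)) {
            if (o->destroy)
                o->destroy(priv);   /* the single cleanup point */
            return NULL;
        }
        return priv;
    }

    int main(void)
    {
        struct ops o = { failing_init, my_destroy };
        int dummy;

        if (!create(&o, &dummy))
            puts("init failed, cleaned up by the caller");
        return 0;
    }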
2341     diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
2342     index e3d0458af17b..2fae8b5f1b80 100644
2343     --- a/net/sched/sch_hhf.c
2344     +++ b/net/sched/sch_hhf.c
2345     @@ -627,7 +627,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
2346     q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
2347     sizeof(u32));
2348     if (!q->hhf_arrays[i]) {
2349     - hhf_destroy(sch);
2350     + /* Note: hhf_destroy() will be called
2351     + * by our caller.
2352     + */
2353     return -ENOMEM;
2354     }
2355     }
2356     @@ -638,7 +640,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
2357     q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
2358     BITS_PER_BYTE);
2359     if (!q->hhf_valid_bits[i]) {
2360     - hhf_destroy(sch);
2361     + /* Note: hhf_destroy() will be called
2362     + * by our caller.
2363     + */
2364     return -ENOMEM;
2365     }
2366     }
2367     diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
2368     index 2bc8d7f8df16..20b7f1646f69 100644
2369     --- a/net/sched/sch_mq.c
2370     +++ b/net/sched/sch_mq.c
2371     @@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
2372     /* pre-allocate qdiscs, attachment can't fail */
2373     priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
2374     GFP_KERNEL);
2375     - if (priv->qdiscs == NULL)
2376     + if (!priv->qdiscs)
2377     return -ENOMEM;
2378    
2379     for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
2380     @@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
2381     qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
2382     TC_H_MAKE(TC_H_MAJ(sch->handle),
2383     TC_H_MIN(ntx + 1)));
2384     - if (qdisc == NULL)
2385     - goto err;
2386     + if (!qdisc)
2387     + return -ENOMEM;
2388     priv->qdiscs[ntx] = qdisc;
2389     qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2390     }
2391    
2392     sch->flags |= TCQ_F_MQROOT;
2393     return 0;
2394     -
2395     -err:
2396     - mq_destroy(sch);
2397     - return -ENOMEM;
2398     }
2399    
2400     static void mq_attach(struct Qdisc *sch)
2401     diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
2402     index b5c502c78143..922683418e53 100644
2403     --- a/net/sched/sch_mqprio.c
2404     +++ b/net/sched/sch_mqprio.c
2405     @@ -118,10 +118,8 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
2406     /* pre-allocate qdisc, attachment can't fail */
2407     priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
2408     GFP_KERNEL);
2409     - if (priv->qdiscs == NULL) {
2410     - err = -ENOMEM;
2411     - goto err;
2412     - }
2413     + if (!priv->qdiscs)
2414     + return -ENOMEM;
2415    
2416     for (i = 0; i < dev->num_tx_queues; i++) {
2417     dev_queue = netdev_get_tx_queue(dev, i);
2418     @@ -129,10 +127,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
2419     get_default_qdisc_ops(dev, i),
2420     TC_H_MAKE(TC_H_MAJ(sch->handle),
2421     TC_H_MIN(i + 1)));
2422     - if (qdisc == NULL) {
2423     - err = -ENOMEM;
2424     - goto err;
2425     - }
2426     + if (!qdisc)
2427     + return -ENOMEM;
2428     +
2429     priv->qdiscs[i] = qdisc;
2430     qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2431     }
2432     @@ -148,7 +145,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
2433     priv->hw_owned = 1;
2434     err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
2435     if (err)
2436     - goto err;
2437     + return err;
2438     } else {
2439     netdev_set_num_tc(dev, qopt->num_tc);
2440     for (i = 0; i < qopt->num_tc; i++)
2441     @@ -162,10 +159,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
2442    
2443     sch->flags |= TCQ_F_MQROOT;
2444     return 0;
2445     -
2446     -err:
2447     - mqprio_destroy(sch);
2448     - return err;
2449     }
2450    
2451     static void mqprio_attach(struct Qdisc *sch)
2452     diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
2453     index 7f195ed4d568..bc5e99584e41 100644
2454     --- a/net/sched/sch_sfq.c
2455     +++ b/net/sched/sch_sfq.c
2456     @@ -742,9 +742,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
2457     q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
2458     q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
2459     if (!q->ht || !q->slots) {
2460     - sfq_destroy(sch);
2461     + /* Note: sfq_destroy() will be called by our caller */
2462     return -ENOMEM;
2463     }
2464     +
2465     for (i = 0; i < q->divisor; i++)
2466     q->ht[i] = SFQ_EMPTY_SLOT;
2467    
2468     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
2469     index e7a3068a1c3b..e9e9bc5c8773 100644
2470     --- a/net/wireless/nl80211.c
2471     +++ b/net/wireless/nl80211.c
2472     @@ -305,8 +305,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
2473     [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
2474     [NL80211_ATTR_PID] = { .type = NLA_U32 },
2475     [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
2476     - [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
2477     - .len = WLAN_PMKID_LEN },
2478     + [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
2479     [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
2480     [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
2481     [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
2482     @@ -362,6 +361,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
2483     [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
2484     [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
2485     [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
2486     + [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
2487     [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
2488     [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
2489     [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
2490     @@ -512,7 +512,7 @@ nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
2491     static const struct nla_policy
2492     nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = {
2493     [NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 },
2494     - [NL80211_NAN_FUNC_SERVICE_ID] = { .type = NLA_BINARY,
2495     + [NL80211_NAN_FUNC_SERVICE_ID] = {
2496     .len = NL80211_NAN_FUNC_SERVICE_ID_LEN },
2497     [NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 },
2498     [NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG },
2499     @@ -6326,6 +6326,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
2500     struct nlattr *attr1, *attr2;
2501     int n_channels = 0, tmp1, tmp2;
2502    
2503     + nla_for_each_nested(attr1, freqs, tmp1)
2504     + if (nla_len(attr1) != sizeof(u32))
2505     + return 0;
2506     +
2507     nla_for_each_nested(attr1, freqs, tmp1) {
2508     n_channels++;
2509     /*
2510     diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
2511     index a8368d1c4348..55171647f516 100755
2512     --- a/scripts/checkpatch.pl
2513     +++ b/scripts/checkpatch.pl
2514     @@ -3499,7 +3499,7 @@ sub process {
2515     $fixedline =~ s/\s*=\s*$/ = {/;
2516     fix_insert_line($fixlinenr, $fixedline);
2517     $fixedline = $line;
2518     - $fixedline =~ s/^(.\s*){\s*/$1/;
2519     + $fixedline =~ s/^(.\s*)\{\s*/$1/;
2520     fix_insert_line($fixlinenr, $fixedline);
2521     }
2522     }
2523     @@ -3840,7 +3840,7 @@ sub process {
2524     my $fixedline = rtrim($prevrawline) . " {";
2525     fix_insert_line($fixlinenr, $fixedline);
2526     $fixedline = $rawline;
2527     - $fixedline =~ s/^(.\s*){\s*/$1\t/;
2528     + $fixedline =~ s/^(.\s*)\{\s*/$1\t/;
2529     if ($fixedline !~ /^\+\s*$/) {
2530     fix_insert_line($fixlinenr, $fixedline);
2531     }
2532     @@ -4329,7 +4329,7 @@ sub process {
2533     if (ERROR("SPACING",
2534     "space required before the open brace '{'\n" . $herecurr) &&
2535     $fix) {
2536     - $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
2537     + $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/;
2538     }
2539     }
2540    
2541     diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h
2542     index c808c7d02d21..d30214221920 100644
2543     --- a/tools/lib/lockdep/uinclude/linux/lockdep.h
2544     +++ b/tools/lib/lockdep/uinclude/linux/lockdep.h
2545     @@ -8,7 +8,7 @@
2546     #include <linux/utsname.h>
2547     #include <linux/compiler.h>
2548    
2549     -#define MAX_LOCK_DEPTH 2000UL
2550     +#define MAX_LOCK_DEPTH 63UL
2551    
2552     #define asmlinkage
2553     #define __visible
2554     diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
2555     index 10a21a958aaf..763f37fecfb8 100644
2556     --- a/tools/testing/selftests/capabilities/test_execve.c
2557     +++ b/tools/testing/selftests/capabilities/test_execve.c
2558     @@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void)
2559    
2560     if (chdir(cwd) != 0)
2561     err(1, "chdir to private tmpfs");
2562     -
2563     - if (umount2(".", MNT_DETACH) != 0)
2564     - err(1, "detach private tmpfs");
2565     }
2566    
2567     static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
2568     @@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path)
2569     err(1, "chown");
2570     if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
2571     err(1, "chmod");
2572     -}
2573     + }
2574    
2575     capng_get_caps_process();
2576    
2577     @@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path)
2578     } else {
2579     printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
2580     exec_other_validate_cap("./validate_cap_sgidnonroot",
2581     - false, false, true, false);
2582     + false, false, true, false);
2583    
2584     if (fork_wait()) {
2585     printf("[RUN]\tNon-root +ia, sgidroot => i\n");