Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0311-5.4.212-all-fixes.patch



Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 97049 bytes
-add missing
1 niro 3637 diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
2     index 9393c50b5afc9..c98fd11907cc8 100644
3     --- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
4     +++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
5     @@ -230,6 +230,20 @@ The possible values in this file are:
6     * - 'Mitigation: Clear CPU buffers'
7     - The processor is vulnerable and the CPU buffer clearing mitigation is
8     enabled.
9     + * - 'Unknown: No mitigations'
10     + - The processor vulnerability status is unknown because it is
11     + out of the Servicing period. Mitigation is not attempted.
12     +
13     +Definitions:
14     +------------
15     +
16     +Servicing period: The process of providing functional and security updates to
17     +Intel processors or platforms, utilizing the Intel Platform Update (IPU)
18     +process or other similar mechanisms.
19     +
20     +End of Servicing Updates (ESU): ESU is the date at which Intel will no
21     +longer provide Servicing, such as through IPU or other similar update
22     +processes. ESU dates will typically be aligned to end of quarter.
23    
24     If the processor is vulnerable then the following information is appended to
25     the above information:
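
The hunk above documents a new value surfaced through the standard sysfs vulnerabilities interface. As a minimal user-space sketch (illustrative only, not part of the patch), one way to read the reported status:

    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mmio_stale_data", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(line, sizeof(line), f))
            /* Prints e.g. "Mitigation: Clear CPU buffers", or
             * "Unknown: No mitigations" on an out-of-servicing CPU. */
            printf("%s", line);
        fclose(f);
        return 0;
    }
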
26     diff --git a/Makefile b/Makefile
27     index e54b9a1659b4f..cecfe23f521f1 100644
28     --- a/Makefile
29     +++ b/Makefile
30     @@ -1,7 +1,7 @@
31     # SPDX-License-Identifier: GPL-2.0
32     VERSION = 5
33     PATCHLEVEL = 4
34     -SUBLEVEL = 211
35     +SUBLEVEL = 212
36     EXTRAVERSION =
37     NAME = Kleptomaniac Octopus
38    
39     diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
40     index 286cec4d86d7b..cc6ed74960501 100644
41     --- a/arch/parisc/kernel/unaligned.c
42     +++ b/arch/parisc/kernel/unaligned.c
43     @@ -107,7 +107,7 @@
44     #define R1(i) (((i)>>21)&0x1f)
45     #define R2(i) (((i)>>16)&0x1f)
46     #define R3(i) ((i)&0x1f)
47     -#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
48     +#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
49     #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
50     #define IM5_2(i) IM((i)>>16,5)
51     #define IM5_3(i) IM((i),5)
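
The change above fixes an operand-decoding macro: the old FR3() shifted before masking, so bit 4 of the 5-bit register field was silently dropped and the result could never reach the upper FP registers. A small user-space check (sample instruction word invented) contrasting the two forms:

    #include <stdio.h>

    #define FR3_OLD(i) ((((i) << 1) & 0x1f) | (((i) >> 6) & 1))
    #define FR3_NEW(i) ((((i) & 0x1f) << 1) | (((i) >> 6) & 1))

    int main(void)
    {
        unsigned int insn = 0x5f;   /* bits 0-4 all set, bit 6 set */

        /* old: 31 (bit 4 lost to the mask), new: 63 (full 6-bit value) */
        printf("old=%u new=%u\n", FR3_OLD(insn), FR3_NEW(insn));
        return 0;
    }
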
52     diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
53     index f0bc4dc3e9bf0..6511d15ace45e 100644
54     --- a/arch/s390/hypfs/hypfs_diag.c
55     +++ b/arch/s390/hypfs/hypfs_diag.c
56     @@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
57     int rc;
58    
59     if (diag204_probe()) {
60     - pr_err("The hardware system does not support hypfs\n");
61     + pr_info("The hardware system does not support hypfs\n");
62     return -ENODATA;
63     }
64    
65     diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
66     index 70139d0791b61..ca4fc66a361fb 100644
67     --- a/arch/s390/hypfs/inode.c
68     +++ b/arch/s390/hypfs/inode.c
69     @@ -501,9 +501,9 @@ fail_hypfs_sprp_exit:
70     hypfs_vm_exit();
71     fail_hypfs_diag_exit:
72     hypfs_diag_exit();
73     + pr_err("Initialization of hypfs failed with rc=%i\n", rc);
74     fail_dbfs_exit:
75     hypfs_dbfs_exit();
76     - pr_err("Initialization of hypfs failed with rc=%i\n", rc);
77     return rc;
78     }
79     device_initcall(hypfs_init)
80     diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
81     index 4e6299e2ca947..fdd5f37ac1fb8 100644
82     --- a/arch/s390/kernel/process.c
83     +++ b/arch/s390/kernel/process.c
84     @@ -76,6 +76,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
85    
86     memcpy(dst, src, arch_task_struct_size);
87     dst->thread.fpu.regs = dst->thread.fpu.fprs;
88     +
89     + /*
90     + * Don't transfer over the runtime instrumentation or the guarded
91     + * storage control block pointers. These fields are cleared here instead
92     + * of in copy_thread() to avoid premature freeing of associated memory
93     + * on fork() failure. Wait to clear the RI flag because ->stack still
94     + * refers to the source thread.
95     + */
96     + dst->thread.ri_cb = NULL;
97     + dst->thread.gs_cb = NULL;
98     + dst->thread.gs_bc_cb = NULL;
99     +
100     return 0;
101     }
102    
103     @@ -133,13 +145,11 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
104     frame->childregs.flags = 0;
105     if (new_stackp)
106     frame->childregs.gprs[15] = new_stackp;
107     -
108     - /* Don't copy runtime instrumentation info */
109     - p->thread.ri_cb = NULL;
110     + /*
111     + * Clear the runtime instrumentation flag after the above childregs
112     + * copy. The CB pointer was already cleared in arch_dup_task_struct().
113     + */
114     frame->childregs.psw.mask &= ~PSW_MASK_RI;
115     - /* Don't copy guarded storage control block */
116     - p->thread.gs_cb = NULL;
117     - p->thread.gs_bc_cb = NULL;
118    
119     /* Set a new TLS ? */
120     if (clone_flags & CLONE_SETTLS) {
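
The hunk above carries its own rationale: pointers the child does not yet own are cleared right after the shallow copy, so an error-path destructor on a failed fork() cannot free memory still owned by the parent. A toy user-space rendition of that pattern (struct and names invented for the sketch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct task {
        char *ri_cb;        /* stands in for the runtime-instr. block */
    };

    static void dup_task(struct task *dst, const struct task *src)
    {
        memcpy(dst, src, sizeof(*dst));
        dst->ri_cb = NULL;  /* child must not inherit the pointer */
    }

    static void free_task(struct task *t)
    {
        free(t->ri_cb);     /* safe: NULL for a failed fork's child */
    }

    int main(void)
    {
        struct task parent = { .ri_cb = strdup("cb") }, child;

        dup_task(&child, &parent);
        free_task(&child);  /* simulated fork() failure: no double free */
        free_task(&parent);
        return 0;
    }
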
121     diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
122     index 7b0bb475c1664..9770381776a63 100644
123     --- a/arch/s390/mm/fault.c
124     +++ b/arch/s390/mm/fault.c
125     @@ -432,7 +432,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
126     flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
127     if (user_mode(regs))
128     flags |= FAULT_FLAG_USER;
129     - if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
130     + if ((trans_exc_code & store_indication) == 0x400)
131     + access = VM_WRITE;
132     + if (access == VM_WRITE)
133     flags |= FAULT_FLAG_WRITE;
134     down_read(&mm->mmap_sem);
135    
136     diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
137     index aec6e63c6a04a..0258e0065771a 100644
138     --- a/arch/x86/events/intel/uncore_snb.c
139     +++ b/arch/x86/events/intel/uncore_snb.c
140     @@ -575,6 +575,22 @@ int snb_pci2phy_map_init(int devid)
141     return 0;
142     }
143    
144     +static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
145     +{
146     + struct hw_perf_event *hwc = &event->hw;
147     +
148     + /*
149     + * SNB IMC counters are 32-bit and are laid out back to back
150     + * in MMIO space. Therefore we must use a 32-bit accessor function
151     + * using readl(). Using readq() from uncore_mmio_read_counter()
152     + * causes problems because it reads 64 bits at a time. This is
153     + * okay for the uncore_perf_event_update() function because it
154     + * drops the upper 32 bits, but not okay for plain
155     + * uncore_read_counter() as invoked in uncore_pmu_event_start().
156     + */
157     + return (u64)readl(box->io_addr + hwc->event_base);
158     +}
159     +
160     static struct pmu snb_uncore_imc_pmu = {
161     .task_ctx_nr = perf_invalid_context,
162     .event_init = snb_uncore_imc_event_init,
163     @@ -594,7 +610,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = {
164     .disable_event = snb_uncore_imc_disable_event,
165     .enable_event = snb_uncore_imc_enable_event,
166     .hw_config = snb_uncore_imc_hw_config,
167     - .read_counter = uncore_mmio_read_counter,
168     + .read_counter = snb_uncore_imc_read_counter,
169     };
170    
171     static struct intel_uncore_type snb_uncore_imc = {
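
To see why the 32-bit accessor matters, here is a user-space sketch (plain memory standing in for the IMC MMIO window) of what a 64-bit read returns when two 32-bit counters sit back to back:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint32_t regs[2] = { 0x1234, 0xdead };  /* counter, neighbour */
        uint64_t wide;
        uint32_t narrow = regs[0];              /* what readl() would see */

        memcpy(&wide, regs, sizeof(wide));      /* what readq() would see */

        /* narrow=0x1234, wide=0xdead00001234 on little-endian hosts:
         * the neighbour leaks into the upper 32 bits of the snapshot. */
        printf("narrow=%#x wide=%#llx\n", narrow, (unsigned long long)wide);
        return 0;
    }
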
172     diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
173     index a3e32bc938562..736b0e412344b 100644
174     --- a/arch/x86/include/asm/cpufeatures.h
175     +++ b/arch/x86/include/asm/cpufeatures.h
176     @@ -407,6 +407,7 @@
177     #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
178     #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
179     #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
180     -#define X86_BUG_EIBRS_PBRSB X86_BUG(26) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
181     +#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
182     +#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
183    
184     #endif /* _ASM_X86_CPUFEATURES_H */
185     diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
186     index 57efa90f3fbd0..c90d91cb14341 100644
187     --- a/arch/x86/kernel/cpu/bugs.c
188     +++ b/arch/x86/kernel/cpu/bugs.c
189     @@ -396,7 +396,8 @@ static void __init mmio_select_mitigation(void)
190     u64 ia32_cap;
191    
192     if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
193     - cpu_mitigations_off()) {
194     + boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
195     + cpu_mitigations_off()) {
196     mmio_mitigation = MMIO_MITIGATION_OFF;
197     return;
198     }
199     @@ -501,6 +502,8 @@ out:
200     pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
201     if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
202     pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
203     + else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
204     + pr_info("MMIO Stale Data: Unknown: No mitigations\n");
205     }
206    
207     static void __init md_clear_select_mitigation(void)
208     @@ -1880,6 +1883,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)
209    
210     static ssize_t mmio_stale_data_show_state(char *buf)
211     {
212     + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
213     + return sysfs_emit(buf, "Unknown: No mitigations\n");
214     +
215     if (mmio_mitigation == MMIO_MITIGATION_OFF)
216     return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
217    
218     @@ -2007,6 +2013,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
219     return srbds_show_state(buf);
220    
221     case X86_BUG_MMIO_STALE_DATA:
222     + case X86_BUG_MMIO_UNKNOWN:
223     return mmio_stale_data_show_state(buf);
224    
225     default:
226     @@ -2063,6 +2070,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
227    
228     ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
229     {
230     - return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
231     + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
232     + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
233     + else
234     + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
235     }
236     #endif
237     diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
238     index b926b7244d42d..59413e741ecf1 100644
239     --- a/arch/x86/kernel/cpu/common.c
240     +++ b/arch/x86/kernel/cpu/common.c
241     @@ -1026,6 +1026,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
242     #define NO_ITLB_MULTIHIT BIT(7)
243     #define NO_SPECTRE_V2 BIT(8)
244     #define NO_EIBRS_PBRSB BIT(9)
245     +#define NO_MMIO BIT(10)
246    
247     #define VULNWL(_vendor, _family, _model, _whitelist) \
248     { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
249     @@ -1046,6 +1047,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
250     VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
251    
252     /* Intel Family 6 */
253     + VULNWL_INTEL(TIGERLAKE, NO_MMIO),
254     + VULNWL_INTEL(TIGERLAKE_L, NO_MMIO),
255     + VULNWL_INTEL(ALDERLAKE, NO_MMIO),
256     + VULNWL_INTEL(ALDERLAKE_L, NO_MMIO),
257     +
258     VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
259     VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
260     VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
261     @@ -1064,9 +1070,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
262     VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
263     VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
264    
265     - VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
266     - VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
267     - VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
268     + VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
269     + VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
270     + VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
271    
272     /*
273     * Technically, swapgs isn't serializing on AMD (despite it previously
274     @@ -1081,18 +1087,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
275     VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
276    
277     /* AMD Family 0xf - 0x12 */
278     - VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
279     - VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
280     - VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
281     - VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
282     + VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
283     + VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
284     + VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
285     + VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
286    
287     /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
288     - VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
289     - VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
290     + VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
291     + VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
292    
293     /* Zhaoxin Family 7 */
294     - VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2),
295     - VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2),
296     + VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_MMIO),
297     + VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_MMIO),
298     {}
299     };
300    
301     @@ -1234,10 +1240,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
302     * Affected CPU list is generally enough to enumerate the vulnerability,
303     * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
304     * not want the guest to enumerate the bug.
305     + *
306     + * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
307     + * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
308     */
309     - if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
310     - !arch_cap_mmio_immune(ia32_cap))
311     - setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
312     + if (!arch_cap_mmio_immune(ia32_cap)) {
313     + if (cpu_matches(cpu_vuln_blacklist, MMIO))
314     + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
315     + else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
316     + setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
317     + }
318    
319     if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
320     !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
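
Reduced to plain C, the classification the hunk implements looks roughly like the sketch below (function and flag names invented; only the decision order is taken from the patch):

    #include <stdio.h>
    #include <stdbool.h>

    enum mmio_status { MMIO_NOT_AFFECTED, MMIO_STALE_DATA, MMIO_UNKNOWN };

    static enum mmio_status classify(bool msr_immune, bool blacklisted,
                                     bool whitelisted_no_mmio)
    {
        if (msr_immune)
            return MMIO_NOT_AFFECTED;
        if (blacklisted)
            return MMIO_STALE_DATA;     /* X86_BUG_MMIO_STALE_DATA */
        if (!whitelisted_no_mmio)
            return MMIO_UNKNOWN;        /* X86_BUG_MMIO_UNKNOWN */
        return MMIO_NOT_AFFECTED;
    }

    int main(void)
    {
        /* e.g. an old CPU: no MSR immunity, not listed anywhere */
        printf("%d\n", classify(false, false, false));  /* 2 = unknown */
        return 0;
    }
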
321     diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
322     index b934f9f68a168..4e7c49fcf0030 100644
323     --- a/arch/x86/kernel/unwind_orc.c
324     +++ b/arch/x86/kernel/unwind_orc.c
325     @@ -90,22 +90,27 @@ static struct orc_entry *orc_find(unsigned long ip);
326     static struct orc_entry *orc_ftrace_find(unsigned long ip)
327     {
328     struct ftrace_ops *ops;
329     - unsigned long caller;
330     + unsigned long tramp_addr, offset;
331    
332     ops = ftrace_ops_trampoline(ip);
333     if (!ops)
334     return NULL;
335    
336     + /* Set tramp_addr to the start of the code copied by the trampoline */
337     if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
338     - caller = (unsigned long)ftrace_regs_call;
339     + tramp_addr = (unsigned long)ftrace_regs_caller;
340     else
341     - caller = (unsigned long)ftrace_call;
342     + tramp_addr = (unsigned long)ftrace_caller;
343     +
344     + /* Now advance tramp_addr to the offset within the trampoline that ip is at */
345     + offset = ip - ops->trampoline;
346     + tramp_addr += offset;
347    
348     /* Prevent unlikely recursion */
349     - if (ip == caller)
350     + if (ip == tramp_addr)
351     return NULL;
352    
353     - return orc_find(caller);
354     + return orc_find(tramp_addr);
355     }
356     #else
357     static struct orc_entry *orc_ftrace_find(unsigned long ip)
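
The fix above maps an address inside a dynamically allocated trampoline back to the static trampoline it was copied from, which is where ORC data actually exists. A worked example with invented addresses:

    #include <stdio.h>

    int main(void)
    {
        unsigned long ftrace_caller = 0xffff0000;   /* static copy source */
        unsigned long trampoline    = 0xffffa000;   /* dynamic copy */
        unsigned long ip            = 0xffffa01c;   /* address being unwound */

        unsigned long tramp_addr = ftrace_caller + (ip - trampoline);

        /* 0xffff001c: the equivalent location in the static trampoline */
        printf("tramp_addr=%#lx\n", tramp_addr);
        return 0;
    }
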
358     diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
359     index 41feb88ee92d6..458b4d99fb4e5 100644
360     --- a/drivers/acpi/processor_thermal.c
361     +++ b/drivers/acpi/processor_thermal.c
362     @@ -150,7 +150,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
363     unsigned int cpu;
364    
365     for_each_cpu(cpu, policy->related_cpus) {
366     - struct acpi_processor *pr = per_cpu(processors, policy->cpu);
367     + struct acpi_processor *pr = per_cpu(processors, cpu);
368    
369     if (pr)
370     freq_qos_remove_request(&pr->thermal_req);
371     diff --git a/drivers/android/binder.c b/drivers/android/binder.c
372     index b9fb2a9269443..c273d0df69394 100644
373     --- a/drivers/android/binder.c
374     +++ b/drivers/android/binder.c
375     @@ -6083,6 +6083,7 @@ const struct file_operations binder_fops = {
376     .open = binder_open,
377     .flush = binder_flush,
378     .release = binder_release,
379     + .may_pollfree = true,
380     };
381    
382     static int __init init_binder_device(const char *name)
383     diff --git a/drivers/block/loop.c b/drivers/block/loop.c
384     index 6b3e27b8cd245..b8f57b1c2864b 100644
385     --- a/drivers/block/loop.c
386     +++ b/drivers/block/loop.c
387     @@ -1397,6 +1397,11 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
388     info->lo_number = lo->lo_number;
389     info->lo_offset = lo->lo_offset;
390     info->lo_sizelimit = lo->lo_sizelimit;
391     +
392     + /* loff_t vars have been assigned __u64 */
393     + if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
394     + return -EOVERFLOW;
395     +
396     info->lo_flags = lo->lo_flags;
397     memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
398     memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
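
The new check matters because struct loop_info64 exposes these fields as __u64. A sketch (values invented) of what a negative loff_t would look like to user space without the -EOVERFLOW bail-out:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t lo_offset = -4096;                  /* negative offset */
        uint64_t reported = (uint64_t)lo_offset;    /* what userspace saw */

        /* prints 18446744073709547520: better to fail with EOVERFLOW */
        printf("%llu\n", (unsigned long long)reported);
        return 0;
    }
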
399     diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
400     index c6e9b7bd7618c..80ccdf96093ff 100644
401     --- a/drivers/dma-buf/udmabuf.c
402     +++ b/drivers/dma-buf/udmabuf.c
403     @@ -287,7 +287,23 @@ static struct miscdevice udmabuf_misc = {
404    
405     static int __init udmabuf_dev_init(void)
406     {
407     - return misc_register(&udmabuf_misc);
408     + int ret;
409     +
410     + ret = misc_register(&udmabuf_misc);
411     + if (ret < 0) {
412     + pr_err("Could not initialize udmabuf device\n");
413     + return ret;
414     + }
415     +
416     + ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
417     + DMA_BIT_MASK(64));
418     + if (ret < 0) {
419     + pr_err("Could not setup DMA mask for udmabuf device\n");
420     + misc_deregister(&udmabuf_misc);
421     + return ret;
422     + }
423     +
424     + return 0;
425     }
426    
427     static void __exit udmabuf_dev_exit(void)
428     diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
429     index eca67d5d5b10d..721be82ccebec 100644
430     --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
431     +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
432     @@ -546,9 +546,11 @@ static void dce112_get_pix_clk_dividers_helper (
433     switch (pix_clk_params->color_depth) {
434     case COLOR_DEPTH_101010:
435     actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
436     + actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
437     break;
438     case COLOR_DEPTH_121212:
439     actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
440     + actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
441     break;
442     case COLOR_DEPTH_161616:
443     actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
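
Worked through with an invented input, the added rounding keeps the scaled clock (the value is in 100Hz units) on a multiple of 10 so later comparisons against programmed clocks line up:

    #include <stdio.h>

    int main(void)
    {
        unsigned int pix_clk_100hz = 1482502;           /* example input */

        unsigned int scaled = (pix_clk_100hz * 5) >> 2; /* 1853127 */
        scaled -= scaled % 10;                          /* 1853120 */

        printf("%u\n", scaled);
        return 0;
    }
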
444     diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
445     index 8b2f29f6dabd2..068e79fa3490d 100644
446     --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
447     +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
448     @@ -118,6 +118,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
449     while (tmp_mpcc != NULL) {
450     if (tmp_mpcc->dpp_id == dpp_id)
451     return tmp_mpcc;
452     +
453     + /* avoid circular linked list */
454     + ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
455     + if (tmp_mpcc == tmp_mpcc->mpcc_bot)
456     + break;
457     +
458     tmp_mpcc = tmp_mpcc->mpcc_bot;
459     }
460     return NULL;
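
The guard bounds the walk when the tree is corrupted into a self-loop, which would otherwise spin forever. A self-contained sketch of the same defensive traversal (toy node type; struct mpcc's mpcc_bot is the real-world analogue):

    #include <stdio.h>
    #include <stddef.h>

    struct node {
        int id;
        struct node *bot;
    };

    static struct node *find(struct node *head, int id)
    {
        for (struct node *n = head; n; n = n->bot) {
            if (n->id == id)
                return n;
            if (n == n->bot)    /* corrupt list: self-loop */
                break;
        }
        return NULL;
    }

    int main(void)
    {
        struct node b = { .id = 2 };
        struct node a = { .id = 1, .bot = &b };

        b.bot = &b;             /* simulate the corruption */
        printf("%p\n", (void *)find(&a, 3));    /* NULL: no hang */
        return 0;
    }
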
461     diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
462     index e74a07d03fde9..4b0200e96eb77 100644
463     --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
464     +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
465     @@ -425,6 +425,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
466     OTG_CLOCK_ON, 1,
467     1, 1000);
468     } else {
469     +
470     + // last chance to clear the underflow flag; otherwise it will stay set because the clock is off.
471     + if (optc->funcs->is_optc_underflow_occurred(optc) == true)
472     + optc->funcs->clear_optc_underflow(optc);
473     +
474     REG_UPDATE_2(OTG_CLOCK_CONTROL,
475     OTG_CLOCK_GATE_DIS, 0,
476     OTG_CLOCK_EN, 0);
477     diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
478     index 5a188b2bc033c..0a00bd8e00abc 100644
479     --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
480     +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
481     @@ -488,6 +488,12 @@ struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
482     while (tmp_mpcc != NULL) {
483     if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
484     return tmp_mpcc;
485     +
486     + /* avoid circular linked list */
487     + ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
488     + if (tmp_mpcc == tmp_mpcc->mpcc_bot)
489     + break;
490     +
491     tmp_mpcc = tmp_mpcc->mpcc_bot;
492     }
493     return NULL;
494     diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
495     index a3b151b29bd71..fc616db4231bb 100644
496     --- a/drivers/hid/hid-steam.c
497     +++ b/drivers/hid/hid-steam.c
498     @@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
499     int ret;
500    
501     r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
502     + if (!r) {
503     + hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
504     + return -EINVAL;
505     + }
506     +
507     if (hid_report_len(r) < 64)
508     return -EINVAL;
509    
510     @@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
511     int ret;
512    
513     r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
514     + if (!r) {
515     + hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to send\n");
516     + return -EINVAL;
517     + }
518     +
519     if (hid_report_len(r) < 64)
520     return -EINVAL;
521    
522     diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
523     index b382c6bf2c5cb..f8ef6268f3f29 100644
524     --- a/drivers/hid/hidraw.c
525     +++ b/drivers/hid/hidraw.c
526     @@ -346,10 +346,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
527     unsigned int minor = iminor(inode);
528     struct hidraw_list *list = file->private_data;
529     unsigned long flags;
530     + int i;
531    
532     mutex_lock(&minors_lock);
533    
534     spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
535     + for (i = list->tail; i < list->head; i++)
536     + kfree(list->buffer[i].value);
537     list_del(&list->node);
538     spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
539     kfree(list);
540     diff --git a/drivers/md/md.c b/drivers/md/md.c
541     index 11fd3b32b5621..5226a23c72dba 100644
542     --- a/drivers/md/md.c
543     +++ b/drivers/md/md.c
544     @@ -6094,6 +6094,7 @@ void md_stop(struct mddev *mddev)
545     /* stop the array and free an attached data structures.
546     * This is called from dm-raid
547     */
548     + __md_stop_writes(mddev);
549     __md_stop(mddev);
550     bioset_exit(&mddev->bio_set);
551     bioset_exit(&mddev->sync_set);
552     diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
553     index 11e7fcfc3f195..d101fa8d61bb0 100644
554     --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
555     +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
556     @@ -2611,6 +2611,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
557     del_timer_sync(&hdw->encoder_run_timer);
558     del_timer_sync(&hdw->encoder_wait_timer);
559     flush_work(&hdw->workpoll);
560     + v4l2_device_unregister(&hdw->v4l2_dev);
561     usb_free_urb(hdw->ctl_read_urb);
562     usb_free_urb(hdw->ctl_write_urb);
563     kfree(hdw->ctl_read_buffer);
564     diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
565     index 31ed7616e84e7..0d6cd2a4cc416 100644
566     --- a/drivers/net/bonding/bond_3ad.c
567     +++ b/drivers/net/bonding/bond_3ad.c
568     @@ -1997,30 +1997,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
569     */
570     void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
571     {
572     - /* check that the bond is not initialized yet */
573     - if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
574     - bond->dev->dev_addr)) {
575     -
576     - BOND_AD_INFO(bond).aggregator_identifier = 0;
577     -
578     - BOND_AD_INFO(bond).system.sys_priority =
579     - bond->params.ad_actor_sys_prio;
580     - if (is_zero_ether_addr(bond->params.ad_actor_system))
581     - BOND_AD_INFO(bond).system.sys_mac_addr =
582     - *((struct mac_addr *)bond->dev->dev_addr);
583     - else
584     - BOND_AD_INFO(bond).system.sys_mac_addr =
585     - *((struct mac_addr *)bond->params.ad_actor_system);
586     + BOND_AD_INFO(bond).aggregator_identifier = 0;
587     + BOND_AD_INFO(bond).system.sys_priority =
588     + bond->params.ad_actor_sys_prio;
589     + if (is_zero_ether_addr(bond->params.ad_actor_system))
590     + BOND_AD_INFO(bond).system.sys_mac_addr =
591     + *((struct mac_addr *)bond->dev->dev_addr);
592     + else
593     + BOND_AD_INFO(bond).system.sys_mac_addr =
594     + *((struct mac_addr *)bond->params.ad_actor_system);
595    
596     - /* initialize how many times this module is called in one
597     - * second (should be about every 100ms)
598     - */
599     - ad_ticks_per_sec = tick_resolution;
600     + /* initialize how many times this module is called in one
601     + * second (should be about every 100ms)
602     + */
603     + ad_ticks_per_sec = tick_resolution;
604    
605     - bond_3ad_initiate_agg_selection(bond,
606     - AD_AGGREGATOR_SELECTION_TIMER *
607     - ad_ticks_per_sec);
608     - }
609     + bond_3ad_initiate_agg_selection(bond,
610     + AD_AGGREGATOR_SELECTION_TIMER *
611     + ad_ticks_per_sec);
612     }
613    
614     /**
615     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
616     index 452be9749827a..3434ad6824a05 100644
617     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
618     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
619     @@ -597,7 +597,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
620     hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
621     hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
622     if (bp->flags & BNXT_FLAG_CHIP_P5)
623     - hw_resc->max_irqs -= vf_msix * n;
624     + hw_resc->max_nqs -= vf_msix;
625    
626     rc = pf->active_vfs;
627     }
628     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
629     index 0be13a90ff792..d155181b939e4 100644
630     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
631     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
632     @@ -1211,7 +1211,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
633     struct cyclecounter cc;
634     unsigned long flags;
635     u32 incval = 0;
636     - u32 tsauxc = 0;
637     u32 fuse0 = 0;
638    
639     /* For some of the boards below this mask is technically incorrect.
640     @@ -1246,18 +1245,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
641     case ixgbe_mac_x550em_a:
642     case ixgbe_mac_X550:
643     cc.read = ixgbe_ptp_read_X550;
644     -
645     - /* enable SYSTIME counter */
646     - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
647     - IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
648     - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
649     - tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
650     - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
651     - tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
652     - IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
653     - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
654     -
655     - IXGBE_WRITE_FLUSH(hw);
656     break;
657     case ixgbe_mac_X540:
658     cc.read = ixgbe_ptp_read_82599;
659     @@ -1289,6 +1276,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
660     spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
661     }
662    
663     +/**
664     + * ixgbe_ptp_init_systime - Initialize SYSTIME registers
665     + * @adapter: the ixgbe private board structure
666     + *
667     + * Initialize and start the SYSTIME registers.
668     + */
669     +static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
670     +{
671     + struct ixgbe_hw *hw = &adapter->hw;
672     + u32 tsauxc;
673     +
674     + switch (hw->mac.type) {
675     + case ixgbe_mac_X550EM_x:
676     + case ixgbe_mac_x550em_a:
677     + case ixgbe_mac_X550:
678     + tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
679     +
680     + /* Reset SYSTIME registers to 0 */
681     + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
682     + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
683     + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
684     +
685     + /* Reset interrupt settings */
686     + IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
687     + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
688     +
689     + /* Activate the SYSTIME counter */
690     + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
691     + tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
692     + break;
693     + case ixgbe_mac_X540:
694     + case ixgbe_mac_82599EB:
695     + /* Reset SYSTIME registers to 0 */
696     + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
697     + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
698     + break;
699     + default:
700     + /* Other devices aren't supported */
701     + return;
702     + };
703     +
704     + IXGBE_WRITE_FLUSH(hw);
705     +}
706     +
707     /**
708     * ixgbe_ptp_reset
709     * @adapter: the ixgbe private board structure
710     @@ -1315,6 +1346,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
711    
712     ixgbe_ptp_start_cyclecounter(adapter);
713    
714     + ixgbe_ptp_init_systime(adapter);
715     +
716     spin_lock_irqsave(&adapter->tmreg_lock, flags);
717     timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
718     ktime_to_ns(ktime_get_real()));
719     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
720     index 88b51f64a64ea..f448a139e222e 100644
721     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
722     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
723     @@ -1434,6 +1434,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
724    
725     params->num_tc = 1;
726     params->tunneled_offload_en = false;
727     + if (rep->vport != MLX5_VPORT_UPLINK)
728     + params->vlan_strip_disable = true;
729    
730     mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
731    
732     diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
733     index 383d72415c659..87327086ea8ca 100644
734     --- a/drivers/net/ethernet/moxa/moxart_ether.c
735     +++ b/drivers/net/ethernet/moxa/moxart_ether.c
736     @@ -74,11 +74,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
737     static void moxart_mac_free_memory(struct net_device *ndev)
738     {
739     struct moxart_mac_priv_t *priv = netdev_priv(ndev);
740     - int i;
741     -
742     - for (i = 0; i < RX_DESC_NUM; i++)
743     - dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
744     - priv->rx_buf_size, DMA_FROM_DEVICE);
745    
746     if (priv->tx_desc_base)
747     dma_free_coherent(&priv->pdev->dev,
748     @@ -193,6 +188,7 @@ static int moxart_mac_open(struct net_device *ndev)
749     static int moxart_mac_stop(struct net_device *ndev)
750     {
751     struct moxart_mac_priv_t *priv = netdev_priv(ndev);
752     + int i;
753    
754     napi_disable(&priv->napi);
755    
756     @@ -204,6 +200,11 @@ static int moxart_mac_stop(struct net_device *ndev)
757     /* disable all functions */
758     writel(0, priv->base + REG_MAC_CTRL);
759    
760     + /* unmap areas mapped in moxart_mac_setup_desc_ring() */
761     + for (i = 0; i < RX_DESC_NUM; i++)
762     + dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
763     + priv->rx_buf_size, DMA_FROM_DEVICE);
764     +
765     return 0;
766     }
767    
768     diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
769     index 1cedb634f4f7b..f01078b2581ce 100644
770     --- a/drivers/net/ipvlan/ipvtap.c
771     +++ b/drivers/net/ipvlan/ipvtap.c
772     @@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
773     .notifier_call = ipvtap_device_event,
774     };
775    
776     -static int ipvtap_init(void)
777     +static int __init ipvtap_init(void)
778     {
779     int err;
780    
781     @@ -228,7 +228,7 @@ out1:
782     }
783     module_init(ipvtap_init);
784    
785     -static void ipvtap_exit(void)
786     +static void __exit ipvtap_exit(void)
787     {
788     rtnl_link_unregister(&ipvtap_link_ops);
789     unregister_netdevice_notifier(&ipvtap_notifier_block);
790     diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
791     index 4c02439d3776d..ca3f18aa16acb 100644
792     --- a/drivers/pinctrl/pinctrl-amd.c
793     +++ b/drivers/pinctrl/pinctrl-amd.c
794     @@ -793,6 +793,7 @@ static int amd_gpio_suspend(struct device *dev)
795     {
796     struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
797     struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
798     + unsigned long flags;
799     int i;
800    
801     for (i = 0; i < desc->npins; i++) {
802     @@ -801,7 +802,9 @@ static int amd_gpio_suspend(struct device *dev)
803     if (!amd_gpio_should_save(gpio_dev, pin))
804     continue;
805    
806     - gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
807     + raw_spin_lock_irqsave(&gpio_dev->lock, flags);
808     + gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
809     + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
810     }
811    
812     return 0;
813     @@ -811,6 +814,7 @@ static int amd_gpio_resume(struct device *dev)
814     {
815     struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
816     struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
817     + unsigned long flags;
818     int i;
819    
820     for (i = 0; i < desc->npins; i++) {
821     @@ -819,7 +823,10 @@ static int amd_gpio_resume(struct device *dev)
822     if (!amd_gpio_should_save(gpio_dev, pin))
823     continue;
824    
825     - writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
826     + raw_spin_lock_irqsave(&gpio_dev->lock, flags);
827     + gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
828     + writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
829     + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
830     }
831    
832     return 0;
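
The masking makes sense once the status bits are understood as write-1-to-clear: blindly restoring a stale snapshot could ack an interrupt that fired during suspend. A sketch with an invented bit layout:

    #include <stdio.h>
    #include <stdint.h>

    #define PIN_IRQ_PENDING (1u << 28)  /* placeholder for the status bits */

    int main(void)
    {
        uint32_t hw_reg = 0x00000013;           /* config bits at suspend */
        uint32_t saved = hw_reg & ~PIN_IRQ_PENDING;

        hw_reg |= PIN_IRQ_PENDING;              /* IRQ fires while suspended */

        /* resume: merge in the live pending bit instead of clearing it */
        saved |= hw_reg & PIN_IRQ_PENDING;
        hw_reg = saved;                         /* simulated writel() */

        printf("%#x\n", hw_reg);                /* 0x10000013: bit preserved */
        return 0;
    }
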
833     diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
834     index 5087ed6afbdc3..8d1b19b2322f5 100644
835     --- a/drivers/scsi/storvsc_drv.c
836     +++ b/drivers/scsi/storvsc_drv.c
837     @@ -1846,7 +1846,7 @@ static int storvsc_probe(struct hv_device *device,
838     */
839     host_dev->handle_error_wq =
840     alloc_ordered_workqueue("storvsc_error_wq_%d",
841     - WQ_MEM_RECLAIM,
842     + 0,
843     host->host_no);
844     if (!host_dev->handle_error_wq)
845     goto err_out2;
846     diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
847     index a9399f2b39308..8bedf0504e92f 100644
848     --- a/drivers/usb/cdns3/gadget.c
849     +++ b/drivers/usb/cdns3/gadget.c
850     @@ -2166,6 +2166,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
851     struct usb_request *request;
852     struct cdns3_request *priv_req;
853     struct cdns3_trb *trb = NULL;
854     + struct cdns3_trb trb_tmp;
855     int ret;
856     int val;
857    
858     @@ -2175,8 +2176,10 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
859     if (request) {
860     priv_req = to_cdns3_request(request);
861     trb = priv_req->trb;
862     - if (trb)
863     + if (trb) {
864     + trb_tmp = *trb;
865     trb->control = trb->control ^ TRB_CYCLE;
866     + }
867     }
868    
869     writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
870     @@ -2191,7 +2194,8 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
871    
872     if (request) {
873     if (trb)
874     - trb->control = trb->control ^ TRB_CYCLE;
875     + *trb = trb_tmp;
876     +
877     cdns3_rearm_transfer(priv_ep, 1);
878     }
879    
880     diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
881     index 1dcf02e12af4f..8ae010f07d7da 100644
882     --- a/drivers/video/fbdev/pm2fb.c
883     +++ b/drivers/video/fbdev/pm2fb.c
884     @@ -616,6 +616,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
885     return -EINVAL;
886     }
887    
888     + if (!var->pixclock) {
889     + DPRINTK("pixclock is zero\n");
890     + return -EINVAL;
891     + }
892     +
893     if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
894     DPRINTK("pixclock too high (%ldKHz)\n",
895     PICOS2KHZ(var->pixclock));
896     diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
897     index cd77c0621a555..c2e5fe972f566 100644
898     --- a/fs/btrfs/ctree.h
899     +++ b/fs/btrfs/ctree.h
900     @@ -2727,7 +2727,7 @@ struct btrfs_dir_item *
901     btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
902     struct btrfs_root *root,
903     struct btrfs_path *path, u64 dir,
904     - u64 objectid, const char *name, int name_len,
905     + u64 index, const char *name, int name_len,
906     int mod);
907     struct btrfs_dir_item *
908     btrfs_search_dir_index_item(struct btrfs_root *root,
909     diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
910     index 1cb7f5d79765d..444e1e5d012e4 100644
911     --- a/fs/btrfs/dev-replace.c
912     +++ b/fs/btrfs/dev-replace.c
913     @@ -125,7 +125,7 @@ no_valid_dev_replace_entry_found:
914     if (btrfs_find_device(fs_info->fs_devices,
915     BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
916     btrfs_err(fs_info,
917     - "replace devid present without an active replace item");
918     +"replace without active item, run 'device scan --forget' on the target device");
919     ret = -EUCLEAN;
920     } else {
921     dev_replace->srcdev = NULL;
922     @@ -918,8 +918,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
923     up_write(&dev_replace->rwsem);
924    
925     /* Scrub for replace must not be running in suspended state */
926     - ret = btrfs_scrub_cancel(fs_info);
927     - ASSERT(ret != -ENOTCONN);
928     + btrfs_scrub_cancel(fs_info);
929    
930     trans = btrfs_start_transaction(root, 0);
931     if (IS_ERR(trans)) {
932     diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
933     index 863367c2c6205..98c6faa8ce15b 100644
934     --- a/fs/btrfs/dir-item.c
935     +++ b/fs/btrfs/dir-item.c
936     @@ -171,10 +171,40 @@ out_free:
937     return 0;
938     }
939    
940     +static struct btrfs_dir_item *btrfs_lookup_match_dir(
941     + struct btrfs_trans_handle *trans,
942     + struct btrfs_root *root, struct btrfs_path *path,
943     + struct btrfs_key *key, const char *name,
944     + int name_len, int mod)
945     +{
946     + const int ins_len = (mod < 0 ? -1 : 0);
947     + const int cow = (mod != 0);
948     + int ret;
949     +
950     + ret = btrfs_search_slot(trans, root, key, path, ins_len, cow);
951     + if (ret < 0)
952     + return ERR_PTR(ret);
953     + if (ret > 0)
954     + return ERR_PTR(-ENOENT);
955     +
956     + return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
957     +}
958     +
959     /*
960     - * lookup a directory item based on name. 'dir' is the objectid
961     - * we're searching in, and 'mod' tells us if you plan on deleting the
962     - * item (use mod < 0) or changing the options (use mod > 0)
963     + * Lookup for a directory item by name.
964     + *
965     + * @trans: The transaction handle to use. Can be NULL if @mod is 0.
966     + * @root: The root of the target tree.
967     + * @path: Path to use for the search.
968     + * @dir: The inode number (objectid) of the directory.
969     + * @name: The name associated to the directory entry we are looking for.
970     + * @name_len: The length of the name.
971     + * @mod: Used to indicate if the tree search is meant for a read only
972     + * lookup, for a modification lookup or for a deletion lookup, so
973     + * its value should be 0, 1 or -1, respectively.
974     + *
975     + * Returns: NULL if the dir item does not exist, an error pointer if an error
976     + * happened, or a pointer to a dir item if a dir item exists for the given name.
977     */
978     struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
979     struct btrfs_root *root,
980     @@ -182,23 +212,18 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
981     const char *name, int name_len,
982     int mod)
983     {
984     - int ret;
985     struct btrfs_key key;
986     - int ins_len = mod < 0 ? -1 : 0;
987     - int cow = mod != 0;
988     + struct btrfs_dir_item *di;
989    
990     key.objectid = dir;
991     key.type = BTRFS_DIR_ITEM_KEY;
992     -
993     key.offset = btrfs_name_hash(name, name_len);
994    
995     - ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
996     - if (ret < 0)
997     - return ERR_PTR(ret);
998     - if (ret > 0)
999     + di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
1000     + if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
1001     return NULL;
1002    
1003     - return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
1004     + return di;
1005     }
1006    
1007     int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
1008     @@ -212,7 +237,6 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
1009     int slot;
1010     struct btrfs_path *path;
1011    
1012     -
1013     path = btrfs_alloc_path();
1014     if (!path)
1015     return -ENOMEM;
1016     @@ -221,20 +245,20 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
1017     key.type = BTRFS_DIR_ITEM_KEY;
1018     key.offset = btrfs_name_hash(name, name_len);
1019    
1020     - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1021     -
1022     - /* return back any errors */
1023     - if (ret < 0)
1024     - goto out;
1025     + di = btrfs_lookup_match_dir(NULL, root, path, &key, name, name_len, 0);
1026     + if (IS_ERR(di)) {
1027     + ret = PTR_ERR(di);
1028     + /* Nothing found, we're safe */
1029     + if (ret == -ENOENT) {
1030     + ret = 0;
1031     + goto out;
1032     + }
1033    
1034     - /* nothing found, we're safe */
1035     - if (ret > 0) {
1036     - ret = 0;
1037     - goto out;
1038     + if (ret < 0)
1039     + goto out;
1040     }
1041    
1042     /* we found an item, look for our name in the item */
1043     - di = btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
1044     if (di) {
1045     /* our exact name was found */
1046     ret = -EEXIST;
1047     @@ -261,35 +285,42 @@ out:
1048     }
1049    
1050     /*
1051     - * lookup a directory item based on index. 'dir' is the objectid
1052     - * we're searching in, and 'mod' tells us if you plan on deleting the
1053     - * item (use mod < 0) or changing the options (use mod > 0)
1054     + * Lookup for a directory index item by name and index number.
1055     + *
1056     + * @trans: The transaction handle to use. Can be NULL if @mod is 0.
1057     + * @root: The root of the target tree.
1058     + * @path: Path to use for the search.
1059     + * @dir: The inode number (objectid) of the directory.
1060     + * @index: The index number.
1061     + * @name: The name associated to the directory entry we are looking for.
1062     + * @name_len: The length of the name.
1063     + * @mod: Used to indicate if the tree search is meant for a read only
1064     + * lookup, for a modification lookup or for a deletion lookup, so
1065     + * its value should be 0, 1 or -1, respectively.
1066     *
1067     - * The name is used to make sure the index really points to the name you were
1068     - * looking for.
1069     + * Returns: NULL if the dir index item does not exist, an error pointer if an
1070     + * error happened, or a pointer to a dir item if the dir index item exists and
1071     + * matches the criteria (name and index number).
1072     */
1073     struct btrfs_dir_item *
1074     btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
1075     struct btrfs_root *root,
1076     struct btrfs_path *path, u64 dir,
1077     - u64 objectid, const char *name, int name_len,
1078     + u64 index, const char *name, int name_len,
1079     int mod)
1080     {
1081     - int ret;
1082     + struct btrfs_dir_item *di;
1083     struct btrfs_key key;
1084     - int ins_len = mod < 0 ? -1 : 0;
1085     - int cow = mod != 0;
1086    
1087     key.objectid = dir;
1088     key.type = BTRFS_DIR_INDEX_KEY;
1089     - key.offset = objectid;
1090     + key.offset = index;
1091    
1092     - ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
1093     - if (ret < 0)
1094     - return ERR_PTR(ret);
1095     - if (ret > 0)
1096     - return ERR_PTR(-ENOENT);
1097     - return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
1098     + di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
1099     + if (di == ERR_PTR(-ENOENT))
1100     + return NULL;
1101     +
1102     + return di;
1103     }
1104    
1105     struct btrfs_dir_item *
1106     @@ -346,21 +377,18 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
1107     const char *name, u16 name_len,
1108     int mod)
1109     {
1110     - int ret;
1111     struct btrfs_key key;
1112     - int ins_len = mod < 0 ? -1 : 0;
1113     - int cow = mod != 0;
1114     + struct btrfs_dir_item *di;
1115    
1116     key.objectid = dir;
1117     key.type = BTRFS_XATTR_ITEM_KEY;
1118     key.offset = btrfs_name_hash(name, name_len);
1119     - ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
1120     - if (ret < 0)
1121     - return ERR_PTR(ret);
1122     - if (ret > 0)
1123     +
1124     + di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
1125     + if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
1126     return NULL;
1127    
1128     - return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
1129     + return di;
1130     }
1131    
1132     /*
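
The refactor above standardizes on the kernel's ERR_PTR convention: btrfs_lookup_match_dir() encodes "not found" as ERR_PTR(-ENOENT), and the public lookup helpers translate only that case back to NULL while passing real errors through. A user-space rendition of the scheme (MAX_ERRNO trick modelled on the kernel's; lookup logic reduced to its skeleton):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *lookup_match_dir(int search_ret)
    {
        if (search_ret < 0)
            return ERR_PTR(search_ret);     /* real failure */
        if (search_ret > 0)
            return ERR_PTR(-ENOENT);        /* slot found, no item */
        return (void *)0x1;                 /* stand-in for the item */
    }

    int main(void)
    {
        void *di = lookup_match_dir(1);

        if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
            di = NULL;                      /* public API: not found */
        printf("%p\n", di);                 /* NULL */
        return 0;
    }
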
1133     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1134     index 7755a0362a3ad..20c5db8ef8427 100644
1135     --- a/fs/btrfs/inode.c
1136     +++ b/fs/btrfs/inode.c
1137     @@ -9751,8 +9751,6 @@ static int btrfs_rename_exchange(struct inode *old_dir,
1138     /* force full log commit if subvolume involved. */
1139     btrfs_set_log_full_commit(trans);
1140     } else {
1141     - btrfs_pin_log_trans(root);
1142     - root_log_pinned = true;
1143     ret = btrfs_insert_inode_ref(trans, dest,
1144     new_dentry->d_name.name,
1145     new_dentry->d_name.len,
1146     @@ -9768,8 +9766,6 @@ static int btrfs_rename_exchange(struct inode *old_dir,
1147     /* force full log commit if subvolume involved. */
1148     btrfs_set_log_full_commit(trans);
1149     } else {
1150     - btrfs_pin_log_trans(dest);
1151     - dest_log_pinned = true;
1152     ret = btrfs_insert_inode_ref(trans, root,
1153     old_dentry->d_name.name,
1154     old_dentry->d_name.len,
1155     @@ -9797,6 +9793,29 @@ static int btrfs_rename_exchange(struct inode *old_dir,
1156     BTRFS_I(new_inode), 1);
1157     }
1158    
1159     + /*
1160     + * Now pin the logs of the roots. We do it to ensure that no other task
1161     + * can sync the logs while we are in progress with the rename, because
1162     + * that could result in an inconsistency in case any of the inodes that
1163     + * are part of this rename operation were logged before.
1164     + *
1165     + * We pin the logs even if at this precise moment none of the inodes was
1166     + * logged before. This is because right after we checked for that, some
1167     + * other task fsyncing some other inode not involved with this rename
1168     + * operation could log that one of our inodes exists.
1169     + *
1170     + * We don't need to pin the logs before the above calls to
1171     + * btrfs_insert_inode_ref(), since those don't ever need to change a log.
1172     + */
1173     + if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
1174     + btrfs_pin_log_trans(root);
1175     + root_log_pinned = true;
1176     + }
1177     + if (new_ino != BTRFS_FIRST_FREE_OBJECTID) {
1178     + btrfs_pin_log_trans(dest);
1179     + dest_log_pinned = true;
1180     + }
1181     +
1182     /* src is a subvolume */
1183     if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
1184     ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
1185     @@ -10046,8 +10065,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1186     /* force full log commit if subvolume involved. */
1187     btrfs_set_log_full_commit(trans);
1188     } else {
1189     - btrfs_pin_log_trans(root);
1190     - log_pinned = true;
1191     ret = btrfs_insert_inode_ref(trans, dest,
1192     new_dentry->d_name.name,
1193     new_dentry->d_name.len,
1194     @@ -10071,6 +10088,25 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1195     if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
1196     ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
1197     } else {
1198     + /*
1199     + * Now pin the log. We do it to ensure that no other task can
1200     + * sync the log while we are in progress with the rename, as
1201     + * that could result in an inconsistency in case any of the
1202     + * inodes that are part of this rename operation were logged
1203     + * before.
1204     + *
1205     + * We pin the log even if at this precise moment none of the
1206     + * inodes was logged before. This is because right after we
1207     + * checked for that, some other task fsyncing some other inode
1208     + * not involved with this rename operation could log that one of
1209     + * our inodes exists.
1210     + *
1211     + * We don't need to pin the logs before the above call to
1212     + * btrfs_insert_inode_ref(), since that does not need to change
1213     + * a log.
1214     + */
1215     + btrfs_pin_log_trans(root);
1216     + log_pinned = true;
1217     ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
1218     BTRFS_I(d_inode(old_dentry)),
1219     old_dentry->d_name.name,
1220     diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
1221     index 0d07ebe511e7f..ba4e198811a47 100644
1222     --- a/fs/btrfs/root-tree.c
1223     +++ b/fs/btrfs/root-tree.c
1224     @@ -371,9 +371,10 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
1225     key.offset = ref_id;
1226     again:
1227     ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
1228     - if (ret < 0)
1229     + if (ret < 0) {
1230     + err = ret;
1231     goto out;
1232     - if (ret == 0) {
1233     + } else if (ret == 0) {
1234     leaf = path->nodes[0];
1235     ref = btrfs_item_ptr(leaf, path->slots[0],
1236     struct btrfs_root_ref);
1237     diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
1238     index 368c43c6cbd08..d15de5abb562d 100644
1239     --- a/fs/btrfs/tree-checker.c
1240     +++ b/fs/btrfs/tree-checker.c
1241     @@ -1019,7 +1019,8 @@ static void extent_err(const struct extent_buffer *eb, int slot,
1242     }
1243    
1244     static int check_extent_item(struct extent_buffer *leaf,
1245     - struct btrfs_key *key, int slot)
1246     + struct btrfs_key *key, int slot,
1247     + struct btrfs_key *prev_key)
1248     {
1249     struct btrfs_fs_info *fs_info = leaf->fs_info;
1250     struct btrfs_extent_item *ei;
1251     @@ -1230,6 +1231,26 @@ static int check_extent_item(struct extent_buffer *leaf,
1252     total_refs, inline_refs);
1253     return -EUCLEAN;
1254     }
1255     +
1256     + if ((prev_key->type == BTRFS_EXTENT_ITEM_KEY) ||
1257     + (prev_key->type == BTRFS_METADATA_ITEM_KEY)) {
1258     + u64 prev_end = prev_key->objectid;
1259     +
1260     + if (prev_key->type == BTRFS_METADATA_ITEM_KEY)
1261     + prev_end += fs_info->nodesize;
1262     + else
1263     + prev_end += prev_key->offset;
1264     +
1265     + if (unlikely(prev_end > key->objectid)) {
1266     + extent_err(leaf, slot,
1267     + "previous extent [%llu %u %llu] overlaps current extent [%llu %u %llu]",
1268     + prev_key->objectid, prev_key->type,
1269     + prev_key->offset, key->objectid, key->type,
1270     + key->offset);
1271     + return -EUCLEAN;
1272     + }
1273     + }
1274     +
1275     return 0;
1276     }
1277    
1278     @@ -1343,7 +1364,7 @@ static int check_leaf_item(struct extent_buffer *leaf,
1279     break;
1280     case BTRFS_EXTENT_ITEM_KEY:
1281     case BTRFS_METADATA_ITEM_KEY:
1282     - ret = check_extent_item(leaf, key, slot);
1283     + ret = check_extent_item(leaf, key, slot, prev_key);
1284     break;
1285     case BTRFS_TREE_BLOCK_REF_KEY:
1286     case BTRFS_SHARED_DATA_REF_KEY:
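
The new tree-checker rule rejects leaves whose extent items overlap: a METADATA_ITEM covers nodesize bytes from its objectid, an EXTENT_ITEM covers key->offset bytes, and the previous item's end must not extend past the current item's start. Expressed as a standalone predicate (type constants and sample geometry invented):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define METADATA_ITEM 1
    #define EXTENT_ITEM   2

    static bool overlaps(uint64_t prev_objectid, int prev_type,
                         uint64_t prev_offset, uint64_t nodesize,
                         uint64_t cur_objectid)
    {
        uint64_t prev_end = prev_objectid;

        prev_end += (prev_type == METADATA_ITEM) ? nodesize : prev_offset;
        return prev_end > cur_objectid;
    }

    int main(void)
    {
        /* extent [1M, +128K) followed by an extent starting at 1M+64K */
        printf("%d\n", overlaps(1048576, EXTENT_ITEM, 131072, 16384,
                                1048576 + 65536));      /* 1: -EUCLEAN */
        return 0;
    }
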
1287     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1288     index bebd74267bed6..926b1d34e55cc 100644
1289     --- a/fs/btrfs/tree-log.c
1290     +++ b/fs/btrfs/tree-log.c
1291     @@ -918,8 +918,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
1292     di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
1293     index, name, name_len, 0);
1294     if (IS_ERR(di)) {
1295     - if (PTR_ERR(di) != -ENOENT)
1296     - ret = PTR_ERR(di);
1297     + ret = PTR_ERR(di);
1298     goto out;
1299     } else if (di) {
1300     btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
1301     @@ -1171,8 +1170,7 @@ next:
1302     di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1303     ref_index, name, namelen, 0);
1304     if (IS_ERR(di)) {
1305     - if (PTR_ERR(di) != -ENOENT)
1306     - return PTR_ERR(di);
1307     + return PTR_ERR(di);
1308     } else if (di) {
1309     ret = drop_one_dir_item(trans, root, path, dir, di);
1310     if (ret)
1311     @@ -2022,9 +2020,6 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1312     goto out;
1313     }
1314    
1315     - if (dst_di == ERR_PTR(-ENOENT))
1316     - dst_di = NULL;
1317     -
1318     if (IS_ERR(dst_di)) {
1319     ret = PTR_ERR(dst_di);
1320     goto out;
1321     @@ -2309,7 +2304,7 @@ again:
1322     dir_key->offset,
1323     name, name_len, 0);
1324     }
1325     - if (!log_di || log_di == ERR_PTR(-ENOENT)) {
1326     + if (!log_di) {
1327     btrfs_dir_item_key_to_cpu(eb, di, &location);
1328     btrfs_release_path(path);
1329     btrfs_release_path(log_path);
1330     @@ -3522,8 +3517,7 @@ out_unlock:
1331     if (err == -ENOSPC) {
1332     btrfs_set_log_full_commit(trans);
1333     err = 0;
1334     - } else if (err < 0 && err != -ENOENT) {
1335     - /* ENOENT can be returned if the entry hasn't been fsynced yet */
1336     + } else if (err < 0) {
1337     btrfs_abort_transaction(trans, err);
1338     }
1339    
1340     diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
1341     index 48858510739b2..cd7ddf24157a3 100644
1342     --- a/fs/btrfs/xattr.c
1343     +++ b/fs/btrfs/xattr.c
1344     @@ -387,6 +387,9 @@ static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
1345     const char *name, const void *buffer,
1346     size_t size, int flags)
1347     {
1348     + if (btrfs_root_readonly(BTRFS_I(inode)->root))
1349     + return -EROFS;
1350     +
1351     name = xattr_full_name(handler, name);
1352     return btrfs_setxattr_trans(inode, name, buffer, size, flags);
1353     }
1354     diff --git a/fs/io_uring.c b/fs/io_uring.c
1355     index e73969fa96bcb..501c7e14c07cf 100644
1356     --- a/fs/io_uring.c
1357     +++ b/fs/io_uring.c
1358     @@ -1908,6 +1908,9 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1359     __poll_t mask;
1360     u16 events;
1361    
1362     + if (req->file->f_op->may_pollfree)
1363     + return -EOPNOTSUPP;
1364     +
1365     if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1366     return -EINVAL;
1367     if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1368     diff --git a/fs/signalfd.c b/fs/signalfd.c
1369     index 3e94d181930fd..c3415d969ecfc 100644
1370     --- a/fs/signalfd.c
1371     +++ b/fs/signalfd.c
1372     @@ -248,6 +248,7 @@ static const struct file_operations signalfd_fops = {
1373     .poll = signalfd_poll,
1374     .read = signalfd_read,
1375     .llseek = noop_llseek,
1376     + .may_pollfree = true,
1377     };
1378    
1379     static int do_signalfd4(int ufd, sigset_t *mask, int flags)
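The signalfd fix threads a capability flag through three layers: struct
file_operations grows a may_pollfree bit (below), signalfd sets it because
its wait queue can be freed out from under a poller, and io_poll_add()
refuses to poll such files. A userspace sketch of the flag-in-ops-table
idea, with hypothetical standalone types:

    #include <stdbool.h>
    #include <stdio.h>

    struct file_ops { const char *name; bool may_pollfree; };

    static const struct file_ops signalfd_ops = { "signalfd", true };
    static const struct file_ops socket_ops   = { "socket", false };

    static int poll_add(const struct file_ops *f)
    {
            if (f->may_pollfree)
                    return -95;   /* -EOPNOTSUPP: waitqueue may vanish */
            return 0;
    }

    int main(void)
    {
            printf("%s: %d\n", signalfd_ops.name, poll_add(&signalfd_ops));
            printf("%s: %d\n", socket_ops.name, poll_add(&socket_ops));
            return 0;
    }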
1380     diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
1381     index 66397ed10acb7..69ab5942bd14f 100644
1382     --- a/include/asm-generic/sections.h
1383     +++ b/include/asm-generic/sections.h
1384     @@ -114,7 +114,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
1385     /**
1386     * memory_intersects - checks if the region occupied by an object intersects
1387     * with another memory region
1388     - * @begin: virtual address of the beginning of the memory regien
1389     + * @begin: virtual address of the beginning of the memory region
1390     * @end: virtual address of the end of the memory region
1391     * @virt: virtual address of the memory object
1392     * @size: size of the memory object
1393     @@ -127,7 +127,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
1394     {
1395     void *vend = virt + size;
1396    
1397     - return (virt >= begin && virt < end) || (vend >= begin && vend < end);
1398     + if (virt < end && vend > begin)
1399     + return true;
1400     +
1401     + return false;
1402     }
1403    
1404     /**
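The rewritten memory_intersects() uses the canonical overlap test for
half-open ranges. The old expression only asked whether either endpoint of
the object fell inside the region, so an object that strictly contains the
region was reported as non-intersecting. A minimal userspace sketch of the
difference (buffer and offsets are arbitrary):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    static bool old_test(char *begin, char *end, char *virt, size_t size)
    {
            char *vend = virt + size;

            return (virt >= begin && virt < end) || (vend >= begin && vend < end);
    }

    static bool new_test(char *begin, char *end, char *virt, size_t size)
    {
            char *vend = virt + size;

            return virt < end && vend > begin;
    }

    int main(void)
    {
            char buf[100];

            /* object [20, 80) fully covers region [40, 60) */
            printf("old: %d  new: %d\n",
                   old_test(buf + 40, buf + 60, buf + 20, 60),
                   new_test(buf + 40, buf + 60, buf + 20, 60));
            return 0;
    }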
1405     diff --git a/include/linux/fs.h b/include/linux/fs.h
1406     index ef118b8ba6993..4ecbe12f62152 100644
1407     --- a/include/linux/fs.h
1408     +++ b/include/linux/fs.h
1409     @@ -1859,6 +1859,7 @@ struct file_operations {
1410     struct file *file_out, loff_t pos_out,
1411     loff_t len, unsigned int remap_flags);
1412     int (*fadvise)(struct file *, loff_t, loff_t, int);
1413     + bool may_pollfree;
1414     } __randomize_layout;
1415    
1416     struct inode_operations {
1417     diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
1418     index db472c9cd8e9d..f0d846df3a424 100644
1419     --- a/include/linux/netfilter_bridge/ebtables.h
1420     +++ b/include/linux/netfilter_bridge/ebtables.h
1421     @@ -94,10 +94,6 @@ struct ebt_table {
1422     struct ebt_replace_kernel *table;
1423     unsigned int valid_hooks;
1424     rwlock_t lock;
1425     - /* e.g. could be the table explicitly only allows certain
1426     - * matches, targets, ... 0 == let it in */
1427     - int (*check)(const struct ebt_table_info *info,
1428     - unsigned int valid_hooks);
1429     /* the data used by the kernel */
1430     struct ebt_table_info *private;
1431     struct module *me;
1432     diff --git a/include/linux/rmap.h b/include/linux/rmap.h
1433     index 91ccae9467164..c80bd129e9399 100644
1434     --- a/include/linux/rmap.h
1435     +++ b/include/linux/rmap.h
1436     @@ -39,12 +39,15 @@ struct anon_vma {
1437     atomic_t refcount;
1438    
1439     /*
1440     - * Count of child anon_vmas and VMAs which points to this anon_vma.
1441     + * Count of child anon_vmas. Equal to the number of anon_vmas that
1442     + * have ->parent pointing to this one, including itself.
1443     *
1444     * This counter is used for making decisions about reusing an anon_vma
1445     * instead of forking a new one. See comments in function anon_vma_clone.
1446     */
1447     - unsigned degree;
1448     + unsigned long num_children;
1449     + /* Count of VMAs whose ->anon_vma pointer points to this object. */
1450     + unsigned long num_active_vmas;
1451    
1452     struct anon_vma *parent; /* Parent of this anon_vma */
1453    
1454     diff --git a/include/linux/sched.h b/include/linux/sched.h
1455     index 171cb7475b450..d0e639497b107 100644
1456     --- a/include/linux/sched.h
1457     +++ b/include/linux/sched.h
1458     @@ -526,10 +526,6 @@ struct sched_dl_entity {
1459     * task has to wait for a replenishment to be performed at the
1460     * next firing of dl_timer.
1461     *
1462     - * @dl_boosted tells if we are boosted due to DI. If so we are
1463     - * outside bandwidth enforcement mechanism (but only until we
1464     - * exit the critical section);
1465     - *
1466     * @dl_yielded tells if task gave up the CPU before consuming
1467     * all its available runtime during the last job.
1468     *
1469     @@ -544,7 +540,6 @@ struct sched_dl_entity {
1470     * overruns.
1471     */
1472     unsigned int dl_throttled : 1;
1473     - unsigned int dl_boosted : 1;
1474     unsigned int dl_yielded : 1;
1475     unsigned int dl_non_contending : 1;
1476     unsigned int dl_overrun : 1;
1477     @@ -563,6 +558,15 @@ struct sched_dl_entity {
1478     * time.
1479     */
1480     struct hrtimer inactive_timer;
1481     +
1482     +#ifdef CONFIG_RT_MUTEXES
1483     + /*
1484     + * Priority Inheritance. When a DEADLINE scheduling entity is boosted,
1485     + * pi_se points to the donor; otherwise it points to the dl_se it
1486     + * belongs to (the original one/itself).
1487     + */
1488     + struct sched_dl_entity *pi_se;
1489     +#endif
1490     };
1491    
1492     #ifdef CONFIG_UCLAMP_TASK
1493     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1494     index b04b5bd43f541..680f71ecdc08b 100644
1495     --- a/include/linux/skbuff.h
1496     +++ b/include/linux/skbuff.h
1497     @@ -2201,6 +2201,14 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1498    
1499     #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1500    
1501     +static inline void skb_assert_len(struct sk_buff *skb)
1502     +{
1503     +#ifdef CONFIG_DEBUG_NET
1504     + if (WARN_ONCE(!skb->len, "%s\n", __func__))
1505     + DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
1506     +#endif /* CONFIG_DEBUG_NET */
1507     +}
1508     +
1509     /*
1510     * Add data to an sk_buff
1511     */
1512     diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
1513     index 9899b9af7f22f..16258c0c7319e 100644
1514     --- a/include/net/busy_poll.h
1515     +++ b/include/net/busy_poll.h
1516     @@ -31,7 +31,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
1517    
1518     static inline bool net_busy_loop_on(void)
1519     {
1520     - return sysctl_net_busy_poll;
1521     + return READ_ONCE(sysctl_net_busy_poll);
1522     }
1523    
1524     static inline bool sk_can_busy_loop(const struct sock *sk)
1525     diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
1526     index f0d2433184521..2973878162962 100644
1527     --- a/kernel/audit_fsnotify.c
1528     +++ b/kernel/audit_fsnotify.c
1529     @@ -102,6 +102,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa
1530    
1531     ret = fsnotify_add_inode_mark(&audit_mark->mark, inode, true);
1532     if (ret < 0) {
1533     + audit_mark->path = NULL;
1534     fsnotify_put_mark(&audit_mark->mark);
1535     audit_mark = ERR_PTR(ret);
1536     }
1537     diff --git a/kernel/kprobes.c b/kernel/kprobes.c
1538     index 671b51782182b..9631ecc8a34c9 100644
1539     --- a/kernel/kprobes.c
1540     +++ b/kernel/kprobes.c
1541     @@ -1737,11 +1737,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
1542     /* Try to disarm and disable this/parent probe */
1543     if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1544     /*
1545     - * If kprobes_all_disarmed is set, orig_p
1546     - * should have already been disarmed, so
1547     - * skip unneed disarming process.
1548     + * Don't be lazy here. Even if 'kprobes_all_disarmed'
1549     + * is false, 'orig_p' might not have been armed yet.
1550     + * Note that arm_all_kprobes() __tries__ to arm all kprobes
1551     + * on a best-effort basis.
1552     */
1553     - if (!kprobes_all_disarmed) {
1554     + if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
1555     ret = disarm_kprobe(orig_p, true);
1556     if (ret) {
1557     p->flags &= ~KPROBE_FLAG_DISABLED;
1558     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1559     index 5befdecefe947..06b686ef36e68 100644
1560     --- a/kernel/sched/core.c
1561     +++ b/kernel/sched/core.c
1562     @@ -4554,20 +4554,21 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
1563     if (!dl_prio(p->normal_prio) ||
1564     (pi_task && dl_prio(pi_task->prio) &&
1565     dl_entity_preempt(&pi_task->dl, &p->dl))) {
1566     - p->dl.dl_boosted = 1;
1567     + p->dl.pi_se = pi_task->dl.pi_se;
1568     queue_flag |= ENQUEUE_REPLENISH;
1569     - } else
1570     - p->dl.dl_boosted = 0;
1571     + } else {
1572     + p->dl.pi_se = &p->dl;
1573     + }
1574     p->sched_class = &dl_sched_class;
1575     } else if (rt_prio(prio)) {
1576     if (dl_prio(oldprio))
1577     - p->dl.dl_boosted = 0;
1578     + p->dl.pi_se = &p->dl;
1579     if (oldprio < prio)
1580     queue_flag |= ENQUEUE_HEAD;
1581     p->sched_class = &rt_sched_class;
1582     } else {
1583     if (dl_prio(oldprio))
1584     - p->dl.dl_boosted = 0;
1585     + p->dl.pi_se = &p->dl;
1586     if (rt_prio(oldprio))
1587     p->rt.timeout = 0;
1588     p->sched_class = &fair_sched_class;
1589     diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
1590     index 2bda9fdba31c4..d8052c2d87e49 100644
1591     --- a/kernel/sched/deadline.c
1592     +++ b/kernel/sched/deadline.c
1593     @@ -43,6 +43,28 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
1594     return !RB_EMPTY_NODE(&dl_se->rb_node);
1595     }
1596    
1597     +#ifdef CONFIG_RT_MUTEXES
1598     +static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
1599     +{
1600     + return dl_se->pi_se;
1601     +}
1602     +
1603     +static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
1604     +{
1605     + return pi_of(dl_se) != dl_se;
1606     +}
1607     +#else
1608     +static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
1609     +{
1610     + return dl_se;
1611     +}
1612     +
1613     +static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
1614     +{
1615     + return false;
1616     +}
1617     +#endif
1618     +
1619     #ifdef CONFIG_SMP
1620     static inline struct dl_bw *dl_bw_of(int i)
1621     {
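With the dl_boosted bit gone, "boosted" is encoded entirely in the pi_se
pointer: it points back at the entity itself when there is no donor, so
pi_of() always yields the parameters to run with and is_dl_boosted() reduces
to a pointer comparison. A userspace sketch of the self-pointer convention,
with simplified fields rather than the kernel's struct:

    #include <stdio.h>

    struct dl_entity {
            unsigned long long dl_runtime;
            unsigned long long dl_deadline;
            struct dl_entity *pi_se;
    };

    static struct dl_entity *pi_of(struct dl_entity *dl_se)
    {
            return dl_se->pi_se;
    }

    static int is_boosted(struct dl_entity *dl_se)
    {
            return pi_of(dl_se) != dl_se;
    }

    int main(void)
    {
            struct dl_entity donor = { 2000000, 10000000, &donor };
            struct dl_entity task  = { 500000, 100000000, &task };

            printf("boosted=%d runtime=%llu\n",
                   is_boosted(&task), pi_of(&task)->dl_runtime);

            task.pi_se = &donor;  /* boosted: adopt the donor's parameters */
            printf("boosted=%d runtime=%llu\n",
                   is_boosted(&task), pi_of(&task)->dl_runtime);
            return 0;
    }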
1622     @@ -657,7 +679,7 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
1623     struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1624     struct rq *rq = rq_of_dl_rq(dl_rq);
1625    
1626     - WARN_ON(dl_se->dl_boosted);
1627     + WARN_ON(is_dl_boosted(dl_se));
1628     WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
1629    
1630     /*
1631     @@ -695,21 +717,20 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
1632     * could happen are, typically, a entity voluntarily trying to overcome its
1633     * runtime, or it just underestimated it during sched_setattr().
1634     */
1635     -static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1636     - struct sched_dl_entity *pi_se)
1637     +static void replenish_dl_entity(struct sched_dl_entity *dl_se)
1638     {
1639     struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1640     struct rq *rq = rq_of_dl_rq(dl_rq);
1641    
1642     - BUG_ON(pi_se->dl_runtime <= 0);
1643     + BUG_ON(pi_of(dl_se)->dl_runtime <= 0);
1644    
1645     /*
1646     * could happen are, typically, an entity voluntarily trying to overcome its
1647     * Just go with full inherited parameters.
1648     */
1649     if (dl_se->dl_deadline == 0) {
1650     - dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1651     - dl_se->runtime = pi_se->dl_runtime;
1652     + dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
1653     + dl_se->runtime = pi_of(dl_se)->dl_runtime;
1654     }
1655    
1656     if (dl_se->dl_yielded && dl_se->runtime > 0)
1657     @@ -722,8 +743,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1658     * arbitrary large.
1659     */
1660     while (dl_se->runtime <= 0) {
1661     - dl_se->deadline += pi_se->dl_period;
1662     - dl_se->runtime += pi_se->dl_runtime;
1663     + dl_se->deadline += pi_of(dl_se)->dl_period;
1664     + dl_se->runtime += pi_of(dl_se)->dl_runtime;
1665     }
1666    
1667     /*
1668     @@ -737,8 +758,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1669     */
1670     if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
1671     printk_deferred_once("sched: DL replenish lagged too much\n");
1672     - dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1673     - dl_se->runtime = pi_se->dl_runtime;
1674     + dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
1675     + dl_se->runtime = pi_of(dl_se)->dl_runtime;
1676     }
1677    
1678     if (dl_se->dl_yielded)
1679     @@ -771,8 +792,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1680     * task with deadline equal to period this is the same of using
1681     * dl_period instead of dl_deadline in the equation above.
1682     */
1683     -static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
1684     - struct sched_dl_entity *pi_se, u64 t)
1685     +static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
1686     {
1687     u64 left, right;
1688    
1689     @@ -794,9 +814,9 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
1690     * of anything below microseconds resolution is actually fiction
1691     * (but still we want to give the user that illusion >;).
1692     */
1693     - left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
1694     + left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
1695     right = ((dl_se->deadline - t) >> DL_SCALE) *
1696     - (pi_se->dl_runtime >> DL_SCALE);
1697     + (pi_of(dl_se)->dl_runtime >> DL_SCALE);
1698    
1699     return dl_time_before(right, left);
1700     }
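The left/right comparison above multiplies two nanosecond-scale u64
quantities, so both factors are shifted down by DL_SCALE first; the product
then stays inside 64 bits at the cost of sub-microsecond precision, which
the in-tree comment dismisses as fiction anyway. A sketch of the trick
(DL_SCALE is 10 in the kernel headers; the sample values are arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    #define DL_SCALE 10

    int main(void)
    {
            uint64_t a = 100ULL * 1000 * 1000 * 1000;  /* 100s in ns */
            uint64_t b = 200ULL * 1000 * 1000 * 1000;  /* 200s in ns */

            /* a * b == 2e22 would wrap a 64-bit product (max ~1.8e19);
             * pre-shifting each factor keeps it comfortably in range. */
            uint64_t scaled = (a >> DL_SCALE) * (b >> DL_SCALE);

            printf("scaled product: %llu\n", (unsigned long long)scaled);
            return 0;
    }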
1701     @@ -881,24 +901,23 @@ static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
1702     * Please refer to the comments update_dl_revised_wakeup() function to find
1703     * more about the Revised CBS rule.
1704     */
1705     -static void update_dl_entity(struct sched_dl_entity *dl_se,
1706     - struct sched_dl_entity *pi_se)
1707     +static void update_dl_entity(struct sched_dl_entity *dl_se)
1708     {
1709     struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1710     struct rq *rq = rq_of_dl_rq(dl_rq);
1711    
1712     if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1713     - dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
1714     + dl_entity_overflow(dl_se, rq_clock(rq))) {
1715    
1716     if (unlikely(!dl_is_implicit(dl_se) &&
1717     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1718     - !dl_se->dl_boosted)){
1719     + !is_dl_boosted(dl_se))) {
1720     update_dl_revised_wakeup(dl_se, rq);
1721     return;
1722     }
1723    
1724     - dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1725     - dl_se->runtime = pi_se->dl_runtime;
1726     + dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
1727     + dl_se->runtime = pi_of(dl_se)->dl_runtime;
1728     }
1729     }
1730    
1731     @@ -997,7 +1016,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1732     * The task might have been boosted by someone else and might be in the
1733     * boosting/deboosting path, it's not throttled.
1734     */
1735     - if (dl_se->dl_boosted)
1736     + if (is_dl_boosted(dl_se))
1737     goto unlock;
1738    
1739     /*
1740     @@ -1025,7 +1044,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1741     * but do not enqueue -- wait for our wakeup to do that.
1742     */
1743     if (!task_on_rq_queued(p)) {
1744     - replenish_dl_entity(dl_se, dl_se);
1745     + replenish_dl_entity(dl_se);
1746     goto unlock;
1747     }
1748    
1749     @@ -1115,7 +1134,7 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1750    
1751     if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1752     dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1753     - if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1754     + if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
1755     return;
1756     dl_se->dl_throttled = 1;
1757     if (dl_se->runtime > 0)
1758     @@ -1246,7 +1265,7 @@ throttle:
1759     dl_se->dl_overrun = 1;
1760    
1761     __dequeue_task_dl(rq, curr, 0);
1762     - if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1763     + if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
1764     enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1765    
1766     if (!is_leftmost(curr, &rq->dl))
1767     @@ -1440,8 +1459,7 @@ static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1768     }
1769    
1770     static void
1771     -enqueue_dl_entity(struct sched_dl_entity *dl_se,
1772     - struct sched_dl_entity *pi_se, int flags)
1773     +enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1774     {
1775     BUG_ON(on_dl_rq(dl_se));
1776    
1777     @@ -1452,9 +1470,9 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
1778     */
1779     if (flags & ENQUEUE_WAKEUP) {
1780     task_contending(dl_se, flags);
1781     - update_dl_entity(dl_se, pi_se);
1782     + update_dl_entity(dl_se);
1783     } else if (flags & ENQUEUE_REPLENISH) {
1784     - replenish_dl_entity(dl_se, pi_se);
1785     + replenish_dl_entity(dl_se);
1786     } else if ((flags & ENQUEUE_RESTORE) &&
1787     dl_time_before(dl_se->deadline,
1788     rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1789     @@ -1471,28 +1489,40 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1790    
1791     static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1792     {
1793     - struct task_struct *pi_task = rt_mutex_get_top_task(p);
1794     - struct sched_dl_entity *pi_se = &p->dl;
1795     -
1796     - /*
1797     - * Use the scheduling parameters of the top pi-waiter task if:
1798     - * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1799     - * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1800     - * smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1801     - * boosted due to a SCHED_DEADLINE pi-waiter).
1802     - * Otherwise we keep our runtime and deadline.
1803     - */
1804     - if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1805     - pi_se = &pi_task->dl;
1806     + if (is_dl_boosted(&p->dl)) {
1807     + /*
1808     + * Because of delays in the detection of the overrun of a
1809     + * thread's runtime, it might be the case that a thread
1810     + * goes to sleep in a rt mutex with negative runtime. As
1811     + * goes to sleep on an rt mutex with negative runtime. As
1812     + *
1813     + * While waiting for the mutex, this thread can also be
1814     + * boosted via PI, resulting in a thread that is throttled
1815     + * and boosted at the same time.
1816     + *
1817     + * In this case, the boost overrides the throttle.
1818     + */
1819     + if (p->dl.dl_throttled) {
1820     + /*
1821     + * The replenish timer needs to be canceled. No
1822     + * problem if it fires concurrently: boosted threads
1823     + * are ignored in dl_task_timer().
1824     + */
1825     + hrtimer_try_to_cancel(&p->dl.dl_timer);
1826     + p->dl.dl_throttled = 0;
1827     + }
1828     } else if (!dl_prio(p->normal_prio)) {
1829     /*
1830     - * Special case in which we have a !SCHED_DEADLINE task
1831     - * that is going to be deboosted, but exceeds its
1832     - * runtime while doing so. No point in replenishing
1833     - * it, as it's going to return back to its original
1834     - * scheduling class after this.
1835     + * Special case in which we have a !SCHED_DEADLINE task that is going
1836     + * to be deboosted, but exceeds its runtime while doing so. No point in
1837     + * replenishing it, as it's going to return to its original
1838     + * scheduling class after this. If it has been throttled, we need to
1839     + * clear the flag, otherwise the task may wake up as throttled after
1840     + * being boosted again with no means to replenish the runtime and clear
1841     + * the throttle.
1842     */
1843     - BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1844     + p->dl.dl_throttled = 0;
1845     + BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
1846     return;
1847     }
1848    
1849     @@ -1529,7 +1559,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1850     return;
1851     }
1852    
1853     - enqueue_dl_entity(&p->dl, pi_se, flags);
1854     + enqueue_dl_entity(&p->dl, flags);
1855    
1856     if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1857     enqueue_pushable_dl_task(rq, p);
1858     @@ -2698,11 +2728,14 @@ void __dl_clear_params(struct task_struct *p)
1859     dl_se->dl_bw = 0;
1860     dl_se->dl_density = 0;
1861    
1862     - dl_se->dl_boosted = 0;
1863     dl_se->dl_throttled = 0;
1864     dl_se->dl_yielded = 0;
1865     dl_se->dl_non_contending = 0;
1866     dl_se->dl_overrun = 0;
1867     +
1868     +#ifdef CONFIG_RT_MUTEXES
1869     + dl_se->pi_se = dl_se;
1870     +#endif
1871     }
1872    
1873     bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
1874     diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
1875     index 34b76895b81e8..189eed03e4e34 100644
1876     --- a/kernel/sys_ni.c
1877     +++ b/kernel/sys_ni.c
1878     @@ -268,6 +268,7 @@ COND_SYSCALL_COMPAT(keyctl);
1879    
1880     /* mm/fadvise.c */
1881     COND_SYSCALL(fadvise64_64);
1882     +COND_SYSCALL_COMPAT(fadvise64_64);
1883    
1884     /* mm/, CONFIG_MMU only */
1885     COND_SYSCALL(swapon);
1886     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1887     index 7719d444bda12..44f1469af842b 100644
1888     --- a/kernel/trace/ftrace.c
1889     +++ b/kernel/trace/ftrace.c
1890     @@ -2732,6 +2732,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
1891    
1892     ftrace_startup_enable(command);
1893    
1894     + /*
1895     + * If ftrace is in an undefined state, we just remove ops from the list
1896     + * to prevent a NULL pointer dereference, instead of rolling it back fully
1897     + * and freeing the trampoline, because those actions could cause further damage.
1898     + */
1899     + if (unlikely(ftrace_disabled)) {
1900     + __unregister_ftrace_function(ops);
1901     + return -ENODEV;
1902     + }
1903     +
1904     ops->flags &= ~FTRACE_OPS_FL_ADDING;
1905    
1906     return 0;
1907     diff --git a/lib/ratelimit.c b/lib/ratelimit.c
1908     index e01a93f46f833..ce945c17980b9 100644
1909     --- a/lib/ratelimit.c
1910     +++ b/lib/ratelimit.c
1911     @@ -26,10 +26,16 @@
1912     */
1913     int ___ratelimit(struct ratelimit_state *rs, const char *func)
1914     {
1915     + /* Paired with WRITE_ONCE() in .proc_handler().
1916     + * Changing two values separately could be inconsistent
1917     + * and some messages could be lost. (See: net_ratelimit_state).
1918     + */
1919     + int interval = READ_ONCE(rs->interval);
1920     + int burst = READ_ONCE(rs->burst);
1921     unsigned long flags;
1922     int ret;
1923    
1924     - if (!rs->interval)
1925     + if (!interval)
1926     return 1;
1927    
1928     /*
1929     @@ -44,7 +50,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
1930     if (!rs->begin)
1931     rs->begin = jiffies;
1932    
1933     - if (time_is_before_jiffies(rs->begin + rs->interval)) {
1934     + if (time_is_before_jiffies(rs->begin + interval)) {
1935     if (rs->missed) {
1936     if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
1937     printk_deferred(KERN_WARNING
1938     @@ -56,7 +62,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
1939     rs->begin = jiffies;
1940     rs->printed = 0;
1941     }
1942     - if (rs->burst && rs->burst > rs->printed) {
1943     + if (burst && burst > rs->printed) {
1944     rs->printed++;
1945     ret = 1;
1946     } else {
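Snapshotting interval and burst once at entry means every later test in
___ratelimit() sees one consistent pair even while the sysctl handler
rewrites both fields. A userspace sketch of the snapshot idiom, with C11
atomics standing in for READ_ONCE()/WRITE_ONCE() and a pared-down state
struct:

    #include <stdatomic.h>
    #include <stdio.h>

    struct ratelimit_state {
            _Atomic int interval;
            _Atomic int burst;
            int printed;
    };

    static int ratelimit(struct ratelimit_state *rs)
    {
            /* Read each knob exactly once; re-reading later could mix
             * values from before and after a concurrent update. */
            int interval = atomic_load_explicit(&rs->interval,
                                                memory_order_relaxed);
            int burst = atomic_load_explicit(&rs->burst,
                                             memory_order_relaxed);

            if (!interval)
                    return 1;
            if (burst && burst > rs->printed) {
                    rs->printed++;
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            struct ratelimit_state rs = { 5, 2, 0 };

            for (int i = 0; i < 4; i++)
                    printf("allowed: %d\n", ratelimit(&rs));
            return 0;
    }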
1947     diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
1948     index 45f57fd2db649..5667fb746a1fe 100644
1949     --- a/lib/vdso/gettimeofday.c
1950     +++ b/lib/vdso/gettimeofday.c
1951     @@ -38,7 +38,7 @@ u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
1952     }
1953     #endif
1954    
1955     -static int do_hres(const struct vdso_data *vd, clockid_t clk,
1956     +static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
1957     struct __kernel_timespec *ts)
1958     {
1959     const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
1960     @@ -68,8 +68,8 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
1961     return 0;
1962     }
1963    
1964     -static void do_coarse(const struct vdso_data *vd, clockid_t clk,
1965     - struct __kernel_timespec *ts)
1966     +static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
1967     + struct __kernel_timespec *ts)
1968     {
1969     const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
1970     u32 seq;
1971     @@ -79,6 +79,8 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk,
1972     ts->tv_sec = vdso_ts->sec;
1973     ts->tv_nsec = vdso_ts->nsec;
1974     } while (unlikely(vdso_read_retry(vd, seq)));
1975     +
1976     + return 0;
1977     }
1978    
1979     static __maybe_unused int
1980     @@ -96,15 +98,16 @@ __cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
1981     * clocks are handled in the VDSO directly.
1982     */
1983     msk = 1U << clock;
1984     - if (likely(msk & VDSO_HRES)) {
1985     - return do_hres(&vd[CS_HRES_COARSE], clock, ts);
1986     - } else if (msk & VDSO_COARSE) {
1987     - do_coarse(&vd[CS_HRES_COARSE], clock, ts);
1988     - return 0;
1989     - } else if (msk & VDSO_RAW) {
1990     - return do_hres(&vd[CS_RAW], clock, ts);
1991     - }
1992     - return -1;
1993     + if (likely(msk & VDSO_HRES))
1994     + vd = &vd[CS_HRES_COARSE];
1995     + else if (msk & VDSO_COARSE)
1996     + return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
1997     + else if (msk & VDSO_RAW)
1998     + vd = &vd[CS_RAW];
1999     + else
2000     + return -1;
2001     +
2002     + return do_hres(vd, clock, ts);
2003     }
2004    
2005     static __maybe_unused int
2006     diff --git a/mm/mmap.c b/mm/mmap.c
2007     index 8873ef114d280..e8cf6f88933c3 100644
2008     --- a/mm/mmap.c
2009     +++ b/mm/mmap.c
2010     @@ -1679,8 +1679,12 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
2011     pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
2012     return 0;
2013    
2014     - /* Do we need to track softdirty? */
2015     - if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
2016     + /*
2017     + * Do we need to track softdirty? hugetlb does not support softdirty
2018     + * tracking yet.
2019     + */
2020     + if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY) &&
2021     + !is_vm_hugetlb_page(vma))
2022     return 1;
2023    
2024     /* Specialty mapping? */
2025     @@ -2606,6 +2610,18 @@ static void unmap_region(struct mm_struct *mm,
2026     tlb_gather_mmu(&tlb, mm, start, end);
2027     update_hiwater_rss(mm);
2028     unmap_vmas(&tlb, vma, start, end);
2029     +
2030     + /*
2031     + * Ensure we have no stale TLB entries by the time this mapping is
2032     + * removed from the rmap.
2033     + * Note that we don't have to worry about nested flushes here because
2034     + * we're holding the mm semaphore for removing the mapping - so any
2035     + * concurrent flush in this region has to be coming through the rmap,
2036     + * and we synchronize against that using the rmap lock.
2037     + */
2038     + if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0)
2039     + tlb_flush_mmu(&tlb);
2040     +
2041     free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2042     next ? next->vm_start : USER_PGTABLES_CEILING);
2043     tlb_finish_mmu(&tlb, start, end);
2044     diff --git a/mm/rmap.c b/mm/rmap.c
2045     index 6d80e92688fe7..c64da910bb731 100644
2046     --- a/mm/rmap.c
2047     +++ b/mm/rmap.c
2048     @@ -83,7 +83,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
2049     anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
2050     if (anon_vma) {
2051     atomic_set(&anon_vma->refcount, 1);
2052     - anon_vma->degree = 1; /* Reference for first vma */
2053     + anon_vma->num_children = 0;
2054     + anon_vma->num_active_vmas = 0;
2055     anon_vma->parent = anon_vma;
2056     /*
2057     * Initialise the anon_vma root to point to itself. If called
2058     @@ -191,6 +192,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
2059     anon_vma = anon_vma_alloc();
2060     if (unlikely(!anon_vma))
2061     goto out_enomem_free_avc;
2062     + anon_vma->num_children++; /* self-parent link for new root */
2063     allocated = anon_vma;
2064     }
2065    
2066     @@ -200,8 +202,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
2067     if (likely(!vma->anon_vma)) {
2068     vma->anon_vma = anon_vma;
2069     anon_vma_chain_link(vma, avc, anon_vma);
2070     - /* vma reference or self-parent link for new root */
2071     - anon_vma->degree++;
2072     + anon_vma->num_active_vmas++;
2073     allocated = NULL;
2074     avc = NULL;
2075     }
2076     @@ -280,19 +281,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
2077     anon_vma_chain_link(dst, avc, anon_vma);
2078    
2079     /*
2080     - * Reuse existing anon_vma if its degree lower than two,
2081     - * that means it has no vma and only one anon_vma child.
2082     + * Reuse an existing anon_vma if it has no vma and only one
2083     + * anon_vma child.
2084     *
2085     - * Do not chose parent anon_vma, otherwise first child
2086     - * will always reuse it. Root anon_vma is never reused:
2087     + * Root anon_vma is never reused:
2088     * it has a self-parent reference and at least one child.
2089     */
2090     - if (!dst->anon_vma && anon_vma != src->anon_vma &&
2091     - anon_vma->degree < 2)
2092     + if (!dst->anon_vma &&
2093     + anon_vma->num_children < 2 &&
2094     + anon_vma->num_active_vmas == 0)
2095     dst->anon_vma = anon_vma;
2096     }
2097     if (dst->anon_vma)
2098     - dst->anon_vma->degree++;
2099     + dst->anon_vma->num_active_vmas++;
2100     unlock_anon_vma_root(root);
2101     return 0;
2102    
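Splitting the overloaded degree counter lets anon_vma_clone() state the
reuse condition directly: adopt an existing anon_vma only when no VMA
currently uses it and it has fewer than two children. A sketch of the test
with an illustrative struct:

    #include <stdbool.h>
    #include <stdio.h>

    struct anon_vma {
            unsigned long num_children;    /* anon_vmas with ->parent == this */
            unsigned long num_active_vmas; /* VMAs with ->anon_vma == this */
    };

    static bool reusable(const struct anon_vma *av)
    {
            return av->num_active_vmas == 0 && av->num_children < 2;
    }

    int main(void)
    {
            struct anon_vma idle = { 1, 0 };  /* one child, no VMAs */
            struct anon_vma busy = { 1, 1 };  /* still used by a VMA */

            printf("idle: %d  busy: %d\n", reusable(&idle), reusable(&busy));
            return 0;
    }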
2103     @@ -342,6 +343,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
2104     anon_vma = anon_vma_alloc();
2105     if (!anon_vma)
2106     goto out_error;
2107     + anon_vma->num_active_vmas++;
2108     avc = anon_vma_chain_alloc(GFP_KERNEL);
2109     if (!avc)
2110     goto out_error_free_anon_vma;
2111     @@ -362,7 +364,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
2112     vma->anon_vma = anon_vma;
2113     anon_vma_lock_write(anon_vma);
2114     anon_vma_chain_link(vma, avc, anon_vma);
2115     - anon_vma->parent->degree++;
2116     + anon_vma->parent->num_children++;
2117     anon_vma_unlock_write(anon_vma);
2118    
2119     return 0;
2120     @@ -394,7 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
2121     * to free them outside the lock.
2122     */
2123     if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
2124     - anon_vma->parent->degree--;
2125     + anon_vma->parent->num_children--;
2126     continue;
2127     }
2128    
2129     @@ -402,7 +404,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
2130     anon_vma_chain_free(avc);
2131     }
2132     if (vma->anon_vma)
2133     - vma->anon_vma->degree--;
2134     + vma->anon_vma->num_active_vmas--;
2135     unlock_anon_vma_root(root);
2136    
2137     /*
2138     @@ -413,7 +415,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
2139     list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
2140     struct anon_vma *anon_vma = avc->anon_vma;
2141    
2142     - VM_WARN_ON(anon_vma->degree);
2143     + VM_WARN_ON(anon_vma->num_children);
2144     + VM_WARN_ON(anon_vma->num_active_vmas);
2145     put_anon_vma(anon_vma);
2146    
2147     list_del(&avc->same_vma);
2148     diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
2149     index 7aa64f300422e..3682d2e1cd7d2 100644
2150     --- a/net/bluetooth/l2cap_core.c
2151     +++ b/net/bluetooth/l2cap_core.c
2152     @@ -1835,11 +1835,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
2153     src_match = !bacmp(&c->src, src);
2154     dst_match = !bacmp(&c->dst, dst);
2155     if (src_match && dst_match) {
2156     - c = l2cap_chan_hold_unless_zero(c);
2157     - if (c) {
2158     - read_unlock(&chan_list_lock);
2159     - return c;
2160     - }
2161     + if (!l2cap_chan_hold_unless_zero(c))
2162     + continue;
2163     +
2164     + read_unlock(&chan_list_lock);
2165     + return c;
2166     }
2167    
2168     /* Closest match */
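Rather than giving up when a matching channel is already being torn down,
the loop now skips it and keeps scanning. l2cap_chan_hold_unless_zero()
follows the usual "get unless zero" refcount idiom, sketched here with C11
atomics and a hypothetical standalone type:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct chan { _Atomic int refcnt; };

    static bool hold_unless_zero(struct chan *c)
    {
            int old = atomic_load(&c->refcnt);

            do {
                    if (old == 0)
                            return false;  /* dying: don't resurrect it */
            } while (!atomic_compare_exchange_weak(&c->refcnt, &old, old + 1));

            return true;
    }

    int main(void)
    {
            struct chan live = { 1 }, dying = { 0 };

            printf("live: %d  dying: %d\n",
                   hold_unless_zero(&live), hold_unless_zero(&dying));
            return 0;
    }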
2169     diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
2170     index 1153bbcdff721..5e6428cbd7580 100644
2171     --- a/net/bpf/test_run.c
2172     +++ b/net/bpf/test_run.c
2173     @@ -200,6 +200,9 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
2174     {
2175     struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
2176    
2177     + if (!skb->len)
2178     + return -EINVAL;
2179     +
2180     if (!__skb)
2181     return 0;
2182    
2183     diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
2184     index 32bc2821027f3..57f91efce0f73 100644
2185     --- a/net/bridge/netfilter/ebtable_broute.c
2186     +++ b/net/bridge/netfilter/ebtable_broute.c
2187     @@ -36,18 +36,10 @@ static struct ebt_replace_kernel initial_table = {
2188     .entries = (char *)&initial_chain,
2189     };
2190    
2191     -static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
2192     -{
2193     - if (valid_hooks & ~(1 << NF_BR_BROUTING))
2194     - return -EINVAL;
2195     - return 0;
2196     -}
2197     -
2198     static const struct ebt_table broute_table = {
2199     .name = "broute",
2200     .table = &initial_table,
2201     .valid_hooks = 1 << NF_BR_BROUTING,
2202     - .check = check,
2203     .me = THIS_MODULE,
2204     };
2205    
2206     diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
2207     index bcf982e12f16b..7f2e620f4978f 100644
2208     --- a/net/bridge/netfilter/ebtable_filter.c
2209     +++ b/net/bridge/netfilter/ebtable_filter.c
2210     @@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
2211     .entries = (char *)initial_chains,
2212     };
2213    
2214     -static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
2215     -{
2216     - if (valid_hooks & ~FILTER_VALID_HOOKS)
2217     - return -EINVAL;
2218     - return 0;
2219     -}
2220     -
2221     static const struct ebt_table frame_filter = {
2222     .name = "filter",
2223     .table = &initial_table,
2224     .valid_hooks = FILTER_VALID_HOOKS,
2225     - .check = check,
2226     .me = THIS_MODULE,
2227     };
2228    
2229     diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
2230     index 0d092773f8161..1743a105485c4 100644
2231     --- a/net/bridge/netfilter/ebtable_nat.c
2232     +++ b/net/bridge/netfilter/ebtable_nat.c
2233     @@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
2234     .entries = (char *)initial_chains,
2235     };
2236    
2237     -static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
2238     -{
2239     - if (valid_hooks & ~NAT_VALID_HOOKS)
2240     - return -EINVAL;
2241     - return 0;
2242     -}
2243     -
2244     static const struct ebt_table frame_nat = {
2245     .name = "nat",
2246     .table = &initial_table,
2247     .valid_hooks = NAT_VALID_HOOKS,
2248     - .check = check,
2249     .me = THIS_MODULE,
2250     };
2251    
2252     diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2253     index d9375c52f50e6..ddb988c339c17 100644
2254     --- a/net/bridge/netfilter/ebtables.c
2255     +++ b/net/bridge/netfilter/ebtables.c
2256     @@ -999,8 +999,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
2257     goto free_iterate;
2258     }
2259    
2260     - /* the table doesn't like it */
2261     - if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
2262     + if (repl->valid_hooks != t->valid_hooks)
2263     goto free_unlock;
2264    
2265     if (repl->num_counters && repl->num_counters != t->private->nentries) {
2266     @@ -1193,11 +1192,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
2267     if (ret != 0)
2268     goto free_chainstack;
2269    
2270     - if (table->check && table->check(newinfo, table->valid_hooks)) {
2271     - ret = -EINVAL;
2272     - goto free_chainstack;
2273     - }
2274     -
2275     table->private = newinfo;
2276     rwlock_init(&table->lock);
2277     mutex_lock(&ebt_mutex);
2278     diff --git a/net/core/dev.c b/net/core/dev.c
2279     index a03036456221b..84bc6d0e8560b 100644
2280     --- a/net/core/dev.c
2281     +++ b/net/core/dev.c
2282     @@ -3712,6 +3712,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
2283     bool again = false;
2284    
2285     skb_reset_mac_header(skb);
2286     + skb_assert_len(skb);
2287    
2288     if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
2289     __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
2290     @@ -4411,7 +4412,7 @@ static int netif_rx_internal(struct sk_buff *skb)
2291     {
2292     int ret;
2293    
2294     - net_timestamp_check(netdev_tstamp_prequeue, skb);
2295     + net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
2296    
2297     trace_netif_rx(skb);
2298    
2299     @@ -4753,7 +4754,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
2300     int ret = NET_RX_DROP;
2301     __be16 type;
2302    
2303     - net_timestamp_check(!netdev_tstamp_prequeue, skb);
2304     + net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
2305    
2306     trace_netif_receive_skb(skb);
2307    
2308     @@ -5135,7 +5136,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
2309     {
2310     int ret;
2311    
2312     - net_timestamp_check(netdev_tstamp_prequeue, skb);
2313     + net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
2314    
2315     if (skb_defer_rx_timestamp(skb))
2316     return NET_RX_SUCCESS;
2317     @@ -5165,7 +5166,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
2318    
2319     INIT_LIST_HEAD(&sublist);
2320     list_for_each_entry_safe(skb, next, head, list) {
2321     - net_timestamp_check(netdev_tstamp_prequeue, skb);
2322     + net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
2323     skb_list_del_init(skb);
2324     if (!skb_defer_rx_timestamp(skb))
2325     list_add_tail(&skb->list, &sublist);
2326     @@ -5892,7 +5893,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
2327     net_rps_action_and_irq_enable(sd);
2328     }
2329    
2330     - napi->weight = dev_rx_weight;
2331     + napi->weight = READ_ONCE(dev_rx_weight);
2332     while (again) {
2333     struct sk_buff *skb;
2334    
2335     @@ -6393,8 +6394,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
2336     {
2337     struct softnet_data *sd = this_cpu_ptr(&softnet_data);
2338     unsigned long time_limit = jiffies +
2339     - usecs_to_jiffies(netdev_budget_usecs);
2340     - int budget = netdev_budget;
2341     + usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
2342     + int budget = READ_ONCE(netdev_budget);
2343     LIST_HEAD(list);
2344     LIST_HEAD(repoll);
2345    
2346     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2347     index 8b6140e67e7f8..aa81aead0a654 100644
2348     --- a/net/core/neighbour.c
2349     +++ b/net/core/neighbour.c
2350     @@ -280,11 +280,26 @@ static int neigh_del_timer(struct neighbour *n)
2351     return 0;
2352     }
2353    
2354     -static void pneigh_queue_purge(struct sk_buff_head *list)
2355     +static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
2356     {
2357     + struct sk_buff_head tmp;
2358     + unsigned long flags;
2359     struct sk_buff *skb;
2360    
2361     - while ((skb = skb_dequeue(list)) != NULL) {
2362     + skb_queue_head_init(&tmp);
2363     + spin_lock_irqsave(&list->lock, flags);
2364     + skb = skb_peek(list);
2365     + while (skb != NULL) {
2366     + struct sk_buff *skb_next = skb_peek_next(skb, list);
2367     + if (net == NULL || net_eq(dev_net(skb->dev), net)) {
2368     + __skb_unlink(skb, list);
2369     + __skb_queue_tail(&tmp, skb);
2370     + }
2371     + skb = skb_next;
2372     + }
2373     + spin_unlock_irqrestore(&list->lock, flags);
2374     +
2375     + while ((skb = __skb_dequeue(&tmp))) {
2376     dev_put(skb->dev);
2377     kfree_skb(skb);
2378     }
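The rewritten pneigh_queue_purge() unlinks matching skbs onto a private list
while holding the queue lock, then drops the lock before calling dev_put()
and kfree_skb(), and can now filter by network namespace. A userspace sketch
of the unlink-under-lock, free-outside pattern, with a pthread mutex
standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { int owner; struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *queue;

    static void purge(int owner)  /* owner < 0 means purge everything */
    {
            struct node *tmp = NULL, **pp, *n;

            pthread_mutex_lock(&lock);
            for (pp = &queue; (n = *pp) != NULL; ) {
                    if (owner < 0 || n->owner == owner) {
                            *pp = n->next;  /* unlink under the lock */
                            n->next = tmp;
                            tmp = n;
                    } else {
                            pp = &n->next;
                    }
            }
            pthread_mutex_unlock(&lock);

            while ((n = tmp)) {  /* free without the lock held */
                    tmp = n->next;
                    free(n);
            }
    }

    int main(void)
    {
            for (int i = 0; i < 6; i++) {
                    struct node *n = malloc(sizeof(*n));

                    n->owner = i % 2;
                    n->next = queue;
                    queue = n;
            }
            purge(1);

            int left = 0;
            for (struct node *n = queue; n; n = n->next)
                    left++;
            printf("remaining: %d\n", left);
            return 0;
    }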
2379     @@ -358,9 +373,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
2380     write_lock_bh(&tbl->lock);
2381     neigh_flush_dev(tbl, dev, skip_perm);
2382     pneigh_ifdown_and_unlock(tbl, dev);
2383     -
2384     - del_timer_sync(&tbl->proxy_timer);
2385     - pneigh_queue_purge(&tbl->proxy_queue);
2386     + pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
2387     + if (skb_queue_empty_lockless(&tbl->proxy_queue))
2388     + del_timer_sync(&tbl->proxy_timer);
2389     return 0;
2390     }
2391    
2392     @@ -1741,7 +1756,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
2393     /* It is not clean... Fix it to unload IPv6 module safely */
2394     cancel_delayed_work_sync(&tbl->gc_work);
2395     del_timer_sync(&tbl->proxy_timer);
2396     - pneigh_queue_purge(&tbl->proxy_queue);
2397     + pneigh_queue_purge(&tbl->proxy_queue, NULL);
2398     neigh_ifdown(tbl, NULL);
2399     if (atomic_read(&tbl->entries))
2400     pr_crit("neighbour leakage\n");
2401     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2402     index 5bdb3cd20d619..c9fe2c0b8cae3 100644
2403     --- a/net/core/skbuff.c
2404     +++ b/net/core/skbuff.c
2405     @@ -4564,7 +4564,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
2406     {
2407     bool ret;
2408    
2409     - if (likely(sysctl_tstamp_allow_data || tsonly))
2410     + if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
2411     return true;
2412    
2413     read_lock_bh(&sk->sk_callback_lock);
2414     diff --git a/net/core/sock.c b/net/core/sock.c
2415     index c84f68bff7f58..a2b12a5cf42bc 100644
2416     --- a/net/core/sock.c
2417     +++ b/net/core/sock.c
2418     @@ -2946,7 +2946,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2419    
2420     #ifdef CONFIG_NET_RX_BUSY_POLL
2421     sk->sk_napi_id = 0;
2422     - sk->sk_ll_usec = sysctl_net_busy_read;
2423     + sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read);
2424     #endif
2425    
2426     sk->sk_max_pacing_rate = ~0UL;
2427     diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
2428     index 48041f50ecfb4..586598887095d 100644
2429     --- a/net/core/sysctl_net_core.c
2430     +++ b/net/core/sysctl_net_core.c
2431     @@ -238,14 +238,17 @@ static int set_default_qdisc(struct ctl_table *table, int write,
2432     static int proc_do_dev_weight(struct ctl_table *table, int write,
2433     void __user *buffer, size_t *lenp, loff_t *ppos)
2434     {
2435     - int ret;
2436     + static DEFINE_MUTEX(dev_weight_mutex);
2437     + int ret, weight;
2438    
2439     + mutex_lock(&dev_weight_mutex);
2440     ret = proc_dointvec(table, write, buffer, lenp, ppos);
2441     - if (ret != 0)
2442     - return ret;
2443     -
2444     - dev_rx_weight = weight_p * dev_weight_rx_bias;
2445     - dev_tx_weight = weight_p * dev_weight_tx_bias;
2446     + if (!ret && write) {
2447     + weight = READ_ONCE(weight_p);
2448     + WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
2449     + WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
2450     + }
2451     + mutex_unlock(&dev_weight_mutex);
2452    
2453     return ret;
2454     }
2455     diff --git a/net/key/af_key.c b/net/key/af_key.c
2456     index 32fe99cd01fc8..c06cc48c68c90 100644
2457     --- a/net/key/af_key.c
2458     +++ b/net/key/af_key.c
2459     @@ -1701,9 +1701,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
2460     pfk->registered |= (1<<hdr->sadb_msg_satype);
2461     }
2462    
2463     + mutex_lock(&pfkey_mutex);
2464     xfrm_probe_algs();
2465    
2466     supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
2467     + mutex_unlock(&pfkey_mutex);
2468     +
2469     if (!supp_skb) {
2470     if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
2471     pfk->registered &= ~(1<<hdr->sadb_msg_satype);
2472     diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
2473     index ef72819d9d315..d569915da003c 100644
2474     --- a/net/netfilter/Kconfig
2475     +++ b/net/netfilter/Kconfig
2476     @@ -118,7 +118,6 @@ config NF_CONNTRACK_ZONES
2477    
2478     config NF_CONNTRACK_PROCFS
2479     bool "Supply CT list in procfs (OBSOLETE)"
2480     - default y
2481     depends on PROC_FS
2482     ---help---
2483     This option enables for the list of known conntrack entries
2484     diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
2485     index 4911f8eb394ff..d966a3aff1d33 100644
2486     --- a/net/netfilter/nft_osf.c
2487     +++ b/net/netfilter/nft_osf.c
2488     @@ -115,9 +115,21 @@ static int nft_osf_validate(const struct nft_ctx *ctx,
2489     const struct nft_expr *expr,
2490     const struct nft_data **data)
2491     {
2492     - return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
2493     - (1 << NF_INET_PRE_ROUTING) |
2494     - (1 << NF_INET_FORWARD));
2495     + unsigned int hooks;
2496     +
2497     + switch (ctx->family) {
2498     + case NFPROTO_IPV4:
2499     + case NFPROTO_IPV6:
2500     + case NFPROTO_INET:
2501     + hooks = (1 << NF_INET_LOCAL_IN) |
2502     + (1 << NF_INET_PRE_ROUTING) |
2503     + (1 << NF_INET_FORWARD);
2504     + break;
2505     + default:
2506     + return -EOPNOTSUPP;
2507     + }
2508     +
2509     + return nft_chain_validate_hooks(ctx->chain, hooks);
2510     }
2511    
2512     static struct nft_expr_type nft_osf_type;
2513     diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
2514     index cf0512fc648e7..6ed6ccef5e1ad 100644
2515     --- a/net/netfilter/nft_payload.c
2516     +++ b/net/netfilter/nft_payload.c
2517     @@ -558,6 +558,8 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
2518     const struct nlattr * const tb[])
2519     {
2520     struct nft_payload_set *priv = nft_expr_priv(expr);
2521     + u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
2522     + int err;
2523    
2524     priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
2525     priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
2526     @@ -565,11 +567,15 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
2527     priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
2528    
2529     if (tb[NFTA_PAYLOAD_CSUM_TYPE])
2530     - priv->csum_type =
2531     - ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
2532     - if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
2533     - priv->csum_offset =
2534     - ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
2535     + csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
2536     + if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
2537     + err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
2538     + &csum_offset);
2539     + if (err < 0)
2540     + return err;
2541     +
2542     + priv->csum_offset = csum_offset;
2543     + }
2544     if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
2545     u32 flags;
2546    
2547     @@ -580,13 +586,14 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
2548     priv->csum_flags = flags;
2549     }
2550    
2551     - switch (priv->csum_type) {
2552     + switch (csum_type) {
2553     case NFT_PAYLOAD_CSUM_NONE:
2554     case NFT_PAYLOAD_CSUM_INET:
2555     break;
2556     default:
2557     return -EOPNOTSUPP;
2558     }
2559     + priv->csum_type = csum_type;
2560    
2561     return nft_validate_register_load(priv->sreg, priv->len);
2562     }
2563     @@ -624,6 +631,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
2564     {
2565     enum nft_payload_bases base;
2566     unsigned int offset, len;
2567     + int err;
2568    
2569     if (tb[NFTA_PAYLOAD_BASE] == NULL ||
2570     tb[NFTA_PAYLOAD_OFFSET] == NULL ||
2571     @@ -649,8 +657,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
2572     if (tb[NFTA_PAYLOAD_DREG] == NULL)
2573     return ERR_PTR(-EINVAL);
2574    
2575     - offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
2576     - len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
2577     + err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
2578     + if (err < 0)
2579     + return ERR_PTR(err);
2580     +
2581     + err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
2582     + if (err < 0)
2583     + return ERR_PTR(err);
2584    
2585     if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
2586     base != NFT_PAYLOAD_LL_HEADER)
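Both nft_payload init paths now parse the netlink attribute into a wide
local via nft_parse_u32_check() and bound it by U8_MAX before storing into
the narrow struct fields, so oversized values are rejected instead of being
silently truncated. A sketch of the validate-then-narrow pattern, where
parse_u32_check() is a simplified stand-in for the kernel helper:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    struct priv { uint8_t csum_offset; };

    static int parse_u32_check(uint32_t val, uint32_t max, uint32_t *dest)
    {
            if (val > max)
                    return -ERANGE;
            *dest = val;
            return 0;
    }

    int main(void)
    {
            struct priv p;
            uint32_t off;

            if (parse_u32_check(300, UINT8_MAX, &off) < 0) {
                    /* the old code would have stored 300 & 0xff == 44 */
                    printf("rejected out-of-range offset\n");
                    return 1;
            }
            p.csum_offset = (uint8_t)off;
            printf("offset %u\n", p.csum_offset);
            return 0;
    }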
2587     diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
2588     index 1effd4878619f..4e850c81ad8d8 100644
2589     --- a/net/netfilter/nft_tunnel.c
2590     +++ b/net/netfilter/nft_tunnel.c
2591     @@ -134,6 +134,7 @@ static const struct nft_expr_ops nft_tunnel_get_ops = {
2592    
2593     static struct nft_expr_type nft_tunnel_type __read_mostly = {
2594     .name = "tunnel",
2595     + .family = NFPROTO_NETDEV,
2596     .ops = &nft_tunnel_get_ops,
2597     .policy = nft_tunnel_policy,
2598     .maxattr = NFTA_TUNNEL_MAX,
2599     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2600     index a2696acbcd9d2..8f5ef28411992 100644
2601     --- a/net/packet/af_packet.c
2602     +++ b/net/packet/af_packet.c
2603     @@ -2960,8 +2960,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2604     if (err)
2605     goto out_free;
2606    
2607     - if (sock->type == SOCK_RAW &&
2608     - !dev_validate_header(dev, skb->data, len)) {
2609     + if ((sock->type == SOCK_RAW &&
2610     + !dev_validate_header(dev, skb->data, len)) || !skb->len) {
2611     err = -EINVAL;
2612     goto out_free;
2613     }
2614     diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
2615     index 11c45c8c6c164..036d92c0ad794 100644
2616     --- a/net/rose/rose_loopback.c
2617     +++ b/net/rose/rose_loopback.c
2618     @@ -96,7 +96,8 @@ static void rose_loopback_timer(struct timer_list *unused)
2619     }
2620    
2621     if (frametype == ROSE_CALL_REQUEST) {
2622     - if (!rose_loopback_neigh->dev) {
2623     + if (!rose_loopback_neigh->dev &&
2624     + !rose_loopback_neigh->loopback) {
2625     kfree_skb(skb);
2626     continue;
2627     }
2628     diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
2629     index ae5847de94c88..81fcf6c5bde96 100644
2630     --- a/net/sched/sch_generic.c
2631     +++ b/net/sched/sch_generic.c
2632     @@ -403,7 +403,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
2633    
2634     void __qdisc_run(struct Qdisc *q)
2635     {
2636     - int quota = dev_tx_weight;
2637     + int quota = READ_ONCE(dev_tx_weight);
2638     int packets;
2639    
2640     while (qdisc_restart(q, &packets)) {
2641     diff --git a/net/socket.c b/net/socket.c
2642     index 94358566c9d10..02feaf5bd84a3 100644
2643     --- a/net/socket.c
2644     +++ b/net/socket.c
2645     @@ -1661,7 +1661,7 @@ int __sys_listen(int fd, int backlog)
2646    
2647     sock = sockfd_lookup_light(fd, &err, &fput_needed);
2648     if (sock) {
2649     - somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
2650     + somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
2651     if ((unsigned int)backlog > somaxconn)
2652     backlog = somaxconn;
2653    
2654     diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
2655     index 08e1ccc01e983..1893203cc94fc 100644
2656     --- a/net/sunrpc/clnt.c
2657     +++ b/net/sunrpc/clnt.c
2658     @@ -1896,7 +1896,7 @@ call_encode(struct rpc_task *task)
2659     break;
2660     case -EKEYEXPIRED:
2661     if (!task->tk_cred_retry) {
2662     - rpc_exit(task, task->tk_status);
2663     + rpc_call_rpcerror(task, task->tk_status);
2664     } else {
2665     task->tk_action = call_refresh;
2666     task->tk_cred_retry--;
2667     diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
2668     index 28a8cdef8e51f..6f58be5a17711 100644
2669     --- a/net/xfrm/xfrm_policy.c
2670     +++ b/net/xfrm/xfrm_policy.c
2671     @@ -3619,6 +3619,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2672     if (pols[1]) {
2673     if (IS_ERR(pols[1])) {
2674     XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2675     + xfrm_pol_put(pols[0]);
2676     return 0;
2677     }
2678     pols[1]->curlft.use_time = ktime_get_real_seconds();
2679     diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
2680     index 952fff4855467..2dde6e5e9e69f 100644
2681     --- a/scripts/Makefile.modpost
2682     +++ b/scripts/Makefile.modpost
2683     @@ -75,8 +75,7 @@ obj := $(KBUILD_EXTMOD)
2684     src := $(obj)
2685    
2686     # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
2687     -include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
2688     - $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
2689     +include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)
2690     endif
2691    
2692     MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - $(wildcard vmlinux)
2693     diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
2694     index 4b9a26caa2c2e..6cc29b58d6707 100644
2695     --- a/tools/testing/selftests/bpf/test_align.c
2696     +++ b/tools/testing/selftests/bpf/test_align.c
2697     @@ -475,10 +475,10 @@ static struct bpf_align_test tests[] = {
2698     */
2699     {7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
2700     /* Checked s>=0 */
2701     - {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
2702     + {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
2703     /* packet pointer + nonnegative (4n+2) */
2704     - {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
2705     - {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
2706     + {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
2707     + {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
2708     /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
2709     * We checked the bounds, but it might have been able
2710     * to overflow if the packet pointer started in the
2711     @@ -486,7 +486,7 @@ static struct bpf_align_test tests[] = {
2712     * So we did not get a 'range' on R6, and the access
2713     * attempt will fail.
2714     */
2715     - {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
2716     + {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
2717     }
2718     },
2719     {
2720     @@ -580,18 +580,18 @@ static struct bpf_align_test tests[] = {
2721     /* Adding 14 makes R6 be (4n+2) */
2722     {11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
2723     /* Subtracting from packet pointer overflows ubounds */
2724     - {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
2725     + {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
2726     /* New unknown value in R7 is (4n), >= 76 */
2727     {15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
2728     /* Adding it to packet pointer gives nice bounds again */
2729     - {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
2730     + {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
2731     /* At the time the word size load is performed from R5,
2732     * its total fixed offset is NET_IP_ALIGN + reg->off (0)
2733     * which is 2. Then the variable offset is (4n+2), so
2734     * the total offset is 4-byte aligned and meets the
2735     * load's requirements.
2736     */
2737     - {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
2738     + {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
2739     },
2740     },
2741     };