Contents of /trunk/kernel-alx-legacy/patches-4.9/0426-4.9.327-all-fixes.patch
Parent Directory | Revision Log
Revision 3728 -
(show annotations)
(download)
Mon Oct 24 14:08:38 2022 UTC (23 months ago) by niro
File size: 36089 byte(s)
-linux-4.9.327
1 | diff --git a/Documentation/hw-vuln/processor_mmio_stale_data.rst b/Documentation/hw-vuln/processor_mmio_stale_data.rst |
2 | index 9393c50b5afc9..c98fd11907cc8 100644 |
3 | --- a/Documentation/hw-vuln/processor_mmio_stale_data.rst |
4 | +++ b/Documentation/hw-vuln/processor_mmio_stale_data.rst |
5 | @@ -230,6 +230,20 @@ The possible values in this file are: |
6 | * - 'Mitigation: Clear CPU buffers' |
7 | - The processor is vulnerable and the CPU buffer clearing mitigation is |
8 | enabled. |
9 | + * - 'Unknown: No mitigations' |
10 | + - The processor vulnerability status is unknown because it is |
11 | + out of Servicing period. Mitigation is not attempted. |
12 | + |
13 | +Definitions: |
14 | +------------ |
15 | + |
16 | +Servicing period: The process of providing functional and security updates to |
17 | +Intel processors or platforms, utilizing the Intel Platform Update (IPU) |
18 | +process or other similar mechanisms. |
19 | + |
20 | +End of Servicing Updates (ESU): ESU is the date at which Intel will no |
21 | +longer provide Servicing, such as through IPU or other similar update |
22 | +processes. ESU dates will typically be aligned to end of quarter. |
23 | |
24 | If the processor is vulnerable then the following information is appended to |
25 | the above information: |
26 | diff --git a/Makefile b/Makefile |
27 | index e12236237dfad..df5f7e0d30393 100644 |
28 | --- a/Makefile |
29 | +++ b/Makefile |
30 | @@ -1,6 +1,6 @@ |
31 | VERSION = 4 |
32 | PATCHLEVEL = 9 |
33 | -SUBLEVEL = 326 |
34 | +SUBLEVEL = 327 |
35 | EXTRAVERSION = |
36 | NAME = Roaring Lionus |
37 | |
38 | diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h |
39 | index f4377b005cba9..c944253b3a4b6 100644 |
40 | --- a/arch/arm64/include/asm/mmu.h |
41 | +++ b/arch/arm64/include/asm/mmu.h |
42 | @@ -90,7 +90,7 @@ extern void init_mem_pgprot(void); |
43 | extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, |
44 | unsigned long virt, phys_addr_t size, |
45 | pgprot_t prot, bool allow_block_mappings); |
46 | -extern void *fixmap_remap_fdt(phys_addr_t dt_phys); |
47 | +extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot); |
48 | |
49 | #endif /* !__ASSEMBLY__ */ |
50 | #endif |
51 | diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c |
52 | index c9ca903462a68..6a9668f6e933f 100644 |
53 | --- a/arch/arm64/kernel/kaslr.c |
54 | +++ b/arch/arm64/kernel/kaslr.c |
55 | @@ -65,9 +65,6 @@ out: |
56 | return default_cmdline; |
57 | } |
58 | |
59 | -extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, |
60 | - pgprot_t prot); |
61 | - |
62 | /* |
63 | * This routine will be executed with the kernel mapped at its default virtual |
64 | * address, and if it returns successfully, the kernel will be remapped, and |
65 | @@ -96,7 +93,7 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset) |
66 | * attempt at mapping the FDT in setup_machine() |
67 | */ |
68 | early_fixmap_init(); |
69 | - fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); |
70 | + fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); |
71 | if (!fdt) |
72 | return 0; |
73 | |
74 | diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c |
75 | index f534f492a2687..ae82d9694542c 100644 |
76 | --- a/arch/arm64/kernel/setup.c |
77 | +++ b/arch/arm64/kernel/setup.c |
78 | @@ -178,7 +178,11 @@ static void __init smp_build_mpidr_hash(void) |
79 | |
80 | static void __init setup_machine_fdt(phys_addr_t dt_phys) |
81 | { |
82 | - void *dt_virt = fixmap_remap_fdt(dt_phys); |
83 | + int size; |
84 | + void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); |
85 | + |
86 | + if (dt_virt) |
87 | + memblock_reserve(dt_phys, size); |
88 | |
89 | if (!dt_virt || !early_init_dt_scan(dt_virt)) { |
90 | pr_crit("\n" |
91 | @@ -191,6 +195,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) |
92 | cpu_relax(); |
93 | } |
94 | |
95 | + /* Early fixups are done, map the FDT as read-only now */ |
96 | + fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO); |
97 | + |
98 | dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name()); |
99 | } |
100 | |
101 | diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c |
102 | index 36bd50091c4bb..784ea7c8d9966 100644 |
103 | --- a/arch/arm64/mm/mmu.c |
104 | +++ b/arch/arm64/mm/mmu.c |
105 | @@ -718,7 +718,7 @@ void __set_fixmap(enum fixed_addresses idx, |
106 | } |
107 | } |
108 | |
109 | -void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) |
110 | +void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) |
111 | { |
112 | const u64 dt_virt_base = __fix_to_virt(FIX_FDT); |
113 | int offset; |
114 | @@ -771,19 +771,6 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) |
115 | return dt_virt; |
116 | } |
117 | |
118 | -void *__init fixmap_remap_fdt(phys_addr_t dt_phys) |
119 | -{ |
120 | - void *dt_virt; |
121 | - int size; |
122 | - |
123 | - dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO); |
124 | - if (!dt_virt) |
125 | - return NULL; |
126 | - |
127 | - memblock_reserve(dt_phys, size); |
128 | - return dt_virt; |
129 | -} |
130 | - |
131 | int __init arch_ioremap_pud_supported(void) |
132 | { |
133 | /* |
134 | diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c |
135 | index 957bdeb7a5c79..b02d5c395a709 100644 |
136 | --- a/arch/parisc/kernel/unaligned.c |
137 | +++ b/arch/parisc/kernel/unaligned.c |
138 | @@ -120,7 +120,7 @@ |
139 | #define R1(i) (((i)>>21)&0x1f) |
140 | #define R2(i) (((i)>>16)&0x1f) |
141 | #define R3(i) ((i)&0x1f) |
142 | -#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1)) |
143 | +#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1)) |
144 | #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0)) |
145 | #define IM5_2(i) IM((i)>>16,5) |
146 | #define IM5_3(i) IM((i),5) |
147 | diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c |
148 | index 794bebb43d23d..64448c0998eb5 100644 |
149 | --- a/arch/s390/hypfs/hypfs_diag.c |
150 | +++ b/arch/s390/hypfs/hypfs_diag.c |
151 | @@ -436,7 +436,7 @@ __init int hypfs_diag_init(void) |
152 | int rc; |
153 | |
154 | if (diag204_probe()) { |
155 | - pr_err("The hardware system does not support hypfs\n"); |
156 | + pr_info("The hardware system does not support hypfs\n"); |
157 | return -ENODATA; |
158 | } |
159 | if (diag204_info_type == DIAG204_INFO_EXT) { |
160 | diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c |
161 | index 224aeda1e8ccf..d73d2d001a620 100644 |
162 | --- a/arch/s390/hypfs/inode.c |
163 | +++ b/arch/s390/hypfs/inode.c |
164 | @@ -493,9 +493,9 @@ fail_hypfs_vm_exit: |
165 | hypfs_vm_exit(); |
166 | fail_hypfs_diag_exit: |
167 | hypfs_diag_exit(); |
168 | + pr_err("Initialization of hypfs failed with rc=%i\n", rc); |
169 | fail_dbfs_exit: |
170 | hypfs_dbfs_exit(); |
171 | - pr_err("Initialization of hypfs failed with rc=%i\n", rc); |
172 | return rc; |
173 | } |
174 | |
175 | diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c |
176 | index ba2f21873cbd6..6fa4220e34b54 100644 |
177 | --- a/arch/s390/mm/fault.c |
178 | +++ b/arch/s390/mm/fault.c |
179 | @@ -409,7 +409,9 @@ static inline int do_exception(struct pt_regs *regs, int access) |
180 | flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
181 | if (user_mode(regs)) |
182 | flags |= FAULT_FLAG_USER; |
183 | - if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) |
184 | + if ((trans_exc_code & store_indication) == 0x400) |
185 | + access = VM_WRITE; |
186 | + if (access == VM_WRITE) |
187 | flags |= FAULT_FLAG_WRITE; |
188 | down_read(&mm->mmap_sem); |
189 | |
190 | diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
191 | index 910304aec2e66..a033fa5c596d4 100644 |
192 | --- a/arch/x86/include/asm/cpufeatures.h |
193 | +++ b/arch/x86/include/asm/cpufeatures.h |
194 | @@ -363,5 +363,6 @@ |
195 | #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ |
196 | #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ |
197 | #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ |
198 | +#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */ |
199 | |
200 | #endif /* _ASM_X86_CPUFEATURES_H */ |
201 | diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h |
202 | index 8b6c01774ca23..aadb91d43eef7 100644 |
203 | --- a/arch/x86/include/asm/intel-family.h |
204 | +++ b/arch/x86/include/asm/intel-family.h |
205 | @@ -70,6 +70,9 @@ |
206 | #define INTEL_FAM6_ALDERLAKE 0x97 |
207 | #define INTEL_FAM6_ALDERLAKE_L 0x9A |
208 | |
209 | +#define INTEL_FAM6_TIGERLAKE_L 0x8C |
210 | +#define INTEL_FAM6_TIGERLAKE 0x8D |
211 | + |
212 | /* "Small Core" Processors (Atom) */ |
213 | |
214 | #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ |
215 | diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
216 | index b4416df41d63a..d8ba0b60e088d 100644 |
217 | --- a/arch/x86/kernel/cpu/bugs.c |
218 | +++ b/arch/x86/kernel/cpu/bugs.c |
219 | @@ -395,7 +395,8 @@ static void __init mmio_select_mitigation(void) |
220 | u64 ia32_cap; |
221 | |
222 | if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || |
223 | - cpu_mitigations_off()) { |
224 | + boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) || |
225 | + cpu_mitigations_off()) { |
226 | mmio_mitigation = MMIO_MITIGATION_OFF; |
227 | return; |
228 | } |
229 | @@ -500,6 +501,8 @@ out: |
230 | pr_info("TAA: %s\n", taa_strings[taa_mitigation]); |
231 | if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) |
232 | pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); |
233 | + else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) |
234 | + pr_info("MMIO Stale Data: Unknown: No mitigations\n"); |
235 | } |
236 | |
237 | static void __init md_clear_select_mitigation(void) |
238 | @@ -1824,6 +1827,9 @@ static ssize_t tsx_async_abort_show_state(char *buf) |
239 | |
240 | static ssize_t mmio_stale_data_show_state(char *buf) |
241 | { |
242 | + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) |
243 | + return sysfs_emit(buf, "Unknown: No mitigations\n"); |
244 | + |
245 | if (mmio_mitigation == MMIO_MITIGATION_OFF) |
246 | return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); |
247 | |
248 | @@ -1934,6 +1940,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr |
249 | return srbds_show_state(buf); |
250 | |
251 | case X86_BUG_MMIO_STALE_DATA: |
252 | + case X86_BUG_MMIO_UNKNOWN: |
253 | return mmio_stale_data_show_state(buf); |
254 | |
255 | default: |
256 | @@ -1990,6 +1997,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char * |
257 | |
258 | ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) |
259 | { |
260 | - return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); |
261 | + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) |
262 | + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); |
263 | + else |
264 | + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); |
265 | } |
266 | #endif |
267 | diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
268 | index 48843fc766953..656f336074a31 100644 |
269 | --- a/arch/x86/kernel/cpu/common.c |
270 | +++ b/arch/x86/kernel/cpu/common.c |
271 | @@ -899,6 +899,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) |
272 | #define MSBDS_ONLY BIT(5) |
273 | #define NO_SWAPGS BIT(6) |
274 | #define NO_ITLB_MULTIHIT BIT(7) |
275 | +#define NO_MMIO BIT(8) |
276 | |
277 | #define VULNWL(_vendor, _family, _model, _whitelist) \ |
278 | { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } |
279 | @@ -916,6 +917,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { |
280 | VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), |
281 | |
282 | /* Intel Family 6 */ |
283 | + VULNWL_INTEL(TIGERLAKE, NO_MMIO), |
284 | + VULNWL_INTEL(TIGERLAKE_L, NO_MMIO), |
285 | + VULNWL_INTEL(ALDERLAKE, NO_MMIO), |
286 | + VULNWL_INTEL(ALDERLAKE_L, NO_MMIO), |
287 | + |
288 | VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), |
289 | VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT), |
290 | VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), |
291 | @@ -933,9 +939,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { |
292 | |
293 | VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), |
294 | |
295 | - VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), |
296 | - VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), |
297 | - VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), |
298 | + VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
299 | + VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
300 | + VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
301 | |
302 | /* |
303 | * Technically, swapgs isn't serializing on AMD (despite it previously |
304 | @@ -946,13 +952,13 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { |
305 | */ |
306 | |
307 | /* AMD Family 0xf - 0x12 */ |
308 | - VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), |
309 | - VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), |
310 | - VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), |
311 | - VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), |
312 | + VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
313 | + VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
314 | + VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
315 | + VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
316 | |
317 | /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ |
318 | - VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), |
319 | + VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
320 | {} |
321 | }; |
322 | |
323 | @@ -1092,10 +1098,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) |
324 | * Affected CPU list is generally enough to enumerate the vulnerability, |
325 | * but for virtualization case check for ARCH_CAP MSR bits also, VMM may |
326 | * not want the guest to enumerate the bug. |
327 | + * |
328 | + * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist, |
329 | + * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits. |
330 | */ |
331 | - if (cpu_matches(cpu_vuln_blacklist, MMIO) && |
332 | - !arch_cap_mmio_immune(ia32_cap)) |
333 | - setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); |
334 | + if (!arch_cap_mmio_immune(ia32_cap)) { |
335 | + if (cpu_matches(cpu_vuln_blacklist, MMIO)) |
336 | + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); |
337 | + else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO)) |
338 | + setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN); |
339 | + } |
340 | |
341 | if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) |
342 | return; |
343 | diff --git a/drivers/block/loop.c b/drivers/block/loop.c |
344 | index 2ff17b397cd2f..a23c903753b5e 100644 |
345 | --- a/drivers/block/loop.c |
346 | +++ b/drivers/block/loop.c |
347 | @@ -1202,6 +1202,11 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info) |
348 | info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev); |
349 | info->lo_offset = lo->lo_offset; |
350 | info->lo_sizelimit = lo->lo_sizelimit; |
351 | + |
352 | + /* loff_t vars have been assigned __u64 */ |
353 | + if (lo->lo_offset < 0 || lo->lo_sizelimit < 0) |
354 | + return -EOVERFLOW; |
355 | + |
356 | info->lo_flags = lo->lo_flags; |
357 | memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); |
358 | memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE); |
359 | diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c |
360 | index ed6591f92f712..0b1bb99e05e80 100644 |
361 | --- a/drivers/hid/hidraw.c |
362 | +++ b/drivers/hid/hidraw.c |
363 | @@ -354,10 +354,13 @@ static int hidraw_release(struct inode * inode, struct file * file) |
364 | unsigned int minor = iminor(inode); |
365 | struct hidraw_list *list = file->private_data; |
366 | unsigned long flags; |
367 | + int i; |
368 | |
369 | mutex_lock(&minors_lock); |
370 | |
371 | spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags); |
372 | + for (i = list->tail; i < list->head; i++) |
373 | + kfree(list->buffer[i].value); |
374 | list_del(&list->node); |
375 | spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags); |
376 | kfree(list); |
377 | diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c |
378 | index b868a77a048ca..a02da1d55fd05 100644 |
379 | --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c |
380 | +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c |
381 | @@ -2656,6 +2656,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf, |
382 | del_timer_sync(&hdw->encoder_run_timer); |
383 | del_timer_sync(&hdw->encoder_wait_timer); |
384 | flush_work(&hdw->workpoll); |
385 | + v4l2_device_unregister(&hdw->v4l2_dev); |
386 | usb_free_urb(hdw->ctl_read_urb); |
387 | usb_free_urb(hdw->ctl_write_urb); |
388 | kfree(hdw->ctl_read_buffer); |
389 | diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c |
390 | index 8ec0671f97113..7ba724ecef302 100644 |
391 | --- a/drivers/net/bonding/bond_3ad.c |
392 | +++ b/drivers/net/bonding/bond_3ad.c |
393 | @@ -1941,30 +1941,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) |
394 | */ |
395 | void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution) |
396 | { |
397 | - /* check that the bond is not initialized yet */ |
398 | - if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr), |
399 | - bond->dev->dev_addr)) { |
400 | - |
401 | - BOND_AD_INFO(bond).aggregator_identifier = 0; |
402 | - |
403 | - BOND_AD_INFO(bond).system.sys_priority = |
404 | - bond->params.ad_actor_sys_prio; |
405 | - if (is_zero_ether_addr(bond->params.ad_actor_system)) |
406 | - BOND_AD_INFO(bond).system.sys_mac_addr = |
407 | - *((struct mac_addr *)bond->dev->dev_addr); |
408 | - else |
409 | - BOND_AD_INFO(bond).system.sys_mac_addr = |
410 | - *((struct mac_addr *)bond->params.ad_actor_system); |
411 | + BOND_AD_INFO(bond).aggregator_identifier = 0; |
412 | + BOND_AD_INFO(bond).system.sys_priority = |
413 | + bond->params.ad_actor_sys_prio; |
414 | + if (is_zero_ether_addr(bond->params.ad_actor_system)) |
415 | + BOND_AD_INFO(bond).system.sys_mac_addr = |
416 | + *((struct mac_addr *)bond->dev->dev_addr); |
417 | + else |
418 | + BOND_AD_INFO(bond).system.sys_mac_addr = |
419 | + *((struct mac_addr *)bond->params.ad_actor_system); |
420 | |
421 | - /* initialize how many times this module is called in one |
422 | - * second (should be about every 100ms) |
423 | - */ |
424 | - ad_ticks_per_sec = tick_resolution; |
425 | + /* initialize how many times this module is called in one |
426 | + * second (should be about every 100ms) |
427 | + */ |
428 | + ad_ticks_per_sec = tick_resolution; |
429 | |
430 | - bond_3ad_initiate_agg_selection(bond, |
431 | - AD_AGGREGATOR_SELECTION_TIMER * |
432 | - ad_ticks_per_sec); |
433 | - } |
434 | + bond_3ad_initiate_agg_selection(bond, |
435 | + AD_AGGREGATOR_SELECTION_TIMER * |
436 | + ad_ticks_per_sec); |
437 | } |
438 | |
439 | /** |
440 | diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c |
441 | index a93a1b3bb8e4d..2ae59af3e16f0 100644 |
442 | --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c |
443 | +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c |
444 | @@ -1080,7 +1080,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) |
445 | struct cyclecounter cc; |
446 | unsigned long flags; |
447 | u32 incval = 0; |
448 | - u32 tsauxc = 0; |
449 | u32 fuse0 = 0; |
450 | |
451 | /* For some of the boards below this mask is technically incorrect. |
452 | @@ -1115,18 +1114,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) |
453 | case ixgbe_mac_x550em_a: |
454 | case ixgbe_mac_X550: |
455 | cc.read = ixgbe_ptp_read_X550; |
456 | - |
457 | - /* enable SYSTIME counter */ |
458 | - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0); |
459 | - IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); |
460 | - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); |
461 | - tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); |
462 | - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, |
463 | - tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME); |
464 | - IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS); |
465 | - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC); |
466 | - |
467 | - IXGBE_WRITE_FLUSH(hw); |
468 | break; |
469 | case ixgbe_mac_X540: |
470 | cc.read = ixgbe_ptp_read_82599; |
471 | @@ -1158,6 +1145,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) |
472 | spin_unlock_irqrestore(&adapter->tmreg_lock, flags); |
473 | } |
474 | |
475 | +/** |
476 | + * ixgbe_ptp_init_systime - Initialize SYSTIME registers |
477 | + * @adapter: the ixgbe private board structure |
478 | + * |
479 | + * Initialize and start the SYSTIME registers. |
480 | + */ |
481 | +static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter) |
482 | +{ |
483 | + struct ixgbe_hw *hw = &adapter->hw; |
484 | + u32 tsauxc; |
485 | + |
486 | + switch (hw->mac.type) { |
487 | + case ixgbe_mac_X550EM_x: |
488 | + case ixgbe_mac_x550em_a: |
489 | + case ixgbe_mac_X550: |
490 | + tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); |
491 | + |
492 | + /* Reset SYSTIME registers to 0 */ |
493 | + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0); |
494 | + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); |
495 | + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); |
496 | + |
497 | + /* Reset interrupt settings */ |
498 | + IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS); |
499 | + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC); |
500 | + |
501 | + /* Activate the SYSTIME counter */ |
502 | + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, |
503 | + tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME); |
504 | + break; |
505 | + case ixgbe_mac_X540: |
506 | + case ixgbe_mac_82599EB: |
507 | + /* Reset SYSTIME registers to 0 */ |
508 | + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); |
509 | + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); |
510 | + break; |
511 | + default: |
512 | + /* Other devices aren't supported */ |
513 | + return; |
514 | + }; |
515 | + |
516 | + IXGBE_WRITE_FLUSH(hw); |
517 | +} |
518 | + |
519 | /** |
520 | * ixgbe_ptp_reset |
521 | * @adapter: the ixgbe private board structure |
522 | @@ -1184,6 +1215,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) |
523 | |
524 | ixgbe_ptp_start_cyclecounter(adapter); |
525 | |
526 | + ixgbe_ptp_init_systime(adapter); |
527 | + |
528 | spin_lock_irqsave(&adapter->tmreg_lock, flags); |
529 | timecounter_init(&adapter->hw_tc, &adapter->hw_cc, |
530 | ktime_to_ns(ktime_get_real())); |
531 | diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c |
532 | index 1a4070f719c29..9b32b9fc44a5c 100644 |
533 | --- a/drivers/video/fbdev/pm2fb.c |
534 | +++ b/drivers/video/fbdev/pm2fb.c |
535 | @@ -614,6 +614,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) |
536 | return -EINVAL; |
537 | } |
538 | |
539 | + if (!var->pixclock) { |
540 | + DPRINTK("pixclock is zero\n"); |
541 | + return -EINVAL; |
542 | + } |
543 | + |
544 | if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) { |
545 | DPRINTK("pixclock too high (%ldKHz)\n", |
546 | PICOS2KHZ(var->pixclock)); |
547 | diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c |
548 | index fccbf5567e786..783214344ac16 100644 |
549 | --- a/fs/btrfs/xattr.c |
550 | +++ b/fs/btrfs/xattr.c |
551 | @@ -375,6 +375,9 @@ static int btrfs_xattr_handler_get(const struct xattr_handler *handler, |
552 | struct dentry *unused, struct inode *inode, |
553 | const char *name, void *buffer, size_t size) |
554 | { |
555 | + if (btrfs_root_readonly(BTRFS_I(inode)->root)) |
556 | + return -EROFS; |
557 | + |
558 | name = xattr_full_name(handler, name); |
559 | return __btrfs_getxattr(inode, name, buffer, size); |
560 | } |
561 | diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h |
562 | index 4df64a1fc09e7..09e9fd061ac0c 100644 |
563 | --- a/include/asm-generic/sections.h |
564 | +++ b/include/asm-generic/sections.h |
565 | @@ -87,7 +87,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt, |
566 | /** |
567 | * memory_intersects - checks if the region occupied by an object intersects |
568 | * with another memory region |
569 | - * @begin: virtual address of the beginning of the memory regien |
570 | + * @begin: virtual address of the beginning of the memory region |
571 | * @end: virtual address of the end of the memory region |
572 | * @virt: virtual address of the memory object |
573 | * @size: size of the memory object |
574 | @@ -100,7 +100,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt, |
575 | { |
576 | void *vend = virt + size; |
577 | |
578 | - return (virt >= begin && virt < end) || (vend >= begin && vend < end); |
579 | + if (virt < end && vend > begin) |
580 | + return true; |
581 | + |
582 | + return false; |
583 | } |
584 | |
585 | /** |
586 | diff --git a/include/linux/rmap.h b/include/linux/rmap.h |
587 | index b46bb5620a76d..9dc3617a5bfce 100644 |
588 | --- a/include/linux/rmap.h |
589 | +++ b/include/linux/rmap.h |
590 | @@ -37,12 +37,15 @@ struct anon_vma { |
591 | atomic_t refcount; |
592 | |
593 | /* |
594 | - * Count of child anon_vmas and VMAs which points to this anon_vma. |
595 | + * Count of child anon_vmas. Equals to the count of all anon_vmas that |
596 | + * have ->parent pointing to this one, including itself. |
597 | * |
598 | * This counter is used for making decision about reusing anon_vma |
599 | * instead of forking new one. See comments in function anon_vma_clone. |
600 | */ |
601 | - unsigned degree; |
602 | + unsigned long num_children; |
603 | + /* Count of VMAs whose ->anon_vma pointer points to this object. */ |
604 | + unsigned long num_active_vmas; |
605 | |
606 | struct anon_vma *parent; /* Parent of this anon_vma */ |
607 | |
608 | diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h |
609 | index 2fbeb1313c0f4..e522187cb6935 100644 |
610 | --- a/include/net/busy_poll.h |
611 | +++ b/include/net/busy_poll.h |
612 | @@ -39,7 +39,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly; |
613 | |
614 | static inline bool net_busy_loop_on(void) |
615 | { |
616 | - return sysctl_net_busy_poll; |
617 | + return READ_ONCE(sysctl_net_busy_poll); |
618 | } |
619 | |
620 | static inline u64 busy_loop_us_clock(void) |
621 | diff --git a/kernel/kprobes.c b/kernel/kprobes.c |
622 | index fb2357d0dbc85..63509602e932c 100644 |
623 | --- a/kernel/kprobes.c |
624 | +++ b/kernel/kprobes.c |
625 | @@ -1616,12 +1616,14 @@ static struct kprobe *__disable_kprobe(struct kprobe *p) |
626 | /* Try to disarm and disable this/parent probe */ |
627 | if (p == orig_p || aggr_kprobe_disabled(orig_p)) { |
628 | /* |
629 | - * If kprobes_all_disarmed is set, orig_p |
630 | - * should have already been disarmed, so |
631 | - * skip unneed disarming process. |
632 | + * Don't be lazy here. Even if 'kprobes_all_disarmed' |
633 | + * is false, 'orig_p' might not have been armed yet. |
634 | + * Note arm_all_kprobes() __tries__ to arm all kprobes |
635 | + * on the best effort basis. |
636 | */ |
637 | - if (!kprobes_all_disarmed) |
638 | + if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) |
639 | disarm_kprobe(orig_p, true); |
640 | + |
641 | orig_p->flags |= KPROBE_FLAG_DISABLED; |
642 | } |
643 | } |
644 | diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c |
645 | index 06841602025ef..7ffb9f122555b 100644 |
646 | --- a/kernel/trace/ftrace.c |
647 | +++ b/kernel/trace/ftrace.c |
648 | @@ -2737,6 +2737,16 @@ static int ftrace_startup(struct ftrace_ops *ops, int command) |
649 | |
650 | ftrace_startup_enable(command); |
651 | |
652 | + /* |
653 | + * If ftrace is in an undefined state, we just remove ops from list |
654 | + * to prevent the NULL pointer, instead of totally rolling it back and |
655 | + * free trampoline, because those actions could cause further damage. |
656 | + */ |
657 | + if (unlikely(ftrace_disabled)) { |
658 | + __unregister_ftrace_function(ops); |
659 | + return -ENODEV; |
660 | + } |
661 | + |
662 | ops->flags &= ~FTRACE_OPS_FL_ADDING; |
663 | |
664 | return 0; |
665 | diff --git a/lib/ratelimit.c b/lib/ratelimit.c |
666 | index d01f471352390..b805702de84dd 100644 |
667 | --- a/lib/ratelimit.c |
668 | +++ b/lib/ratelimit.c |
669 | @@ -27,10 +27,16 @@ |
670 | */ |
671 | int ___ratelimit(struct ratelimit_state *rs, const char *func) |
672 | { |
673 | + /* Paired with WRITE_ONCE() in .proc_handler(). |
674 | + * Changing two values seperately could be inconsistent |
675 | + * and some message could be lost. (See: net_ratelimit_state). |
676 | + */ |
677 | + int interval = READ_ONCE(rs->interval); |
678 | + int burst = READ_ONCE(rs->burst); |
679 | unsigned long flags; |
680 | int ret; |
681 | |
682 | - if (!rs->interval) |
683 | + if (!interval) |
684 | return 1; |
685 | |
686 | /* |
687 | @@ -45,7 +51,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) |
688 | if (!rs->begin) |
689 | rs->begin = jiffies; |
690 | |
691 | - if (time_is_before_jiffies(rs->begin + rs->interval)) { |
692 | + if (time_is_before_jiffies(rs->begin + interval)) { |
693 | if (rs->missed) { |
694 | if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) { |
695 | printk_deferred(KERN_WARNING |
696 | @@ -57,7 +63,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) |
697 | rs->begin = jiffies; |
698 | rs->printed = 0; |
699 | } |
700 | - if (rs->burst && rs->burst > rs->printed) { |
701 | + if (burst && burst > rs->printed) { |
702 | rs->printed++; |
703 | ret = 1; |
704 | } else { |
705 | diff --git a/mm/mmap.c b/mm/mmap.c |
706 | index 18bd38ac15317..a696c17ba9b0d 100644 |
707 | --- a/mm/mmap.c |
708 | +++ b/mm/mmap.c |
709 | @@ -1593,8 +1593,12 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) |
710 | pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags))) |
711 | return 0; |
712 | |
713 | - /* Do we need to track softdirty? */ |
714 | - if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY)) |
715 | + /* |
716 | + * Do we need to track softdirty? hugetlb does not support softdirty |
717 | + * tracking yet. |
718 | + */ |
719 | + if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY) && |
720 | + !is_vm_hugetlb_page(vma)) |
721 | return 1; |
722 | |
723 | /* Specialty mapping? */ |
724 | @@ -2525,6 +2529,18 @@ static void unmap_region(struct mm_struct *mm, |
725 | tlb_gather_mmu(&tlb, mm, start, end); |
726 | update_hiwater_rss(mm); |
727 | unmap_vmas(&tlb, vma, start, end); |
728 | + |
729 | + /* |
730 | + * Ensure we have no stale TLB entries by the time this mapping is |
731 | + * removed from the rmap. |
732 | + * Note that we don't have to worry about nested flushes here because |
733 | + * we're holding the mm semaphore for removing the mapping - so any |
734 | + * concurrent flush in this region has to be coming through the rmap, |
735 | + * and we synchronize against that using the rmap lock. |
736 | + */ |
737 | + if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0) |
738 | + tlb_flush_mmu(&tlb); |
739 | + |
740 | free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, |
741 | next ? next->vm_start : USER_PGTABLES_CEILING); |
742 | tlb_finish_mmu(&tlb, start, end); |
743 | diff --git a/mm/rmap.c b/mm/rmap.c |
744 | index 0a5310b76ec85..76064d9649186 100644 |
745 | --- a/mm/rmap.c |
746 | +++ b/mm/rmap.c |
747 | @@ -78,7 +78,8 @@ static inline struct anon_vma *anon_vma_alloc(void) |
748 | anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); |
749 | if (anon_vma) { |
750 | atomic_set(&anon_vma->refcount, 1); |
751 | - anon_vma->degree = 1; /* Reference for first vma */ |
752 | + anon_vma->num_children = 0; |
753 | + anon_vma->num_active_vmas = 0; |
754 | anon_vma->parent = anon_vma; |
755 | /* |
756 | * Initialise the anon_vma root to point to itself. If called |
757 | @@ -187,6 +188,7 @@ int anon_vma_prepare(struct vm_area_struct *vma) |
758 | anon_vma = anon_vma_alloc(); |
759 | if (unlikely(!anon_vma)) |
760 | goto out_enomem_free_avc; |
761 | + anon_vma->num_children++; /* self-parent link for new root */ |
762 | allocated = anon_vma; |
763 | } |
764 | |
765 | @@ -196,8 +198,7 @@ int anon_vma_prepare(struct vm_area_struct *vma) |
766 | if (likely(!vma->anon_vma)) { |
767 | vma->anon_vma = anon_vma; |
768 | anon_vma_chain_link(vma, avc, anon_vma); |
769 | - /* vma reference or self-parent link for new root */ |
770 | - anon_vma->degree++; |
771 | + anon_vma->num_active_vmas++; |
772 | allocated = NULL; |
773 | avc = NULL; |
774 | } |
775 | @@ -276,19 +277,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) |
776 | anon_vma_chain_link(dst, avc, anon_vma); |
777 | |
778 | /* |
779 | - * Reuse existing anon_vma if its degree lower than two, |
780 | - * that means it has no vma and only one anon_vma child. |
781 | + * Reuse existing anon_vma if it has no vma and only one |
782 | + * anon_vma child. |
783 | * |
784 | - * Do not chose parent anon_vma, otherwise first child |
785 | - * will always reuse it. Root anon_vma is never reused: |
786 | + * Root anon_vma is never reused: |
787 | * it has self-parent reference and at least one child. |
788 | */ |
789 | - if (!dst->anon_vma && anon_vma != src->anon_vma && |
790 | - anon_vma->degree < 2) |
791 | + if (!dst->anon_vma && |
792 | + anon_vma->num_children < 2 && |
793 | + anon_vma->num_active_vmas == 0) |
794 | dst->anon_vma = anon_vma; |
795 | } |
796 | if (dst->anon_vma) |
797 | - dst->anon_vma->degree++; |
798 | + dst->anon_vma->num_active_vmas++; |
799 | unlock_anon_vma_root(root); |
800 | return 0; |
801 | |
802 | @@ -338,6 +339,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) |
803 | anon_vma = anon_vma_alloc(); |
804 | if (!anon_vma) |
805 | goto out_error; |
806 | + anon_vma->num_active_vmas++; |
807 | avc = anon_vma_chain_alloc(GFP_KERNEL); |
808 | if (!avc) |
809 | goto out_error_free_anon_vma; |
810 | @@ -358,7 +360,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) |
811 | vma->anon_vma = anon_vma; |
812 | anon_vma_lock_write(anon_vma); |
813 | anon_vma_chain_link(vma, avc, anon_vma); |
814 | - anon_vma->parent->degree++; |
815 | + anon_vma->parent->num_children++; |
816 | anon_vma_unlock_write(anon_vma); |
817 | |
818 | return 0; |
819 | @@ -390,7 +392,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma) |
820 | * to free them outside the lock. |
821 | */ |
822 | if (RB_EMPTY_ROOT(&anon_vma->rb_root)) { |
823 | - anon_vma->parent->degree--; |
824 | + anon_vma->parent->num_children--; |
825 | continue; |
826 | } |
827 | |
828 | @@ -398,7 +400,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma) |
829 | anon_vma_chain_free(avc); |
830 | } |
831 | if (vma->anon_vma) |
832 | - vma->anon_vma->degree--; |
833 | + vma->anon_vma->num_active_vmas--; |
834 | unlock_anon_vma_root(root); |
835 | |
836 | /* |
837 | @@ -409,7 +411,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma) |
838 | list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { |
839 | struct anon_vma *anon_vma = avc->anon_vma; |
840 | |
841 | - VM_WARN_ON(anon_vma->degree); |
842 | + VM_WARN_ON(anon_vma->num_children); |
843 | + VM_WARN_ON(anon_vma->num_active_vmas); |
844 | put_anon_vma(anon_vma); |
845 | |
846 | list_del(&avc->same_vma); |
847 | diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c |
848 | index 78cf9508b7c1d..474c12d4f8bae 100644 |
849 | --- a/net/bluetooth/l2cap_core.c |
850 | +++ b/net/bluetooth/l2cap_core.c |
851 | @@ -1826,11 +1826,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, |
852 | src_match = !bacmp(&c->src, src); |
853 | dst_match = !bacmp(&c->dst, dst); |
854 | if (src_match && dst_match) { |
855 | - c = l2cap_chan_hold_unless_zero(c); |
856 | - if (c) { |
857 | - read_unlock(&chan_list_lock); |
858 | - return c; |
859 | - } |
860 | + if (!l2cap_chan_hold_unless_zero(c)) |
861 | + continue; |
862 | + |
863 | + read_unlock(&chan_list_lock); |
864 | + return c; |
865 | } |
866 | |
867 | /* Closest match */ |
868 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
869 | index 22b216629f9bc..022e26c180241 100644 |
870 | --- a/net/core/skbuff.c |
871 | +++ b/net/core/skbuff.c |
872 | @@ -3905,7 +3905,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) |
873 | { |
874 | bool ret; |
875 | |
876 | - if (likely(sysctl_tstamp_allow_data || tsonly)) |
877 | + if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly)) |
878 | return true; |
879 | |
880 | read_lock_bh(&sk->sk_callback_lock); |
881 | diff --git a/net/core/sock.c b/net/core/sock.c |
882 | index 1845a37d9f7e1..e4b28c10901ec 100644 |
883 | --- a/net/core/sock.c |
884 | +++ b/net/core/sock.c |
885 | @@ -2508,7 +2508,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) |
886 | |
887 | #ifdef CONFIG_NET_RX_BUSY_POLL |
888 | sk->sk_napi_id = 0; |
889 | - sk->sk_ll_usec = sysctl_net_busy_read; |
890 | + sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read); |
891 | #endif |
892 | |
893 | sk->sk_max_pacing_rate = ~0U; |
894 | diff --git a/net/key/af_key.c b/net/key/af_key.c |
895 | index 0737fc7b7ebdb..88d4a3a02ab72 100644 |
896 | --- a/net/key/af_key.c |
897 | +++ b/net/key/af_key.c |
898 | @@ -1724,9 +1724,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad |
899 | pfk->registered |= (1<<hdr->sadb_msg_satype); |
900 | } |
901 | |
902 | + mutex_lock(&pfkey_mutex); |
903 | xfrm_probe_algs(); |
904 | |
905 | supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO); |
906 | + mutex_unlock(&pfkey_mutex); |
907 | + |
908 | if (!supp_skb) { |
909 | if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC) |
910 | pfk->registered &= ~(1<<hdr->sadb_msg_satype); |
911 | diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig |
912 | index dacd2d34a790b..f7cc20641b09f 100644 |
913 | --- a/net/netfilter/Kconfig |
914 | +++ b/net/netfilter/Kconfig |
915 | @@ -96,7 +96,6 @@ config NF_CONNTRACK_ZONES |
916 | |
917 | config NF_CONNTRACK_PROCFS |
918 | bool "Supply CT list in procfs (OBSOLETE)" |
919 | - default y |
920 | depends on PROC_FS |
921 | ---help--- |
922 | This option enables for the list of known conntrack entries |
923 | diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c |
924 | index f73d47b3ffb72..82bcd14fbcb3d 100644 |
925 | --- a/net/netfilter/nft_payload.c |
926 | +++ b/net/netfilter/nft_payload.c |
927 | @@ -287,6 +287,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx, |
928 | { |
929 | enum nft_payload_bases base; |
930 | unsigned int offset, len; |
931 | + int err; |
932 | |
933 | if (tb[NFTA_PAYLOAD_BASE] == NULL || |
934 | tb[NFTA_PAYLOAD_OFFSET] == NULL || |
935 | @@ -312,8 +313,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx, |
936 | if (tb[NFTA_PAYLOAD_DREG] == NULL) |
937 | return ERR_PTR(-EINVAL); |
938 | |
939 | - offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET])); |
940 | - len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN])); |
941 | + err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset); |
942 | + if (err < 0) |
943 | + return ERR_PTR(err); |
944 | + |
945 | + err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len); |
946 | + if (err < 0) |
947 | + return ERR_PTR(err); |
948 | |
949 | if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) && |
950 | base != NFT_PAYLOAD_LL_HEADER) |
951 | diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c |
952 | index 0f371e50d9c4e..e6526c8ecacc0 100644 |
953 | --- a/net/rose/rose_loopback.c |
954 | +++ b/net/rose/rose_loopback.c |
955 | @@ -99,7 +99,8 @@ static void rose_loopback_timer(unsigned long param) |
956 | } |
957 | |
958 | if (frametype == ROSE_CALL_REQUEST) { |
959 | - if (!rose_loopback_neigh->dev) { |
960 | + if (!rose_loopback_neigh->dev && |
961 | + !rose_loopback_neigh->loopback) { |
962 | kfree_skb(skb); |
963 | continue; |
964 | } |
965 | diff --git a/net/socket.c b/net/socket.c |
966 | index ab64ae80ca2cd..6f1abcba0e360 100644 |
967 | --- a/net/socket.c |
968 | +++ b/net/socket.c |
969 | @@ -1403,7 +1403,7 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog) |
970 | |
971 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
972 | if (sock) { |
973 | - somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn; |
974 | + somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn); |
975 | if ((unsigned int)backlog > somaxconn) |
976 | backlog = somaxconn; |
977 | |
978 | diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c |
979 | index 0894108f561cb..ae90a273475c0 100644 |
980 | --- a/net/xfrm/xfrm_policy.c |
981 | +++ b/net/xfrm/xfrm_policy.c |
982 | @@ -2538,6 +2538,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, |
983 | if (pols[1]) { |
984 | if (IS_ERR(pols[1])) { |
985 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); |
986 | + xfrm_pol_put(pols[0]); |
987 | return 0; |
988 | } |
989 | pols[1]->curlft.use_time = get_seconds(); |
990 | diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost |
991 | index 8cb7971b3f25c..43b93e32ed587 100644 |
992 | --- a/scripts/Makefile.modpost |
993 | +++ b/scripts/Makefile.modpost |
994 | @@ -50,8 +50,7 @@ obj := $(KBUILD_EXTMOD) |
995 | src := $(obj) |
996 | |
997 | # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS |
998 | -include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \ |
999 | - $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile) |
1000 | +include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile) |
1001 | endif |
1002 | |
1003 | include scripts/Makefile.lib |