Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0272-5.4.173-all-fixes.patch



Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 28332 bytes
-sync kernel patches
1 diff --git a/Makefile b/Makefile
2 index 2f914dd223c81..cb9e6cd0d0249 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 172
10 +SUBLEVEL = 173
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 @@ -1022,7 +1022,7 @@ HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
15
16 ifdef CONFIG_STACK_VALIDATION
17 has_libelf := $(call try-run,\
18 - echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
19 + echo "int main() {}" | $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
20 ifeq ($(has_libelf),1)
21 objtool_target := tools/objtool FORCE
22 else
23 diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
24 index 3b69a76d341e7..1626dfc6f6ce6 100644
25 --- a/arch/arm/kernel/perf_callchain.c
26 +++ b/arch/arm/kernel/perf_callchain.c
27 @@ -62,9 +62,10 @@ user_backtrace(struct frame_tail __user *tail,
28 void
29 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
30 {
31 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
32 struct frame_tail __user *tail;
33
34 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
35 + if (guest_cbs && guest_cbs->is_in_guest()) {
36 /* We don't support guest os callchain now */
37 return;
38 }
39 @@ -98,9 +99,10 @@ callchain_trace(struct stackframe *fr,
40 void
41 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
42 {
43 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
44 struct stackframe fr;
45
46 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
47 + if (guest_cbs && guest_cbs->is_in_guest()) {
48 /* We don't support guest os callchain now */
49 return;
50 }
51 @@ -111,18 +113,21 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
52
53 unsigned long perf_instruction_pointer(struct pt_regs *regs)
54 {
55 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
56 - return perf_guest_cbs->get_guest_ip();
57 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
58 +
59 + if (guest_cbs && guest_cbs->is_in_guest())
60 + return guest_cbs->get_guest_ip();
61
62 return instruction_pointer(regs);
63 }
64
65 unsigned long perf_misc_flags(struct pt_regs *regs)
66 {
67 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
68 int misc = 0;
69
70 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
71 - if (perf_guest_cbs->is_user_mode())
72 + if (guest_cbs && guest_cbs->is_in_guest()) {
73 + if (guest_cbs->is_user_mode())
74 misc |= PERF_RECORD_MISC_GUEST_USER;
75 else
76 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
77 diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
78 index fc388eb60e0b7..64cce0c8560ab 100644
79 --- a/arch/arm/mm/Kconfig
80 +++ b/arch/arm/mm/Kconfig
81 @@ -743,6 +743,7 @@ config SWP_EMULATE
82 config CPU_BIG_ENDIAN
83 bool "Build big-endian kernel"
84 depends on ARCH_SUPPORTS_BIG_ENDIAN
85 + depends on !LD_IS_LLD
86 help
87 Say Y if you plan on running a kernel in big-endian mode.
88 Note that your board must be properly built and your board
89 diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
90 index b0e03e052dd1d..b84ec4ce7d8dc 100644
91 --- a/arch/arm64/kernel/perf_callchain.c
92 +++ b/arch/arm64/kernel/perf_callchain.c
93 @@ -102,7 +102,9 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
94 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
95 struct pt_regs *regs)
96 {
97 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
98 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
99 +
100 + if (guest_cbs && guest_cbs->is_in_guest()) {
101 /* We don't support guest os callchain now */
102 return;
103 }
104 @@ -147,9 +149,10 @@ static int callchain_trace(struct stackframe *frame, void *data)
105 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
106 struct pt_regs *regs)
107 {
108 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
109 struct stackframe frame;
110
111 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
112 + if (guest_cbs && guest_cbs->is_in_guest()) {
113 /* We don't support guest os callchain now */
114 return;
115 }
116 @@ -160,18 +163,21 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
117
118 unsigned long perf_instruction_pointer(struct pt_regs *regs)
119 {
120 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
121 - return perf_guest_cbs->get_guest_ip();
122 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
123 +
124 + if (guest_cbs && guest_cbs->is_in_guest())
125 + return guest_cbs->get_guest_ip();
126
127 return instruction_pointer(regs);
128 }
129
130 unsigned long perf_misc_flags(struct pt_regs *regs)
131 {
132 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
133 int misc = 0;
134
135 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
136 - if (perf_guest_cbs->is_user_mode())
137 + if (guest_cbs && guest_cbs->is_in_guest()) {
138 + if (guest_cbs->is_user_mode())
139 misc |= PERF_RECORD_MISC_GUEST_USER;
140 else
141 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
142 diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c
143 index ab55e98ee8f62..35318a635a5fa 100644
144 --- a/arch/csky/kernel/perf_callchain.c
145 +++ b/arch/csky/kernel/perf_callchain.c
146 @@ -86,10 +86,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
147 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
148 struct pt_regs *regs)
149 {
150 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
151 unsigned long fp = 0;
152
153 /* C-SKY does not support virtualization. */
154 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
155 + if (guest_cbs && guest_cbs->is_in_guest())
156 return;
157
158 fp = regs->regs[4];
159 @@ -110,10 +111,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
160 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
161 struct pt_regs *regs)
162 {
163 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
164 struct stackframe fr;
165
166 /* C-SKY does not support virtualization. */
167 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
168 + if (guest_cbs && guest_cbs->is_in_guest()) {
169 pr_warn("C-SKY does not support perf in guest mode!");
170 return;
171 }
172 diff --git a/arch/nds32/kernel/perf_event_cpu.c b/arch/nds32/kernel/perf_event_cpu.c
173 index 334c2a6cec23d..8a4f9babb1646 100644
174 --- a/arch/nds32/kernel/perf_event_cpu.c
175 +++ b/arch/nds32/kernel/perf_event_cpu.c
176 @@ -1363,6 +1363,7 @@ void
177 perf_callchain_user(struct perf_callchain_entry_ctx *entry,
178 struct pt_regs *regs)
179 {
180 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
181 unsigned long fp = 0;
182 unsigned long gp = 0;
183 unsigned long lp = 0;
184 @@ -1371,7 +1372,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry,
185
186 leaf_fp = 0;
187
188 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
189 + if (guest_cbs && guest_cbs->is_in_guest()) {
190 /* We don't support guest os callchain now */
191 return;
192 }
193 @@ -1479,9 +1480,10 @@ void
194 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
195 struct pt_regs *regs)
196 {
197 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
198 struct stackframe fr;
199
200 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
201 + if (guest_cbs && guest_cbs->is_in_guest()) {
202 /* We don't support guest os callchain now */
203 return;
204 }
205 @@ -1493,20 +1495,23 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
206
207 unsigned long perf_instruction_pointer(struct pt_regs *regs)
208 {
209 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
210 +
211 /* However, NDS32 does not support virtualization */
212 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
213 - return perf_guest_cbs->get_guest_ip();
214 + if (guest_cbs && guest_cbs->is_in_guest())
215 + return guest_cbs->get_guest_ip();
216
217 return instruction_pointer(regs);
218 }
219
220 unsigned long perf_misc_flags(struct pt_regs *regs)
221 {
222 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
223 int misc = 0;
224
225 /* However, NDS32 does not support virtualization */
226 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
227 - if (perf_guest_cbs->is_user_mode())
228 + if (guest_cbs && guest_cbs->is_in_guest()) {
229 + if (guest_cbs->is_user_mode())
230 misc |= PERF_RECORD_MISC_GUEST_USER;
231 else
232 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
233 diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
234 index 8d2804f05cf93..22a93009362d7 100644
235 --- a/arch/riscv/kernel/perf_callchain.c
236 +++ b/arch/riscv/kernel/perf_callchain.c
237 @@ -60,10 +60,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
238 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
239 struct pt_regs *regs)
240 {
241 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
242 unsigned long fp = 0;
243
244 /* RISC-V does not support perf in guest mode. */
245 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
246 + if (guest_cbs && guest_cbs->is_in_guest())
247 return;
248
249 fp = regs->s0;
250 @@ -84,8 +85,10 @@ void notrace walk_stackframe(struct task_struct *task,
251 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
252 struct pt_regs *regs)
253 {
254 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
255 +
256 /* RISC-V does not support perf in guest mode. */
257 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
258 + if (guest_cbs && guest_cbs->is_in_guest()) {
259 pr_warn("RISC-V does not support perf in guest mode!");
260 return;
261 }
262 diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
263 index fd73a8aa89d23..8be5750fe5ac3 100644
264 --- a/arch/s390/kvm/interrupt.c
265 +++ b/arch/s390/kvm/interrupt.c
266 @@ -1982,6 +1982,13 @@ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
267 return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
268 }
269
270 +int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
271 +{
272 + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
273 +
274 + return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
275 +}
276 +
277 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
278 {
279 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
280 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
281 index b286818d8d54d..49dc00d82e5ea 100644
282 --- a/arch/s390/kvm/kvm-s390.c
283 +++ b/arch/s390/kvm/kvm-s390.c
284 @@ -4205,10 +4205,15 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
285 spin_lock(&vcpu->kvm->arch.start_stop_lock);
286 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
287
288 - /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
289 + /*
290 + * Set the VCPU to STOPPED and THEN clear the interrupt flag,
291 + * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
292 + * have been fully processed. This will ensure that the VCPU
293 + * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
294 + */
295 + kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
296 kvm_s390_clear_stop_irq(vcpu);
297
298 - kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
299 __disable_ibs_on_vcpu(vcpu);
300
301 for (i = 0; i < online_vcpus; i++) {
302 diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
303 index 63d94a5253a8f..d497d3e58784b 100644
304 --- a/arch/s390/kvm/kvm-s390.h
305 +++ b/arch/s390/kvm/kvm-s390.h
306 @@ -373,6 +373,7 @@ void kvm_s390_destroy_adapters(struct kvm *kvm);
307 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
308 extern struct kvm_device_ops kvm_flic_ops;
309 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
310 +int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
311 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
312 int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
313 void __user *buf, int len);
314 diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
315 index 683036c1c92a8..3dc921e853b6e 100644
316 --- a/arch/s390/kvm/sigp.c
317 +++ b/arch/s390/kvm/sigp.c
318 @@ -288,6 +288,34 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
319 if (!dst_vcpu)
320 return SIGP_CC_NOT_OPERATIONAL;
321
322 + /*
323 + * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders
324 + * are processed asynchronously. Until the affected VCPU finishes
325 + * its work and calls back into KVM to clear the (RESTART or STOP)
326 + * interrupt, we need to return any new non-reset orders "busy".
327 + *
328 + * This is important because a single VCPU could issue:
329 + * 1) SIGP STOP $DESTINATION
330 + * 2) SIGP SENSE $DESTINATION
331 + *
332 + * If the SIGP SENSE would not be rejected as "busy", it could
333 + * return an incorrect answer as to whether the VCPU is STOPPED
334 + * or OPERATING.
335 + */
336 + if (order_code != SIGP_INITIAL_CPU_RESET &&
337 + order_code != SIGP_CPU_RESET) {
338 + /*
339 + * Lockless check. Both SIGP STOP and SIGP (RE)START
340 + * properly synchronize everything while processing
341 + * their orders, while the guest cannot observe a
342 + * difference when issuing other orders from two
343 + * different VCPUs.
344 + */
345 + if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
346 + kvm_s390_is_restart_irq_pending(dst_vcpu))
347 + return SIGP_CC_BUSY;
348 + }
349 +
350 switch (order_code) {
351 case SIGP_SENSE:
352 vcpu->stat.instruction_sigp_sense++;
353 diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
354 index 00bccb4d17722..e4f7ac28dcf2b 100644
355 --- a/arch/x86/events/core.c
356 +++ b/arch/x86/events/core.c
357 @@ -2366,10 +2366,11 @@ static bool perf_hw_regs(struct pt_regs *regs)
358 void
359 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
360 {
361 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
362 struct unwind_state state;
363 unsigned long addr;
364
365 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
366 + if (guest_cbs && guest_cbs->is_in_guest()) {
367 /* TODO: We don't support guest os callchain now */
368 return;
369 }
370 @@ -2475,10 +2476,11 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
371 void
372 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
373 {
374 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
375 struct stack_frame frame;
376 const unsigned long __user *fp;
377
378 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
379 + if (guest_cbs && guest_cbs->is_in_guest()) {
380 /* TODO: We don't support guest os callchain now */
381 return;
382 }
383 @@ -2562,18 +2564,21 @@ static unsigned long code_segment_base(struct pt_regs *regs)
384
385 unsigned long perf_instruction_pointer(struct pt_regs *regs)
386 {
387 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
388 - return perf_guest_cbs->get_guest_ip();
389 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
390 +
391 + if (guest_cbs && guest_cbs->is_in_guest())
392 + return guest_cbs->get_guest_ip();
393
394 return regs->ip + code_segment_base(regs);
395 }
396
397 unsigned long perf_misc_flags(struct pt_regs *regs)
398 {
399 + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
400 int misc = 0;
401
402 - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
403 - if (perf_guest_cbs->is_user_mode())
404 + if (guest_cbs && guest_cbs->is_in_guest()) {
405 + if (guest_cbs->is_user_mode())
406 misc |= PERF_RECORD_MISC_GUEST_USER;
407 else
408 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
409 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
410 index 70758f99c9e47..b33540e1efa88 100644
411 --- a/arch/x86/events/intel/core.c
412 +++ b/arch/x86/events/intel/core.c
413 @@ -2333,6 +2333,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
414 {
415 struct perf_sample_data data;
416 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
417 + struct perf_guest_info_callbacks *guest_cbs;
418 int bit;
419 int handled = 0;
420
421 @@ -2386,9 +2387,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
422 */
423 if (__test_and_clear_bit(55, (unsigned long *)&status)) {
424 handled++;
425 - if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
426 - perf_guest_cbs->handle_intel_pt_intr))
427 - perf_guest_cbs->handle_intel_pt_intr();
428 +
429 + guest_cbs = perf_get_guest_cbs();
430 + if (unlikely(guest_cbs && guest_cbs->is_in_guest() &&
431 + guest_cbs->handle_intel_pt_intr))
432 + guest_cbs->handle_intel_pt_intr();
433 else
434 intel_pt_interrupt();
435 }
436 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
437 index 0aaf40be956ff..1f7dfa5aa42da 100644
438 --- a/arch/x86/kvm/x86.c
439 +++ b/arch/x86/kvm/x86.c
440 @@ -1218,7 +1218,7 @@ static const u32 msrs_to_save_all[] = {
441 MSR_IA32_UMWAIT_CONTROL,
442
443 MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
444 - MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
445 + MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
446 MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
447 MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
448 MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
449 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
450 index 30d0523014e0d..5e9b00711357b 100644
451 --- a/drivers/base/devtmpfs.c
452 +++ b/drivers/base/devtmpfs.c
453 @@ -25,6 +25,7 @@
454 #include <linux/sched.h>
455 #include <linux/slab.h>
456 #include <linux/kthread.h>
457 +#include <linux/fs_context.h>
458 #include <uapi/linux/mount.h>
459 #include "base.h"
460
461 @@ -62,8 +63,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla
462 const char *dev_name, void *data)
463 {
464 struct super_block *s = mnt->mnt_sb;
465 + int err;
466 +
467 atomic_inc(&s->s_active);
468 down_write(&s->s_umount);
469 + err = reconfigure_single(s, flags, data);
470 + if (err < 0) {
471 + deactivate_locked_super(s);
472 + return ERR_PTR(err);
473 + }
474 return dget(s->s_root);
475 }
476
477 diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
478 index 59db70fb45614..314b9bb78e437 100644
479 --- a/drivers/firmware/qemu_fw_cfg.c
480 +++ b/drivers/firmware/qemu_fw_cfg.c
481 @@ -385,9 +385,7 @@ static void fw_cfg_sysfs_cache_cleanup(void)
482 struct fw_cfg_sysfs_entry *entry, *next;
483
484 list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
485 - /* will end up invoking fw_cfg_sysfs_cache_delist()
486 - * via each object's release() method (i.e. destructor)
487 - */
488 + fw_cfg_sysfs_cache_delist(entry);
489 kobject_put(&entry->kobj);
490 }
491 }
492 @@ -445,7 +443,6 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
493 {
494 struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
495
496 - fw_cfg_sysfs_cache_delist(entry);
497 kfree(entry);
498 }
499
500 @@ -598,20 +595,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
501 /* set file entry information */
502 entry->size = be32_to_cpu(f->size);
503 entry->select = be16_to_cpu(f->select);
504 - memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
505 + strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
506
507 /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
508 err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
509 fw_cfg_sel_ko, "%d", entry->select);
510 - if (err) {
511 - kobject_put(&entry->kobj);
512 - return err;
513 - }
514 + if (err)
515 + goto err_put_entry;
516
517 /* add raw binary content access */
518 err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
519 if (err)
520 - goto err_add_raw;
521 + goto err_del_entry;
522
523 /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
524 fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name);
525 @@ -620,9 +615,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
526 fw_cfg_sysfs_cache_enlist(entry);
527 return 0;
528
529 -err_add_raw:
530 +err_del_entry:
531 kobject_del(&entry->kobj);
532 - kfree(entry);
533 +err_put_entry:
534 + kobject_put(&entry->kobj);
535 return err;
536 }
537
538 diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
539 index 96b85d66e7a87..fe58723fc5ac7 100644
540 --- a/drivers/media/usb/uvc/uvc_video.c
541 +++ b/drivers/media/usb/uvc/uvc_video.c
542 @@ -1915,6 +1915,10 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
543 if (ep == NULL)
544 return -EIO;
545
546 + /* Reject broken descriptors. */
547 + if (usb_endpoint_maxp(&ep->desc) == 0)
548 + return -EIO;
549 +
550 ret = uvc_init_video_bulk(stream, ep, gfp_flags);
551 }
552
553 diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
554 index a7e47e068ad9b..7769a9b556c70 100644
555 --- a/drivers/mtd/chips/Kconfig
556 +++ b/drivers/mtd/chips/Kconfig
557 @@ -55,12 +55,14 @@ choice
558 LITTLE_ENDIAN_BYTE, if the bytes are reversed.
559
560 config MTD_CFI_NOSWAP
561 + depends on !ARCH_IXP4XX || CPU_BIG_ENDIAN
562 bool "NO"
563
564 config MTD_CFI_BE_BYTE_SWAP
565 bool "BIG_ENDIAN_BYTE"
566
567 config MTD_CFI_LE_BYTE_SWAP
568 + depends on !ARCH_IXP4XX
569 bool "LITTLE_ENDIAN_BYTE"
570
571 endchoice
572 diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
573 index bc82305ebb4c2..ffbf4f6cb9cfe 100644
574 --- a/drivers/mtd/maps/Kconfig
575 +++ b/drivers/mtd/maps/Kconfig
576 @@ -303,7 +303,7 @@ config MTD_DC21285
577
578 config MTD_IXP4XX
579 tristate "CFI Flash device mapped on Intel IXP4xx based systems"
580 - depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX
581 + depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX && MTD_CFI_ADV_OPTIONS
582 help
583 This enables MTD access to flash devices on platforms based
584 on Intel's IXP4xx family of network processors such as the
585 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
586 index f070f25bb735a..df7a14320fd29 100644
587 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
588 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
589 @@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
590 _initpabias(hw);
591 rtl92c_dm_init(hw);
592 exit:
593 + local_irq_disable();
594 local_irq_restore(flags);
595 return err;
596 }
597 diff --git a/fs/fs_context.c b/fs/fs_context.c
598 index 138b5b4d621d2..a2367c7aef5b3 100644
599 --- a/fs/fs_context.c
600 +++ b/fs/fs_context.c
601 @@ -585,7 +585,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
602 param->key);
603 }
604
605 - if (len > PAGE_SIZE - 2 - size)
606 + if (size + len + 2 > PAGE_SIZE)
607 return invalf(fc, "VFS: Legacy: Cumulative options too large");
608 if (strchr(param->key, ',') ||
609 (param->type == fs_value_is_string &&
610 diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
611 index 2bb916d68576f..023b9bc54b7ce 100644
612 --- a/fs/orangefs/orangefs-bufmap.c
613 +++ b/fs/orangefs/orangefs-bufmap.c
614 @@ -179,7 +179,7 @@ orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
615 {
616 kfree(bufmap->page_array);
617 kfree(bufmap->desc_array);
618 - kfree(bufmap->buffer_index_array);
619 + bitmap_free(bufmap->buffer_index_array);
620 kfree(bufmap);
621 }
622
623 @@ -229,8 +229,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
624 bufmap->desc_size = user_desc->size;
625 bufmap->desc_shift = ilog2(bufmap->desc_size);
626
627 - bufmap->buffer_index_array =
628 - kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG), GFP_KERNEL);
629 + bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
630 if (!bufmap->buffer_index_array)
631 goto out_free_bufmap;
632
633 @@ -253,7 +252,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
634 out_free_desc_array:
635 kfree(bufmap->desc_array);
636 out_free_index_array:
637 - kfree(bufmap->buffer_index_array);
638 + bitmap_free(bufmap->buffer_index_array);
639 out_free_bufmap:
640 kfree(bufmap);
641 out:
642 diff --git a/fs/super.c b/fs/super.c
643 index 877532baf513d..b289356f302fc 100644
644 --- a/fs/super.c
645 +++ b/fs/super.c
646 @@ -1470,8 +1470,8 @@ struct dentry *mount_nodev(struct file_system_type *fs_type,
647 }
648 EXPORT_SYMBOL(mount_nodev);
649
650 -static int reconfigure_single(struct super_block *s,
651 - int flags, void *data)
652 +int reconfigure_single(struct super_block *s,
653 + int flags, void *data)
654 {
655 struct fs_context *fc;
656 int ret;
657 diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
658 index ba8a58754340d..ebcb91a57e865 100644
659 --- a/include/linux/fs_context.h
660 +++ b/include/linux/fs_context.h
661 @@ -135,6 +135,8 @@ extern int generic_parse_monolithic(struct fs_context *fc, void *data);
662 extern int vfs_get_tree(struct fs_context *fc);
663 extern void put_fs_context(struct fs_context *fc);
664 extern void fc_drop_locked(struct fs_context *fc);
665 +int reconfigure_single(struct super_block *s,
666 + int flags, void *data);
667
668 /*
669 * sget() wrappers to be called from the ->get_tree() op.
670 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
671 index 68ccc5b1913b4..b7ac395513c0f 100644
672 --- a/include/linux/perf_event.h
673 +++ b/include/linux/perf_event.h
674 @@ -1175,7 +1175,18 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
675 enum perf_bpf_event_type type,
676 u16 flags);
677
678 -extern struct perf_guest_info_callbacks *perf_guest_cbs;
679 +extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
680 +static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
681 +{
682 + /*
683 + * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading
684 + * the callbacks between a !NULL check and dereferences, to ensure
685 + * pending stores/changes to the callback pointers are visible before a
686 + * non-NULL perf_guest_cbs is visible to readers, and to prevent a
687 + * module from unloading callbacks while readers are active.
688 + */
689 + return rcu_dereference(perf_guest_cbs);
690 +}
691 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
692 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
693
694 diff --git a/kernel/events/core.c b/kernel/events/core.c
695 index 6ffe3d3e7b06d..7e124f9abb18b 100644
696 --- a/kernel/events/core.c
697 +++ b/kernel/events/core.c
698 @@ -6045,18 +6045,25 @@ static void perf_pending_event(struct irq_work *entry)
699 * Later on, we might change it to a list if there is
700 * another virtualization implementation supporting the callbacks.
701 */
702 -struct perf_guest_info_callbacks *perf_guest_cbs;
703 +struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
704
705 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
706 {
707 - perf_guest_cbs = cbs;
708 + if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
709 + return -EBUSY;
710 +
711 + rcu_assign_pointer(perf_guest_cbs, cbs);
712 return 0;
713 }
714 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
715
716 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
717 {
718 - perf_guest_cbs = NULL;
719 + if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
720 + return -EINVAL;
721 +
722 + rcu_assign_pointer(perf_guest_cbs, NULL);
723 + synchronize_rcu();
724 return 0;
725 }
726 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
727 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
728 index 022799479a722..d06205626cd54 100644
729 --- a/sound/pci/hda/patch_realtek.c
730 +++ b/sound/pci/hda/patch_realtek.c
731 @@ -1926,6 +1926,7 @@ enum {
732 ALC887_FIXUP_ASUS_BASS,
733 ALC887_FIXUP_BASS_CHMAP,
734 ALC1220_FIXUP_GB_DUAL_CODECS,
735 + ALC1220_FIXUP_GB_X570,
736 ALC1220_FIXUP_CLEVO_P950,
737 ALC1220_FIXUP_CLEVO_PB51ED,
738 ALC1220_FIXUP_CLEVO_PB51ED_PINS,
739 @@ -2115,6 +2116,29 @@ static void alc1220_fixup_gb_dual_codecs(struct hda_codec *codec,
740 }
741 }
742
743 +static void alc1220_fixup_gb_x570(struct hda_codec *codec,
744 + const struct hda_fixup *fix,
745 + int action)
746 +{
747 + static const hda_nid_t conn1[] = { 0x0c };
748 + static const struct coef_fw gb_x570_coefs[] = {
749 + WRITE_COEF(0x1a, 0x01c1),
750 + WRITE_COEF(0x1b, 0x0202),
751 + WRITE_COEF(0x43, 0x3005),
752 + {}
753 + };
754 +
755 + switch (action) {
756 + case HDA_FIXUP_ACT_PRE_PROBE:
757 + snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1);
758 + snd_hda_override_conn_list(codec, 0x1b, ARRAY_SIZE(conn1), conn1);
759 + break;
760 + case HDA_FIXUP_ACT_INIT:
761 + alc_process_coef_fw(codec, gb_x570_coefs);
762 + break;
763 + }
764 +}
765 +
766 static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
767 const struct hda_fixup *fix,
768 int action)
769 @@ -2417,6 +2441,10 @@ static const struct hda_fixup alc882_fixups[] = {
770 .type = HDA_FIXUP_FUNC,
771 .v.func = alc1220_fixup_gb_dual_codecs,
772 },
773 + [ALC1220_FIXUP_GB_X570] = {
774 + .type = HDA_FIXUP_FUNC,
775 + .v.func = alc1220_fixup_gb_x570,
776 + },
777 [ALC1220_FIXUP_CLEVO_P950] = {
778 .type = HDA_FIXUP_FUNC,
779 .v.func = alc1220_fixup_clevo_p950,
780 @@ -2519,7 +2547,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
781 SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
782 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
783 SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
784 - SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
785 + SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570),
786 SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
787 SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
788 SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),