Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0142-4.14.43-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 123054 byte(s)
Log message: -added up to patches-4.14.79
1 niro 3238 diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
2     index 258902db14bf..8355e79350b7 100644
3     --- a/Documentation/ABI/testing/sysfs-devices-system-cpu
4     +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
5     @@ -378,6 +378,7 @@ What: /sys/devices/system/cpu/vulnerabilities
6     /sys/devices/system/cpu/vulnerabilities/meltdown
7     /sys/devices/system/cpu/vulnerabilities/spectre_v1
8     /sys/devices/system/cpu/vulnerabilities/spectre_v2
9     + /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
10     Date: January 2018
11     Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
12     Description: Information about CPU vulnerabilities
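As an aside to the ABI hunk above: the new spec_store_bypass entry is a plain-text sysfs attribute, readable just like the existing meltdown/spectre files. A minimal userspace sketch (not part of the patch; the path comes from the hunk above and the possible strings from the bugs.c ssb_strings[] table further down):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");
	char line[128];

	if (!f)
		return 1;	/* file is absent on kernels without this patch */
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Not affected" or one of the ssb_strings[] */
	fclose(f);
	return 0;
}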
13     diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
14     index 8cfb44ffe853..0380a45ecf4b 100644
15     --- a/Documentation/admin-guide/kernel-parameters.txt
16     +++ b/Documentation/admin-guide/kernel-parameters.txt
17     @@ -2600,6 +2600,9 @@
18     allow data leaks with this option, which is equivalent
19     to spectre_v2=off.
20    
21     + nospec_store_bypass_disable
22     + [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
23     +
24     noxsave [BUGS=X86] Disables x86 extended register state save
25     and restore using xsave. The kernel will fallback to
26     enabling legacy floating-point and sse state.
27     @@ -3930,6 +3933,48 @@
28     Not specifying this option is equivalent to
29     spectre_v2=auto.
30    
31     + spec_store_bypass_disable=
32     + [HW] Control Speculative Store Bypass (SSB) Disable mitigation
33     + (Speculative Store Bypass vulnerability)
34     +
35     + Certain CPUs are vulnerable to an exploit against a
36     + common industry wide performance optimization known
37     + as "Speculative Store Bypass" in which recent stores
38     + to the same memory location may not be observed by
39     + later loads during speculative execution. The idea
40     + is that such stores are unlikely and that they can
41     + be detected prior to instruction retirement at the
42     + end of a particular speculation execution window.
43     +
44     + In vulnerable processors, the speculatively forwarded
45     + store can be used in a cache side channel attack, for
46     + example to read memory to which the attacker does not
47     + directly have access (e.g. inside sandboxed code).
48     +
49     + This parameter controls whether the Speculative Store
50     + Bypass optimization is used.
51     +
52     + on - Unconditionally disable Speculative Store Bypass
53     + off - Unconditionally enable Speculative Store Bypass
54     + auto - Kernel detects whether the CPU model contains an
55     + implementation of Speculative Store Bypass and
56     + picks the most appropriate mitigation. If the
57     + CPU is not vulnerable, "off" is selected. If the
58     + CPU is vulnerable the default mitigation is
59     + architecture and Kconfig dependent. See below.
60     + prctl - Control Speculative Store Bypass per thread
61     + via prctl. Speculative Store Bypass is enabled
62     + for a process by default. The state of the control
63     + is inherited on fork.
64     + seccomp - Same as "prctl" above, but all seccomp threads
65     + will disable SSB unless they explicitly opt out.
66     +
67     + Not specifying this option is equivalent to
68     + spec_store_bypass_disable=auto.
69     +
70     + Default mitigations:
71     + X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
72     +
73     spia_io_base= [HW,MTD]
74     spia_fio_base=
75     spia_pedr=
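To make the "prctl" and "seccomp" modes above concrete, here is a hedged userspace sketch that queries the per-task state. PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS and the PR_SPEC_* bits are the constants this series adds to <linux/prctl.h>; older headers may not carry them yet.

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int st = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (st < 0) {
		perror("PR_GET_SPECULATION_CTRL");	/* EINVAL/ENODEV on kernels without this patch */
		return 1;
	}
	if (st & PR_SPEC_PRCTL)
		printf("per-task control available, SSB currently %s\n",
		       (st & PR_SPEC_DISABLE) ? "disabled" : "enabled");
	else
		printf("state 0x%x (no per-task control in this mode)\n", st);
	return 0;
}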
76     diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
77     index 7b2eb1b7d4ca..a3233da7fa88 100644
78     --- a/Documentation/userspace-api/index.rst
79     +++ b/Documentation/userspace-api/index.rst
80     @@ -19,6 +19,7 @@ place where this information is gathered.
81     no_new_privs
82     seccomp_filter
83     unshare
84     + spec_ctrl
85    
86     .. only:: subproject and html
87    
88     diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
89     new file mode 100644
90     index 000000000000..32f3d55c54b7
91     --- /dev/null
92     +++ b/Documentation/userspace-api/spec_ctrl.rst
93     @@ -0,0 +1,94 @@
94     +===================
95     +Speculation Control
96     +===================
97     +
98     +Quite some CPUs have speculation-related misfeatures which are in
99     +fact vulnerabilities causing data leaks in various forms even across
100     +privilege domains.
101     +
102     +The kernel provides mitigation for such vulnerabilities in various
103     +forms. Some of these mitigations are compile-time configurable and some
104     +can be supplied on the kernel command line.
105     +
106     +There is also a class of mitigations which are very expensive, but they can
107     +be restricted to a certain set of processes or tasks in controlled
108     +environments. The mechanism to control these mitigations is via
109     +:manpage:`prctl(2)`.
110     +
111     +There are two prctl options which are related to this:
112     +
113     + * PR_GET_SPECULATION_CTRL
114     +
115     + * PR_SET_SPECULATION_CTRL
116     +
117     +PR_GET_SPECULATION_CTRL
118     +-----------------------
119     +
120     +PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
121     +which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
122     +the following meaning:
123     +
124     +==== ===================== ===================================================
125     +Bit Define Description
126     +==== ===================== ===================================================
127     +0 PR_SPEC_PRCTL Mitigation can be controlled per task by
128     + PR_SET_SPECULATION_CTRL.
129     +1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
130     + disabled.
131     +2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
132     + enabled.
133     +3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
134     + subsequent prctl(..., PR_SPEC_ENABLE) will fail.
135     +==== ===================== ===================================================
136     +
137     +If all bits are 0 the CPU is not affected by the speculation misfeature.
138     +
139     +If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
140     +available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
141     +misfeature will fail.
142     +
143     +PR_SET_SPECULATION_CTRL
144     +-----------------------
145     +
146     +PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
147     +is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
148     +in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
149     +PR_SPEC_FORCE_DISABLE.
150     +
151     +Common error codes
152     +------------------
153     +======= =================================================================
154     +Value Meaning
155     +======= =================================================================
156     +EINVAL The prctl is not implemented by the architecture or unused
157     + prctl(2) arguments are not 0.
158     +
159     +ENODEV arg2 is selecting a not supported speculation misfeature.
160     +======= =================================================================
161     +
162     +PR_SET_SPECULATION_CTRL error codes
163     +-----------------------------------
164     +======= =================================================================
165     +Value Meaning
166     +======= =================================================================
167     +0 Success
168     +
169     +ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
170     + PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
171     +
172     +ENXIO Control of the selected speculation misfeature is not possible.
173     + See PR_GET_SPECULATION_CTRL.
174     +
175     +EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
176     + tried to enable it again.
177     +======= =================================================================
178     +
179     +Speculation misfeature controls
180     +-------------------------------
181     +- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
182     +
183     + Invocations:
184     + * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
185     + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
186     + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
187     + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
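A short sketch of the force-disable semantics documented above, assuming a <linux/prctl.h> that already carries these constants: once PR_SPEC_FORCE_DISABLE succeeds, a later PR_SPEC_ENABLE must fail with EPERM, while ENXIO means the selected mitigation mode offers no per-task control.

#include <errno.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_FORCE_DISABLE, 0, 0)) {
		perror("force disable");	/* ENXIO unless mode is prctl or seccomp */
		return 1;
	}
	errno = 0;
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_ENABLE, 0, 0) && errno == EPERM)
		puts("re-enable rejected with EPERM, as documented");
	return 0;
}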
188     diff --git a/Makefile b/Makefile
189     index 777f5685a36b..9be88c9d9fc9 100644
190     --- a/Makefile
191     +++ b/Makefile
192     @@ -1,7 +1,7 @@
193     # SPDX-License-Identifier: GPL-2.0
194     VERSION = 4
195     PATCHLEVEL = 14
196     -SUBLEVEL = 42
197     +SUBLEVEL = 43
198     EXTRAVERSION =
199     NAME = Petit Gorille
200    
201     diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
202     index bc8d4bbd82e2..9342904cccca 100644
203     --- a/arch/arm/include/asm/assembler.h
204     +++ b/arch/arm/include/asm/assembler.h
205     @@ -536,4 +536,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
206     #endif
207     .endm
208    
209     +#ifdef CONFIG_KPROBES
210     +#define _ASM_NOKPROBE(entry) \
211     + .pushsection "_kprobe_blacklist", "aw" ; \
212     + .balign 4 ; \
213     + .long entry; \
214     + .popsection
215     +#else
216     +#define _ASM_NOKPROBE(entry)
217     +#endif
218     +
219     #endif /* __ASM_ASSEMBLER_H__ */
220     diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
221     index eb46fc81a440..08cd720eae01 100644
222     --- a/arch/arm/include/asm/kvm_mmu.h
223     +++ b/arch/arm/include/asm/kvm_mmu.h
224     @@ -221,6 +221,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
225     return 8;
226     }
227    
228     +/*
229     + * We are not in the kvm->srcu critical section most of the time, so we take
230     + * the SRCU read lock here. Since we copy the data from the user page, we
231     + * can immediately drop the lock again.
232     + */
233     +static inline int kvm_read_guest_lock(struct kvm *kvm,
234     + gpa_t gpa, void *data, unsigned long len)
235     +{
236     + int srcu_idx = srcu_read_lock(&kvm->srcu);
237     + int ret = kvm_read_guest(kvm, gpa, data, len);
238     +
239     + srcu_read_unlock(&kvm->srcu, srcu_idx);
240     +
241     + return ret;
242     +}
243     +
244     static inline void *kvm_get_hyp_vector(void)
245     {
246     return kvm_ksym_ref(__kvm_hyp_vector);
247     diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
248     index b8dc3b516f93..f702f2b37052 100644
249     --- a/arch/arm/kernel/traps.c
250     +++ b/arch/arm/kernel/traps.c
251     @@ -19,6 +19,7 @@
252     #include <linux/uaccess.h>
253     #include <linux/hardirq.h>
254     #include <linux/kdebug.h>
255     +#include <linux/kprobes.h>
256     #include <linux/module.h>
257     #include <linux/kexec.h>
258     #include <linux/bug.h>
259     @@ -417,7 +418,8 @@ void unregister_undef_hook(struct undef_hook *hook)
260     raw_spin_unlock_irqrestore(&undef_lock, flags);
261     }
262    
263     -static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
264     +static nokprobe_inline
265     +int call_undef_hook(struct pt_regs *regs, unsigned int instr)
266     {
267     struct undef_hook *hook;
268     unsigned long flags;
269     @@ -490,6 +492,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
270    
271     arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
272     }
273     +NOKPROBE_SYMBOL(do_undefinstr)
274    
275     /*
276     * Handle FIQ similarly to NMI on x86 systems.
277     diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
278     index df73914e81c8..746e7801dcdf 100644
279     --- a/arch/arm/lib/getuser.S
280     +++ b/arch/arm/lib/getuser.S
281     @@ -38,6 +38,7 @@ ENTRY(__get_user_1)
282     mov r0, #0
283     ret lr
284     ENDPROC(__get_user_1)
285     +_ASM_NOKPROBE(__get_user_1)
286    
287     ENTRY(__get_user_2)
288     check_uaccess r0, 2, r1, r2, __get_user_bad
289     @@ -58,6 +59,7 @@ rb .req r0
290     mov r0, #0
291     ret lr
292     ENDPROC(__get_user_2)
293     +_ASM_NOKPROBE(__get_user_2)
294    
295     ENTRY(__get_user_4)
296     check_uaccess r0, 4, r1, r2, __get_user_bad
297     @@ -65,6 +67,7 @@ ENTRY(__get_user_4)
298     mov r0, #0
299     ret lr
300     ENDPROC(__get_user_4)
301     +_ASM_NOKPROBE(__get_user_4)
302    
303     ENTRY(__get_user_8)
304     check_uaccess r0, 8, r1, r2, __get_user_bad8
305     @@ -78,6 +81,7 @@ ENTRY(__get_user_8)
306     mov r0, #0
307     ret lr
308     ENDPROC(__get_user_8)
309     +_ASM_NOKPROBE(__get_user_8)
310    
311     #ifdef __ARMEB__
312     ENTRY(__get_user_32t_8)
313     @@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8)
314     mov r0, #0
315     ret lr
316     ENDPROC(__get_user_32t_8)
317     +_ASM_NOKPROBE(__get_user_32t_8)
318    
319     ENTRY(__get_user_64t_1)
320     check_uaccess r0, 1, r1, r2, __get_user_bad8
321     @@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1)
322     mov r0, #0
323     ret lr
324     ENDPROC(__get_user_64t_1)
325     +_ASM_NOKPROBE(__get_user_64t_1)
326    
327     ENTRY(__get_user_64t_2)
328     check_uaccess r0, 2, r1, r2, __get_user_bad8
329     @@ -114,6 +120,7 @@ rb .req r0
330     mov r0, #0
331     ret lr
332     ENDPROC(__get_user_64t_2)
333     +_ASM_NOKPROBE(__get_user_64t_2)
334    
335     ENTRY(__get_user_64t_4)
336     check_uaccess r0, 4, r1, r2, __get_user_bad8
337     @@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4)
338     mov r0, #0
339     ret lr
340     ENDPROC(__get_user_64t_4)
341     +_ASM_NOKPROBE(__get_user_64t_4)
342     #endif
343    
344     __get_user_bad8:
345     @@ -131,6 +139,8 @@ __get_user_bad:
346     ret lr
347     ENDPROC(__get_user_bad)
348     ENDPROC(__get_user_bad8)
349     +_ASM_NOKPROBE(__get_user_bad)
350     +_ASM_NOKPROBE(__get_user_bad8)
351    
352     .pushsection __ex_table, "a"
353     .long 1b, __get_user_bad
354     diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
355     index bcdecc25461b..b2aa9b32bff2 100644
356     --- a/arch/arm/probes/kprobes/opt-arm.c
357     +++ b/arch/arm/probes/kprobes/opt-arm.c
358     @@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
359     {
360     unsigned long flags;
361     struct kprobe *p = &op->kp;
362     - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
363     + struct kprobe_ctlblk *kcb;
364    
365     /* Save skipped registers */
366     regs->ARM_pc = (unsigned long)op->kp.addr;
367     regs->ARM_ORIG_r0 = ~0UL;
368    
369     local_irq_save(flags);
370     + kcb = get_kprobe_ctlblk();
371    
372     if (kprobe_running()) {
373     kprobes_inc_nmissed_count(&op->kp);
374     @@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
375    
376     local_irq_restore(flags);
377     }
378     +NOKPROBE_SYMBOL(optimized_callback)
379    
380     int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
381     {
382     diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
383     index 2d6d4bd9de52..fe55b516f018 100644
384     --- a/arch/arm64/include/asm/kvm_mmu.h
385     +++ b/arch/arm64/include/asm/kvm_mmu.h
386     @@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
387     return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
388     }
389    
390     +/*
391     + * We are not in the kvm->srcu critical section most of the time, so we take
392     + * the SRCU read lock here. Since we copy the data from the user page, we
393     + * can immediately drop the lock again.
394     + */
395     +static inline int kvm_read_guest_lock(struct kvm *kvm,
396     + gpa_t gpa, void *data, unsigned long len)
397     +{
398     + int srcu_idx = srcu_read_lock(&kvm->srcu);
399     + int ret = kvm_read_guest(kvm, gpa, data, len);
400     +
401     + srcu_read_unlock(&kvm->srcu, srcu_idx);
402     +
403     + return ret;
404     +}
405     +
406     #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
407     #include <asm/mmu.h>
408    
409     diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
410     index 90bc20efb4c7..b4fcb54b9686 100644
411     --- a/arch/powerpc/kernel/setup-common.c
412     +++ b/arch/powerpc/kernel/setup-common.c
413     @@ -242,14 +242,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
414     unsigned short maj;
415     unsigned short min;
416    
417     - /* We only show online cpus: disable preempt (overzealous, I
418     - * knew) to prevent cpu going down. */
419     - preempt_disable();
420     - if (!cpu_online(cpu_id)) {
421     - preempt_enable();
422     - return 0;
423     - }
424     -
425     #ifdef CONFIG_SMP
426     pvr = per_cpu(cpu_pvr, cpu_id);
427     #else
428     @@ -358,9 +350,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
429     #ifdef CONFIG_SMP
430     seq_printf(m, "\n");
431     #endif
432     -
433     - preempt_enable();
434     -
435     /* If this is the last cpu, print the summary */
436     if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
437     show_cpuinfo_summary(m);
438     diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
439     index 1bceb95f422d..5584247f5029 100644
440     --- a/arch/powerpc/platforms/powernv/opal-nvram.c
441     +++ b/arch/powerpc/platforms/powernv/opal-nvram.c
442     @@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
443     return count;
444     }
445    
446     +/*
447     + * This can be called in the panic path with interrupts off, so use
448     + * mdelay in that case.
449     + */
450     static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
451     {
452     s64 rc = OPAL_BUSY;
453     @@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
454     while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
455     rc = opal_write_nvram(__pa(buf), count, off);
456     if (rc == OPAL_BUSY_EVENT) {
457     - msleep(OPAL_BUSY_DELAY_MS);
458     + if (in_interrupt() || irqs_disabled())
459     + mdelay(OPAL_BUSY_DELAY_MS);
460     + else
461     + msleep(OPAL_BUSY_DELAY_MS);
462     opal_poll_events(NULL);
463     } else if (rc == OPAL_BUSY) {
464     - msleep(OPAL_BUSY_DELAY_MS);
465     + if (in_interrupt() || irqs_disabled())
466     + mdelay(OPAL_BUSY_DELAY_MS);
467     + else
468     + msleep(OPAL_BUSY_DELAY_MS);
469     }
470     }
471    
472     diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
473     index 94f2099bceb0..3d17c41074ca 100644
474     --- a/arch/s390/kernel/irq.c
475     +++ b/arch/s390/kernel/irq.c
476     @@ -176,10 +176,9 @@ void do_softirq_own_stack(void)
477     new -= STACK_FRAME_OVERHEAD;
478     ((struct stack_frame *) new)->back_chain = old;
479     asm volatile(" la 15,0(%0)\n"
480     - " basr 14,%2\n"
481     + " brasl 14,__do_softirq\n"
482     " la 15,0(%1)\n"
483     - : : "a" (new), "a" (old),
484     - "a" (__do_softirq)
485     + : : "a" (new), "a" (old)
486     : "0", "1", "2", "3", "4", "5", "14",
487     "cc", "memory" );
488     } else {
489     diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
490     index 7e1e40323b78..d99155793c26 100644
491     --- a/arch/s390/kernel/perf_cpum_sf.c
492     +++ b/arch/s390/kernel/perf_cpum_sf.c
493     @@ -739,6 +739,10 @@ static int __hw_perf_event_init(struct perf_event *event)
494     */
495     rate = 0;
496     if (attr->freq) {
497     + if (!attr->sample_freq) {
498     + err = -EINVAL;
499     + goto out;
500     + }
501     rate = freq_to_sample_rate(&si, attr->sample_freq);
502     rate = hw_limit_rate(&si, rate);
503     attr->freq = 0;
504     diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
505     index e56dbc67e837..97c57b5f8d57 100644
506     --- a/arch/x86/boot/compressed/eboot.c
507     +++ b/arch/x86/boot/compressed/eboot.c
508     @@ -163,7 +163,8 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom)
509     if (status != EFI_SUCCESS)
510     goto free_struct;
511    
512     - memcpy(rom->romdata, pci->romimage, pci->romsize);
513     + memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
514     + pci->romsize);
515     return status;
516    
517     free_struct:
518     @@ -269,7 +270,8 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom)
519     if (status != EFI_SUCCESS)
520     goto free_struct;
521    
522     - memcpy(rom->romdata, pci->romimage, pci->romsize);
523     + memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
524     + pci->romsize);
525     return status;
526    
527     free_struct:
528     diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
529     index 23a65439c37c..403e97d5e243 100644
530     --- a/arch/x86/include/asm/cpufeatures.h
531     +++ b/arch/x86/include/asm/cpufeatures.h
532     @@ -198,7 +198,6 @@
533     #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
534     #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
535     #define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
536     -
537     #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
538     #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
539     #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
540     @@ -207,11 +206,19 @@
541     #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
542     #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
543    
544     +#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
545     +#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
546     #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
547     #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
548    
549     #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
550     #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
551     +#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
552     +#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
553     +#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
554     +#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
555     +#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
556     +#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
557    
558     /* Virtualization flags: Linux defined, word 8 */
559     #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
560     @@ -272,9 +279,10 @@
561     #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
562     #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
563     #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
564     -#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
565     -#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
566     -#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
567     +#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
568     +#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
569     +#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
570     +#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
571    
572     /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
573     #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
574     @@ -331,6 +339,7 @@
575     #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
576     #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
577     #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
578     +#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
579    
580     /*
581     * BUG word(s)
582     @@ -360,5 +369,6 @@
583     #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
584     #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
585     #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
586     +#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
587    
588     #endif /* _ASM_X86_CPUFEATURES_H */
589     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
590     index 4f8b80199672..174b9c41efce 100644
591     --- a/arch/x86/include/asm/kvm_host.h
592     +++ b/arch/x86/include/asm/kvm_host.h
593     @@ -908,7 +908,7 @@ struct kvm_x86_ops {
594     int (*hardware_setup)(void); /* __init */
595     void (*hardware_unsetup)(void); /* __exit */
596     bool (*cpu_has_accelerated_tpr)(void);
597     - bool (*cpu_has_high_real_mode_segbase)(void);
598     + bool (*has_emulated_msr)(int index);
599     void (*cpuid_update)(struct kvm_vcpu *vcpu);
600    
601     int (*vm_init)(struct kvm *kvm);
602     diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
603     index 1de72ce514cd..ed97ef3b48a7 100644
604     --- a/arch/x86/include/asm/mmu_context.h
605     +++ b/arch/x86/include/asm/mmu_context.h
606     @@ -192,7 +192,7 @@ static inline int init_new_context(struct task_struct *tsk,
607    
608     #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
609     if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
610     - /* pkey 0 is the default and always allocated */
611     + /* pkey 0 is the default and allocated implicitly */
612     mm->context.pkey_allocation_map = 0x1;
613     /* -1 means unallocated or invalid */
614     mm->context.execute_only_pkey = -1;
615     diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
616     index eb83ff1bae8f..504b21692d32 100644
617     --- a/arch/x86/include/asm/msr-index.h
618     +++ b/arch/x86/include/asm/msr-index.h
619     @@ -42,6 +42,8 @@
620     #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
621     #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
622     #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
623     +#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
624     +#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
625    
626     #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
627     #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
628     @@ -68,6 +70,11 @@
629     #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
630     #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
631     #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
632     +#define ARCH_CAP_SSB_NO (1 << 4) /*
633     + * Not susceptible to Speculative Store Bypass
634     + * attack, so no Speculative Store Bypass
635     + * control required.
636     + */
637    
638     #define MSR_IA32_BBL_CR_CTL 0x00000119
639     #define MSR_IA32_BBL_CR_CTL3 0x0000011e
640     @@ -337,6 +344,8 @@
641     #define MSR_AMD64_IBSOPDATA4 0xc001103d
642     #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
643    
644     +#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
645     +
646     /* Fam 17h MSRs */
647     #define MSR_F17H_IRPERF 0xc00000e9
648    
649     diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
650     index f928ad9b143f..8b38df98548e 100644
651     --- a/arch/x86/include/asm/nospec-branch.h
652     +++ b/arch/x86/include/asm/nospec-branch.h
653     @@ -217,6 +217,14 @@ enum spectre_v2_mitigation {
654     SPECTRE_V2_IBRS,
655     };
656    
657     +/* The Speculative Store Bypass disable variants */
658     +enum ssb_mitigation {
659     + SPEC_STORE_BYPASS_NONE,
660     + SPEC_STORE_BYPASS_DISABLE,
661     + SPEC_STORE_BYPASS_PRCTL,
662     + SPEC_STORE_BYPASS_SECCOMP,
663     +};
664     +
665     extern char __indirect_thunk_start[];
666     extern char __indirect_thunk_end[];
667    
668     @@ -241,22 +249,27 @@ static inline void vmexit_fill_RSB(void)
669     #endif
670     }
671    
672     -#define alternative_msr_write(_msr, _val, _feature) \
673     - asm volatile(ALTERNATIVE("", \
674     - "movl %[msr], %%ecx\n\t" \
675     - "movl %[val], %%eax\n\t" \
676     - "movl $0, %%edx\n\t" \
677     - "wrmsr", \
678     - _feature) \
679     - : : [msr] "i" (_msr), [val] "i" (_val) \
680     - : "eax", "ecx", "edx", "memory")
681     +static __always_inline
682     +void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
683     +{
684     + asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
685     + : : "c" (msr),
686     + "a" ((u32)val),
687     + "d" ((u32)(val >> 32)),
688     + [feature] "i" (feature)
689     + : "memory");
690     +}
691    
692     static inline void indirect_branch_prediction_barrier(void)
693     {
694     - alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
695     - X86_FEATURE_USE_IBPB);
696     + u64 val = PRED_CMD_IBPB;
697     +
698     + alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
699     }
700    
701     +/* The Intel SPEC CTRL MSR base value cache */
702     +extern u64 x86_spec_ctrl_base;
703     +
704     /*
705     * With retpoline, we must use IBRS to restrict branch prediction
706     * before calling into firmware.
707     @@ -265,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void)
708     */
709     #define firmware_restrict_branch_speculation_start() \
710     do { \
711     + u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
712     + \
713     preempt_disable(); \
714     - alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
715     + alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
716     X86_FEATURE_USE_IBRS_FW); \
717     } while (0)
718    
719     #define firmware_restrict_branch_speculation_end() \
720     do { \
721     - alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
722     + u64 val = x86_spec_ctrl_base; \
723     + \
724     + alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
725     X86_FEATURE_USE_IBRS_FW); \
726     preempt_enable(); \
727     } while (0)
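The reason alternative_msr_write() changes from a macro with "i" (immediate) constraints to an inline function is visible in the firmware bracketing above: the MSR write now takes a runtime value derived from x86_spec_ctrl_base, which an immediate operand could not express. An illustrative expansion of those macros (kernel context only, not new code in the patch):

static inline void example_firmware_call_bracketing(void)
{
	u64 start_val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;	/* runtime, not compile-time */

	preempt_disable();
	alternative_msr_write(MSR_IA32_SPEC_CTRL, start_val,
			      X86_FEATURE_USE_IBRS_FW);
	/* ... firmware/EFI call happens here ... */
	alternative_msr_write(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base,
			      X86_FEATURE_USE_IBRS_FW);
	preempt_enable();
}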
728     diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
729     index a0ba1ffda0df..851c04b7a092 100644
730     --- a/arch/x86/include/asm/pkeys.h
731     +++ b/arch/x86/include/asm/pkeys.h
732     @@ -2,6 +2,8 @@
733     #ifndef _ASM_X86_PKEYS_H
734     #define _ASM_X86_PKEYS_H
735    
736     +#define ARCH_DEFAULT_PKEY 0
737     +
738     #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
739    
740     extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
741     @@ -15,7 +17,7 @@ extern int __execute_only_pkey(struct mm_struct *mm);
742     static inline int execute_only_pkey(struct mm_struct *mm)
743     {
744     if (!boot_cpu_has(X86_FEATURE_OSPKE))
745     - return 0;
746     + return ARCH_DEFAULT_PKEY;
747    
748     return __execute_only_pkey(mm);
749     }
750     @@ -49,13 +51,21 @@ bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
751     {
752     /*
753     * "Allocated" pkeys are those that have been returned
754     - * from pkey_alloc(). pkey 0 is special, and never
755     - * returned from pkey_alloc().
756     + * from pkey_alloc() or pkey 0 which is allocated
757     + * implicitly when the mm is created.
758     */
759     - if (pkey <= 0)
760     + if (pkey < 0)
761     return false;
762     if (pkey >= arch_max_pkey())
763     return false;
764     + /*
765     + * The exec-only pkey is set in the allocation map, but
766     + * is not available to any of the user interfaces like
767     + * mprotect_pkey().
768     + */
769     + if (pkey == mm->context.execute_only_pkey)
770     + return false;
771     +
772     return mm_pkey_allocation_map(mm) & (1U << pkey);
773     }
774    
775     diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
776     new file mode 100644
777     index 000000000000..ae7c2c5cd7f0
778     --- /dev/null
779     +++ b/arch/x86/include/asm/spec-ctrl.h
780     @@ -0,0 +1,80 @@
781     +/* SPDX-License-Identifier: GPL-2.0 */
782     +#ifndef _ASM_X86_SPECCTRL_H_
783     +#define _ASM_X86_SPECCTRL_H_
784     +
785     +#include <linux/thread_info.h>
786     +#include <asm/nospec-branch.h>
787     +
788     +/*
789     + * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
790     + * the guest has, while on VMEXIT we restore the host view. This
791     + * would be easier if SPEC_CTRL were architecturally maskable or
792     + * shadowable for guests but this is not (currently) the case.
793     + * Takes the guest view of SPEC_CTRL MSR as a parameter and also
794     + * the guest's version of VIRT_SPEC_CTRL, if emulated.
795     + */
796     +extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
797     +
798     +/**
799     + * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
800     + * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
801     + * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
802     + * (may get translated to MSR_AMD64_LS_CFG bits)
803     + *
804     + * Avoids writing to the MSR if the content/bits are the same
805     + */
806     +static inline
807     +void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
808     +{
809     + x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
810     +}
811     +
812     +/**
813     + * x86_spec_ctrl_restore_host - Restore host speculation control registers
814     + * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
815     + * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
816     + * (may get translated to MSR_AMD64_LS_CFG bits)
817     + *
818     + * Avoids writing to the MSR if the content/bits are the same
819     + */
820     +static inline
821     +void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
822     +{
823     + x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
824     +}
825     +
826     +/* AMD specific Speculative Store Bypass MSR data */
827     +extern u64 x86_amd_ls_cfg_base;
828     +extern u64 x86_amd_ls_cfg_ssbd_mask;
829     +
830     +static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
831     +{
832     + BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
833     + return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
834     +}
835     +
836     +static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
837     +{
838     + BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
839     + return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
840     +}
841     +
842     +static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
843     +{
844     + return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
845     +}
846     +
847     +#ifdef CONFIG_SMP
848     +extern void speculative_store_bypass_ht_init(void);
849     +#else
850     +static inline void speculative_store_bypass_ht_init(void) { }
851     +#endif
852     +
853     +extern void speculative_store_bypass_update(unsigned long tif);
854     +
855     +static inline void speculative_store_bypass_update_current(void)
856     +{
857     + speculative_store_bypass_update(current_thread_info()->flags);
858     +}
859     +
860     +#endif
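A worked example of the TIF_SSBD <-> SPEC_CTRL_SSBD shifts above, using the values this patch defines (TIF_SSBD == 5 in thread_info.h, SPEC_CTRL_SSBD_SHIFT == 2 in msr-index.h). Plain userspace arithmetic, only to make the shift distance concrete:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned int tif_ssbd = 5, spec_ctrl_ssbd_shift = 2;
	const uint64_t tif_ssbd_bit = 1ULL << tif_ssbd;			/* _TIF_SSBD == 0x20 */
	const uint64_t spec_ctrl_ssbd_bit = 1ULL << spec_ctrl_ssbd_shift;	/* SPEC_CTRL_SSBD == 0x04 */

	/* ssbd_tif_to_spec_ctrl(): move the thread flag down to the MSR bit */
	assert((tif_ssbd_bit >> (tif_ssbd - spec_ctrl_ssbd_shift)) == spec_ctrl_ssbd_bit);
	/* ssbd_spec_ctrl_to_tif(): move the MSR bit back up to the thread flag */
	assert((spec_ctrl_ssbd_bit << (tif_ssbd - spec_ctrl_ssbd_shift)) == tif_ssbd_bit);
	/* the BUILD_BUG_ON() in the patch guards TIF_SSBD >= SPEC_CTRL_SSBD_SHIFT,
	 * so this shift distance can never go negative */
	return 0;
}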
861     diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
862     index eda3b6823ca4..95ff2d7f553f 100644
863     --- a/arch/x86/include/asm/thread_info.h
864     +++ b/arch/x86/include/asm/thread_info.h
865     @@ -81,6 +81,7 @@ struct thread_info {
866     #define TIF_SIGPENDING 2 /* signal pending */
867     #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
868     #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
869     +#define TIF_SSBD 5 /* Reduced data speculation */
870     #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
871     #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
872     #define TIF_SECCOMP 8 /* secure computing */
873     @@ -107,6 +108,7 @@ struct thread_info {
874     #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
875     #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
876     #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
877     +#define _TIF_SSBD (1 << TIF_SSBD)
878     #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
879     #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
880     #define _TIF_SECCOMP (1 << TIF_SECCOMP)
881     @@ -146,7 +148,7 @@ struct thread_info {
882    
883     /* flags to check in __switch_to() */
884     #define _TIF_WORK_CTXSW \
885     - (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
886     + (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
887    
888     #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
889     #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
890     diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
891     index e7d5a7883632..90574f731c05 100644
892     --- a/arch/x86/kernel/cpu/amd.c
893     +++ b/arch/x86/kernel/cpu/amd.c
894     @@ -10,6 +10,7 @@
895     #include <asm/processor.h>
896     #include <asm/apic.h>
897     #include <asm/cpu.h>
898     +#include <asm/spec-ctrl.h>
899     #include <asm/smp.h>
900     #include <asm/pci-direct.h>
901     #include <asm/delay.h>
902     @@ -554,6 +555,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
903     rdmsrl(MSR_FAM10H_NODE_ID, value);
904     nodes_per_socket = ((value >> 3) & 7) + 1;
905     }
906     +
907     + if (c->x86 >= 0x15 && c->x86 <= 0x17) {
908     + unsigned int bit;
909     +
910     + switch (c->x86) {
911     + case 0x15: bit = 54; break;
912     + case 0x16: bit = 33; break;
913     + case 0x17: bit = 10; break;
914     + default: return;
915     + }
916     + /*
917     + * Try to cache the base value so further operations can
918     + * avoid RMW. If that faults, do not enable SSBD.
919     + */
920     + if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
921     + setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
922     + setup_force_cpu_cap(X86_FEATURE_SSBD);
923     + x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
924     + }
925     + }
926     }
927    
928     static void early_init_amd(struct cpuinfo_x86 *c)
929     @@ -765,6 +786,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
930    
931     static void init_amd_zn(struct cpuinfo_x86 *c)
932     {
933     + set_cpu_cap(c, X86_FEATURE_ZEN);
934     /*
935     * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
936     * all up to and including B1.
937     diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
938     index bfca937bdcc3..7416fc206b4a 100644
939     --- a/arch/x86/kernel/cpu/bugs.c
940     +++ b/arch/x86/kernel/cpu/bugs.c
941     @@ -12,8 +12,10 @@
942     #include <linux/utsname.h>
943     #include <linux/cpu.h>
944     #include <linux/module.h>
945     +#include <linux/nospec.h>
946     +#include <linux/prctl.h>
947    
948     -#include <asm/nospec-branch.h>
949     +#include <asm/spec-ctrl.h>
950     #include <asm/cmdline.h>
951     #include <asm/bugs.h>
952     #include <asm/processor.h>
953     @@ -27,6 +29,27 @@
954     #include <asm/intel-family.h>
955    
956     static void __init spectre_v2_select_mitigation(void);
957     +static void __init ssb_select_mitigation(void);
958     +
959     +/*
960     + * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
961     + * writes to SPEC_CTRL contain whatever reserved bits have been set.
962     + */
963     +u64 __ro_after_init x86_spec_ctrl_base;
964     +EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
965     +
966     +/*
967     + * The vendor and possibly platform specific bits which can be modified in
968     + * x86_spec_ctrl_base.
969     + */
970     +static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
971     +
972     +/*
973     + * AMD specific MSR info for Speculative Store Bypass control.
974     + * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
975     + */
976     +u64 __ro_after_init x86_amd_ls_cfg_base;
977     +u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
978    
979     void __init check_bugs(void)
980     {
981     @@ -37,9 +60,27 @@ void __init check_bugs(void)
982     print_cpu_info(&boot_cpu_data);
983     }
984    
985     + /*
986     + * Read the SPEC_CTRL MSR to account for reserved bits which may
987     + * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
988     + * init code as it is not enumerated and depends on the family.
989     + */
990     + if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
991     + rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
992     +
993     + /* Allow STIBP in MSR_SPEC_CTRL if supported */
994     + if (boot_cpu_has(X86_FEATURE_STIBP))
995     + x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
996     +
997     /* Select the proper spectre mitigation before patching alternatives */
998     spectre_v2_select_mitigation();
999    
1000     + /*
1001     + * Select proper mitigation for any exposure to the Speculative Store
1002     + * Bypass vulnerability.
1003     + */
1004     + ssb_select_mitigation();
1005     +
1006     #ifdef CONFIG_X86_32
1007     /*
1008     * Check whether we are able to run this kernel safely on SMP.
1009     @@ -93,7 +134,76 @@ static const char *spectre_v2_strings[] = {
1010     #undef pr_fmt
1011     #define pr_fmt(fmt) "Spectre V2 : " fmt
1012    
1013     -static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
1014     +static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
1015     + SPECTRE_V2_NONE;
1016     +
1017     +void
1018     +x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
1019     +{
1020     + u64 msrval, guestval, hostval = x86_spec_ctrl_base;
1021     + struct thread_info *ti = current_thread_info();
1022     +
1023     + /* Is MSR_SPEC_CTRL implemented ? */
1024     + if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
1025     + /*
1026     + * Restrict guest_spec_ctrl to supported values. Clear the
1027     + * modifiable bits in the host base value and or the
1028     + * modifiable bits from the guest value.
1029     + */
1030     + guestval = hostval & ~x86_spec_ctrl_mask;
1031     + guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
1032     +
1033     + /* SSBD controlled in MSR_SPEC_CTRL */
1034     + if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
1035     + hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
1036     +
1037     + if (hostval != guestval) {
1038     + msrval = setguest ? guestval : hostval;
1039     + wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
1040     + }
1041     + }
1042     +
1043     + /*
1044     + * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
1045     + * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
1046     + */
1047     + if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
1048     + !static_cpu_has(X86_FEATURE_VIRT_SSBD))
1049     + return;
1050     +
1051     + /*
1052     + * If the host has SSBD mitigation enabled, force it in the host's
1053     + * virtual MSR value. If its not permanently enabled, evaluate
1054     + * current's TIF_SSBD thread flag.
1055     + */
1056     + if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
1057     + hostval = SPEC_CTRL_SSBD;
1058     + else
1059     + hostval = ssbd_tif_to_spec_ctrl(ti->flags);
1060     +
1061     + /* Sanitize the guest value */
1062     + guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
1063     +
1064     + if (hostval != guestval) {
1065     + unsigned long tif;
1066     +
1067     + tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
1068     + ssbd_spec_ctrl_to_tif(hostval);
1069     +
1070     + speculative_store_bypass_update(tif);
1071     + }
1072     +}
1073     +EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
1074     +
1075     +static void x86_amd_ssb_disable(void)
1076     +{
1077     + u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
1078     +
1079     + if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
1080     + wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
1081     + else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
1082     + wrmsrl(MSR_AMD64_LS_CFG, msrval);
1083     +}
1084    
1085     #ifdef RETPOLINE
1086     static bool spectre_v2_bad_module;
1087     @@ -312,32 +422,289 @@ static void __init spectre_v2_select_mitigation(void)
1088     }
1089    
1090     #undef pr_fmt
1091     +#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
1092     +
1093     +static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1094     +
1095     +/* The kernel command line selection */
1096     +enum ssb_mitigation_cmd {
1097     + SPEC_STORE_BYPASS_CMD_NONE,
1098     + SPEC_STORE_BYPASS_CMD_AUTO,
1099     + SPEC_STORE_BYPASS_CMD_ON,
1100     + SPEC_STORE_BYPASS_CMD_PRCTL,
1101     + SPEC_STORE_BYPASS_CMD_SECCOMP,
1102     +};
1103     +
1104     +static const char *ssb_strings[] = {
1105     + [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
1106     + [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
1107     + [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
1108     + [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1109     +};
1110     +
1111     +static const struct {
1112     + const char *option;
1113     + enum ssb_mitigation_cmd cmd;
1114     +} ssb_mitigation_options[] = {
1115     + { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
1116     + { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
1117     + { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
1118     + { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
1119     + { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
1120     +};
1121     +
1122     +static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1123     +{
1124     + enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1125     + char arg[20];
1126     + int ret, i;
1127     +
1128     + if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
1129     + return SPEC_STORE_BYPASS_CMD_NONE;
1130     + } else {
1131     + ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1132     + arg, sizeof(arg));
1133     + if (ret < 0)
1134     + return SPEC_STORE_BYPASS_CMD_AUTO;
1135     +
1136     + for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1137     + if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1138     + continue;
1139     +
1140     + cmd = ssb_mitigation_options[i].cmd;
1141     + break;
1142     + }
1143     +
1144     + if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1145     + pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1146     + return SPEC_STORE_BYPASS_CMD_AUTO;
1147     + }
1148     + }
1149     +
1150     + return cmd;
1151     +}
1152     +
1153     +static enum ssb_mitigation __init __ssb_select_mitigation(void)
1154     +{
1155     + enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1156     + enum ssb_mitigation_cmd cmd;
1157     +
1158     + if (!boot_cpu_has(X86_FEATURE_SSBD))
1159     + return mode;
1160     +
1161     + cmd = ssb_parse_cmdline();
1162     + if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1163     + (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1164     + cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1165     + return mode;
1166     +
1167     + switch (cmd) {
1168     + case SPEC_STORE_BYPASS_CMD_AUTO:
1169     + case SPEC_STORE_BYPASS_CMD_SECCOMP:
1170     + /*
1171     + * Choose prctl+seccomp as the default mode if seccomp is
1172     + * enabled.
1173     + */
1174     + if (IS_ENABLED(CONFIG_SECCOMP))
1175     + mode = SPEC_STORE_BYPASS_SECCOMP;
1176     + else
1177     + mode = SPEC_STORE_BYPASS_PRCTL;
1178     + break;
1179     + case SPEC_STORE_BYPASS_CMD_ON:
1180     + mode = SPEC_STORE_BYPASS_DISABLE;
1181     + break;
1182     + case SPEC_STORE_BYPASS_CMD_PRCTL:
1183     + mode = SPEC_STORE_BYPASS_PRCTL;
1184     + break;
1185     + case SPEC_STORE_BYPASS_CMD_NONE:
1186     + break;
1187     + }
1188     +
1189     + /*
1190     + * We have three CPU feature flags that are in play here:
1191     + * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
1192     + * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
1193     + * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
1194     + */
1195     + if (mode == SPEC_STORE_BYPASS_DISABLE) {
1196     + setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1197     + /*
1198     + * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
1199     + * a completely different MSR and bit dependent on family.
1200     + */
1201     + switch (boot_cpu_data.x86_vendor) {
1202     + case X86_VENDOR_INTEL:
1203     + x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1204     + x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
1205     + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1206     + break;
1207     + case X86_VENDOR_AMD:
1208     + x86_amd_ssb_disable();
1209     + break;
1210     + }
1211     + }
1212     +
1213     + return mode;
1214     +}
1215     +
1216     +static void ssb_select_mitigation(void)
1217     +{
1218     + ssb_mode = __ssb_select_mitigation();
1219     +
1220     + if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1221     + pr_info("%s\n", ssb_strings[ssb_mode]);
1222     +}
1223     +
1224     +#undef pr_fmt
1225     +#define pr_fmt(fmt) "Speculation prctl: " fmt
1226     +
1227     +static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
1228     +{
1229     + bool update;
1230     +
1231     + if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
1232     + ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
1233     + return -ENXIO;
1234     +
1235     + switch (ctrl) {
1236     + case PR_SPEC_ENABLE:
1237     + /* If speculation is force disabled, enable is not allowed */
1238     + if (task_spec_ssb_force_disable(task))
1239     + return -EPERM;
1240     + task_clear_spec_ssb_disable(task);
1241     + update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
1242     + break;
1243     + case PR_SPEC_DISABLE:
1244     + task_set_spec_ssb_disable(task);
1245     + update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
1246     + break;
1247     + case PR_SPEC_FORCE_DISABLE:
1248     + task_set_spec_ssb_disable(task);
1249     + task_set_spec_ssb_force_disable(task);
1250     + update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
1251     + break;
1252     + default:
1253     + return -ERANGE;
1254     + }
1255     +
1256     + /*
1257     + * If being set on non-current task, delay setting the CPU
1258     + * mitigation until it is next scheduled.
1259     + */
1260     + if (task == current && update)
1261     + speculative_store_bypass_update_current();
1262     +
1263     + return 0;
1264     +}
1265     +
1266     +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1267     + unsigned long ctrl)
1268     +{
1269     + switch (which) {
1270     + case PR_SPEC_STORE_BYPASS:
1271     + return ssb_prctl_set(task, ctrl);
1272     + default:
1273     + return -ENODEV;
1274     + }
1275     +}
1276     +
1277     +#ifdef CONFIG_SECCOMP
1278     +void arch_seccomp_spec_mitigate(struct task_struct *task)
1279     +{
1280     + if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
1281     + ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1282     +}
1283     +#endif
1284     +
1285     +static int ssb_prctl_get(struct task_struct *task)
1286     +{
1287     + switch (ssb_mode) {
1288     + case SPEC_STORE_BYPASS_DISABLE:
1289     + return PR_SPEC_DISABLE;
1290     + case SPEC_STORE_BYPASS_SECCOMP:
1291     + case SPEC_STORE_BYPASS_PRCTL:
1292     + if (task_spec_ssb_force_disable(task))
1293     + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1294     + if (task_spec_ssb_disable(task))
1295     + return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1296     + return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1297     + default:
1298     + if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1299     + return PR_SPEC_ENABLE;
1300     + return PR_SPEC_NOT_AFFECTED;
1301     + }
1302     +}
1303     +
1304     +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
1305     +{
1306     + switch (which) {
1307     + case PR_SPEC_STORE_BYPASS:
1308     + return ssb_prctl_get(task);
1309     + default:
1310     + return -ENODEV;
1311     + }
1312     +}
1313     +
1314     +void x86_spec_ctrl_setup_ap(void)
1315     +{
1316     + if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
1317     + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1318     +
1319     + if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
1320     + x86_amd_ssb_disable();
1321     +}
1322    
1323     #ifdef CONFIG_SYSFS
1324     -ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1325     +
1326     +static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
1327     + char *buf, unsigned int bug)
1328     {
1329     - if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
1330     + if (!boot_cpu_has_bug(bug))
1331     return sprintf(buf, "Not affected\n");
1332     - if (boot_cpu_has(X86_FEATURE_PTI))
1333     - return sprintf(buf, "Mitigation: PTI\n");
1334     +
1335     + switch (bug) {
1336     + case X86_BUG_CPU_MELTDOWN:
1337     + if (boot_cpu_has(X86_FEATURE_PTI))
1338     + return sprintf(buf, "Mitigation: PTI\n");
1339     +
1340     + break;
1341     +
1342     + case X86_BUG_SPECTRE_V1:
1343     + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1344     +
1345     + case X86_BUG_SPECTRE_V2:
1346     + return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1347     + boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
1348     + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1349     + spectre_v2_module_string());
1350     +
1351     + case X86_BUG_SPEC_STORE_BYPASS:
1352     + return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
1353     +
1354     + default:
1355     + break;
1356     + }
1357     +
1358     return sprintf(buf, "Vulnerable\n");
1359     }
1360    
1361     +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1362     +{
1363     + return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
1364     +}
1365     +
1366     ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
1367     {
1368     - if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1369     - return sprintf(buf, "Not affected\n");
1370     - return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1371     + return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
1372     }
1373    
1374     ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
1375     {
1376     - if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1377     - return sprintf(buf, "Not affected\n");
1378     + return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
1379     +}
1380    
1381     - return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1382     - boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
1383     - boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1384     - spectre_v2_module_string());
1385     +ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
1386     +{
1387     + return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
1388     }
1389     #endif
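
The hunk above wires up the kernel side of the per-task interface (ssb_prctl_get() and arch_prctl_spec_ctrl_get()). The following is a minimal userspace sketch of how a task could query and opt into the mitigation; it assumes a <linux/prctl.h> that already carries the PR_*_SPECULATION_CTRL and PR_SPEC_* constants introduced by this series (older headers would need them defined by hand).

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int state;

	/* Ask the kernel for this task's Speculative Store Bypass state. */
	state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
	if (state < 0) {
		perror("PR_GET_SPECULATION_CTRL");
		return 1;
	}

	if (state & PR_SPEC_PRCTL) {
		/* Per-task control is available: opt this task into the mitigation. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_DISABLE, 0, 0))
			perror("PR_SET_SPECULATION_CTRL");
	} else {
		/* Boot-time policy or "not affected"; nothing to control here. */
		printf("spec_store_bypass state: 0x%x\n", state);
	}
	return 0;
}

Under spec_store_bypass_disable=prctl (or seccomp) a fresh task sees PR_SPEC_PRCTL | PR_SPEC_ENABLE from the get call, matching the default branch of ssb_prctl_get() above.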
1390     diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
1391     index cf6380200dc2..48e98964ecad 100644
1392     --- a/arch/x86/kernel/cpu/common.c
1393     +++ b/arch/x86/kernel/cpu/common.c
1394     @@ -733,17 +733,32 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
1395     * and they also have a different bit for STIBP support. Also,
1396     * a hypervisor might have set the individual AMD bits even on
1397     * Intel CPUs, for finer-grained selection of what's available.
1398     - *
1399     - * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
1400     - * features, which are visible in /proc/cpuinfo and used by the
1401     - * kernel. So set those accordingly from the Intel bits.
1402     */
1403     if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
1404     set_cpu_cap(c, X86_FEATURE_IBRS);
1405     set_cpu_cap(c, X86_FEATURE_IBPB);
1406     + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1407     }
1408     +
1409     if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
1410     set_cpu_cap(c, X86_FEATURE_STIBP);
1411     +
1412     + if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
1413     + cpu_has(c, X86_FEATURE_VIRT_SSBD))
1414     + set_cpu_cap(c, X86_FEATURE_SSBD);
1415     +
1416     + if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
1417     + set_cpu_cap(c, X86_FEATURE_IBRS);
1418     + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1419     + }
1420     +
1421     + if (cpu_has(c, X86_FEATURE_AMD_IBPB))
1422     + set_cpu_cap(c, X86_FEATURE_IBPB);
1423     +
1424     + if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
1425     + set_cpu_cap(c, X86_FEATURE_STIBP);
1426     + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1427     + }
1428     }
1429    
1430     void get_cpu_cap(struct cpuinfo_x86 *c)
1431     @@ -894,21 +909,55 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
1432     {}
1433     };
1434    
1435     -static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
1436     +static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
1437     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
1438     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
1439     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
1440     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
1441     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
1442     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
1443     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
1444     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
1445     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
1446     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
1447     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
1448     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
1449     + { X86_VENDOR_CENTAUR, 5, },
1450     + { X86_VENDOR_INTEL, 5, },
1451     + { X86_VENDOR_NSC, 5, },
1452     + { X86_VENDOR_AMD, 0x12, },
1453     + { X86_VENDOR_AMD, 0x11, },
1454     + { X86_VENDOR_AMD, 0x10, },
1455     + { X86_VENDOR_AMD, 0xf, },
1456     + { X86_VENDOR_ANY, 4, },
1457     + {}
1458     +};
1459     +
1460     +static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1461     {
1462     u64 ia32_cap = 0;
1463    
1464     - if (x86_match_cpu(cpu_no_meltdown))
1465     - return false;
1466     -
1467     if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
1468     rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
1469    
1470     + if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
1471     + !(ia32_cap & ARCH_CAP_SSB_NO))
1472     + setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1473     +
1474     + if (x86_match_cpu(cpu_no_speculation))
1475     + return;
1476     +
1477     + setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1478     + setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1479     +
1480     + if (x86_match_cpu(cpu_no_meltdown))
1481     + return;
1482     +
1483     /* Rogue Data Cache Load? No! */
1484     if (ia32_cap & ARCH_CAP_RDCL_NO)
1485     - return false;
1486     + return;
1487    
1488     - return true;
1489     + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1490     }
1491    
1492     /*
1493     @@ -958,12 +1007,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1494    
1495     setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1496    
1497     - if (!x86_match_cpu(cpu_no_speculation)) {
1498     - if (cpu_vulnerable_to_meltdown(c))
1499     - setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1500     - setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1501     - setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1502     - }
1503     + cpu_set_bug_bits(c);
1504    
1505     fpu__init_system(c);
1506    
1507     @@ -1322,6 +1366,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
1508     #endif
1509     mtrr_ap_init();
1510     validate_apic_and_package_id(c);
1511     + x86_spec_ctrl_setup_ap();
1512     }
1513    
1514     static __init int setup_noclflush(char *arg)
1515     diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
1516     index e806b11a99af..37672d299e35 100644
1517     --- a/arch/x86/kernel/cpu/cpu.h
1518     +++ b/arch/x86/kernel/cpu/cpu.h
1519     @@ -50,4 +50,6 @@ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
1520    
1521     unsigned int aperfmperf_get_khz(int cpu);
1522    
1523     +extern void x86_spec_ctrl_setup_ap(void);
1524     +
1525     #endif /* ARCH_X86_CPU_H */
1526     diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
1527     index c3af167d0a70..c895f38a7a5e 100644
1528     --- a/arch/x86/kernel/cpu/intel.c
1529     +++ b/arch/x86/kernel/cpu/intel.c
1530     @@ -188,7 +188,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
1531     setup_clear_cpu_cap(X86_FEATURE_IBPB);
1532     setup_clear_cpu_cap(X86_FEATURE_STIBP);
1533     setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
1534     + setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
1535     setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
1536     + setup_clear_cpu_cap(X86_FEATURE_SSBD);
1537     + setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
1538     }
1539    
1540     /*
1541     diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
1542     index 8bd1d8292cf7..988a98f34c66 100644
1543     --- a/arch/x86/kernel/process.c
1544     +++ b/arch/x86/kernel/process.c
1545     @@ -39,6 +39,7 @@
1546     #include <asm/switch_to.h>
1547     #include <asm/desc.h>
1548     #include <asm/prctl.h>
1549     +#include <asm/spec-ctrl.h>
1550    
1551     /*
1552     * per-CPU TSS segments. Threads are completely 'soft' on Linux,
1553     @@ -279,6 +280,148 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
1554     }
1555     }
1556    
1557     +#ifdef CONFIG_SMP
1558     +
1559     +struct ssb_state {
1560     + struct ssb_state *shared_state;
1561     + raw_spinlock_t lock;
1562     + unsigned int disable_state;
1563     + unsigned long local_state;
1564     +};
1565     +
1566     +#define LSTATE_SSB 0
1567     +
1568     +static DEFINE_PER_CPU(struct ssb_state, ssb_state);
1569     +
1570     +void speculative_store_bypass_ht_init(void)
1571     +{
1572     + struct ssb_state *st = this_cpu_ptr(&ssb_state);
1573     + unsigned int this_cpu = smp_processor_id();
1574     + unsigned int cpu;
1575     +
1576     + st->local_state = 0;
1577     +
1578     + /*
1579     + * Shared state setup happens once on the first bringup
1580     + * of the CPU. It's not destroyed on CPU hotunplug.
1581     + */
1582     + if (st->shared_state)
1583     + return;
1584     +
1585     + raw_spin_lock_init(&st->lock);
1586     +
1587     + /*
1588     + * Go over HT siblings and check whether one of them has set up the
1589     + * shared state pointer already.
1590     + */
1591     + for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
1592     + if (cpu == this_cpu)
1593     + continue;
1594     +
1595     + if (!per_cpu(ssb_state, cpu).shared_state)
1596     + continue;
1597     +
1598     + /* Link it to the state of the sibling: */
1599     + st->shared_state = per_cpu(ssb_state, cpu).shared_state;
1600     + return;
1601     + }
1602     +
1603     + /*
1604     + * First HT sibling to come up on the core. Link shared state of
1605     + * the first HT sibling to itself. The siblings on the same core
1606     + * which come up later will see the shared state pointer and link
1607     + * themselves to the state of this CPU.
1608     + */
1609     + st->shared_state = st;
1610     +}
1611     +
1612     +/*
1613     + * Logic is: First HT sibling enables SSBD for both siblings in the core
1614     + * and the last sibling to disable it disables it for the whole core. This is how
1615     + * MSR_SPEC_CTRL works in "hardware":
1616     + *
1617     + * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
1618     + */
1619     +static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
1620     +{
1621     + struct ssb_state *st = this_cpu_ptr(&ssb_state);
1622     + u64 msr = x86_amd_ls_cfg_base;
1623     +
1624     + if (!static_cpu_has(X86_FEATURE_ZEN)) {
1625     + msr |= ssbd_tif_to_amd_ls_cfg(tifn);
1626     + wrmsrl(MSR_AMD64_LS_CFG, msr);
1627     + return;
1628     + }
1629     +
1630     + if (tifn & _TIF_SSBD) {
1631     + /*
1632     + * Since this can race with prctl(), block reentry on the
1633     + * same CPU.
1634     + */
1635     + if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
1636     + return;
1637     +
1638     + msr |= x86_amd_ls_cfg_ssbd_mask;
1639     +
1640     + raw_spin_lock(&st->shared_state->lock);
1641     + /* First sibling enables SSBD: */
1642     + if (!st->shared_state->disable_state)
1643     + wrmsrl(MSR_AMD64_LS_CFG, msr);
1644     + st->shared_state->disable_state++;
1645     + raw_spin_unlock(&st->shared_state->lock);
1646     + } else {
1647     + if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
1648     + return;
1649     +
1650     + raw_spin_lock(&st->shared_state->lock);
1651     + st->shared_state->disable_state--;
1652     + if (!st->shared_state->disable_state)
1653     + wrmsrl(MSR_AMD64_LS_CFG, msr);
1654     + raw_spin_unlock(&st->shared_state->lock);
1655     + }
1656     +}
1657     +#else
1658     +static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
1659     +{
1660     + u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
1661     +
1662     + wrmsrl(MSR_AMD64_LS_CFG, msr);
1663     +}
1664     +#endif
1665     +
1666     +static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
1667     +{
1668     + /*
1669     + * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
1670     + * so ssbd_tif_to_spec_ctrl() just works.
1671     + */
1672     + wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
1673     +}
1674     +
1675     +static __always_inline void intel_set_ssb_state(unsigned long tifn)
1676     +{
1677     + u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
1678     +
1679     + wrmsrl(MSR_IA32_SPEC_CTRL, msr);
1680     +}
1681     +
1682     +static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
1683     +{
1684     + if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
1685     + amd_set_ssb_virt_state(tifn);
1686     + else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
1687     + amd_set_core_ssb_state(tifn);
1688     + else
1689     + intel_set_ssb_state(tifn);
1690     +}
1691     +
1692     +void speculative_store_bypass_update(unsigned long tif)
1693     +{
1694     + preempt_disable();
1695     + __speculative_store_bypass_update(tif);
1696     + preempt_enable();
1697     +}
1698     +
1699     void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
1700     struct tss_struct *tss)
1701     {
1702     @@ -310,6 +453,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
1703    
1704     if ((tifp ^ tifn) & _TIF_NOCPUID)
1705     set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
1706     +
1707     + if ((tifp ^ tifn) & _TIF_SSBD)
1708     + __speculative_store_bypass_update(tifn);
1709     }
1710    
1711     /*
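
The amd_set_core_ssb_state() path above keeps a per-core count so that the first HT sibling needing SSBD sets the MSR bit and the last sibling to stop needing it clears it again. The stand-alone sketch below models only that first-on/last-off counting with hypothetical names (core_state, thread_set_ssbd); it illustrates the idea and is not the kernel code — the raw_spinlock and per-CPU plumbing are omitted.

/* Stand-alone model of the first-on/last-off counting, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct core_state {
	unsigned int disable_count;	/* how many siblings currently want SSBD */
	bool msr_ssbd_set;		/* models the core-wide LS_CFG SSBD bit */
};

static void thread_set_ssbd(struct core_state *core, bool want_ssbd,
			    bool *thread_has_it)
{
	if (want_ssbd && !*thread_has_it) {
		*thread_has_it = true;
		if (core->disable_count++ == 0)
			core->msr_ssbd_set = true;	/* first sibling enables */
	} else if (!want_ssbd && *thread_has_it) {
		*thread_has_it = false;
		if (--core->disable_count == 0)
			core->msr_ssbd_set = false;	/* last sibling disables */
	}
}

int main(void)
{
	struct core_state core = { 0, false };
	bool t0 = false, t1 = false;

	thread_set_ssbd(&core, true, &t0);	/* bit set, count = 1 */
	thread_set_ssbd(&core, true, &t1);	/* already set, count = 2 */
	thread_set_ssbd(&core, false, &t0);	/* still set, count = 1 */
	thread_set_ssbd(&core, false, &t1);	/* cleared, count = 0 */
	printf("final: count=%u msr=%d\n", core.disable_count, core.msr_ssbd_set);
	return 0;
}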
1712     diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
1713     index 9eb448c7859d..fa093b77689f 100644
1714     --- a/arch/x86/kernel/process_64.c
1715     +++ b/arch/x86/kernel/process_64.c
1716     @@ -528,6 +528,7 @@ void set_personality_64bit(void)
1717     clear_thread_flag(TIF_X32);
1718     /* Pretend that this comes from a 64bit execve */
1719     task_pt_regs(current)->orig_ax = __NR_execve;
1720     + current_thread_info()->status &= ~TS_COMPAT;
1721    
1722     /* Ensure the corresponding mm is not marked. */
1723     if (current->mm)
1724     diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
1725     index 6b841262b790..4a96aa004390 100644
1726     --- a/arch/x86/kernel/smpboot.c
1727     +++ b/arch/x86/kernel/smpboot.c
1728     @@ -77,6 +77,7 @@
1729     #include <asm/i8259.h>
1730     #include <asm/realmode.h>
1731     #include <asm/misc.h>
1732     +#include <asm/spec-ctrl.h>
1733    
1734     /* Number of siblings per CPU package */
1735     int smp_num_siblings = 1;
1736     @@ -245,6 +246,8 @@ static void notrace start_secondary(void *unused)
1737     */
1738     check_tsc_sync_target();
1739    
1740     + speculative_store_bypass_ht_init();
1741     +
1742     /*
1743     * Lock vector_lock and initialize the vectors on this cpu
1744     * before setting the cpu online. We must set it online with
1745     @@ -1349,6 +1352,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1746     set_mtrr_aps_delayed_init();
1747    
1748     smp_quirk_init_udelay();
1749     +
1750     + speculative_store_bypass_ht_init();
1751     }
1752    
1753     void arch_enable_nonboot_cpus_begin(void)
1754     diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
1755     index 4f544f2a7b06..d67e3b31f3db 100644
1756     --- a/arch/x86/kvm/cpuid.c
1757     +++ b/arch/x86/kvm/cpuid.c
1758     @@ -367,7 +367,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1759    
1760     /* cpuid 0x80000008.ebx */
1761     const u32 kvm_cpuid_8000_0008_ebx_x86_features =
1762     - F(IBPB) | F(IBRS);
1763     + F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
1764    
1765     /* cpuid 0xC0000001.edx */
1766     const u32 kvm_cpuid_C000_0001_edx_x86_features =
1767     @@ -394,7 +394,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1768    
1769     /* cpuid 7.0.edx*/
1770     const u32 kvm_cpuid_7_0_edx_x86_features =
1771     - F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
1772     + F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
1773     F(ARCH_CAPABILITIES);
1774    
1775     /* all calls to cpuid_count() should be made on the same cpu */
1776     @@ -632,13 +632,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1777     g_phys_as = phys_as;
1778     entry->eax = g_phys_as | (virt_as << 8);
1779     entry->edx = 0;
1780     - /* IBRS and IBPB aren't necessarily present in hardware cpuid */
1781     - if (boot_cpu_has(X86_FEATURE_IBPB))
1782     - entry->ebx |= F(IBPB);
1783     - if (boot_cpu_has(X86_FEATURE_IBRS))
1784     - entry->ebx |= F(IBRS);
1785     + /*
1786     + * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
1787     + * hardware cpuid
1788     + */
1789     + if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
1790     + entry->ebx |= F(AMD_IBPB);
1791     + if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
1792     + entry->ebx |= F(AMD_IBRS);
1793     + if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
1794     + entry->ebx |= F(VIRT_SSBD);
1795     entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
1796     cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
1797     + if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
1798     + entry->ebx |= F(VIRT_SSBD);
1799     break;
1800     }
1801     case 0x80000019:
1802     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1803     index 9fb0daf628cb..029aa1318874 100644
1804     --- a/arch/x86/kvm/svm.c
1805     +++ b/arch/x86/kvm/svm.c
1806     @@ -46,7 +46,7 @@
1807     #include <asm/kvm_para.h>
1808     #include <asm/irq_remapping.h>
1809     #include <asm/microcode.h>
1810     -#include <asm/nospec-branch.h>
1811     +#include <asm/spec-ctrl.h>
1812    
1813     #include <asm/virtext.h>
1814     #include "trace.h"
1815     @@ -186,6 +186,12 @@ struct vcpu_svm {
1816     } host;
1817    
1818     u64 spec_ctrl;
1819     + /*
1820     + * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
1821     + * translated into the appropriate LS_CFG bits on the host to
1822     + * perform speculative control.
1823     + */
1824     + u64 virt_spec_ctrl;
1825    
1826     u32 *msrpm;
1827    
1828     @@ -1611,6 +1617,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1829     u32 eax = 1;
1830    
1831     svm->spec_ctrl = 0;
1832     + svm->virt_spec_ctrl = 0;
1833    
1834     if (!init_event) {
1835     svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
1836     @@ -3618,11 +3625,18 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1837     break;
1838     case MSR_IA32_SPEC_CTRL:
1839     if (!msr_info->host_initiated &&
1840     - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
1841     + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
1842     return 1;
1843    
1844     msr_info->data = svm->spec_ctrl;
1845     break;
1846     + case MSR_AMD64_VIRT_SPEC_CTRL:
1847     + if (!msr_info->host_initiated &&
1848     + !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
1849     + return 1;
1850     +
1851     + msr_info->data = svm->virt_spec_ctrl;
1852     + break;
1853     case MSR_IA32_UCODE_REV:
1854     msr_info->data = 0x01000065;
1855     break;
1856     @@ -3716,7 +3730,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1857     break;
1858     case MSR_IA32_SPEC_CTRL:
1859     if (!msr->host_initiated &&
1860     - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
1861     + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
1862     return 1;
1863    
1864     /* The STIBP bit doesn't fault even if it's not advertised */
1865     @@ -3743,7 +3757,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1866     break;
1867     case MSR_IA32_PRED_CMD:
1868     if (!msr->host_initiated &&
1869     - !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
1870     + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
1871     return 1;
1872    
1873     if (data & ~PRED_CMD_IBPB)
1874     @@ -3757,6 +3771,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1875     break;
1876     set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
1877     break;
1878     + case MSR_AMD64_VIRT_SPEC_CTRL:
1879     + if (!msr->host_initiated &&
1880     + !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
1881     + return 1;
1882     +
1883     + if (data & ~SPEC_CTRL_SSBD)
1884     + return 1;
1885     +
1886     + svm->virt_spec_ctrl = data;
1887     + break;
1888     case MSR_STAR:
1889     svm->vmcb->save.star = data;
1890     break;
1891     @@ -5015,8 +5039,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1892     * is no need to worry about the conditional branch over the wrmsr
1893     * being speculatively taken.
1894     */
1895     - if (svm->spec_ctrl)
1896     - native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
1897     + x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
1898    
1899     asm volatile (
1900     "push %%" _ASM_BP "; \n\t"
1901     @@ -5110,6 +5133,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1902     #endif
1903     );
1904    
1905     + /* Eliminate branch target predictions from guest mode */
1906     + vmexit_fill_RSB();
1907     +
1908     +#ifdef CONFIG_X86_64
1909     + wrmsrl(MSR_GS_BASE, svm->host.gs_base);
1910     +#else
1911     + loadsegment(fs, svm->host.fs);
1912     +#ifndef CONFIG_X86_32_LAZY_GS
1913     + loadsegment(gs, svm->host.gs);
1914     +#endif
1915     +#endif
1916     +
1917     /*
1918     * We do not use IBRS in the kernel. If this vCPU has used the
1919     * SPEC_CTRL MSR it may have left it on; save the value and
1920     @@ -5128,20 +5163,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1921     if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
1922     svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
1923    
1924     - if (svm->spec_ctrl)
1925     - native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
1926     -
1927     - /* Eliminate branch target predictions from guest mode */
1928     - vmexit_fill_RSB();
1929     -
1930     -#ifdef CONFIG_X86_64
1931     - wrmsrl(MSR_GS_BASE, svm->host.gs_base);
1932     -#else
1933     - loadsegment(fs, svm->host.fs);
1934     -#ifndef CONFIG_X86_32_LAZY_GS
1935     - loadsegment(gs, svm->host.gs);
1936     -#endif
1937     -#endif
1938     + x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
1939    
1940     reload_tss(vcpu);
1941    
1942     @@ -5244,7 +5266,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
1943     return false;
1944     }
1945    
1946     -static bool svm_has_high_real_mode_segbase(void)
1947     +static bool svm_has_emulated_msr(int index)
1948     {
1949     return true;
1950     }
1951     @@ -5551,7 +5573,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
1952     .hardware_enable = svm_hardware_enable,
1953     .hardware_disable = svm_hardware_disable,
1954     .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
1955     - .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
1956     + .has_emulated_msr = svm_has_emulated_msr,
1957    
1958     .vcpu_create = svm_create_vcpu,
1959     .vcpu_free = svm_free_vcpu,
1960     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1961     index bdd84ce4491e..2e63edf8312c 100644
1962     --- a/arch/x86/kvm/vmx.c
1963     +++ b/arch/x86/kvm/vmx.c
1964     @@ -52,7 +52,7 @@
1965     #include <asm/irq_remapping.h>
1966     #include <asm/mmu_context.h>
1967     #include <asm/microcode.h>
1968     -#include <asm/nospec-branch.h>
1969     +#include <asm/spec-ctrl.h>
1970    
1971     #include "trace.h"
1972     #include "pmu.h"
1973     @@ -3293,7 +3293,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1974     break;
1975     case MSR_IA32_SPEC_CTRL:
1976     if (!msr_info->host_initiated &&
1977     - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
1978     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
1979     return 1;
1980    
1981     @@ -3414,12 +3413,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1982     break;
1983     case MSR_IA32_SPEC_CTRL:
1984     if (!msr_info->host_initiated &&
1985     - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
1986     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
1987     return 1;
1988    
1989     /* The STIBP bit doesn't fault even if it's not advertised */
1990     - if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
1991     + if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
1992     return 1;
1993    
1994     vmx->spec_ctrl = data;
1995     @@ -3445,7 +3443,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1996     break;
1997     case MSR_IA32_PRED_CMD:
1998     if (!msr_info->host_initiated &&
1999     - !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) &&
2000     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
2001     return 1;
2002    
2003     @@ -9217,9 +9214,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
2004     }
2005     STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
2006    
2007     -static bool vmx_has_high_real_mode_segbase(void)
2008     +static bool vmx_has_emulated_msr(int index)
2009     {
2010     - return enable_unrestricted_guest || emulate_invalid_guest_state;
2011     + switch (index) {
2012     + case MSR_IA32_SMBASE:
2013     + /*
2014     + * We cannot do SMM unless we can run the guest in big
2015     + * real mode.
2016     + */
2017     + return enable_unrestricted_guest || emulate_invalid_guest_state;
2018     + case MSR_AMD64_VIRT_SPEC_CTRL:
2019     + /* This is AMD only. */
2020     + return false;
2021     + default:
2022     + return true;
2023     + }
2024     }
2025    
2026     static bool vmx_mpx_supported(void)
2027     @@ -9452,10 +9461,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
2028     * is no need to worry about the conditional branch over the wrmsr
2029     * being speculatively taken.
2030     */
2031     - if (vmx->spec_ctrl)
2032     - native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
2033     + x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
2034    
2035     vmx->__launched = vmx->loaded_vmcs->launched;
2036     +
2037     asm(
2038     /* Store host registers */
2039     "push %%" _ASM_DX "; push %%" _ASM_BP ";"
2040     @@ -9591,8 +9600,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
2041     if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
2042     vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
2043    
2044     - if (vmx->spec_ctrl)
2045     - native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
2046     + x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
2047    
2048     /* Eliminate branch target predictions from guest mode */
2049     vmexit_fill_RSB();
2050     @@ -12182,7 +12190,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
2051     .hardware_enable = hardware_enable,
2052     .hardware_disable = hardware_disable,
2053     .cpu_has_accelerated_tpr = report_flexpriority,
2054     - .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
2055     + .has_emulated_msr = vmx_has_emulated_msr,
2056    
2057     .vcpu_create = vmx_create_vcpu,
2058     .vcpu_free = vmx_free_vcpu,
2059     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2060     index 3b2c3aa2cd07..649f476039de 100644
2061     --- a/arch/x86/kvm/x86.c
2062     +++ b/arch/x86/kvm/x86.c
2063     @@ -1036,6 +1036,7 @@ static u32 emulated_msrs[] = {
2064     MSR_IA32_SMBASE,
2065     MSR_PLATFORM_INFO,
2066     MSR_MISC_FEATURES_ENABLES,
2067     + MSR_AMD64_VIRT_SPEC_CTRL,
2068     };
2069    
2070     static unsigned num_emulated_msrs;
2071     @@ -2721,7 +2722,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2072     * fringe case that is not enabled except via specific settings
2073     * of the module parameters.
2074     */
2075     - r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
2076     + r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
2077     break;
2078     case KVM_CAP_VAPIC:
2079     r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2080     @@ -4324,14 +4325,8 @@ static void kvm_init_msr_list(void)
2081     num_msrs_to_save = j;
2082    
2083     for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
2084     - switch (emulated_msrs[i]) {
2085     - case MSR_IA32_SMBASE:
2086     - if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
2087     - continue;
2088     - break;
2089     - default:
2090     - break;
2091     - }
2092     + if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
2093     + continue;
2094    
2095     if (j < i)
2096     emulated_msrs[j] = emulated_msrs[i];
2097     diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
2098     index d7bc0eea20a5..6e98e0a7c923 100644
2099     --- a/arch/x86/mm/pkeys.c
2100     +++ b/arch/x86/mm/pkeys.c
2101     @@ -94,26 +94,27 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
2102     */
2103     if (pkey != -1)
2104     return pkey;
2105     - /*
2106     - * Look for a protection-key-drive execute-only mapping
2107     - * which is now being given permissions that are not
2108     - * execute-only. Move it back to the default pkey.
2109     - */
2110     - if (vma_is_pkey_exec_only(vma) &&
2111     - (prot & (PROT_READ|PROT_WRITE))) {
2112     - return 0;
2113     - }
2114     +
2115     /*
2116     * The mapping is execute-only. Go try to get the
2117     * execute-only protection key. If we fail to do that,
2118     * fall through as if we do not have execute-only
2119     - * support.
2120     + * support in this mm.
2121     */
2122     if (prot == PROT_EXEC) {
2123     pkey = execute_only_pkey(vma->vm_mm);
2124     if (pkey > 0)
2125     return pkey;
2126     + } else if (vma_is_pkey_exec_only(vma)) {
2127     + /*
2128     + * Protections are *not* PROT_EXEC, but the mapping
2129     + * is using the exec-only pkey. This mapping was
2130     + * PROT_EXEC and will no longer be. Move back to
2131     + * the default pkey.
2132     + */
2133     + return ARCH_DEFAULT_PKEY;
2134     }
2135     +
2136     /*
2137     * This is a vanilla, non-pkey mprotect (or we failed to
2138     * setup execute-only), inherit the pkey from the VMA we
2139     diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
2140     index 3e15345abfe7..de0263348f2d 100644
2141     --- a/arch/x86/xen/mmu.c
2142     +++ b/arch/x86/xen/mmu.c
2143     @@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
2144     }
2145     EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
2146    
2147     -static void xen_flush_tlb_all(void)
2148     +static noinline void xen_flush_tlb_all(void)
2149     {
2150     struct mmuext_op *op;
2151     struct multicall_space mcs;
2152    
2153     - trace_xen_mmu_flush_tlb_all(0);
2154     -
2155     preempt_disable();
2156    
2157     mcs = xen_mc_entry(sizeof(*op));
2158     diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
2159     index 042e9c422b21..b3526a98a5a5 100644
2160     --- a/arch/x86/xen/mmu_pv.c
2161     +++ b/arch/x86/xen/mmu_pv.c
2162     @@ -1280,13 +1280,11 @@ unsigned long xen_read_cr2_direct(void)
2163     return this_cpu_read(xen_vcpu_info.arch.cr2);
2164     }
2165    
2166     -static void xen_flush_tlb(void)
2167     +static noinline void xen_flush_tlb(void)
2168     {
2169     struct mmuext_op *op;
2170     struct multicall_space mcs;
2171    
2172     - trace_xen_mmu_flush_tlb(0);
2173     -
2174     preempt_disable();
2175    
2176     mcs = xen_mc_entry(sizeof(*op));
2177     diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
2178     index 825964efda1d..433f14bcab15 100644
2179     --- a/drivers/base/cpu.c
2180     +++ b/drivers/base/cpu.c
2181     @@ -521,14 +521,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
2182     return sprintf(buf, "Not affected\n");
2183     }
2184    
2185     +ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
2186     + struct device_attribute *attr, char *buf)
2187     +{
2188     + return sprintf(buf, "Not affected\n");
2189     +}
2190     +
2191     static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
2192     static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
2193     static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
2194     +static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
2195    
2196     static struct attribute *cpu_root_vulnerabilities_attrs[] = {
2197     &dev_attr_meltdown.attr,
2198     &dev_attr_spectre_v1.attr,
2199     &dev_attr_spectre_v2.attr,
2200     + &dev_attr_spec_store_bypass.attr,
2201     NULL
2202     };
2203    
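
With the weak cpu_show_spec_store_bypass() fallback and the new spec_store_bypass attribute registered above, the mitigation state becomes readable from sysfs. A minimal sketch, equivalent to cat'ing the file (the path is the one added by this patch; the output is whatever string the arch handler, or the weak "Not affected" default, produces):

/* Minimal sketch: print the SSB state exposed by this patch. */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

	if (!f) {
		perror("spec_store_bypass");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("spec_store_bypass: %s", line);	/* e.g. "Not affected" or a mitigation string */
	fclose(f);
	return 0;
}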
2204     diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
2205     index be38ac7050d4..a7b6734bc3c3 100644
2206     --- a/drivers/gpu/drm/drm_drv.c
2207     +++ b/drivers/gpu/drm/drm_drv.c
2208     @@ -749,7 +749,7 @@ static void remove_compat_control_link(struct drm_device *dev)
2209     if (!minor)
2210     return;
2211    
2212     - name = kasprintf(GFP_KERNEL, "controlD%d", minor->index);
2213     + name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
2214     if (!name)
2215     return;
2216    
2217     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
2218     index cc70e2470272..61a2203b75df 100644
2219     --- a/drivers/gpu/drm/i915/i915_reg.h
2220     +++ b/drivers/gpu/drm/i915/i915_reg.h
2221     @@ -7044,6 +7044,9 @@ enum {
2222     #define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
2223     #define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
2224    
2225     +#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
2226     +#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
2227     +
2228     /* WaCatErrorRejectionIssue */
2229     #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
2230     #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
2231     diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
2232     index b6a7e492c1a3..c0e3e2ffb87d 100644
2233     --- a/drivers/gpu/drm/i915/intel_engine_cs.c
2234     +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
2235     @@ -900,6 +900,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
2236     I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
2237     GEN8_LQSC_FLUSH_COHERENT_LINES));
2238    
2239     + /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
2240     + if (IS_GEN9_LP(dev_priv))
2241     + WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
2242     +
2243     /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
2244     ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
2245     if (ret)
2246     diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
2247     index 13e849bf9aa0..4915fa303a7e 100644
2248     --- a/drivers/i2c/busses/i2c-designware-master.c
2249     +++ b/drivers/i2c/busses/i2c-designware-master.c
2250     @@ -207,7 +207,10 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
2251     i2c_dw_disable_int(dev);
2252    
2253     /* Enable the adapter */
2254     - __i2c_dw_enable_and_wait(dev, true);
2255     + __i2c_dw_enable(dev, true);
2256     +
2257     + /* Dummy read to avoid the register getting stuck on Bay Trail */
2258     + dw_readl(dev, DW_IC_ENABLE_STATUS);
2259    
2260     /* Clear and enable interrupts */
2261     dw_readl(dev, DW_IC_CLR_INTR);
2262     diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
2263     index 48b3866a9ded..35286907c636 100644
2264     --- a/drivers/s390/cio/qdio_setup.c
2265     +++ b/drivers/s390/cio/qdio_setup.c
2266     @@ -140,7 +140,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
2267     int i;
2268    
2269     for (i = 0; i < nr_queues; i++) {
2270     - q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
2271     + q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
2272     if (!q)
2273     return -ENOMEM;
2274    
2275     @@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
2276     {
2277     struct ciw *ciw;
2278     struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
2279     - int rc;
2280    
2281     memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
2282     memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
2283     @@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
2284     ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
2285     if (!ciw) {
2286     DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
2287     - rc = -EINVAL;
2288     - goto out_err;
2289     + return -EINVAL;
2290     }
2291     irq_ptr->equeue = *ciw;
2292    
2293     ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
2294     if (!ciw) {
2295     DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
2296     - rc = -EINVAL;
2297     - goto out_err;
2298     + return -EINVAL;
2299     }
2300     irq_ptr->aqueue = *ciw;
2301    
2302     @@ -510,9 +507,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
2303     irq_ptr->orig_handler = init_data->cdev->handler;
2304     init_data->cdev->handler = qdio_int_handler;
2305     return 0;
2306     -out_err:
2307     - qdio_release_memory(irq_ptr);
2308     - return rc;
2309     }
2310    
2311     void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
2312     diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
2313     index 4a39b54732d0..72ce6ad95767 100644
2314     --- a/drivers/s390/cio/vfio_ccw_cp.c
2315     +++ b/drivers/s390/cio/vfio_ccw_cp.c
2316     @@ -703,6 +703,10 @@ void cp_free(struct channel_program *cp)
2317     * and stores the result to ccwchain list. @cp must have been
2318     * initialized by a previous call with cp_init(). Otherwise, undefined
2319     * behavior occurs.
2320     + * For each chain composing the channel program:
2321     + * - On entry ch_len holds the count of CCWs to be translated.
2322     + * - On exit ch_len is adjusted to the count of successfully translated CCWs.
2323     + * This lets cp_free use ch_len as the count of CCWs to free in each chain.
2324     *
2325     * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
2326     * as helpers to do ccw chain translation inside the kernel. Basically
2327     @@ -737,11 +741,18 @@ int cp_prefetch(struct channel_program *cp)
2328     for (idx = 0; idx < len; idx++) {
2329     ret = ccwchain_fetch_one(chain, idx, cp);
2330     if (ret)
2331     - return ret;
2332     + goto out_err;
2333     }
2334     }
2335    
2336     return 0;
2337     +out_err:
2338     + /* Only cleanup the chain elements that were actually translated. */
2339     + chain->ch_len = idx;
2340     + list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
2341     + chain->ch_len = 0;
2342     + }
2343     + return ret;
2344     }
2345    
2346     /**
2347     diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
2348     index a172ab299e80..ff01f865a173 100644
2349     --- a/drivers/spi/spi-bcm-qspi.c
2350     +++ b/drivers/spi/spi-bcm-qspi.c
2351     @@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
2352    
2353     static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
2354     {
2355     - if (!has_bspi(qspi) || (qspi->bspi_enabled))
2356     + if (!has_bspi(qspi))
2357     return;
2358    
2359     qspi->bspi_enabled = 1;
2360     @@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
2361    
2362     static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
2363     {
2364     - if (!has_bspi(qspi) || (!qspi->bspi_enabled))
2365     + if (!has_bspi(qspi))
2366     return;
2367    
2368     qspi->bspi_enabled = 0;
2369     @@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
2370    
2371     static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
2372     {
2373     - u32 data = 0;
2374     + u32 rd = 0;
2375     + u32 wr = 0;
2376    
2377     - if (qspi->curr_cs == cs)
2378     - return;
2379     if (qspi->base[CHIP_SELECT]) {
2380     - data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
2381     - data = (data & ~0xff) | (1 << cs);
2382     - bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
2383     + rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
2384     + wr = (rd & ~0xff) | (1 << cs);
2385     + if (rd == wr)
2386     + return;
2387     + bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
2388     usleep_range(10, 20);
2389     }
2390     +
2391     + dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
2392     qspi->curr_cs = cs;
2393     }
2394    
2395     @@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
2396     dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
2397     }
2398     mspi_cdram = MSPI_CDRAM_CONT_BIT;
2399     - mspi_cdram |= (~(1 << spi->chip_select) &
2400     - MSPI_CDRAM_PCS);
2401     +
2402     + if (has_bspi(qspi))
2403     + mspi_cdram &= ~1;
2404     + else
2405     + mspi_cdram |= (~(1 << spi->chip_select) &
2406     + MSPI_CDRAM_PCS);
2407     +
2408     mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
2409     MSPI_CDRAM_BITSE_BIT);
2410    
2411     diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
2412     index 94f7b0713281..02a8012a318a 100644
2413     --- a/drivers/spi/spi-pxa2xx.h
2414     +++ b/drivers/spi/spi-pxa2xx.h
2415     @@ -38,7 +38,7 @@ struct driver_data {
2416    
2417     /* SSP register addresses */
2418     void __iomem *ioaddr;
2419     - u32 ssdr_physical;
2420     + phys_addr_t ssdr_physical;
2421    
2422     /* SSP masks*/
2423     u32 dma_cr1;
2424     diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
2425     index 4bc7956cefc4..ea3ce4e17b85 100644
2426     --- a/drivers/tee/tee_shm.c
2427     +++ b/drivers/tee/tee_shm.c
2428     @@ -203,9 +203,10 @@ int tee_shm_get_fd(struct tee_shm *shm)
2429     if ((shm->flags & req_flags) != req_flags)
2430     return -EINVAL;
2431    
2432     + get_dma_buf(shm->dmabuf);
2433     fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
2434     - if (fd >= 0)
2435     - get_dma_buf(shm->dmabuf);
2436     + if (fd < 0)
2437     + dma_buf_put(shm->dmabuf);
2438     return fd;
2439     }
2440    
2441     diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
2442     index 910f027773aa..84c0599b45b7 100644
2443     --- a/drivers/usb/usbip/stub.h
2444     +++ b/drivers/usb/usbip/stub.h
2445     @@ -87,6 +87,7 @@ struct bus_id_priv {
2446     struct stub_device *sdev;
2447     struct usb_device *udev;
2448     char shutdown_busid;
2449     + spinlock_t busid_lock;
2450     };
2451    
2452     /* stub_priv is allocated from stub_priv_cache */
2453     @@ -97,6 +98,7 @@ extern struct usb_device_driver stub_driver;
2454    
2455     /* stub_main.c */
2456     struct bus_id_priv *get_busid_priv(const char *busid);
2457     +void put_busid_priv(struct bus_id_priv *bid);
2458     int del_match_busid(char *busid);
2459     void stub_device_cleanup_urbs(struct stub_device *sdev);
2460    
2461     diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
2462     index b8915513fc84..cc847f2edf38 100644
2463     --- a/drivers/usb/usbip/stub_dev.c
2464     +++ b/drivers/usb/usbip/stub_dev.c
2465     @@ -314,9 +314,9 @@ static int stub_probe(struct usb_device *udev)
2466     struct stub_device *sdev = NULL;
2467     const char *udev_busid = dev_name(&udev->dev);
2468     struct bus_id_priv *busid_priv;
2469     - int rc;
2470     + int rc = 0;
2471    
2472     - dev_dbg(&udev->dev, "Enter\n");
2473     + dev_dbg(&udev->dev, "Enter probe\n");
2474    
2475     /* check we should claim or not by busid_table */
2476     busid_priv = get_busid_priv(udev_busid);
2477     @@ -331,13 +331,15 @@ static int stub_probe(struct usb_device *udev)
2478     * other matched drivers by the driver core.
2479     * See driver_probe_device() in driver/base/dd.c
2480     */
2481     - return -ENODEV;
2482     + rc = -ENODEV;
2483     + goto call_put_busid_priv;
2484     }
2485    
2486     if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
2487     dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
2488     udev_busid);
2489     - return -ENODEV;
2490     + rc = -ENODEV;
2491     + goto call_put_busid_priv;
2492     }
2493    
2494     if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
2495     @@ -345,13 +347,16 @@ static int stub_probe(struct usb_device *udev)
2496     "%s is attached on vhci_hcd... skip!\n",
2497     udev_busid);
2498    
2499     - return -ENODEV;
2500     + rc = -ENODEV;
2501     + goto call_put_busid_priv;
2502     }
2503    
2504     /* ok, this is my device */
2505     sdev = stub_device_alloc(udev);
2506     - if (!sdev)
2507     - return -ENOMEM;
2508     + if (!sdev) {
2509     + rc = -ENOMEM;
2510     + goto call_put_busid_priv;
2511     + }
2512    
2513     dev_info(&udev->dev,
2514     "usbip-host: register new device (bus %u dev %u)\n",
2515     @@ -383,7 +388,9 @@ static int stub_probe(struct usb_device *udev)
2516     }
2517     busid_priv->status = STUB_BUSID_ALLOC;
2518    
2519     - return 0;
2520     + rc = 0;
2521     + goto call_put_busid_priv;
2522     +
2523     err_files:
2524     usb_hub_release_port(udev->parent, udev->portnum,
2525     (struct usb_dev_state *) udev);
2526     @@ -393,6 +400,9 @@ static int stub_probe(struct usb_device *udev)
2527    
2528     busid_priv->sdev = NULL;
2529     stub_device_free(sdev);
2530     +
2531     +call_put_busid_priv:
2532     + put_busid_priv(busid_priv);
2533     return rc;
2534     }
2535    
2536     @@ -418,7 +428,7 @@ static void stub_disconnect(struct usb_device *udev)
2537     struct bus_id_priv *busid_priv;
2538     int rc;
2539    
2540     - dev_dbg(&udev->dev, "Enter\n");
2541     + dev_dbg(&udev->dev, "Enter disconnect\n");
2542    
2543     busid_priv = get_busid_priv(udev_busid);
2544     if (!busid_priv) {
2545     @@ -431,7 +441,7 @@ static void stub_disconnect(struct usb_device *udev)
2546     /* get stub_device */
2547     if (!sdev) {
2548     dev_err(&udev->dev, "could not get device");
2549     - return;
2550     + goto call_put_busid_priv;
2551     }
2552    
2553     dev_set_drvdata(&udev->dev, NULL);
2554     @@ -446,12 +456,12 @@ static void stub_disconnect(struct usb_device *udev)
2555     (struct usb_dev_state *) udev);
2556     if (rc) {
2557     dev_dbg(&udev->dev, "unable to release port\n");
2558     - return;
2559     + goto call_put_busid_priv;
2560     }
2561    
2562     /* If usb reset is called from event handler */
2563     if (usbip_in_eh(current))
2564     - return;
2565     + goto call_put_busid_priv;
2566    
2567     /* shutdown the current connection */
2568     shutdown_busid(busid_priv);
2569     @@ -462,12 +472,11 @@ static void stub_disconnect(struct usb_device *udev)
2570     busid_priv->sdev = NULL;
2571     stub_device_free(sdev);
2572    
2573     - if (busid_priv->status == STUB_BUSID_ALLOC) {
2574     + if (busid_priv->status == STUB_BUSID_ALLOC)
2575     busid_priv->status = STUB_BUSID_ADDED;
2576     - } else {
2577     - busid_priv->status = STUB_BUSID_OTHER;
2578     - del_match_busid((char *)udev_busid);
2579     - }
2580     +
2581     +call_put_busid_priv:
2582     + put_busid_priv(busid_priv);
2583     }
2584    
2585     #ifdef CONFIG_PM
2586     diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
2587     index b59a253a8479..108dd65fbfbc 100644
2588     --- a/drivers/usb/usbip/stub_main.c
2589     +++ b/drivers/usb/usbip/stub_main.c
2590     @@ -28,6 +28,7 @@
2591     #define DRIVER_DESC "USB/IP Host Driver"
2592    
2593     struct kmem_cache *stub_priv_cache;
2594     +
2595     /*
2596     * busid_tables defines matching busids that usbip can grab. A user can change
2597     * dynamically what device is locally used and what device is exported to a
2598     @@ -39,6 +40,8 @@ static spinlock_t busid_table_lock;
2599    
2600     static void init_busid_table(void)
2601     {
2602     + int i;
2603     +
2604     /*
2605     * This also sets the bus_table[i].status to
2606     * STUB_BUSID_OTHER, which is 0.
2607     @@ -46,6 +49,9 @@ static void init_busid_table(void)
2608     memset(busid_table, 0, sizeof(busid_table));
2609    
2610     spin_lock_init(&busid_table_lock);
2611     +
2612     + for (i = 0; i < MAX_BUSID; i++)
2613     + spin_lock_init(&busid_table[i].busid_lock);
2614     }
2615    
2616     /*
2617     @@ -57,15 +63,20 @@ static int get_busid_idx(const char *busid)
2618     int i;
2619     int idx = -1;
2620    
2621     - for (i = 0; i < MAX_BUSID; i++)
2622     + for (i = 0; i < MAX_BUSID; i++) {
2623     + spin_lock(&busid_table[i].busid_lock);
2624     if (busid_table[i].name[0])
2625     if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
2626     idx = i;
2627     + spin_unlock(&busid_table[i].busid_lock);
2628     break;
2629     }
2630     + spin_unlock(&busid_table[i].busid_lock);
2631     + }
2632     return idx;
2633     }
2634    
2635     +/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
2636     struct bus_id_priv *get_busid_priv(const char *busid)
2637     {
2638     int idx;
2639     @@ -73,13 +84,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
2640    
2641     spin_lock(&busid_table_lock);
2642     idx = get_busid_idx(busid);
2643     - if (idx >= 0)
2644     + if (idx >= 0) {
2645     bid = &(busid_table[idx]);
2646     + /* get busid_lock before returning */
2647     + spin_lock(&bid->busid_lock);
2648     + }
2649     spin_unlock(&busid_table_lock);
2650    
2651     return bid;
2652     }
2653    
2654     +void put_busid_priv(struct bus_id_priv *bid)
2655     +{
2656     + if (bid)
2657     + spin_unlock(&bid->busid_lock);
2658     +}
2659     +
2660     static int add_match_busid(char *busid)
2661     {
2662     int i;
2663     @@ -92,15 +112,19 @@ static int add_match_busid(char *busid)
2664     goto out;
2665     }
2666    
2667     - for (i = 0; i < MAX_BUSID; i++)
2668     + for (i = 0; i < MAX_BUSID; i++) {
2669     + spin_lock(&busid_table[i].busid_lock);
2670     if (!busid_table[i].name[0]) {
2671     strlcpy(busid_table[i].name, busid, BUSID_SIZE);
2672     if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
2673     (busid_table[i].status != STUB_BUSID_REMOV))
2674     busid_table[i].status = STUB_BUSID_ADDED;
2675     ret = 0;
2676     + spin_unlock(&busid_table[i].busid_lock);
2677     break;
2678     }
2679     + spin_unlock(&busid_table[i].busid_lock);
2680     + }
2681    
2682     out:
2683     spin_unlock(&busid_table_lock);
2684     @@ -121,6 +145,8 @@ int del_match_busid(char *busid)
2685     /* found */
2686     ret = 0;
2687    
2688     + spin_lock(&busid_table[idx].busid_lock);
2689     +
2690     if (busid_table[idx].status == STUB_BUSID_OTHER)
2691     memset(busid_table[idx].name, 0, BUSID_SIZE);
2692    
2693     @@ -128,6 +154,7 @@ int del_match_busid(char *busid)
2694     (busid_table[idx].status != STUB_BUSID_ADDED))
2695     busid_table[idx].status = STUB_BUSID_REMOV;
2696    
2697     + spin_unlock(&busid_table[idx].busid_lock);
2698     out:
2699     spin_unlock(&busid_table_lock);
2700    
2701     @@ -140,9 +167,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf)
2702     char *out = buf;
2703    
2704     spin_lock(&busid_table_lock);
2705     - for (i = 0; i < MAX_BUSID; i++)
2706     + for (i = 0; i < MAX_BUSID; i++) {
2707     + spin_lock(&busid_table[i].busid_lock);
2708     if (busid_table[i].name[0])
2709     out += sprintf(out, "%s ", busid_table[i].name);
2710     + spin_unlock(&busid_table[i].busid_lock);
2711     + }
2712     spin_unlock(&busid_table_lock);
2713     out += sprintf(out, "\n");
2714    
2715     @@ -183,6 +213,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf,
2716     }
2717     static DRIVER_ATTR_RW(match_busid);
2718    
2719     +static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
2720     +{
2721     + int ret;
2722     +
2723     + /* device_attach() callers should hold parent lock for USB */
2724     + if (busid_priv->udev->dev.parent)
2725     + device_lock(busid_priv->udev->dev.parent);
2726     + ret = device_attach(&busid_priv->udev->dev);
2727     + if (busid_priv->udev->dev.parent)
2728     + device_unlock(busid_priv->udev->dev.parent);
2729     + if (ret < 0) {
2730     + dev_err(&busid_priv->udev->dev, "rebind failed\n");
2731     + return ret;
2732     + }
2733     + return 0;
2734     +}
2735     +
2736     +static void stub_device_rebind(void)
2737     +{
2738     +#if IS_MODULE(CONFIG_USBIP_HOST)
2739     + struct bus_id_priv *busid_priv;
2740     + int i;
2741     +
2742     + /* update status to STUB_BUSID_OTHER so probe ignores the device */
2743     + spin_lock(&busid_table_lock);
2744     + for (i = 0; i < MAX_BUSID; i++) {
2745     + if (busid_table[i].name[0] &&
2746     + busid_table[i].shutdown_busid) {
2747     + busid_priv = &(busid_table[i]);
2748     + busid_priv->status = STUB_BUSID_OTHER;
2749     + }
2750     + }
2751     + spin_unlock(&busid_table_lock);
2752     +
2753     + /* now run rebind - no need to hold locks. driver files are removed */
2754     + for (i = 0; i < MAX_BUSID; i++) {
2755     + if (busid_table[i].name[0] &&
2756     + busid_table[i].shutdown_busid) {
2757     + busid_priv = &(busid_table[i]);
2758     + do_rebind(busid_table[i].name, busid_priv);
2759     + }
2760     + }
2761     +#endif
2762     +}
2763     +
2764     static ssize_t rebind_store(struct device_driver *dev, const char *buf,
2765     size_t count)
2766     {
2767     @@ -200,16 +275,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
2768     if (!bid)
2769     return -ENODEV;
2770    
2771     - /* device_attach() callers should hold parent lock for USB */
2772     - if (bid->udev->dev.parent)
2773     - device_lock(bid->udev->dev.parent);
2774     - ret = device_attach(&bid->udev->dev);
2775     - if (bid->udev->dev.parent)
2776     - device_unlock(bid->udev->dev.parent);
2777     - if (ret < 0) {
2778     - dev_err(&bid->udev->dev, "rebind failed\n");
2779     + /* mark the device for deletion so probe ignores it during rescan */
2780     + bid->status = STUB_BUSID_OTHER;
2781     + /* release the busid lock */
2782     + put_busid_priv(bid);
2783     +
2784     + ret = do_rebind((char *) buf, bid);
2785     + if (ret < 0)
2786     return ret;
2787     - }
2788     +
2789     + /* delete device from busid_table */
2790     + del_match_busid((char *) buf);
2791    
2792     return count;
2793     }
2794     @@ -331,6 +407,9 @@ static void __exit usbip_host_exit(void)
2795     */
2796     usb_deregister_device_driver(&stub_driver);
2797    
2798     + /* initiate scan to attach devices */
2799     + stub_device_rebind();
2800     +
2801     kmem_cache_destroy(stub_priv_cache);
2802     }
2803    
2804     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
2805     index 21cc27509993..d1b9900ebc9b 100644
2806     --- a/fs/btrfs/ctree.c
2807     +++ b/fs/btrfs/ctree.c
2808     @@ -2497,10 +2497,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2809     if (p->reada != READA_NONE)
2810     reada_for_search(fs_info, p, level, slot, key->objectid);
2811    
2812     - btrfs_release_path(p);
2813     -
2814     ret = -EAGAIN;
2815     - tmp = read_tree_block(fs_info, blocknr, 0);
2816     + tmp = read_tree_block(fs_info, blocknr, gen);
2817     if (!IS_ERR(tmp)) {
2818     /*
2819     * If the read above didn't mark this buffer up to date,
2820     @@ -2514,6 +2512,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2821     } else {
2822     ret = PTR_ERR(tmp);
2823     }
2824     +
2825     + btrfs_release_path(p);
2826     return ret;
2827     }
2828    
2829     @@ -5454,12 +5454,24 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
2830     down_read(&fs_info->commit_root_sem);
2831     left_level = btrfs_header_level(left_root->commit_root);
2832     left_root_level = left_level;
2833     - left_path->nodes[left_level] = left_root->commit_root;
2834     + left_path->nodes[left_level] =
2835     + btrfs_clone_extent_buffer(left_root->commit_root);
2836     + if (!left_path->nodes[left_level]) {
2837     + up_read(&fs_info->commit_root_sem);
2838     + ret = -ENOMEM;
2839     + goto out;
2840     + }
2841     extent_buffer_get(left_path->nodes[left_level]);
2842    
2843     right_level = btrfs_header_level(right_root->commit_root);
2844     right_root_level = right_level;
2845     - right_path->nodes[right_level] = right_root->commit_root;
2846     + right_path->nodes[right_level] =
2847     + btrfs_clone_extent_buffer(right_root->commit_root);
2848     + if (!right_path->nodes[right_level]) {
2849     + up_read(&fs_info->commit_root_sem);
2850     + ret = -ENOMEM;
2851     + goto out;
2852     + }
2853     extent_buffer_get(right_path->nodes[right_level]);
2854     up_read(&fs_info->commit_root_sem);
2855    
2856     diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
2857     index 8fc690384c58..c44703e21396 100644
2858     --- a/fs/btrfs/ctree.h
2859     +++ b/fs/btrfs/ctree.h
2860     @@ -3150,6 +3150,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
2861     u64 *orig_start, u64 *orig_block_len,
2862     u64 *ram_bytes);
2863    
2864     +void __btrfs_del_delalloc_inode(struct btrfs_root *root,
2865     + struct btrfs_inode *inode);
2866     struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
2867     int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
2868     int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2869     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2870     index 79f0f282a0ef..4a630aeabb10 100644
2871     --- a/fs/btrfs/disk-io.c
2872     +++ b/fs/btrfs/disk-io.c
2873     @@ -3905,6 +3905,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
2874     set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
2875    
2876     btrfs_free_qgroup_config(fs_info);
2877     + ASSERT(list_empty(&fs_info->delalloc_roots));
2878    
2879     if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
2880     btrfs_info(fs_info, "at unmount delalloc count %lld",
2881     @@ -4203,15 +4204,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
2882    
2883     static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
2884     {
2885     + /* cleanup FS via transaction */
2886     + btrfs_cleanup_transaction(fs_info);
2887     +
2888     mutex_lock(&fs_info->cleaner_mutex);
2889     btrfs_run_delayed_iputs(fs_info);
2890     mutex_unlock(&fs_info->cleaner_mutex);
2891    
2892     down_write(&fs_info->cleanup_work_sem);
2893     up_write(&fs_info->cleanup_work_sem);
2894     -
2895     - /* cleanup FS via transaction */
2896     - btrfs_cleanup_transaction(fs_info);
2897     }
2898    
2899     static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
2900     @@ -4334,19 +4335,23 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
2901     list_splice_init(&root->delalloc_inodes, &splice);
2902    
2903     while (!list_empty(&splice)) {
2904     + struct inode *inode = NULL;
2905     btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
2906     delalloc_inodes);
2907     -
2908     - list_del_init(&btrfs_inode->delalloc_inodes);
2909     - clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2910     - &btrfs_inode->runtime_flags);
2911     + __btrfs_del_delalloc_inode(root, btrfs_inode);
2912     spin_unlock(&root->delalloc_lock);
2913    
2914     - btrfs_invalidate_inodes(btrfs_inode->root);
2915     -
2916     + /*
2917     + * Make sure we get a live inode and that it'll not disappear
2918     + * meanwhile.
2919     + */
2920     + inode = igrab(&btrfs_inode->vfs_inode);
2921     + if (inode) {
2922     + invalidate_inode_pages2(inode->i_mapping);
2923     + iput(inode);
2924     + }
2925     spin_lock(&root->delalloc_lock);
2926     }
2927     -
2928     spin_unlock(&root->delalloc_lock);
2929     }
2930    
2931     @@ -4362,7 +4367,6 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
2932     while (!list_empty(&splice)) {
2933     root = list_first_entry(&splice, struct btrfs_root,
2934     delalloc_root);
2935     - list_del_init(&root->delalloc_root);
2936     root = btrfs_grab_fs_root(root);
2937     BUG_ON(!root);
2938     spin_unlock(&fs_info->delalloc_root_lock);
2939     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2940     index 9f21c29d0259..b0fa3a032143 100644
2941     --- a/fs/btrfs/inode.c
2942     +++ b/fs/btrfs/inode.c
2943     @@ -1754,12 +1754,12 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
2944     spin_unlock(&root->delalloc_lock);
2945     }
2946    
2947     -static void btrfs_del_delalloc_inode(struct btrfs_root *root,
2948     - struct btrfs_inode *inode)
2949     +
2950     +void __btrfs_del_delalloc_inode(struct btrfs_root *root,
2951     + struct btrfs_inode *inode)
2952     {
2953     struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
2954    
2955     - spin_lock(&root->delalloc_lock);
2956     if (!list_empty(&inode->delalloc_inodes)) {
2957     list_del_init(&inode->delalloc_inodes);
2958     clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2959     @@ -1772,6 +1772,13 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root,
2960     spin_unlock(&fs_info->delalloc_root_lock);
2961     }
2962     }
2963     +}
2964     +
2965     +static void btrfs_del_delalloc_inode(struct btrfs_root *root,
2966     + struct btrfs_inode *inode)
2967     +{
2968     + spin_lock(&root->delalloc_lock);
2969     + __btrfs_del_delalloc_inode(root, inode);
2970     spin_unlock(&root->delalloc_lock);
2971     }
2972    
2973     diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
2974     index f6a05f836629..cbabc6f2b322 100644
2975     --- a/fs/btrfs/props.c
2976     +++ b/fs/btrfs/props.c
2977     @@ -400,6 +400,7 @@ static int prop_compression_apply(struct inode *inode,
2978     const char *value,
2979     size_t len)
2980     {
2981     + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2982     int type;
2983    
2984     if (len == 0) {
2985     @@ -410,14 +411,17 @@ static int prop_compression_apply(struct inode *inode,
2986     return 0;
2987     }
2988    
2989     - if (!strncmp("lzo", value, 3))
2990     + if (!strncmp("lzo", value, 3)) {
2991     type = BTRFS_COMPRESS_LZO;
2992     - else if (!strncmp("zlib", value, 4))
2993     + btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
2994     + } else if (!strncmp("zlib", value, 4)) {
2995     type = BTRFS_COMPRESS_ZLIB;
2996     - else if (!strncmp("zstd", value, len))
2997     + } else if (!strncmp("zstd", value, len)) {
2998     type = BTRFS_COMPRESS_ZSTD;
2999     - else
3000     + btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
3001     + } else {
3002     return -EINVAL;
3003     + }
3004    
3005     BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
3006     BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
3007     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3008     index b6dfe7af7a1f..2794f3550db6 100644
3009     --- a/fs/btrfs/tree-log.c
3010     +++ b/fs/btrfs/tree-log.c
3011     @@ -4669,6 +4669,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3012     struct extent_map_tree *em_tree = &inode->extent_tree;
3013     u64 logged_isize = 0;
3014     bool need_log_inode_item = true;
3015     + bool xattrs_logged = false;
3016    
3017     path = btrfs_alloc_path();
3018     if (!path)
3019     @@ -4971,6 +4972,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3020     err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
3021     if (err)
3022     goto out_unlock;
3023     + xattrs_logged = true;
3024     if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
3025     btrfs_release_path(path);
3026     btrfs_release_path(dst_path);
3027     @@ -4983,6 +4985,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3028     btrfs_release_path(dst_path);
3029     if (need_log_inode_item) {
3030     err = log_inode_item(trans, log, dst_path, inode);
3031     + if (!err && !xattrs_logged) {
3032     + err = btrfs_log_all_xattrs(trans, root, inode, path,
3033     + dst_path);
3034     + btrfs_release_path(path);
3035     + }
3036     if (err)
3037     goto out_unlock;
3038     }
3039     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3040     index b983e7fb200b..08afafb6ecf7 100644
3041     --- a/fs/btrfs/volumes.c
3042     +++ b/fs/btrfs/volumes.c
3043     @@ -3964,6 +3964,15 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3044     return 0;
3045     }
3046    
3047     + /*
3048     + * A ro->rw remount sequence should continue with the paused balance
3049     + * regardless of who pauses it, system or the user as of now, so set
3050     + * the resume flag.
3051     + */
3052     + spin_lock(&fs_info->balance_lock);
3053     + fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
3054     + spin_unlock(&fs_info->balance_lock);
3055     +
3056     tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3057     return PTR_ERR_OR_ZERO(tsk);
3058     }
3059     diff --git a/fs/proc/array.c b/fs/proc/array.c
3060     index e6094a15ef30..4ac811e1a26c 100644
3061     --- a/fs/proc/array.c
3062     +++ b/fs/proc/array.c
3063     @@ -85,6 +85,7 @@
3064     #include <linux/delayacct.h>
3065     #include <linux/seq_file.h>
3066     #include <linux/pid_namespace.h>
3067     +#include <linux/prctl.h>
3068     #include <linux/ptrace.h>
3069     #include <linux/tracehook.h>
3070     #include <linux/string_helpers.h>
3071     @@ -347,6 +348,30 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
3072     #ifdef CONFIG_SECCOMP
3073     seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
3074     #endif
3075     + seq_printf(m, "\nSpeculation_Store_Bypass:\t");
3076     + switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
3077     + case -EINVAL:
3078     + seq_printf(m, "unknown");
3079     + break;
3080     + case PR_SPEC_NOT_AFFECTED:
3081     + seq_printf(m, "not vulnerable");
3082     + break;
3083     + case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
3084     + seq_printf(m, "thread force mitigated");
3085     + break;
3086     + case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
3087     + seq_printf(m, "thread mitigated");
3088     + break;
3089     + case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
3090     + seq_printf(m, "thread vulnerable");
3091     + break;
3092     + case PR_SPEC_DISABLE:
3093     + seq_printf(m, "globally mitigated");
3094     + break;
3095     + default:
3096     + seq_printf(m, "vulnerable");
3097     + break;
3098     + }
3099     seq_putc(m, '\n');
3100     }
3101    
3102     diff --git a/include/linux/cpu.h b/include/linux/cpu.h
3103     index c816e6f2730c..9546bf2fe310 100644
3104     --- a/include/linux/cpu.h
3105     +++ b/include/linux/cpu.h
3106     @@ -53,6 +53,8 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev,
3107     struct device_attribute *attr, char *buf);
3108     extern ssize_t cpu_show_spectre_v2(struct device *dev,
3109     struct device_attribute *attr, char *buf);
3110     +extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
3111     + struct device_attribute *attr, char *buf);
3112    
3113     extern __printf(4, 5)
3114     struct device *cpu_device_create(struct device *parent, void *drvdata,
3115     diff --git a/include/linux/efi.h b/include/linux/efi.h
3116     index 29fdf8029cf6..b68b7d199fee 100644
3117     --- a/include/linux/efi.h
3118     +++ b/include/linux/efi.h
3119     @@ -395,8 +395,8 @@ typedef struct {
3120     u32 attributes;
3121     u32 get_bar_attributes;
3122     u32 set_bar_attributes;
3123     - uint64_t romsize;
3124     - void *romimage;
3125     + u64 romsize;
3126     + u32 romimage;
3127     } efi_pci_io_protocol_32;
3128    
3129     typedef struct {
3130     @@ -415,8 +415,8 @@ typedef struct {
3131     u64 attributes;
3132     u64 get_bar_attributes;
3133     u64 set_bar_attributes;
3134     - uint64_t romsize;
3135     - void *romimage;
3136     + u64 romsize;
3137     + u64 romimage;
3138     } efi_pci_io_protocol_64;
3139    
3140     typedef struct {
3141     diff --git a/include/linux/nospec.h b/include/linux/nospec.h
3142     index e791ebc65c9c..0c5ef54fd416 100644
3143     --- a/include/linux/nospec.h
3144     +++ b/include/linux/nospec.h
3145     @@ -7,6 +7,8 @@
3146     #define _LINUX_NOSPEC_H
3147     #include <asm/barrier.h>
3148    
3149     +struct task_struct;
3150     +
3151     /**
3152     * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
3153     * @index: array element index
3154     @@ -55,4 +57,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
3155     \
3156     (typeof(_i)) (_i & _mask); \
3157     })
3158     +
3159     +/* Speculation control prctl */
3160     +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
3161     +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
3162     + unsigned long ctrl);
3163     +/* Speculation control for seccomp enforced mitigation */
3164     +void arch_seccomp_spec_mitigate(struct task_struct *task);
3165     +
3166     #endif /* _LINUX_NOSPEC_H */
3167     diff --git a/include/linux/sched.h b/include/linux/sched.h
3168     index 41354690e4e3..2c9790b43881 100644
3169     --- a/include/linux/sched.h
3170     +++ b/include/linux/sched.h
3171     @@ -1363,7 +1363,8 @@ static inline bool is_percpu_thread(void)
3172     #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
3173     #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
3174     #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
3175     -
3176     +#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
3177     +#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
3178    
3179     #define TASK_PFA_TEST(name, func) \
3180     static inline bool task_##func(struct task_struct *p) \
3181     @@ -1388,6 +1389,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
3182     TASK_PFA_SET(SPREAD_SLAB, spread_slab)
3183     TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
3184    
3185     +TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
3186     +TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
3187     +TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
3188     +
3189     +TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
3190     +TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
3191     +
3192     static inline void
3193     current_restore_flags(unsigned long orig_flags, unsigned long flags)
3194     {
3195     diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
3196     index 10f25f7e4304..a9d5c52de4ea 100644
3197     --- a/include/linux/seccomp.h
3198     +++ b/include/linux/seccomp.h
3199     @@ -4,8 +4,9 @@
3200    
3201     #include <uapi/linux/seccomp.h>
3202    
3203     -#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
3204     - SECCOMP_FILTER_FLAG_LOG)
3205     +#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
3206     + SECCOMP_FILTER_FLAG_LOG | \
3207     + SECCOMP_FILTER_FLAG_SPEC_ALLOW)
3208    
3209     #ifdef CONFIG_SECCOMP
3210    
3211     diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
3212     index d791863b62fc..2ec9064a2bb7 100644
3213     --- a/include/trace/events/xen.h
3214     +++ b/include/trace/events/xen.h
3215     @@ -349,22 +349,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
3216     DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
3217     DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
3218    
3219     -TRACE_EVENT(xen_mmu_flush_tlb_all,
3220     - TP_PROTO(int x),
3221     - TP_ARGS(x),
3222     - TP_STRUCT__entry(__array(char, x, 0)),
3223     - TP_fast_assign((void)x),
3224     - TP_printk("%s", "")
3225     - );
3226     -
3227     -TRACE_EVENT(xen_mmu_flush_tlb,
3228     - TP_PROTO(int x),
3229     - TP_ARGS(x),
3230     - TP_STRUCT__entry(__array(char, x, 0)),
3231     - TP_fast_assign((void)x),
3232     - TP_printk("%s", "")
3233     - );
3234     -
3235     TRACE_EVENT(xen_mmu_flush_tlb_one_user,
3236     TP_PROTO(unsigned long addr),
3237     TP_ARGS(addr),
3238     diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
3239     index b640071421f7..3027f943f4b3 100644
3240     --- a/include/uapi/linux/prctl.h
3241     +++ b/include/uapi/linux/prctl.h
3242     @@ -198,4 +198,16 @@ struct prctl_mm_map {
3243     # define PR_CAP_AMBIENT_LOWER 3
3244     # define PR_CAP_AMBIENT_CLEAR_ALL 4
3245    
3246     +/* Per task speculation control */
3247     +#define PR_GET_SPECULATION_CTRL 52
3248     +#define PR_SET_SPECULATION_CTRL 53
3249     +/* Speculation control variants */
3250     +# define PR_SPEC_STORE_BYPASS 0
3251     +/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
3252     +# define PR_SPEC_NOT_AFFECTED 0
3253     +# define PR_SPEC_PRCTL (1UL << 0)
3254     +# define PR_SPEC_ENABLE (1UL << 1)
3255     +# define PR_SPEC_DISABLE (1UL << 2)
3256     +# define PR_SPEC_FORCE_DISABLE (1UL << 3)
3257     +
3258     #endif /* _LINUX_PRCTL_H */
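
For illustration only, not part of the patch: the per-task speculation control added to uapi/linux/prctl.h above is driven through prctl(2). A minimal userspace sketch follows; it assumes a kernel carrying these changes and defines the constants locally in case the installed headers predate them.

/* Illustration only, not part of the patch: exercises the
 * PR_{GET,SET}_SPECULATION_CTRL interface added above. Assumes a
 * kernel built with these changes; constants are defined locally in
 * case the installed uapi headers predate them. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_SPECULATION_CTRL
# define PR_GET_SPECULATION_CTRL 52
# define PR_SET_SPECULATION_CTRL 53
# define PR_SPEC_STORE_BYPASS    0
# define PR_SPEC_PRCTL           (1UL << 0)
# define PR_SPEC_DISABLE         (1UL << 2)
#endif

int main(void)
{
        long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

        if (state < 0) {
                perror("PR_GET_SPECULATION_CTRL");
                return 1;
        }
        printf("SSB control state: 0x%lx\n", state);

        /* PR_SET_SPECULATION_CTRL only succeeds when the kernel chose
         * the per-thread (prctl/seccomp) mitigation mode. */
        if (state & PR_SPEC_PRCTL) {
                if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                          PR_SPEC_DISABLE, 0, 0) == 0)
                        printf("Speculative Store Bypass disabled for this task\n");
                else
                        perror("PR_SET_SPECULATION_CTRL");
        }
        return 0;
}
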
3259     diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
3260     index 2a0bd9dd104d..9efc0e73d50b 100644
3261     --- a/include/uapi/linux/seccomp.h
3262     +++ b/include/uapi/linux/seccomp.h
3263     @@ -17,8 +17,9 @@
3264     #define SECCOMP_GET_ACTION_AVAIL 2
3265    
3266     /* Valid flags for SECCOMP_SET_MODE_FILTER */
3267     -#define SECCOMP_FILTER_FLAG_TSYNC 1
3268     -#define SECCOMP_FILTER_FLAG_LOG 2
3269     +#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
3270     +#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
3271     +#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
3272    
3273     /*
3274     * All BPF programs must return a 32-bit value.
3275     diff --git a/kernel/seccomp.c b/kernel/seccomp.c
3276     index 5f0dfb2abb8d..075e344a87c3 100644
3277     --- a/kernel/seccomp.c
3278     +++ b/kernel/seccomp.c
3279     @@ -19,6 +19,8 @@
3280     #include <linux/compat.h>
3281     #include <linux/coredump.h>
3282     #include <linux/kmemleak.h>
3283     +#include <linux/nospec.h>
3284     +#include <linux/prctl.h>
3285     #include <linux/sched.h>
3286     #include <linux/sched/task_stack.h>
3287     #include <linux/seccomp.h>
3288     @@ -227,8 +229,11 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
3289     return true;
3290     }
3291    
3292     +void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
3293     +
3294     static inline void seccomp_assign_mode(struct task_struct *task,
3295     - unsigned long seccomp_mode)
3296     + unsigned long seccomp_mode,
3297     + unsigned long flags)
3298     {
3299     assert_spin_locked(&task->sighand->siglock);
3300    
3301     @@ -238,6 +243,9 @@ static inline void seccomp_assign_mode(struct task_struct *task,
3302     * filter) is set.
3303     */
3304     smp_mb__before_atomic();
3305     + /* Assume default seccomp processes want spec flaw mitigation. */
3306     + if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
3307     + arch_seccomp_spec_mitigate(task);
3308     set_tsk_thread_flag(task, TIF_SECCOMP);
3309     }
3310    
3311     @@ -305,7 +313,7 @@ static inline pid_t seccomp_can_sync_threads(void)
3312     * without dropping the locks.
3313     *
3314     */
3315     -static inline void seccomp_sync_threads(void)
3316     +static inline void seccomp_sync_threads(unsigned long flags)
3317     {
3318     struct task_struct *thread, *caller;
3319    
3320     @@ -346,7 +354,8 @@ static inline void seccomp_sync_threads(void)
3321     * allow one thread to transition the other.
3322     */
3323     if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
3324     - seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
3325     + seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
3326     + flags);
3327     }
3328     }
3329    
3330     @@ -469,7 +478,7 @@ static long seccomp_attach_filter(unsigned int flags,
3331    
3332     /* Now that the new filter is in place, synchronize to all threads. */
3333     if (flags & SECCOMP_FILTER_FLAG_TSYNC)
3334     - seccomp_sync_threads();
3335     + seccomp_sync_threads(flags);
3336    
3337     return 0;
3338     }
3339     @@ -818,7 +827,7 @@ static long seccomp_set_mode_strict(void)
3340     #ifdef TIF_NOTSC
3341     disable_TSC();
3342     #endif
3343     - seccomp_assign_mode(current, seccomp_mode);
3344     + seccomp_assign_mode(current, seccomp_mode, 0);
3345     ret = 0;
3346    
3347     out:
3348     @@ -876,7 +885,7 @@ static long seccomp_set_mode_filter(unsigned int flags,
3349     /* Do not free the successfully attached filter. */
3350     prepared = NULL;
3351    
3352     - seccomp_assign_mode(current, seccomp_mode);
3353     + seccomp_assign_mode(current, seccomp_mode, flags);
3354     out:
3355     spin_unlock_irq(&current->sighand->siglock);
3356     if (flags & SECCOMP_FILTER_FLAG_TSYNC)
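
For illustration only, not part of the patch: the new SECCOMP_FILTER_FLAG_SPEC_ALLOW flag is passed at filter-install time to skip the implicit SSB mitigation applied in seccomp_assign_mode() above. The sketch below installs a trivial allow-everything filter with that flag; the filter program and constant names are only for demonstration, mirroring what the selftest change later in this patch exercises.

/* Illustration only, not part of the patch: installs a minimal
 * allow-everything seccomp filter while opting out of the implicit
 * SSB mitigation via SECCOMP_FILTER_FLAG_SPEC_ALLOW (added above). */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
# define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif
#ifndef PR_SET_NO_NEW_PRIVS
# define PR_SET_NO_NEW_PRIVS 38
#endif

int main(void)
{
        struct sock_filter filter[] = {
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
                .len = sizeof(filter) / sizeof(filter[0]),
                .filter = filter,
        };

        /* Required before installing a filter without CAP_SYS_ADMIN. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
                perror("PR_SET_NO_NEW_PRIVS");
                return 1;
        }

        if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
                    SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog)) {
                perror("seccomp");
                return 1;
        }
        puts("filter installed, SSB mitigation not forced");
        return 0;
}
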
3357     diff --git a/kernel/sys.c b/kernel/sys.c
3358     index 524a4cb9bbe2..b5c1bc9e3769 100644
3359     --- a/kernel/sys.c
3360     +++ b/kernel/sys.c
3361     @@ -61,6 +61,8 @@
3362     #include <linux/uidgid.h>
3363     #include <linux/cred.h>
3364    
3365     +#include <linux/nospec.h>
3366     +
3367     #include <linux/kmsg_dump.h>
3368     /* Move somewhere else to avoid recompiling? */
3369     #include <generated/utsrelease.h>
3370     @@ -2184,6 +2186,17 @@ static int propagate_has_child_subreaper(struct task_struct *p, void *data)
3371     return 1;
3372     }
3373    
3374     +int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
3375     +{
3376     + return -EINVAL;
3377     +}
3378     +
3379     +int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
3380     + unsigned long ctrl)
3381     +{
3382     + return -EINVAL;
3383     +}
3384     +
3385     SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
3386     unsigned long, arg4, unsigned long, arg5)
3387     {
3388     @@ -2386,6 +2399,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
3389     case PR_GET_FP_MODE:
3390     error = GET_FP_MODE(me);
3391     break;
3392     + case PR_GET_SPECULATION_CTRL:
3393     + if (arg3 || arg4 || arg5)
3394     + return -EINVAL;
3395     + error = arch_prctl_spec_ctrl_get(me, arg2);
3396     + break;
3397     + case PR_SET_SPECULATION_CTRL:
3398     + if (arg4 || arg5)
3399     + return -EINVAL;
3400     + error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
3401     + break;
3402     default:
3403     error = -EINVAL;
3404     break;
3405     diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
3406     index b398c2ea69b2..aa2094d5dd27 100644
3407     --- a/kernel/time/tick-broadcast.c
3408     +++ b/kernel/time/tick-broadcast.c
3409     @@ -612,6 +612,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
3410     now = ktime_get();
3411     /* Find all expired events */
3412     for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
3413     + /*
3414     + * Required for !SMP because for_each_cpu() reports
3415     + * unconditionally CPU0 as set on UP kernels.
3416     + */
3417     + if (!IS_ENABLED(CONFIG_SMP) &&
3418     + cpumask_empty(tick_broadcast_oneshot_mask))
3419     + break;
3420     +
3421     td = &per_cpu(tick_cpu_device, cpu);
3422     if (td->evtdev->next_event <= now) {
3423     cpumask_set_cpu(cpu, tmpmask);
3424     diff --git a/lib/radix-tree.c b/lib/radix-tree.c
3425     index 8b1feca1230a..70d677820740 100644
3426     --- a/lib/radix-tree.c
3427     +++ b/lib/radix-tree.c
3428     @@ -1613,11 +1613,9 @@ static void set_iter_tags(struct radix_tree_iter *iter,
3429     static void __rcu **skip_siblings(struct radix_tree_node **nodep,
3430     void __rcu **slot, struct radix_tree_iter *iter)
3431     {
3432     - void *sib = node_to_entry(slot - 1);
3433     -
3434     while (iter->index < iter->next_index) {
3435     *nodep = rcu_dereference_raw(*slot);
3436     - if (*nodep && *nodep != sib)
3437     + if (*nodep && !is_sibling_entry(iter->node, *nodep))
3438     return slot;
3439     slot++;
3440     iter->index = __radix_tree_iter_add(iter, 1);
3441     @@ -1632,7 +1630,7 @@ void __rcu **__radix_tree_next_slot(void __rcu **slot,
3442     struct radix_tree_iter *iter, unsigned flags)
3443     {
3444     unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
3445     - struct radix_tree_node *node = rcu_dereference_raw(*slot);
3446     + struct radix_tree_node *node;
3447    
3448     slot = skip_siblings(&node, slot, iter);
3449    
3450     diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
3451     index 0ddf293cfac3..0a6f492fb9d9 100644
3452     --- a/lib/test_bitmap.c
3453     +++ b/lib/test_bitmap.c
3454     @@ -434,23 +434,32 @@ static void noinline __init test_mem_optimisations(void)
3455     unsigned int start, nbits;
3456    
3457     for (start = 0; start < 1024; start += 8) {
3458     - memset(bmap1, 0x5a, sizeof(bmap1));
3459     - memset(bmap2, 0x5a, sizeof(bmap2));
3460     for (nbits = 0; nbits < 1024 - start; nbits += 8) {
3461     + memset(bmap1, 0x5a, sizeof(bmap1));
3462     + memset(bmap2, 0x5a, sizeof(bmap2));
3463     +
3464     bitmap_set(bmap1, start, nbits);
3465     __bitmap_set(bmap2, start, nbits);
3466     - if (!bitmap_equal(bmap1, bmap2, 1024))
3467     + if (!bitmap_equal(bmap1, bmap2, 1024)) {
3468     printk("set not equal %d %d\n", start, nbits);
3469     - if (!__bitmap_equal(bmap1, bmap2, 1024))
3470     + failed_tests++;
3471     + }
3472     + if (!__bitmap_equal(bmap1, bmap2, 1024)) {
3473     printk("set not __equal %d %d\n", start, nbits);
3474     + failed_tests++;
3475     + }
3476    
3477     bitmap_clear(bmap1, start, nbits);
3478     __bitmap_clear(bmap2, start, nbits);
3479     - if (!bitmap_equal(bmap1, bmap2, 1024))
3480     + if (!bitmap_equal(bmap1, bmap2, 1024)) {
3481     printk("clear not equal %d %d\n", start, nbits);
3482     - if (!__bitmap_equal(bmap1, bmap2, 1024))
3483     + failed_tests++;
3484     + }
3485     + if (!__bitmap_equal(bmap1, bmap2, 1024)) {
3486     printk("clear not __equal %d %d\n", start,
3487     nbits);
3488     + failed_tests++;
3489     + }
3490     }
3491     }
3492     }
3493     diff --git a/mm/Kconfig b/mm/Kconfig
3494     index 9c4bdddd80c2..59efbd3337e0 100644
3495     --- a/mm/Kconfig
3496     +++ b/mm/Kconfig
3497     @@ -649,6 +649,7 @@ config DEFERRED_STRUCT_PAGE_INIT
3498     depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
3499     depends on NO_BOOTMEM && MEMORY_HOTPLUG
3500     depends on !FLATMEM
3501     + depends on !NEED_PER_CPU_KM
3502     help
3503     Ordinarily all struct pages are initialised during early boot in a
3504     single thread. On very large machines this can take a considerable
3505     diff --git a/net/ipv4/netfilter/nf_socket_ipv4.c b/net/ipv4/netfilter/nf_socket_ipv4.c
3506     index e9293bdebba0..4824b1e183a1 100644
3507     --- a/net/ipv4/netfilter/nf_socket_ipv4.c
3508     +++ b/net/ipv4/netfilter/nf_socket_ipv4.c
3509     @@ -108,10 +108,12 @@ struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb,
3510     int doff = 0;
3511    
3512     if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) {
3513     - struct udphdr _hdr, *hp;
3514     + struct tcphdr _hdr;
3515     + struct udphdr *hp;
3516    
3517     hp = skb_header_pointer(skb, ip_hdrlen(skb),
3518     - sizeof(_hdr), &_hdr);
3519     + iph->protocol == IPPROTO_UDP ?
3520     + sizeof(*hp) : sizeof(_hdr), &_hdr);
3521     if (hp == NULL)
3522     return NULL;
3523    
3524     diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
3525     index ebb2bf84232a..f14de4b6d639 100644
3526     --- a/net/ipv6/netfilter/nf_socket_ipv6.c
3527     +++ b/net/ipv6/netfilter/nf_socket_ipv6.c
3528     @@ -116,9 +116,11 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
3529     }
3530    
3531     if (tproto == IPPROTO_UDP || tproto == IPPROTO_TCP) {
3532     - struct udphdr _hdr, *hp;
3533     + struct tcphdr _hdr;
3534     + struct udphdr *hp;
3535    
3536     - hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
3537     + hp = skb_header_pointer(skb, thoff, tproto == IPPROTO_UDP ?
3538     + sizeof(*hp) : sizeof(_hdr), &_hdr);
3539     if (hp == NULL)
3540     return NULL;
3541    
3542     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
3543     index 5b504aa653f5..689e9c0570ba 100644
3544     --- a/net/netfilter/nf_tables_api.c
3545     +++ b/net/netfilter/nf_tables_api.c
3546     @@ -2344,41 +2344,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
3547     }
3548    
3549     if (nlh->nlmsg_flags & NLM_F_REPLACE) {
3550     - if (nft_is_active_next(net, old_rule)) {
3551     - trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
3552     - old_rule);
3553     - if (trans == NULL) {
3554     - err = -ENOMEM;
3555     - goto err2;
3556     - }
3557     - nft_deactivate_next(net, old_rule);
3558     - chain->use--;
3559     - list_add_tail_rcu(&rule->list, &old_rule->list);
3560     - } else {
3561     + if (!nft_is_active_next(net, old_rule)) {
3562     err = -ENOENT;
3563     goto err2;
3564     }
3565     - } else if (nlh->nlmsg_flags & NLM_F_APPEND)
3566     - if (old_rule)
3567     - list_add_rcu(&rule->list, &old_rule->list);
3568     - else
3569     - list_add_tail_rcu(&rule->list, &chain->rules);
3570     - else {
3571     - if (old_rule)
3572     - list_add_tail_rcu(&rule->list, &old_rule->list);
3573     - else
3574     - list_add_rcu(&rule->list, &chain->rules);
3575     - }
3576     + trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
3577     + old_rule);
3578     + if (trans == NULL) {
3579     + err = -ENOMEM;
3580     + goto err2;
3581     + }
3582     + nft_deactivate_next(net, old_rule);
3583     + chain->use--;
3584    
3585     - if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
3586     - err = -ENOMEM;
3587     - goto err3;
3588     + if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
3589     + err = -ENOMEM;
3590     + goto err2;
3591     + }
3592     +
3593     + list_add_tail_rcu(&rule->list, &old_rule->list);
3594     + } else {
3595     + if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
3596     + err = -ENOMEM;
3597     + goto err2;
3598     + }
3599     +
3600     + if (nlh->nlmsg_flags & NLM_F_APPEND) {
3601     + if (old_rule)
3602     + list_add_rcu(&rule->list, &old_rule->list);
3603     + else
3604     + list_add_tail_rcu(&rule->list, &chain->rules);
3605     + } else {
3606     + if (old_rule)
3607     + list_add_tail_rcu(&rule->list, &old_rule->list);
3608     + else
3609     + list_add_rcu(&rule->list, &chain->rules);
3610     + }
3611     }
3612     chain->use++;
3613     return 0;
3614    
3615     -err3:
3616     - list_del_rcu(&rule->list);
3617     err2:
3618     nf_tables_rule_destroy(&ctx, rule);
3619     err1:
3620     @@ -3196,18 +3201,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
3621    
3622     err = ops->init(set, &desc, nla);
3623     if (err < 0)
3624     - goto err2;
3625     + goto err3;
3626    
3627     err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
3628     if (err < 0)
3629     - goto err3;
3630     + goto err4;
3631    
3632     list_add_tail_rcu(&set->list, &table->sets);
3633     table->use++;
3634     return 0;
3635    
3636     -err3:
3637     +err4:
3638     ops->destroy(set);
3639     +err3:
3640     + kfree(set->name);
3641     err2:
3642     kvfree(set);
3643     err1:
3644     diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
3645     index a848836a5de0..507fd5210c1c 100644
3646     --- a/sound/core/control_compat.c
3647     +++ b/sound/core/control_compat.c
3648     @@ -396,8 +396,7 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
3649     if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
3650     copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
3651     goto error;
3652     - if (get_user(data->owner, &data32->owner) ||
3653     - get_user(data->type, &data32->type))
3654     + if (get_user(data->owner, &data32->owner))
3655     goto error;
3656     switch (data->type) {
3657     case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
3658     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
3659     index 738e1fe90312..62fbdbe74b93 100644
3660     --- a/sound/pci/hda/hda_intel.c
3661     +++ b/sound/pci/hda/hda_intel.c
3662     @@ -2208,6 +2208,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
3663     SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
3664     /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
3665     SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
3666     + /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
3667     + SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
3668     /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
3669     SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
3670     {}
3671     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3672     index 89efec891e68..4d950b7c2f97 100644
3673     --- a/sound/usb/mixer.c
3674     +++ b/sound/usb/mixer.c
3675     @@ -911,6 +911,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
3676     }
3677     break;
3678    
3679     + case USB_ID(0x0d8c, 0x0103):
3680     + if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
3681     + usb_audio_info(chip,
3682     + "set volume quirk for CM102-A+/102S+\n");
3683     + cval->min = -256;
3684     + }
3685     + break;
3686     +
3687     case USB_ID(0x0471, 0x0101):
3688     case USB_ID(0x0471, 0x0104):
3689     case USB_ID(0x0471, 0x0105):
3690     diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
3691     index 0b457e8e0f0c..194759ec9e70 100644
3692     --- a/tools/testing/selftests/seccomp/seccomp_bpf.c
3693     +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
3694     @@ -134,11 +134,15 @@ struct seccomp_data {
3695     #endif
3696    
3697     #ifndef SECCOMP_FILTER_FLAG_TSYNC
3698     -#define SECCOMP_FILTER_FLAG_TSYNC 1
3699     +#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
3700     #endif
3701    
3702     #ifndef SECCOMP_FILTER_FLAG_LOG
3703     -#define SECCOMP_FILTER_FLAG_LOG 2
3704     +#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
3705     +#endif
3706     +
3707     +#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
3708     +#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
3709     #endif
3710    
3711     #ifndef seccomp
3712     @@ -2063,14 +2067,26 @@ TEST(seccomp_syscall_mode_lock)
3713     TEST(detect_seccomp_filter_flags)
3714     {
3715     unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
3716     - SECCOMP_FILTER_FLAG_LOG };
3717     + SECCOMP_FILTER_FLAG_LOG,
3718     + SECCOMP_FILTER_FLAG_SPEC_ALLOW };
3719     unsigned int flag, all_flags;
3720     int i;
3721     long ret;
3722    
3723     /* Test detection of known-good filter flags */
3724     for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
3725     + int bits = 0;
3726     +
3727     flag = flags[i];
3728     + /* Make sure the flag is a single bit! */
3729     + while (flag) {
3730     + if (flag & 0x1)
3731     + bits ++;
3732     + flag >>= 1;
3733     + }
3734     + ASSERT_EQ(1, bits);
3735     + flag = flags[i];
3736     +
3737     ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
3738     ASSERT_NE(ENOSYS, errno) {
3739     TH_LOG("Kernel does not support seccomp syscall!");
3740     diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
3741     index 5a11f4d3972c..d72b8481f250 100644
3742     --- a/virt/kvm/arm/vgic/vgic-its.c
3743     +++ b/virt/kvm/arm/vgic/vgic-its.c
3744     @@ -279,8 +279,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
3745     u8 prop;
3746     int ret;
3747    
3748     - ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
3749     - &prop, 1);
3750     + ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
3751     + &prop, 1);
3752    
3753     if (ret)
3754     return ret;
3755     @@ -413,8 +413,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
3756     * this very same byte in the last iteration. Reuse that.
3757     */
3758     if (byte_offset != last_byte_offset) {
3759     - ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
3760     - &pendmask, 1);
3761     + ret = kvm_read_guest_lock(vcpu->kvm,
3762     + pendbase + byte_offset,
3763     + &pendmask, 1);
3764     if (ret) {
3765     kfree(intids);
3766     return ret;
3767     @@ -740,7 +741,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
3768     return false;
3769    
3770     /* Each 1st level entry is represented by a 64-bit value. */
3771     - if (kvm_read_guest(its->dev->kvm,
3772     + if (kvm_read_guest_lock(its->dev->kvm,
3773     BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
3774     &indirect_ptr, sizeof(indirect_ptr)))
3775     return false;
3776     @@ -1297,8 +1298,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
3777     cbaser = CBASER_ADDRESS(its->cbaser);
3778    
3779     while (its->cwriter != its->creadr) {
3780     - int ret = kvm_read_guest(kvm, cbaser + its->creadr,
3781     - cmd_buf, ITS_CMD_SIZE);
3782     + int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
3783     + cmd_buf, ITS_CMD_SIZE);
3784     /*
3785     * If kvm_read_guest() fails, this could be due to the guest
3786     * programming a bogus value in CBASER or something else going
3787     @@ -1830,7 +1831,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
3788     int next_offset;
3789     size_t byte_offset;
3790    
3791     - ret = kvm_read_guest(kvm, gpa, entry, esz);
3792     + ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
3793     if (ret)
3794     return ret;
3795    
3796     @@ -2191,7 +2192,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
3797     int ret;
3798    
3799     BUG_ON(esz > sizeof(val));
3800     - ret = kvm_read_guest(kvm, gpa, &val, esz);
3801     + ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
3802     if (ret)
3803     return ret;
3804     val = le64_to_cpu(val);
3805     diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
3806     index 9dcc31600a8b..6b4fcd52f14c 100644
3807     --- a/virt/kvm/arm/vgic/vgic-v3.c
3808     +++ b/virt/kvm/arm/vgic/vgic-v3.c
3809     @@ -297,7 +297,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
3810     bit_nr = irq->intid % BITS_PER_BYTE;
3811     ptr = pendbase + byte_offset;
3812    
3813     - ret = kvm_read_guest(kvm, ptr, &val, 1);
3814     + ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
3815     if (ret)
3816     return ret;
3817    
3818     @@ -350,7 +350,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
3819     ptr = pendbase + byte_offset;
3820    
3821     if (byte_offset != last_byte_offset) {
3822     - ret = kvm_read_guest(kvm, ptr, &val, 1);
3823     + ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
3824     if (ret)
3825     return ret;
3826     last_byte_offset = byte_offset;