Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.16/0110-4.16.11-all-fixes.patch

Revision 3115
Wed May 23 09:06:28 2018 UTC by niro
File size: 146860 byte(s)
-linux-4.16.11
1 niro 3115 diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
2     index 4ed63b6cfb15..2e5e0232b8fa 100644
3     --- a/Documentation/ABI/testing/sysfs-devices-system-cpu
4     +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
5     @@ -453,6 +453,7 @@ What: /sys/devices/system/cpu/vulnerabilities
6     /sys/devices/system/cpu/vulnerabilities/meltdown
7     /sys/devices/system/cpu/vulnerabilities/spectre_v1
8     /sys/devices/system/cpu/vulnerabilities/spectre_v2
9     + /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
10     Date: January 2018
11     Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
12     Description: Information about CPU vulnerabilities
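
For reference, a minimal userspace sketch (not part of this patch) showing how the new sysfs entry documented above can be read; it assumes only the file path listed in the hunk:

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

	if (!f)
		return 1;	/* file is absent on kernels without this change */
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* e.g. "Not affected" or "Mitigation: Speculative Store Bypass disabled" */
	fclose(f);
	return 0;
}

The mitigation strings themselves are defined later in this patch in arch/x86/kernel/cpu/bugs.c (ssb_strings[]).
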
13     diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
14     index 1d1d53f85ddd..9824d049367e 100644
15     --- a/Documentation/admin-guide/kernel-parameters.txt
16     +++ b/Documentation/admin-guide/kernel-parameters.txt
17     @@ -2647,6 +2647,9 @@
18     allow data leaks with this option, which is equivalent
19     to spectre_v2=off.
20    
21     + nospec_store_bypass_disable
22     + [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
23     +
24     noxsave [BUGS=X86] Disables x86 extended register state save
25     and restore using xsave. The kernel will fallback to
26     enabling legacy floating-point and sse state.
27     @@ -3997,6 +4000,48 @@
28     Not specifying this option is equivalent to
29     spectre_v2=auto.
30    
31     + spec_store_bypass_disable=
32     + [HW] Control Speculative Store Bypass (SSB) Disable mitigation
33     + (Speculative Store Bypass vulnerability)
34     +
35     + Certain CPUs are vulnerable to an exploit against a
36     + common industry wide performance optimization known
37     + as "Speculative Store Bypass" in which recent stores
38     + to the same memory location may not be observed by
39     + later loads during speculative execution. The idea
40     + is that such stores are unlikely and that they can
41     + be detected prior to instruction retirement at the
42     + end of a particular speculation execution window.
43     +
44     + In vulnerable processors, the speculatively forwarded
45     + store can be used in a cache side channel attack, for
46     + example to read memory to which the attacker does not
47     + directly have access (e.g. inside sandboxed code).
48     +
49     + This parameter controls whether the Speculative Store
50     + Bypass optimization is used.
51     +
52     + on - Unconditionally disable Speculative Store Bypass
53     + off - Unconditionally enable Speculative Store Bypass
54     + auto - Kernel detects whether the CPU model contains an
55     + implementation of Speculative Store Bypass and
56     + picks the most appropriate mitigation. If the
57     + CPU is not vulnerable, "off" is selected. If the
58     + CPU is vulnerable the default mitigation is
59     + architecture and Kconfig dependent. See below.
60     + prctl - Control Speculative Store Bypass per thread
61     + via prctl. Speculative Store Bypass is enabled
62     + for a process by default. The state of the control
63     + is inherited on fork.
64     + seccomp - Same as "prctl" above, but all seccomp threads
65     + will disable SSB unless they explicitly opt out.
66     +
67     + Not specifying this option is equivalent to
68     + spec_store_bypass_disable=auto.
69     +
70     + Default mitigations:
71     + X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
72     +
73     spia_io_base= [HW,MTD]
74     spia_fio_base=
75     spia_pedr=
76     diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
77     index 1814fa13f6ab..fc019df0d863 100644
78     --- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
79     +++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
80     @@ -21,9 +21,10 @@ Required properties:
81     - main controller clock (for both armada-375-pp2 and armada-7k-pp2)
82     - GOP clock (for both armada-375-pp2 and armada-7k-pp2)
83     - MG clock (only for armada-7k-pp2)
84     + - MG Core clock (only for armada-7k-pp2)
85     - AXI clock (only for armada-7k-pp2)
86     -- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk"
87     - and "axi_clk" (the 2 latter only for armada-7k-pp2).
88     +- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk",
89     + "mg_core_clk" and "axi_clk" (the 3 latter only for armada-7k-pp2).
90    
91     The ethernet ports are represented by subnodes. At least one port is
92     required.
93     @@ -80,8 +81,8 @@ cpm_ethernet: ethernet@0 {
94     compatible = "marvell,armada-7k-pp22";
95     reg = <0x0 0x100000>, <0x129000 0xb000>;
96     clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>,
97     - <&cpm_syscon0 1 5>, <&cpm_syscon0 1 18>;
98     - clock-names = "pp_clk", "gop_clk", "gp_clk", "axi_clk";
99     + <&cpm_syscon0 1 5>, <&cpm_syscon0 1 6>, <&cpm_syscon0 1 18>;
100     + clock-names = "pp_clk", "gop_clk", "mg_clk", "mg_core_clk", "axi_clk";
101    
102     eth0: eth0 {
103     interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>,
104     diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
105     index 7b2eb1b7d4ca..a3233da7fa88 100644
106     --- a/Documentation/userspace-api/index.rst
107     +++ b/Documentation/userspace-api/index.rst
108     @@ -19,6 +19,7 @@ place where this information is gathered.
109     no_new_privs
110     seccomp_filter
111     unshare
112     + spec_ctrl
113    
114     .. only:: subproject and html
115    
116     diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
117     new file mode 100644
118     index 000000000000..32f3d55c54b7
119     --- /dev/null
120     +++ b/Documentation/userspace-api/spec_ctrl.rst
121     @@ -0,0 +1,94 @@
122     +===================
123     +Speculation Control
124     +===================
125     +
126     +Quite some CPUs have speculation-related misfeatures which are in
127     +fact vulnerabilities causing data leaks in various forms even across
128     +privilege domains.
129     +
130     +The kernel provides mitigation for such vulnerabilities in various
131     +forms. Some of these mitigations are compile-time configurable and some
132     +can be supplied on the kernel command line.
133     +
134     +There is also a class of mitigations which are very expensive, but they can
135     +be restricted to a certain set of processes or tasks in controlled
136     +environments. The mechanism to control these mitigations is via
137     +:manpage:`prctl(2)`.
138     +
139     +There are two prctl options which are related to this:
140     +
141     + * PR_GET_SPECULATION_CTRL
142     +
143     + * PR_SET_SPECULATION_CTRL
144     +
145     +PR_GET_SPECULATION_CTRL
146     +-----------------------
147     +
148     +PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
149     +which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
150     +the following meaning:
151     +
152     +==== ===================== ===================================================
153     +Bit Define Description
154     +==== ===================== ===================================================
155     +0 PR_SPEC_PRCTL Mitigation can be controlled per task by
156     + PR_SET_SPECULATION_CTRL.
157     +1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
158     + disabled.
159     +2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
160     + enabled.
161     +3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
162     + subsequent prctl(..., PR_SPEC_ENABLE) will fail.
163     +==== ===================== ===================================================
164     +
165     +If all bits are 0 the CPU is not affected by the speculation misfeature.
166     +
167     +If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
168     +available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
169     +misfeature will fail.
170     +
171     +PR_SET_SPECULATION_CTRL
172     +-----------------------
173     +
174     +PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which
175     +is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
176     +in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
177     +PR_SPEC_FORCE_DISABLE.
178     +
179     +Common error codes
180     +------------------
181     +======= =================================================================
182     +Value Meaning
183     +======= =================================================================
184     +EINVAL The prctl is not implemented by the architecture or unused
185     + prctl(2) arguments are not 0.
186     +
187     +ENODEV arg2 selects an unsupported speculation misfeature.
188     +======= =================================================================
189     +
190     +PR_SET_SPECULATION_CTRL error codes
191     +-----------------------------------
192     +======= =================================================================
193     +Value Meaning
194     +======= =================================================================
195     +0 Success
196     +
197     +ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
198     + PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
199     +
200     +ENXIO Control of the selected speculation misfeature is not possible.
201     + See PR_GET_SPECULATION_CTRL.
202     +
203     +EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
204     + tried to enable it again.
205     +======= =================================================================
206     +
207     +Speculation misfeature controls
208     +-------------------------------
209     +- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
210     +
211     + Invocations:
212     + * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
213     + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
214     + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
215     + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
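
Below is a minimal userspace sketch (not part of this patch) of the invocations listed above. It assumes headers that already carry the PR_*_SPECULATION_CTRL and PR_SPEC_* constants introduced by this series; on older userspace the values would have to be defined by hand:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		perror("PR_GET_SPECULATION_CTRL");	/* EINVAL or ENODEV, see the error tables above */
		return 1;
	}

	if (!(state & PR_SPEC_PRCTL)) {
		printf("per-task SSB control not available (state=0x%x)\n", state);
		return 0;
	}

	/* Disable Speculative Store Bypass for this task; the state is inherited on fork. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0) < 0)
		perror("PR_SET_SPECULATION_CTRL");

	return 0;
}
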
216     diff --git a/Makefile b/Makefile
217     index 33f3c94f02ca..79c191442771 100644
218     --- a/Makefile
219     +++ b/Makefile
220     @@ -1,7 +1,7 @@
221     # SPDX-License-Identifier: GPL-2.0
222     VERSION = 4
223     PATCHLEVEL = 16
224     -SUBLEVEL = 10
225     +SUBLEVEL = 11
226     EXTRAVERSION =
227     NAME = Fearless Coyote
228    
229     diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
230     index bc8d4bbd82e2..9342904cccca 100644
231     --- a/arch/arm/include/asm/assembler.h
232     +++ b/arch/arm/include/asm/assembler.h
233     @@ -536,4 +536,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
234     #endif
235     .endm
236    
237     +#ifdef CONFIG_KPROBES
238     +#define _ASM_NOKPROBE(entry) \
239     + .pushsection "_kprobe_blacklist", "aw" ; \
240     + .balign 4 ; \
241     + .long entry; \
242     + .popsection
243     +#else
244     +#define _ASM_NOKPROBE(entry)
245     +#endif
246     +
247     #endif /* __ASM_ASSEMBLER_H__ */
248     diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
249     index de1b919404e4..1f0b07aef85b 100644
250     --- a/arch/arm/include/asm/kvm_mmu.h
251     +++ b/arch/arm/include/asm/kvm_mmu.h
252     @@ -295,6 +295,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
253     return 8;
254     }
255    
256     +/*
257     + * We are not in the kvm->srcu critical section most of the time, so we take
258     + * the SRCU read lock here. Since we copy the data from the user page, we
259     + * can immediately drop the lock again.
260     + */
261     +static inline int kvm_read_guest_lock(struct kvm *kvm,
262     + gpa_t gpa, void *data, unsigned long len)
263     +{
264     + int srcu_idx = srcu_read_lock(&kvm->srcu);
265     + int ret = kvm_read_guest(kvm, gpa, data, len);
266     +
267     + srcu_read_unlock(&kvm->srcu, srcu_idx);
268     +
269     + return ret;
270     +}
271     +
272     static inline void *kvm_get_hyp_vector(void)
273     {
274     return kvm_ksym_ref(__kvm_hyp_vector);
275     diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
276     index 5e3633c24e63..2fe87109ae46 100644
277     --- a/arch/arm/kernel/traps.c
278     +++ b/arch/arm/kernel/traps.c
279     @@ -19,6 +19,7 @@
280     #include <linux/uaccess.h>
281     #include <linux/hardirq.h>
282     #include <linux/kdebug.h>
283     +#include <linux/kprobes.h>
284     #include <linux/module.h>
285     #include <linux/kexec.h>
286     #include <linux/bug.h>
287     @@ -417,7 +418,8 @@ void unregister_undef_hook(struct undef_hook *hook)
288     raw_spin_unlock_irqrestore(&undef_lock, flags);
289     }
290    
291     -static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
292     +static nokprobe_inline
293     +int call_undef_hook(struct pt_regs *regs, unsigned int instr)
294     {
295     struct undef_hook *hook;
296     unsigned long flags;
297     @@ -490,6 +492,7 @@ asmlinkage void do_undefinstr(struct pt_regs *regs)
298    
299     arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
300     }
301     +NOKPROBE_SYMBOL(do_undefinstr)
302    
303     /*
304     * Handle FIQ similarly to NMI on x86 systems.
305     diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
306     index df73914e81c8..746e7801dcdf 100644
307     --- a/arch/arm/lib/getuser.S
308     +++ b/arch/arm/lib/getuser.S
309     @@ -38,6 +38,7 @@ ENTRY(__get_user_1)
310     mov r0, #0
311     ret lr
312     ENDPROC(__get_user_1)
313     +_ASM_NOKPROBE(__get_user_1)
314    
315     ENTRY(__get_user_2)
316     check_uaccess r0, 2, r1, r2, __get_user_bad
317     @@ -58,6 +59,7 @@ rb .req r0
318     mov r0, #0
319     ret lr
320     ENDPROC(__get_user_2)
321     +_ASM_NOKPROBE(__get_user_2)
322    
323     ENTRY(__get_user_4)
324     check_uaccess r0, 4, r1, r2, __get_user_bad
325     @@ -65,6 +67,7 @@ ENTRY(__get_user_4)
326     mov r0, #0
327     ret lr
328     ENDPROC(__get_user_4)
329     +_ASM_NOKPROBE(__get_user_4)
330    
331     ENTRY(__get_user_8)
332     check_uaccess r0, 8, r1, r2, __get_user_bad8
333     @@ -78,6 +81,7 @@ ENTRY(__get_user_8)
334     mov r0, #0
335     ret lr
336     ENDPROC(__get_user_8)
337     +_ASM_NOKPROBE(__get_user_8)
338    
339     #ifdef __ARMEB__
340     ENTRY(__get_user_32t_8)
341     @@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8)
342     mov r0, #0
343     ret lr
344     ENDPROC(__get_user_32t_8)
345     +_ASM_NOKPROBE(__get_user_32t_8)
346    
347     ENTRY(__get_user_64t_1)
348     check_uaccess r0, 1, r1, r2, __get_user_bad8
349     @@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1)
350     mov r0, #0
351     ret lr
352     ENDPROC(__get_user_64t_1)
353     +_ASM_NOKPROBE(__get_user_64t_1)
354    
355     ENTRY(__get_user_64t_2)
356     check_uaccess r0, 2, r1, r2, __get_user_bad8
357     @@ -114,6 +120,7 @@ rb .req r0
358     mov r0, #0
359     ret lr
360     ENDPROC(__get_user_64t_2)
361     +_ASM_NOKPROBE(__get_user_64t_2)
362    
363     ENTRY(__get_user_64t_4)
364     check_uaccess r0, 4, r1, r2, __get_user_bad8
365     @@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4)
366     mov r0, #0
367     ret lr
368     ENDPROC(__get_user_64t_4)
369     +_ASM_NOKPROBE(__get_user_64t_4)
370     #endif
371    
372     __get_user_bad8:
373     @@ -131,6 +139,8 @@ __get_user_bad:
374     ret lr
375     ENDPROC(__get_user_bad)
376     ENDPROC(__get_user_bad8)
377     +_ASM_NOKPROBE(__get_user_bad)
378     +_ASM_NOKPROBE(__get_user_bad8)
379    
380     .pushsection __ex_table, "a"
381     .long 1b, __get_user_bad
382     diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
383     index bcdecc25461b..b2aa9b32bff2 100644
384     --- a/arch/arm/probes/kprobes/opt-arm.c
385     +++ b/arch/arm/probes/kprobes/opt-arm.c
386     @@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
387     {
388     unsigned long flags;
389     struct kprobe *p = &op->kp;
390     - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
391     + struct kprobe_ctlblk *kcb;
392    
393     /* Save skipped registers */
394     regs->ARM_pc = (unsigned long)op->kp.addr;
395     regs->ARM_ORIG_r0 = ~0UL;
396    
397     local_irq_save(flags);
398     + kcb = get_kprobe_ctlblk();
399    
400     if (kprobe_running()) {
401     kprobes_inc_nmissed_count(&op->kp);
402     @@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
403    
404     local_irq_restore(flags);
405     }
406     +NOKPROBE_SYMBOL(optimized_callback)
407    
408     int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
409     {
410     diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
411     index a8af4136dbe7..a97d9245e883 100644
412     --- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
413     +++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
414     @@ -40,9 +40,10 @@
415     compatible = "marvell,armada-7k-pp22";
416     reg = <0x0 0x100000>, <0x129000 0xb000>;
417     clocks = <&CP110_LABEL(clk) 1 3>, <&CP110_LABEL(clk) 1 9>,
418     - <&CP110_LABEL(clk) 1 5>, <&CP110_LABEL(clk) 1 18>;
419     + <&CP110_LABEL(clk) 1 5>, <&CP110_LABEL(clk) 1 6>,
420     + <&CP110_LABEL(clk) 1 18>;
421     clock-names = "pp_clk", "gop_clk",
422     - "mg_clk", "axi_clk";
423     + "mg_clk", "mg_core_clk", "axi_clk";
424     marvell,system-controller = <&CP110_LABEL(syscon0)>;
425     status = "disabled";
426     dma-coherent;
427     @@ -143,6 +144,8 @@
428     #size-cells = <0>;
429     compatible = "marvell,xmdio";
430     reg = <0x12a600 0x10>;
431     + clocks = <&CP110_LABEL(clk) 1 5>,
432     + <&CP110_LABEL(clk) 1 6>, <&CP110_LABEL(clk) 1 18>;
433     status = "disabled";
434     };
435    
436     diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
437     index 7faed6e48b46..c2b7a77702e7 100644
438     --- a/arch/arm64/include/asm/kvm_mmu.h
439     +++ b/arch/arm64/include/asm/kvm_mmu.h
440     @@ -348,6 +348,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
441     return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
442     }
443    
444     +/*
445     + * We are not in the kvm->srcu critical section most of the time, so we take
446     + * the SRCU read lock here. Since we copy the data from the user page, we
447     + * can immediately drop the lock again.
448     + */
449     +static inline int kvm_read_guest_lock(struct kvm *kvm,
450     + gpa_t gpa, void *data, unsigned long len)
451     +{
452     + int srcu_idx = srcu_read_lock(&kvm->srcu);
453     + int ret = kvm_read_guest(kvm, gpa, data, len);
454     +
455     + srcu_read_unlock(&kvm->srcu, srcu_idx);
456     +
457     + return ret;
458     +}
459     +
460     #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
461     #include <asm/mmu.h>
462    
463     diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
464     index 1bceb95f422d..5584247f5029 100644
465     --- a/arch/powerpc/platforms/powernv/opal-nvram.c
466     +++ b/arch/powerpc/platforms/powernv/opal-nvram.c
467     @@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
468     return count;
469     }
470    
471     +/*
472     + * This can be called in the panic path with interrupts off, so use
473     + * mdelay in that case.
474     + */
475     static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
476     {
477     s64 rc = OPAL_BUSY;
478     @@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
479     while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
480     rc = opal_write_nvram(__pa(buf), count, off);
481     if (rc == OPAL_BUSY_EVENT) {
482     - msleep(OPAL_BUSY_DELAY_MS);
483     + if (in_interrupt() || irqs_disabled())
484     + mdelay(OPAL_BUSY_DELAY_MS);
485     + else
486     + msleep(OPAL_BUSY_DELAY_MS);
487     opal_poll_events(NULL);
488     } else if (rc == OPAL_BUSY) {
489     - msleep(OPAL_BUSY_DELAY_MS);
490     + if (in_interrupt() || irqs_disabled())
491     + mdelay(OPAL_BUSY_DELAY_MS);
492     + else
493     + msleep(OPAL_BUSY_DELAY_MS);
494     }
495     }
496    
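
The hunk above open-codes the same "sleep unless atomic" decision twice. Purely as an illustration (a sketch, not part of this patch), the choice could be factored into a hypothetical helper built only from the calls already used in the hunk:

/* Hypothetical helper, illustrative only. */
static void opal_nvram_delay(void)
{
	if (in_interrupt() || irqs_disabled())
		mdelay(OPAL_BUSY_DELAY_MS);	/* panic/atomic path: busy-wait */
	else
		msleep(OPAL_BUSY_DELAY_MS);	/* normal path: allow scheduling */
}
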
497     diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
498     index 94f2099bceb0..3d17c41074ca 100644
499     --- a/arch/s390/kernel/irq.c
500     +++ b/arch/s390/kernel/irq.c
501     @@ -176,10 +176,9 @@ void do_softirq_own_stack(void)
502     new -= STACK_FRAME_OVERHEAD;
503     ((struct stack_frame *) new)->back_chain = old;
504     asm volatile(" la 15,0(%0)\n"
505     - " basr 14,%2\n"
506     + " brasl 14,__do_softirq\n"
507     " la 15,0(%1)\n"
508     - : : "a" (new), "a" (old),
509     - "a" (__do_softirq)
510     + : : "a" (new), "a" (old)
511     : "0", "1", "2", "3", "4", "5", "14",
512     "cc", "memory" );
513     } else {
514     diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
515     index 1c9ddd7aa5ec..0292d68e7dde 100644
516     --- a/arch/s390/kernel/perf_cpum_sf.c
517     +++ b/arch/s390/kernel/perf_cpum_sf.c
518     @@ -753,6 +753,10 @@ static int __hw_perf_event_init(struct perf_event *event)
519     */
520     rate = 0;
521     if (attr->freq) {
522     + if (!attr->sample_freq) {
523     + err = -EINVAL;
524     + goto out;
525     + }
526     rate = freq_to_sample_rate(&si, attr->sample_freq);
527     rate = hw_limit_rate(&si, rate);
528     attr->freq = 0;
529     diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
530     index 886a9115af62..48db9732b684 100644
531     --- a/arch/x86/boot/compressed/eboot.c
532     +++ b/arch/x86/boot/compressed/eboot.c
533     @@ -163,7 +163,8 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom)
534     if (status != EFI_SUCCESS)
535     goto free_struct;
536    
537     - memcpy(rom->romdata, pci->romimage, pci->romsize);
538     + memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
539     + pci->romsize);
540     return status;
541    
542     free_struct:
543     @@ -269,7 +270,8 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom)
544     if (status != EFI_SUCCESS)
545     goto free_struct;
546    
547     - memcpy(rom->romdata, pci->romimage, pci->romsize);
548     + memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
549     + pci->romsize);
550     return status;
551    
552     free_struct:
553     diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
554     index d554c11e01ff..2464ad88287c 100644
555     --- a/arch/x86/include/asm/cpufeatures.h
556     +++ b/arch/x86/include/asm/cpufeatures.h
557     @@ -198,7 +198,6 @@
558     #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
559     #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
560     #define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
561     -
562     #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
563     #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
564     #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
565     @@ -207,13 +206,19 @@
566     #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
567     #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
568     #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
569     -
570     +#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
571     +#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
572     #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
573     #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
574     #define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
575     -
576     #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
577     #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
578     +#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
579     +#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
580     +#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
581     +#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
582     +#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
583     +#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
584    
585     /* Virtualization flags: Linux defined, word 8 */
586     #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
587     @@ -274,9 +279,10 @@
588     #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
589     #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
590     #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
591     -#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
592     -#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
593     -#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
594     +#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
595     +#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
596     +#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
597     +#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
598    
599     /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
600     #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
601     @@ -333,6 +339,7 @@
602     #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
603     #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
604     #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
605     +#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
606    
607     /*
608     * BUG word(s)
609     @@ -362,5 +369,6 @@
610     #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
611     #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
612     #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
613     +#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
614    
615     #endif /* _ASM_X86_CPUFEATURES_H */
616     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
617     index b605a5b6a30c..4b0539a52c4c 100644
618     --- a/arch/x86/include/asm/kvm_host.h
619     +++ b/arch/x86/include/asm/kvm_host.h
620     @@ -933,7 +933,7 @@ struct kvm_x86_ops {
621     int (*hardware_setup)(void); /* __init */
622     void (*hardware_unsetup)(void); /* __exit */
623     bool (*cpu_has_accelerated_tpr)(void);
624     - bool (*cpu_has_high_real_mode_segbase)(void);
625     + bool (*has_emulated_msr)(int index);
626     void (*cpuid_update)(struct kvm_vcpu *vcpu);
627    
628     int (*vm_init)(struct kvm *kvm);
629     diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
630     index 1de72ce514cd..ed97ef3b48a7 100644
631     --- a/arch/x86/include/asm/mmu_context.h
632     +++ b/arch/x86/include/asm/mmu_context.h
633     @@ -192,7 +192,7 @@ static inline int init_new_context(struct task_struct *tsk,
634    
635     #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
636     if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
637     - /* pkey 0 is the default and always allocated */
638     + /* pkey 0 is the default and allocated implicitly */
639     mm->context.pkey_allocation_map = 0x1;
640     /* -1 means unallocated or invalid */
641     mm->context.execute_only_pkey = -1;
642     diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
643     index c9084dedfcfa..1fce70c0f799 100644
644     --- a/arch/x86/include/asm/msr-index.h
645     +++ b/arch/x86/include/asm/msr-index.h
646     @@ -42,6 +42,8 @@
647     #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
648     #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
649     #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
650     +#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
651     +#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
652    
653     #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
654     #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
655     @@ -68,6 +70,11 @@
656     #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
657     #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
658     #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
659     +#define ARCH_CAP_SSB_NO (1 << 4) /*
660     + * Not susceptible to Speculative Store Bypass
661     + * attack, so no Speculative Store Bypass
662     + * control required.
663     + */
664    
665     #define MSR_IA32_BBL_CR_CTL 0x00000119
666     #define MSR_IA32_BBL_CR_CTL3 0x0000011e
667     @@ -340,6 +347,8 @@
668     #define MSR_AMD64_SEV_ENABLED_BIT 0
669     #define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
670    
671     +#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
672     +
673     /* Fam 17h MSRs */
674     #define MSR_F17H_IRPERF 0xc00000e9
675    
676     diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
677     index f928ad9b143f..8b38df98548e 100644
678     --- a/arch/x86/include/asm/nospec-branch.h
679     +++ b/arch/x86/include/asm/nospec-branch.h
680     @@ -217,6 +217,14 @@ enum spectre_v2_mitigation {
681     SPECTRE_V2_IBRS,
682     };
683    
684     +/* The Speculative Store Bypass disable variants */
685     +enum ssb_mitigation {
686     + SPEC_STORE_BYPASS_NONE,
687     + SPEC_STORE_BYPASS_DISABLE,
688     + SPEC_STORE_BYPASS_PRCTL,
689     + SPEC_STORE_BYPASS_SECCOMP,
690     +};
691     +
692     extern char __indirect_thunk_start[];
693     extern char __indirect_thunk_end[];
694    
695     @@ -241,22 +249,27 @@ static inline void vmexit_fill_RSB(void)
696     #endif
697     }
698    
699     -#define alternative_msr_write(_msr, _val, _feature) \
700     - asm volatile(ALTERNATIVE("", \
701     - "movl %[msr], %%ecx\n\t" \
702     - "movl %[val], %%eax\n\t" \
703     - "movl $0, %%edx\n\t" \
704     - "wrmsr", \
705     - _feature) \
706     - : : [msr] "i" (_msr), [val] "i" (_val) \
707     - : "eax", "ecx", "edx", "memory")
708     +static __always_inline
709     +void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
710     +{
711     + asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
712     + : : "c" (msr),
713     + "a" ((u32)val),
714     + "d" ((u32)(val >> 32)),
715     + [feature] "i" (feature)
716     + : "memory");
717     +}
718    
719     static inline void indirect_branch_prediction_barrier(void)
720     {
721     - alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
722     - X86_FEATURE_USE_IBPB);
723     + u64 val = PRED_CMD_IBPB;
724     +
725     + alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
726     }
727    
728     +/* The Intel SPEC CTRL MSR base value cache */
729     +extern u64 x86_spec_ctrl_base;
730     +
731     /*
732     * With retpoline, we must use IBRS to restrict branch prediction
733     * before calling into firmware.
734     @@ -265,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void)
735     */
736     #define firmware_restrict_branch_speculation_start() \
737     do { \
738     + u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
739     + \
740     preempt_disable(); \
741     - alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
742     + alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
743     X86_FEATURE_USE_IBRS_FW); \
744     } while (0)
745    
746     #define firmware_restrict_branch_speculation_end() \
747     do { \
748     - alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
749     + u64 val = x86_spec_ctrl_base; \
750     + \
751     + alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
752     X86_FEATURE_USE_IBRS_FW); \
753     preempt_enable(); \
754     } while (0)
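
For context, a hedged usage sketch of the firmware guards reworked above; some_firmware_call() is a placeholder and not part of this patch:

	firmware_restrict_branch_speculation_start();	/* IBRS set on top of x86_spec_ctrl_base, preemption disabled */
	some_firmware_call();				/* e.g. an EFI runtime service */
	firmware_restrict_branch_speculation_end();	/* x86_spec_ctrl_base restored, preemption enabled */

The change from the old macro is that the value restored afterwards is now the cached x86_spec_ctrl_base rather than a hard-coded 0, so bits such as SSBD set at boot survive the firmware call.
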
755     diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
756     index a0ba1ffda0df..851c04b7a092 100644
757     --- a/arch/x86/include/asm/pkeys.h
758     +++ b/arch/x86/include/asm/pkeys.h
759     @@ -2,6 +2,8 @@
760     #ifndef _ASM_X86_PKEYS_H
761     #define _ASM_X86_PKEYS_H
762    
763     +#define ARCH_DEFAULT_PKEY 0
764     +
765     #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
766    
767     extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
768     @@ -15,7 +17,7 @@ extern int __execute_only_pkey(struct mm_struct *mm);
769     static inline int execute_only_pkey(struct mm_struct *mm)
770     {
771     if (!boot_cpu_has(X86_FEATURE_OSPKE))
772     - return 0;
773     + return ARCH_DEFAULT_PKEY;
774    
775     return __execute_only_pkey(mm);
776     }
777     @@ -49,13 +51,21 @@ bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
778     {
779     /*
780     * "Allocated" pkeys are those that have been returned
781     - * from pkey_alloc(). pkey 0 is special, and never
782     - * returned from pkey_alloc().
783     + * from pkey_alloc() or pkey 0 which is allocated
784     + * implicitly when the mm is created.
785     */
786     - if (pkey <= 0)
787     + if (pkey < 0)
788     return false;
789     if (pkey >= arch_max_pkey())
790     return false;
791     + /*
792     + * The exec-only pkey is set in the allocation map, but
793     + * is not available to any of the user interfaces like
794     + * mprotect_pkey().
795     + */
796     + if (pkey == mm->context.execute_only_pkey)
797     + return false;
798     +
799     return mm_pkey_allocation_map(mm) & (1U << pkey);
800     }
801    
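
A userspace-visible consequence of the rules above, as a sketch (assuming a libc that exposes pkey_alloc()/pkey_free(), e.g. glibc 2.27+): because pkey 0 is implicitly allocated with every mm, pkey_alloc() never hands it out.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	int pkey = pkey_alloc(0, 0);	/* returns a key > 0, or -1 with errno set */

	if (pkey < 0) {
		perror("pkey_alloc");
		return 1;
	}
	printf("allocated pkey %d\n", pkey);	/* never 0; key 0 is the implicit default */
	pkey_free(pkey);
	return 0;
}

Likewise, the execute-only pkey, when in use, is reserved in the allocation map but is never reported as available through these user interfaces, matching the comment added above.
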
802     diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
803     new file mode 100644
804     index 000000000000..ae7c2c5cd7f0
805     --- /dev/null
806     +++ b/arch/x86/include/asm/spec-ctrl.h
807     @@ -0,0 +1,80 @@
808     +/* SPDX-License-Identifier: GPL-2.0 */
809     +#ifndef _ASM_X86_SPECCTRL_H_
810     +#define _ASM_X86_SPECCTRL_H_
811     +
812     +#include <linux/thread_info.h>
813     +#include <asm/nospec-branch.h>
814     +
815     +/*
816     + * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
817     + * the guest has, while on VMEXIT we restore the host view. This
818     + * would be easier if SPEC_CTRL were architecturally maskable or
819     + * shadowable for guests but this is not (currently) the case.
820     + * Takes the guest view of SPEC_CTRL MSR as a parameter and also
821     + * the guest's version of VIRT_SPEC_CTRL, if emulated.
822     + */
823     +extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
824     +
825     +/**
826     + * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
827     + * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
828     + * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
829     + * (may get translated to MSR_AMD64_LS_CFG bits)
830     + *
831     + * Avoids writing to the MSR if the content/bits are the same
832     + */
833     +static inline
834     +void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
835     +{
836     + x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
837     +}
838     +
839     +/**
840     + * x86_spec_ctrl_restore_host - Restore host speculation control registers
841     + * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
842     + * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
843     + * (may get translated to MSR_AMD64_LS_CFG bits)
844     + *
845     + * Avoids writing to the MSR if the content/bits are the same
846     + */
847     +static inline
848     +void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
849     +{
850     + x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
851     +}
852     +
853     +/* AMD specific Speculative Store Bypass MSR data */
854     +extern u64 x86_amd_ls_cfg_base;
855     +extern u64 x86_amd_ls_cfg_ssbd_mask;
856     +
857     +static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
858     +{
859     + BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
860     + return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
861     +}
862     +
863     +static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
864     +{
865     + BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
866     + return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
867     +}
868     +
869     +static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
870     +{
871     + return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
872     +}
873     +
874     +#ifdef CONFIG_SMP
875     +extern void speculative_store_bypass_ht_init(void);
876     +#else
877     +static inline void speculative_store_bypass_ht_init(void) { }
878     +#endif
879     +
880     +extern void speculative_store_bypass_update(unsigned long tif);
881     +
882     +static inline void speculative_store_bypass_update_current(void)
883     +{
884     + speculative_store_bypass_update(current_thread_info()->flags);
885     +}
886     +
887     +#endif
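
As a worked example of the two conversion helpers above (using the values this series defines elsewhere: TIF_SSBD == 5, SPEC_CTRL_SSBD_SHIFT == 2, SPEC_CTRL_SSBD == 1 << 2):

	_TIF_SSBD                       = 1 << 5    = 0x20
	TIF_SSBD - SPEC_CTRL_SSBD_SHIFT = 5 - 2     = 3
	ssbd_tif_to_spec_ctrl(0x20)     = 0x20 >> 3 = 0x04 = SPEC_CTRL_SSBD
	ssbd_spec_ctrl_to_tif(0x04)     = 0x04 << 3 = 0x20 = _TIF_SSBD

so a task's TIF_SSBD flag maps onto the SSBD bit of MSR_IA32_SPEC_CTRL with a single right shift, and back with a left shift.
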
888     diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
889     index a5d9521bb2cb..2ff2a30a264f 100644
890     --- a/arch/x86/include/asm/thread_info.h
891     +++ b/arch/x86/include/asm/thread_info.h
892     @@ -79,6 +79,7 @@ struct thread_info {
893     #define TIF_SIGPENDING 2 /* signal pending */
894     #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
895     #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
896     +#define TIF_SSBD 5 /* Reduced data speculation */
897     #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
898     #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
899     #define TIF_SECCOMP 8 /* secure computing */
900     @@ -105,6 +106,7 @@ struct thread_info {
901     #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
902     #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
903     #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
904     +#define _TIF_SSBD (1 << TIF_SSBD)
905     #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
906     #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
907     #define _TIF_SECCOMP (1 << TIF_SECCOMP)
908     @@ -144,7 +146,7 @@ struct thread_info {
909    
910     /* flags to check in __switch_to() */
911     #define _TIF_WORK_CTXSW \
912     - (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
913     + (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
914    
915     #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
916     #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
917     diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
918     index c88e0b127810..b481b95bd8f6 100644
919     --- a/arch/x86/kernel/amd_nb.c
920     +++ b/arch/x86/kernel/amd_nb.c
921     @@ -14,8 +14,11 @@
922     #include <asm/amd_nb.h>
923    
924     #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
925     +#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
926     #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
927     #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
928     +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
929     +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
930    
931     /* Protect the PCI config register pairs used for SMN and DF indirect access. */
932     static DEFINE_MUTEX(smn_mutex);
933     @@ -24,6 +27,7 @@ static u32 *flush_words;
934    
935     static const struct pci_device_id amd_root_ids[] = {
936     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
937     + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
938     {}
939     };
940    
941     @@ -39,6 +43,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
942     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
943     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
944     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
945     + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
946     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
947     {}
948     };
949     @@ -51,6 +56,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
950     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
951     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
952     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
953     + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
954     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
955     {}
956     };
957     diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
958     index 8b04234e010b..7685444a106b 100644
959     --- a/arch/x86/kernel/apic/x2apic_cluster.c
960     +++ b/arch/x86/kernel/apic/x2apic_cluster.c
961     @@ -116,6 +116,7 @@ static void init_x2apic_ldr(void)
962     goto update;
963     }
964     cmsk = cluster_hotplug_mask;
965     + cmsk->clusterid = cluster;
966     cluster_hotplug_mask = NULL;
967     update:
968     this_cpu_write(cluster_masks, cmsk);
969     diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
970     index f0e6456ca7d3..718fae428124 100644
971     --- a/arch/x86/kernel/cpu/amd.c
972     +++ b/arch/x86/kernel/cpu/amd.c
973     @@ -10,6 +10,7 @@
974     #include <asm/processor.h>
975     #include <asm/apic.h>
976     #include <asm/cpu.h>
977     +#include <asm/spec-ctrl.h>
978     #include <asm/smp.h>
979     #include <asm/pci-direct.h>
980     #include <asm/delay.h>
981     @@ -554,6 +555,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
982     rdmsrl(MSR_FAM10H_NODE_ID, value);
983     nodes_per_socket = ((value >> 3) & 7) + 1;
984     }
985     +
986     + if (c->x86 >= 0x15 && c->x86 <= 0x17) {
987     + unsigned int bit;
988     +
989     + switch (c->x86) {
990     + case 0x15: bit = 54; break;
991     + case 0x16: bit = 33; break;
992     + case 0x17: bit = 10; break;
993     + default: return;
994     + }
995     + /*
996     + * Try to cache the base value so further operations can
997     + * avoid RMW. If that faults, do not enable SSBD.
998     + */
999     + if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
1000     + setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
1001     + setup_force_cpu_cap(X86_FEATURE_SSBD);
1002     + x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
1003     + }
1004     + }
1005     }
1006    
1007     static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
1008     @@ -791,6 +812,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
1009    
1010     static void init_amd_zn(struct cpuinfo_x86 *c)
1011     {
1012     + set_cpu_cap(c, X86_FEATURE_ZEN);
1013     /*
1014     * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
1015     * all up to and including B1.
1016     diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
1017     index bfca937bdcc3..7416fc206b4a 100644
1018     --- a/arch/x86/kernel/cpu/bugs.c
1019     +++ b/arch/x86/kernel/cpu/bugs.c
1020     @@ -12,8 +12,10 @@
1021     #include <linux/utsname.h>
1022     #include <linux/cpu.h>
1023     #include <linux/module.h>
1024     +#include <linux/nospec.h>
1025     +#include <linux/prctl.h>
1026    
1027     -#include <asm/nospec-branch.h>
1028     +#include <asm/spec-ctrl.h>
1029     #include <asm/cmdline.h>
1030     #include <asm/bugs.h>
1031     #include <asm/processor.h>
1032     @@ -27,6 +29,27 @@
1033     #include <asm/intel-family.h>
1034    
1035     static void __init spectre_v2_select_mitigation(void);
1036     +static void __init ssb_select_mitigation(void);
1037     +
1038     +/*
1039     + * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
1040     + * writes to SPEC_CTRL contain whatever reserved bits have been set.
1041     + */
1042     +u64 __ro_after_init x86_spec_ctrl_base;
1043     +EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
1044     +
1045     +/*
1046     + * The vendor and possibly platform specific bits which can be modified in
1047     + * x86_spec_ctrl_base.
1048     + */
1049     +static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
1050     +
1051     +/*
1052     + * AMD specific MSR info for Speculative Store Bypass control.
1053     + * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
1054     + */
1055     +u64 __ro_after_init x86_amd_ls_cfg_base;
1056     +u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
1057    
1058     void __init check_bugs(void)
1059     {
1060     @@ -37,9 +60,27 @@ void __init check_bugs(void)
1061     print_cpu_info(&boot_cpu_data);
1062     }
1063    
1064     + /*
1065     + * Read the SPEC_CTRL MSR to account for reserved bits which may
1066     + * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
1067     + * init code as it is not enumerated and depends on the family.
1068     + */
1069     + if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
1070     + rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1071     +
1072     + /* Allow STIBP in MSR_SPEC_CTRL if supported */
1073     + if (boot_cpu_has(X86_FEATURE_STIBP))
1074     + x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
1075     +
1076     /* Select the proper spectre mitigation before patching alternatives */
1077     spectre_v2_select_mitigation();
1078    
1079     + /*
1080     + * Select proper mitigation for any exposure to the Speculative Store
1081     + * Bypass vulnerability.
1082     + */
1083     + ssb_select_mitigation();
1084     +
1085     #ifdef CONFIG_X86_32
1086     /*
1087     * Check whether we are able to run this kernel safely on SMP.
1088     @@ -93,7 +134,76 @@ static const char *spectre_v2_strings[] = {
1089     #undef pr_fmt
1090     #define pr_fmt(fmt) "Spectre V2 : " fmt
1091    
1092     -static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
1093     +static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
1094     + SPECTRE_V2_NONE;
1095     +
1096     +void
1097     +x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
1098     +{
1099     + u64 msrval, guestval, hostval = x86_spec_ctrl_base;
1100     + struct thread_info *ti = current_thread_info();
1101     +
1102     + /* Is MSR_SPEC_CTRL implemented ? */
1103     + if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
1104     + /*
1105     + * Restrict guest_spec_ctrl to supported values. Clear the
1106     + * modifiable bits in the host base value and or the
1107     + * modifiable bits from the guest value.
1108     + */
1109     + guestval = hostval & ~x86_spec_ctrl_mask;
1110     + guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
1111     +
1112     + /* SSBD controlled in MSR_SPEC_CTRL */
1113     + if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
1114     + hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
1115     +
1116     + if (hostval != guestval) {
1117     + msrval = setguest ? guestval : hostval;
1118     + wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
1119     + }
1120     + }
1121     +
1122     + /*
1123     + * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
1124     + * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
1125     + */
1126     + if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
1127     + !static_cpu_has(X86_FEATURE_VIRT_SSBD))
1128     + return;
1129     +
1130     + /*
1131     + * If the host has SSBD mitigation enabled, force it in the host's
1132     + * virtual MSR value. If it's not permanently enabled, evaluate
1133     + * current's TIF_SSBD thread flag.
1134     + */
1135     + if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
1136     + hostval = SPEC_CTRL_SSBD;
1137     + else
1138     + hostval = ssbd_tif_to_spec_ctrl(ti->flags);
1139     +
1140     + /* Sanitize the guest value */
1141     + guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
1142     +
1143     + if (hostval != guestval) {
1144     + unsigned long tif;
1145     +
1146     + tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
1147     + ssbd_spec_ctrl_to_tif(hostval);
1148     +
1149     + speculative_store_bypass_update(tif);
1150     + }
1151     +}
1152     +EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
1153     +
1154     +static void x86_amd_ssb_disable(void)
1155     +{
1156     + u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
1157     +
1158     + if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
1159     + wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
1160     + else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
1161     + wrmsrl(MSR_AMD64_LS_CFG, msrval);
1162     +}
1163    
1164     #ifdef RETPOLINE
1165     static bool spectre_v2_bad_module;
1166     @@ -312,32 +422,289 @@ static void __init spectre_v2_select_mitigation(void)
1167     }
1168    
1169     #undef pr_fmt
1170     +#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
1171     +
1172     +static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1173     +
1174     +/* The kernel command line selection */
1175     +enum ssb_mitigation_cmd {
1176     + SPEC_STORE_BYPASS_CMD_NONE,
1177     + SPEC_STORE_BYPASS_CMD_AUTO,
1178     + SPEC_STORE_BYPASS_CMD_ON,
1179     + SPEC_STORE_BYPASS_CMD_PRCTL,
1180     + SPEC_STORE_BYPASS_CMD_SECCOMP,
1181     +};
1182     +
1183     +static const char *ssb_strings[] = {
1184     + [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
1185     + [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
1186     + [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
1187     + [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1188     +};
1189     +
1190     +static const struct {
1191     + const char *option;
1192     + enum ssb_mitigation_cmd cmd;
1193     +} ssb_mitigation_options[] = {
1194     + { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
1195     + { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
1196     + { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
1197     + { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
1198     + { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
1199     +};
1200     +
1201     +static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1202     +{
1203     + enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1204     + char arg[20];
1205     + int ret, i;
1206     +
1207     + if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
1208     + return SPEC_STORE_BYPASS_CMD_NONE;
1209     + } else {
1210     + ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1211     + arg, sizeof(arg));
1212     + if (ret < 0)
1213     + return SPEC_STORE_BYPASS_CMD_AUTO;
1214     +
1215     + for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1216     + if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1217     + continue;
1218     +
1219     + cmd = ssb_mitigation_options[i].cmd;
1220     + break;
1221     + }
1222     +
1223     + if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1224     + pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1225     + return SPEC_STORE_BYPASS_CMD_AUTO;
1226     + }
1227     + }
1228     +
1229     + return cmd;
1230     +}
1231     +
1232     +static enum ssb_mitigation __init __ssb_select_mitigation(void)
1233     +{
1234     + enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1235     + enum ssb_mitigation_cmd cmd;
1236     +
1237     + if (!boot_cpu_has(X86_FEATURE_SSBD))
1238     + return mode;
1239     +
1240     + cmd = ssb_parse_cmdline();
1241     + if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1242     + (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1243     + cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1244     + return mode;
1245     +
1246     + switch (cmd) {
1247     + case SPEC_STORE_BYPASS_CMD_AUTO:
1248     + case SPEC_STORE_BYPASS_CMD_SECCOMP:
1249     + /*
1250     + * Choose prctl+seccomp as the default mode if seccomp is
1251     + * enabled.
1252     + */
1253     + if (IS_ENABLED(CONFIG_SECCOMP))
1254     + mode = SPEC_STORE_BYPASS_SECCOMP;
1255     + else
1256     + mode = SPEC_STORE_BYPASS_PRCTL;
1257     + break;
1258     + case SPEC_STORE_BYPASS_CMD_ON:
1259     + mode = SPEC_STORE_BYPASS_DISABLE;
1260     + break;
1261     + case SPEC_STORE_BYPASS_CMD_PRCTL:
1262     + mode = SPEC_STORE_BYPASS_PRCTL;
1263     + break;
1264     + case SPEC_STORE_BYPASS_CMD_NONE:
1265     + break;
1266     + }
1267     +
1268     + /*
1269     + * We have three CPU feature flags that are in play here:
1270     + * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
1271     + * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
1272     + * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
1273     + */
1274     + if (mode == SPEC_STORE_BYPASS_DISABLE) {
1275     + setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1276     + /*
1277     + * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
1278     + * a completely different MSR and bit dependent on family.
1279     + */
1280     + switch (boot_cpu_data.x86_vendor) {
1281     + case X86_VENDOR_INTEL:
1282     + x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1283     + x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
1284     + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1285     + break;
1286     + case X86_VENDOR_AMD:
1287     + x86_amd_ssb_disable();
1288     + break;
1289     + }
1290     + }
1291     +
1292     + return mode;
1293     +}
1294     +
1295     +static void ssb_select_mitigation(void)
1296     +{
1297     + ssb_mode = __ssb_select_mitigation();
1298     +
1299     + if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1300     + pr_info("%s\n", ssb_strings[ssb_mode]);
1301     +}
1302     +
1303     +#undef pr_fmt
1304     +#define pr_fmt(fmt) "Speculation prctl: " fmt
1305     +
1306     +static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
1307     +{
1308     + bool update;
1309     +
1310     + if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
1311     + ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
1312     + return -ENXIO;
1313     +
1314     + switch (ctrl) {
1315     + case PR_SPEC_ENABLE:
1316     + /* If speculation is force disabled, enable is not allowed */
1317     + if (task_spec_ssb_force_disable(task))
1318     + return -EPERM;
1319     + task_clear_spec_ssb_disable(task);
1320     + update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
1321     + break;
1322     + case PR_SPEC_DISABLE:
1323     + task_set_spec_ssb_disable(task);
1324     + update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
1325     + break;
1326     + case PR_SPEC_FORCE_DISABLE:
1327     + task_set_spec_ssb_disable(task);
1328     + task_set_spec_ssb_force_disable(task);
1329     + update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
1330     + break;
1331     + default:
1332     + return -ERANGE;
1333     + }
1334     +
1335     + /*
1336     + * If being set on non-current task, delay setting the CPU
1337     + * mitigation until it is next scheduled.
1338     + */
1339     + if (task == current && update)
1340     + speculative_store_bypass_update_current();
1341     +
1342     + return 0;
1343     +}
1344     +
1345     +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1346     + unsigned long ctrl)
1347     +{
1348     + switch (which) {
1349     + case PR_SPEC_STORE_BYPASS:
1350     + return ssb_prctl_set(task, ctrl);
1351     + default:
1352     + return -ENODEV;
1353     + }
1354     +}
1355     +
1356     +#ifdef CONFIG_SECCOMP
1357     +void arch_seccomp_spec_mitigate(struct task_struct *task)
1358     +{
1359     + if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
1360     + ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1361     +}
1362     +#endif
1363     +
1364     +static int ssb_prctl_get(struct task_struct *task)
1365     +{
1366     + switch (ssb_mode) {
1367     + case SPEC_STORE_BYPASS_DISABLE:
1368     + return PR_SPEC_DISABLE;
1369     + case SPEC_STORE_BYPASS_SECCOMP:
1370     + case SPEC_STORE_BYPASS_PRCTL:
1371     + if (task_spec_ssb_force_disable(task))
1372     + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1373     + if (task_spec_ssb_disable(task))
1374     + return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1375     + return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1376     + default:
1377     + if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1378     + return PR_SPEC_ENABLE;
1379     + return PR_SPEC_NOT_AFFECTED;
1380     + }
1381     +}
1382     +
1383     +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
1384     +{
1385     + switch (which) {
1386     + case PR_SPEC_STORE_BYPASS:
1387     + return ssb_prctl_get(task);
1388     + default:
1389     + return -ENODEV;
1390     + }
1391     +}
1392     +
1393     +void x86_spec_ctrl_setup_ap(void)
1394     +{
1395     + if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
1396     + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1397     +
1398     + if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
1399     + x86_amd_ssb_disable();
1400     +}
1401    
1402     #ifdef CONFIG_SYSFS
1403     -ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1404     +
1405     +static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
1406     + char *buf, unsigned int bug)
1407     {
1408     - if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
1409     + if (!boot_cpu_has_bug(bug))
1410     return sprintf(buf, "Not affected\n");
1411     - if (boot_cpu_has(X86_FEATURE_PTI))
1412     - return sprintf(buf, "Mitigation: PTI\n");
1413     +
1414     + switch (bug) {
1415     + case X86_BUG_CPU_MELTDOWN:
1416     + if (boot_cpu_has(X86_FEATURE_PTI))
1417     + return sprintf(buf, "Mitigation: PTI\n");
1418     +
1419     + break;
1420     +
1421     + case X86_BUG_SPECTRE_V1:
1422     + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1423     +
1424     + case X86_BUG_SPECTRE_V2:
1425     + return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1426     + boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
1427     + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1428     + spectre_v2_module_string());
1429     +
1430     + case X86_BUG_SPEC_STORE_BYPASS:
1431     + return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
1432     +
1433     + default:
1434     + break;
1435     + }
1436     +
1437     return sprintf(buf, "Vulnerable\n");
1438     }
1439    
1440     +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1441     +{
1442     + return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
1443     +}
1444     +
1445     ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
1446     {
1447     - if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1448     - return sprintf(buf, "Not affected\n");
1449     - return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1450     + return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
1451     }
1452    
1453     ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
1454     {
1455     - if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1456     - return sprintf(buf, "Not affected\n");
1457     + return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
1458     +}
1459    
1460     - return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1461     - boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
1462     - boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1463     - spectre_v2_module_string());
1464     +ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
1465     +{
1466     + return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
1467     }
1468     #endif
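
The ssb_prctl_set()/ssb_prctl_get() handlers added above are reached from userspace through prctl(PR_SET_SPECULATION_CTRL, ...) and prctl(PR_GET_SPECULATION_CTRL, ...) with PR_SPEC_STORE_BYPASS: in the prctl and seccomp modes a task can opt into the SSBD mitigation for itself and its children, and a force-disabled task can no longer re-enable speculation (-EPERM). A minimal userspace sketch, assuming uapi headers that carry the PR_SPEC_* definitions from this series:

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        long state;

        /* Query the per-task Speculative Store Bypass state. */
        state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
        if (state < 0) {
                /* EINVAL/ENODEV: kernel or headers without this interface. */
                fprintf(stderr, "PR_GET_SPECULATION_CTRL: %s\n", strerror(errno));
                return 1;
        }
        printf("ssb state: 0x%lx\n", state);

        /* Request the SSBD mitigation for this task and its children. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0) < 0)
                fprintf(stderr, "PR_SPEC_DISABLE: %s\n", strerror(errno));

        return 0;
}

In the default "seccomp" mode, seccomp-confined tasks additionally get PR_SPEC_FORCE_DISABLE applied on their behalf via arch_seccomp_spec_mitigate() above.
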
1469     diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
1470     index 348cf4821240..5f74f94244e1 100644
1471     --- a/arch/x86/kernel/cpu/common.c
1472     +++ b/arch/x86/kernel/cpu/common.c
1473     @@ -757,17 +757,32 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
1474     * and they also have a different bit for STIBP support. Also,
1475     * a hypervisor might have set the individual AMD bits even on
1476     * Intel CPUs, for finer-grained selection of what's available.
1477     - *
1478     - * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
1479     - * features, which are visible in /proc/cpuinfo and used by the
1480     - * kernel. So set those accordingly from the Intel bits.
1481     */
1482     if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
1483     set_cpu_cap(c, X86_FEATURE_IBRS);
1484     set_cpu_cap(c, X86_FEATURE_IBPB);
1485     + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1486     }
1487     +
1488     if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
1489     set_cpu_cap(c, X86_FEATURE_STIBP);
1490     +
1491     + if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
1492     + cpu_has(c, X86_FEATURE_VIRT_SSBD))
1493     + set_cpu_cap(c, X86_FEATURE_SSBD);
1494     +
1495     + if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
1496     + set_cpu_cap(c, X86_FEATURE_IBRS);
1497     + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1498     + }
1499     +
1500     + if (cpu_has(c, X86_FEATURE_AMD_IBPB))
1501     + set_cpu_cap(c, X86_FEATURE_IBPB);
1502     +
1503     + if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
1504     + set_cpu_cap(c, X86_FEATURE_STIBP);
1505     + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1506     + }
1507     }
1508    
1509     void get_cpu_cap(struct cpuinfo_x86 *c)
1510     @@ -918,21 +933,55 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
1511     {}
1512     };
1513    
1514     -static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
1515     +static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
1516     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
1517     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
1518     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
1519     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
1520     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
1521     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
1522     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
1523     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
1524     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
1525     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
1526     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
1527     + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
1528     + { X86_VENDOR_CENTAUR, 5, },
1529     + { X86_VENDOR_INTEL, 5, },
1530     + { X86_VENDOR_NSC, 5, },
1531     + { X86_VENDOR_AMD, 0x12, },
1532     + { X86_VENDOR_AMD, 0x11, },
1533     + { X86_VENDOR_AMD, 0x10, },
1534     + { X86_VENDOR_AMD, 0xf, },
1535     + { X86_VENDOR_ANY, 4, },
1536     + {}
1537     +};
1538     +
1539     +static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1540     {
1541     u64 ia32_cap = 0;
1542    
1543     - if (x86_match_cpu(cpu_no_meltdown))
1544     - return false;
1545     -
1546     if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
1547     rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
1548    
1549     + if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
1550     + !(ia32_cap & ARCH_CAP_SSB_NO))
1551     + setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1552     +
1553     + if (x86_match_cpu(cpu_no_speculation))
1554     + return;
1555     +
1556     + setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1557     + setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1558     +
1559     + if (x86_match_cpu(cpu_no_meltdown))
1560     + return;
1561     +
1562     /* Rogue Data Cache Load? No! */
1563     if (ia32_cap & ARCH_CAP_RDCL_NO)
1564     - return false;
1565     + return;
1566    
1567     - return true;
1568     + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1569     }
1570    
1571     /*
1572     @@ -982,12 +1031,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1573    
1574     setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1575    
1576     - if (!x86_match_cpu(cpu_no_speculation)) {
1577     - if (cpu_vulnerable_to_meltdown(c))
1578     - setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1579     - setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1580     - setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1581     - }
1582     + cpu_set_bug_bits(c);
1583    
1584     fpu__init_system(c);
1585    
1586     @@ -1347,6 +1391,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
1587     #endif
1588     mtrr_ap_init();
1589     validate_apic_and_package_id(c);
1590     + x86_spec_ctrl_setup_ap();
1591     }
1592    
1593     static __init int setup_noclflush(char *arg)
1594     diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
1595     index e806b11a99af..37672d299e35 100644
1596     --- a/arch/x86/kernel/cpu/cpu.h
1597     +++ b/arch/x86/kernel/cpu/cpu.h
1598     @@ -50,4 +50,6 @@ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
1599    
1600     unsigned int aperfmperf_get_khz(int cpu);
1601    
1602     +extern void x86_spec_ctrl_setup_ap(void);
1603     +
1604     #endif /* ARCH_X86_CPU_H */
1605     diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
1606     index c3af167d0a70..c895f38a7a5e 100644
1607     --- a/arch/x86/kernel/cpu/intel.c
1608     +++ b/arch/x86/kernel/cpu/intel.c
1609     @@ -188,7 +188,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
1610     setup_clear_cpu_cap(X86_FEATURE_IBPB);
1611     setup_clear_cpu_cap(X86_FEATURE_STIBP);
1612     setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
1613     + setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
1614     setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
1615     + setup_clear_cpu_cap(X86_FEATURE_SSBD);
1616     + setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
1617     }
1618    
1619     /*
1620     diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
1621     index 03408b942adb..30ca2d1a9231 100644
1622     --- a/arch/x86/kernel/process.c
1623     +++ b/arch/x86/kernel/process.c
1624     @@ -38,6 +38,7 @@
1625     #include <asm/switch_to.h>
1626     #include <asm/desc.h>
1627     #include <asm/prctl.h>
1628     +#include <asm/spec-ctrl.h>
1629    
1630     /*
1631     * per-CPU TSS segments. Threads are completely 'soft' on Linux,
1632     @@ -278,6 +279,148 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
1633     }
1634     }
1635    
1636     +#ifdef CONFIG_SMP
1637     +
1638     +struct ssb_state {
1639     + struct ssb_state *shared_state;
1640     + raw_spinlock_t lock;
1641     + unsigned int disable_state;
1642     + unsigned long local_state;
1643     +};
1644     +
1645     +#define LSTATE_SSB 0
1646     +
1647     +static DEFINE_PER_CPU(struct ssb_state, ssb_state);
1648     +
1649     +void speculative_store_bypass_ht_init(void)
1650     +{
1651     + struct ssb_state *st = this_cpu_ptr(&ssb_state);
1652     + unsigned int this_cpu = smp_processor_id();
1653     + unsigned int cpu;
1654     +
1655     + st->local_state = 0;
1656     +
1657     + /*
1658     + * Shared state setup happens once on the first bringup
1659     + * of the CPU. It's not destroyed on CPU hotunplug.
1660     + */
1661     + if (st->shared_state)
1662     + return;
1663     +
1664     + raw_spin_lock_init(&st->lock);
1665     +
1666     + /*
1667     + * Go over HT siblings and check whether one of them has set up the
1668     + * shared state pointer already.
1669     + */
1670     + for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
1671     + if (cpu == this_cpu)
1672     + continue;
1673     +
1674     + if (!per_cpu(ssb_state, cpu).shared_state)
1675     + continue;
1676     +
1677     + /* Link it to the state of the sibling: */
1678     + st->shared_state = per_cpu(ssb_state, cpu).shared_state;
1679     + return;
1680     + }
1681     +
1682     + /*
1683     + * First HT sibling to come up on the core. Link shared state of
1684     + * the first HT sibling to itself. The siblings on the same core
1685     + * which come up later will see the shared state pointer and link
1686     + * themselves to the state of this CPU.
1687     + */
1688     + st->shared_state = st;
1689     +}
1690     +
1691     +/*
1692     + * Logic is: First HT sibling enables SSBD for both siblings in the core
1693     + * and last sibling to disable it, disables it for the whole core. This how
1694     + * MSR_SPEC_CTRL works in "hardware":
1695     + *
1696     + * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
1697     + */
1698     +static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
1699     +{
1700     + struct ssb_state *st = this_cpu_ptr(&ssb_state);
1701     + u64 msr = x86_amd_ls_cfg_base;
1702     +
1703     + if (!static_cpu_has(X86_FEATURE_ZEN)) {
1704     + msr |= ssbd_tif_to_amd_ls_cfg(tifn);
1705     + wrmsrl(MSR_AMD64_LS_CFG, msr);
1706     + return;
1707     + }
1708     +
1709     + if (tifn & _TIF_SSBD) {
1710     + /*
1711     + * Since this can race with prctl(), block reentry on the
1712     + * same CPU.
1713     + */
1714     + if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
1715     + return;
1716     +
1717     + msr |= x86_amd_ls_cfg_ssbd_mask;
1718     +
1719     + raw_spin_lock(&st->shared_state->lock);
1720     + /* First sibling enables SSBD: */
1721     + if (!st->shared_state->disable_state)
1722     + wrmsrl(MSR_AMD64_LS_CFG, msr);
1723     + st->shared_state->disable_state++;
1724     + raw_spin_unlock(&st->shared_state->lock);
1725     + } else {
1726     + if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
1727     + return;
1728     +
1729     + raw_spin_lock(&st->shared_state->lock);
1730     + st->shared_state->disable_state--;
1731     + if (!st->shared_state->disable_state)
1732     + wrmsrl(MSR_AMD64_LS_CFG, msr);
1733     + raw_spin_unlock(&st->shared_state->lock);
1734     + }
1735     +}
1736     +#else
1737     +static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
1738     +{
1739     + u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
1740     +
1741     + wrmsrl(MSR_AMD64_LS_CFG, msr);
1742     +}
1743     +#endif
1744     +
1745     +static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
1746     +{
1747     + /*
1748     + * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
1749     + * so ssbd_tif_to_spec_ctrl() just works.
1750     + */
1751     + wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
1752     +}
1753     +
1754     +static __always_inline void intel_set_ssb_state(unsigned long tifn)
1755     +{
1756     + u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
1757     +
1758     + wrmsrl(MSR_IA32_SPEC_CTRL, msr);
1759     +}
1760     +
1761     +static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
1762     +{
1763     + if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
1764     + amd_set_ssb_virt_state(tifn);
1765     + else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
1766     + amd_set_core_ssb_state(tifn);
1767     + else
1768     + intel_set_ssb_state(tifn);
1769     +}
1770     +
1771     +void speculative_store_bypass_update(unsigned long tif)
1772     +{
1773     + preempt_disable();
1774     + __speculative_store_bypass_update(tif);
1775     + preempt_enable();
1776     +}
1777     +
1778     void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
1779     struct tss_struct *tss)
1780     {
1781     @@ -309,6 +452,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
1782    
1783     if ((tifp ^ tifn) & _TIF_NOCPUID)
1784     set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
1785     +
1786     + if ((tifp ^ tifn) & _TIF_SSBD)
1787     + __speculative_store_bypass_update(tifn);
1788     }
1789    
1790     /*
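
On AMD Zen parts the LS_CFG SSBD bit is shared between HT siblings, so amd_set_core_ssb_state() above keeps a per-core count under shared_state->lock: the first sibling that sets TIF_SSBD writes the MSR, and only the last one to clear it writes it back. A standalone model of that counting scheme (hypothetical names, no locking, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the shared per-core state. */
struct core_state {
        int disable_count;      /* how many siblings currently want SSBD */
        bool msr_ssbd_set;      /* stands in for the LS_CFG SSBD bit */
};

/* One 'local' flag per HT sibling, like the per-CPU local_state bit. */
static void sibling_update(struct core_state *core, bool *local, bool want_ssbd)
{
        if (want_ssbd) {
                if (*local)                     /* already accounted, block reentry */
                        return;
                *local = true;
                if (core->disable_count++ == 0)
                        core->msr_ssbd_set = true;      /* first sibling: write MSR */
        } else {
                if (!*local)
                        return;
                *local = false;
                if (--core->disable_count == 0)
                        core->msr_ssbd_set = false;     /* last sibling: clear MSR */
        }
}

int main(void)
{
        struct core_state core = { 0, false };
        bool t0 = false, t1 = false;

        sibling_update(&core, &t0, true);       /* thread 0 enables -> MSR set */
        sibling_update(&core, &t1, true);       /* thread 1 enables -> no MSR write */
        sibling_update(&core, &t0, false);      /* thread 0 disables -> MSR stays set */
        sibling_update(&core, &t1, false);      /* thread 1 disables -> MSR cleared */
        printf("ssbd set: %d, count: %d\n", core.msr_ssbd_set, core.disable_count);
        return 0;
}
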
1791     diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
1792     index 9eb448c7859d..fa093b77689f 100644
1793     --- a/arch/x86/kernel/process_64.c
1794     +++ b/arch/x86/kernel/process_64.c
1795     @@ -528,6 +528,7 @@ void set_personality_64bit(void)
1796     clear_thread_flag(TIF_X32);
1797     /* Pretend that this comes from a 64bit execve */
1798     task_pt_regs(current)->orig_ax = __NR_execve;
1799     + current_thread_info()->status &= ~TS_COMPAT;
1800    
1801     /* Ensure the corresponding mm is not marked. */
1802     if (current->mm)
1803     diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
1804     index 12599e55e040..d50bc80f5172 100644
1805     --- a/arch/x86/kernel/smpboot.c
1806     +++ b/arch/x86/kernel/smpboot.c
1807     @@ -77,6 +77,7 @@
1808     #include <asm/i8259.h>
1809     #include <asm/misc.h>
1810     #include <asm/qspinlock.h>
1811     +#include <asm/spec-ctrl.h>
1812    
1813     /* Number of siblings per CPU package */
1814     int smp_num_siblings = 1;
1815     @@ -242,6 +243,8 @@ static void notrace start_secondary(void *unused)
1816     */
1817     check_tsc_sync_target();
1818    
1819     + speculative_store_bypass_ht_init();
1820     +
1821     /*
1822     * Lock vector_lock, set CPU online and bring the vector
1823     * allocator online. Online must be set with vector_lock held
1824     @@ -1257,6 +1260,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1825     set_mtrr_aps_delayed_init();
1826    
1827     smp_quirk_init_udelay();
1828     +
1829     + speculative_store_bypass_ht_init();
1830     }
1831    
1832     void arch_enable_nonboot_cpus_begin(void)
1833     diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
1834     index b671fc2d0422..3f400004f602 100644
1835     --- a/arch/x86/kvm/cpuid.c
1836     +++ b/arch/x86/kvm/cpuid.c
1837     @@ -374,7 +374,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1838    
1839     /* cpuid 0x80000008.ebx */
1840     const u32 kvm_cpuid_8000_0008_ebx_x86_features =
1841     - F(IBPB) | F(IBRS);
1842     + F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
1843    
1844     /* cpuid 0xC0000001.edx */
1845     const u32 kvm_cpuid_C000_0001_edx_x86_features =
1846     @@ -402,7 +402,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1847    
1848     /* cpuid 7.0.edx*/
1849     const u32 kvm_cpuid_7_0_edx_x86_features =
1850     - F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
1851     + F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
1852     F(ARCH_CAPABILITIES);
1853    
1854     /* all calls to cpuid_count() should be made on the same cpu */
1855     @@ -642,13 +642,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1856     g_phys_as = phys_as;
1857     entry->eax = g_phys_as | (virt_as << 8);
1858     entry->edx = 0;
1859     - /* IBRS and IBPB aren't necessarily present in hardware cpuid */
1860     - if (boot_cpu_has(X86_FEATURE_IBPB))
1861     - entry->ebx |= F(IBPB);
1862     - if (boot_cpu_has(X86_FEATURE_IBRS))
1863     - entry->ebx |= F(IBRS);
1864     + /*
1865     + * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
1866     + * hardware cpuid
1867     + */
1868     + if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
1869     + entry->ebx |= F(AMD_IBPB);
1870     + if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
1871     + entry->ebx |= F(AMD_IBRS);
1872     + if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
1873     + entry->ebx |= F(VIRT_SSBD);
1874     entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
1875     cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
1876     + if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
1877     + entry->ebx |= F(VIRT_SSBD);
1878     break;
1879     }
1880     case 0x80000019:
1881     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1882     index be9c839e2c89..dbbd762359a9 100644
1883     --- a/arch/x86/kvm/svm.c
1884     +++ b/arch/x86/kvm/svm.c
1885     @@ -50,7 +50,7 @@
1886     #include <asm/kvm_para.h>
1887     #include <asm/irq_remapping.h>
1888     #include <asm/microcode.h>
1889     -#include <asm/nospec-branch.h>
1890     +#include <asm/spec-ctrl.h>
1891    
1892     #include <asm/virtext.h>
1893     #include "trace.h"
1894     @@ -192,6 +192,12 @@ struct vcpu_svm {
1895     } host;
1896    
1897     u64 spec_ctrl;
1898     + /*
1899     + * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
1900     + * translated into the appropriate LS_CFG bits on the host to
1901     + * perform speculative control.
1902     + */
1903     + u64 virt_spec_ctrl;
1904    
1905     u32 *msrpm;
1906    
1907     @@ -1910,6 +1916,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1908    
1909     vcpu->arch.microcode_version = 0x01000065;
1910     svm->spec_ctrl = 0;
1911     + svm->virt_spec_ctrl = 0;
1912    
1913     if (!init_event) {
1914     svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
1915     @@ -3959,11 +3966,18 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1916     break;
1917     case MSR_IA32_SPEC_CTRL:
1918     if (!msr_info->host_initiated &&
1919     - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
1920     + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
1921     return 1;
1922    
1923     msr_info->data = svm->spec_ctrl;
1924     break;
1925     + case MSR_AMD64_VIRT_SPEC_CTRL:
1926     + if (!msr_info->host_initiated &&
1927     + !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
1928     + return 1;
1929     +
1930     + msr_info->data = svm->virt_spec_ctrl;
1931     + break;
1932     case MSR_F15H_IC_CFG: {
1933    
1934     int family, model;
1935     @@ -4057,7 +4071,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1936     break;
1937     case MSR_IA32_SPEC_CTRL:
1938     if (!msr->host_initiated &&
1939     - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
1940     + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
1941     return 1;
1942    
1943     /* The STIBP bit doesn't fault even if it's not advertised */
1944     @@ -4084,7 +4098,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1945     break;
1946     case MSR_IA32_PRED_CMD:
1947     if (!msr->host_initiated &&
1948     - !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
1949     + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
1950     return 1;
1951    
1952     if (data & ~PRED_CMD_IBPB)
1953     @@ -4098,6 +4112,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1954     break;
1955     set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
1956     break;
1957     + case MSR_AMD64_VIRT_SPEC_CTRL:
1958     + if (!msr->host_initiated &&
1959     + !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
1960     + return 1;
1961     +
1962     + if (data & ~SPEC_CTRL_SSBD)
1963     + return 1;
1964     +
1965     + svm->virt_spec_ctrl = data;
1966     + break;
1967     case MSR_STAR:
1968     svm->vmcb->save.star = data;
1969     break;
1970     @@ -5401,8 +5425,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1971     * is no need to worry about the conditional branch over the wrmsr
1972     * being speculatively taken.
1973     */
1974     - if (svm->spec_ctrl)
1975     - native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
1976     + x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
1977    
1978     asm volatile (
1979     "push %%" _ASM_BP "; \n\t"
1980     @@ -5496,6 +5519,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1981     #endif
1982     );
1983    
1984     + /* Eliminate branch target predictions from guest mode */
1985     + vmexit_fill_RSB();
1986     +
1987     +#ifdef CONFIG_X86_64
1988     + wrmsrl(MSR_GS_BASE, svm->host.gs_base);
1989     +#else
1990     + loadsegment(fs, svm->host.fs);
1991     +#ifndef CONFIG_X86_32_LAZY_GS
1992     + loadsegment(gs, svm->host.gs);
1993     +#endif
1994     +#endif
1995     +
1996     /*
1997     * We do not use IBRS in the kernel. If this vCPU has used the
1998     * SPEC_CTRL MSR it may have left it on; save the value and
1999     @@ -5514,20 +5549,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
2000     if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
2001     svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
2002    
2003     - if (svm->spec_ctrl)
2004     - native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
2005     -
2006     - /* Eliminate branch target predictions from guest mode */
2007     - vmexit_fill_RSB();
2008     -
2009     -#ifdef CONFIG_X86_64
2010     - wrmsrl(MSR_GS_BASE, svm->host.gs_base);
2011     -#else
2012     - loadsegment(fs, svm->host.fs);
2013     -#ifndef CONFIG_X86_32_LAZY_GS
2014     - loadsegment(gs, svm->host.gs);
2015     -#endif
2016     -#endif
2017     + x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
2018    
2019     reload_tss(vcpu);
2020    
2021     @@ -5630,7 +5652,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
2022     return false;
2023     }
2024    
2025     -static bool svm_has_high_real_mode_segbase(void)
2026     +static bool svm_has_emulated_msr(int index)
2027     {
2028     return true;
2029     }
2030     @@ -6854,7 +6876,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
2031     .hardware_enable = svm_hardware_enable,
2032     .hardware_disable = svm_hardware_disable,
2033     .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
2034     - .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
2035     + .has_emulated_msr = svm_has_emulated_msr,
2036    
2037     .vcpu_create = svm_create_vcpu,
2038     .vcpu_free = svm_free_vcpu,
2039     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
2040     index 657c93409042..3deb153bf9d9 100644
2041     --- a/arch/x86/kvm/vmx.c
2042     +++ b/arch/x86/kvm/vmx.c
2043     @@ -52,7 +52,7 @@
2044     #include <asm/irq_remapping.h>
2045     #include <asm/mmu_context.h>
2046     #include <asm/microcode.h>
2047     -#include <asm/nospec-branch.h>
2048     +#include <asm/spec-ctrl.h>
2049    
2050     #include "trace.h"
2051     #include "pmu.h"
2052     @@ -1314,6 +1314,12 @@ static inline bool cpu_has_vmx_vmfunc(void)
2053     SECONDARY_EXEC_ENABLE_VMFUNC;
2054     }
2055    
2056     +static bool vmx_umip_emulated(void)
2057     +{
2058     + return vmcs_config.cpu_based_2nd_exec_ctrl &
2059     + SECONDARY_EXEC_DESC;
2060     +}
2061     +
2062     static inline bool report_flexpriority(void)
2063     {
2064     return flexpriority_enabled;
2065     @@ -3269,7 +3275,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2066     break;
2067     case MSR_IA32_SPEC_CTRL:
2068     if (!msr_info->host_initiated &&
2069     - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
2070     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
2071     return 1;
2072    
2073     @@ -3390,12 +3395,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2074     break;
2075     case MSR_IA32_SPEC_CTRL:
2076     if (!msr_info->host_initiated &&
2077     - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
2078     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
2079     return 1;
2080    
2081     /* The STIBP bit doesn't fault even if it's not advertised */
2082     - if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
2083     + if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
2084     return 1;
2085    
2086     vmx->spec_ctrl = data;
2087     @@ -3421,7 +3425,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2088     break;
2089     case MSR_IA32_PRED_CMD:
2090     if (!msr_info->host_initiated &&
2091     - !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) &&
2092     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
2093     return 1;
2094    
2095     @@ -4494,14 +4497,16 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
2096     (to_vmx(vcpu)->rmode.vm86_active ?
2097     KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
2098    
2099     - if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) {
2100     - vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
2101     - SECONDARY_EXEC_DESC);
2102     - hw_cr4 &= ~X86_CR4_UMIP;
2103     - } else if (!is_guest_mode(vcpu) ||
2104     - !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
2105     - vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
2106     + if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
2107     + if (cr4 & X86_CR4_UMIP) {
2108     + vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
2109     SECONDARY_EXEC_DESC);
2110     + hw_cr4 &= ~X86_CR4_UMIP;
2111     + } else if (!is_guest_mode(vcpu) ||
2112     + !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
2113     + vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
2114     + SECONDARY_EXEC_DESC);
2115     + }
2116    
2117     if (cr4 & X86_CR4_VMXE) {
2118     /*
2119     @@ -9226,9 +9231,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
2120     }
2121     STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
2122    
2123     -static bool vmx_has_high_real_mode_segbase(void)
2124     +static bool vmx_has_emulated_msr(int index)
2125     {
2126     - return enable_unrestricted_guest || emulate_invalid_guest_state;
2127     + switch (index) {
2128     + case MSR_IA32_SMBASE:
2129     + /*
2130     + * We cannot do SMM unless we can run the guest in big
2131     + * real mode.
2132     + */
2133     + return enable_unrestricted_guest || emulate_invalid_guest_state;
2134     + case MSR_AMD64_VIRT_SPEC_CTRL:
2135     + /* This is AMD only. */
2136     + return false;
2137     + default:
2138     + return true;
2139     + }
2140     }
2141    
2142     static bool vmx_mpx_supported(void)
2143     @@ -9243,12 +9260,6 @@ static bool vmx_xsaves_supported(void)
2144     SECONDARY_EXEC_XSAVES;
2145     }
2146    
2147     -static bool vmx_umip_emulated(void)
2148     -{
2149     - return vmcs_config.cpu_based_2nd_exec_ctrl &
2150     - SECONDARY_EXEC_DESC;
2151     -}
2152     -
2153     static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
2154     {
2155     u32 exit_intr_info;
2156     @@ -9466,10 +9477,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
2157     * is no need to worry about the conditional branch over the wrmsr
2158     * being speculatively taken.
2159     */
2160     - if (vmx->spec_ctrl)
2161     - native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
2162     + x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
2163    
2164     vmx->__launched = vmx->loaded_vmcs->launched;
2165     +
2166     asm(
2167     /* Store host registers */
2168     "push %%" _ASM_DX "; push %%" _ASM_BP ";"
2169     @@ -9605,8 +9616,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
2170     if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
2171     vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
2172    
2173     - if (vmx->spec_ctrl)
2174     - native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
2175     + x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
2176    
2177     /* Eliminate branch target predictions from guest mode */
2178     vmexit_fill_RSB();
2179     @@ -12299,7 +12309,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
2180     .hardware_enable = hardware_enable,
2181     .hardware_disable = hardware_disable,
2182     .cpu_has_accelerated_tpr = report_flexpriority,
2183     - .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
2184     + .has_emulated_msr = vmx_has_emulated_msr,
2185    
2186     .vcpu_create = vmx_create_vcpu,
2187     .vcpu_free = vmx_free_vcpu,
2188     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2189     index 18b5ca7a3197..f3df3a934733 100644
2190     --- a/arch/x86/kvm/x86.c
2191     +++ b/arch/x86/kvm/x86.c
2192     @@ -1045,6 +1045,7 @@ static u32 emulated_msrs[] = {
2193     MSR_SMI_COUNT,
2194     MSR_PLATFORM_INFO,
2195     MSR_MISC_FEATURES_ENABLES,
2196     + MSR_AMD64_VIRT_SPEC_CTRL,
2197     };
2198    
2199     static unsigned num_emulated_msrs;
2200     @@ -2843,7 +2844,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2201     * fringe case that is not enabled except via specific settings
2202     * of the module parameters.
2203     */
2204     - r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
2205     + r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
2206     break;
2207     case KVM_CAP_VAPIC:
2208     r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2209     @@ -4522,14 +4523,8 @@ static void kvm_init_msr_list(void)
2210     num_msrs_to_save = j;
2211    
2212     for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
2213     - switch (emulated_msrs[i]) {
2214     - case MSR_IA32_SMBASE:
2215     - if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
2216     - continue;
2217     - break;
2218     - default:
2219     - break;
2220     - }
2221     + if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
2222     + continue;
2223    
2224     if (j < i)
2225     emulated_msrs[j] = emulated_msrs[i];
2226     diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
2227     index d7bc0eea20a5..6e98e0a7c923 100644
2228     --- a/arch/x86/mm/pkeys.c
2229     +++ b/arch/x86/mm/pkeys.c
2230     @@ -94,26 +94,27 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
2231     */
2232     if (pkey != -1)
2233     return pkey;
2234     - /*
2235     - * Look for a protection-key-drive execute-only mapping
2236     - * which is now being given permissions that are not
2237     - * execute-only. Move it back to the default pkey.
2238     - */
2239     - if (vma_is_pkey_exec_only(vma) &&
2240     - (prot & (PROT_READ|PROT_WRITE))) {
2241     - return 0;
2242     - }
2243     +
2244     /*
2245     * The mapping is execute-only. Go try to get the
2246     * execute-only protection key. If we fail to do that,
2247     * fall through as if we do not have execute-only
2248     - * support.
2249     + * support in this mm.
2250     */
2251     if (prot == PROT_EXEC) {
2252     pkey = execute_only_pkey(vma->vm_mm);
2253     if (pkey > 0)
2254     return pkey;
2255     + } else if (vma_is_pkey_exec_only(vma)) {
2256     + /*
2257     + * Protections are *not* PROT_EXEC, but the mapping
2258     + * is using the exec-only pkey. This mapping was
2259     + * PROT_EXEC and will no longer be. Move back to
2260     + * the default pkey.
2261     + */
2262     + return ARCH_DEFAULT_PKEY;
2263     }
2264     +
2265     /*
2266     * This is a vanilla, non-pkey mprotect (or we failed to
2267     * setup execute-only), inherit the pkey from the VMA we
2268     diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
2269     index d33e7dbe3129..2d76106788a3 100644
2270     --- a/arch/x86/xen/mmu.c
2271     +++ b/arch/x86/xen/mmu.c
2272     @@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
2273     }
2274     EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
2275    
2276     -static void xen_flush_tlb_all(void)
2277     +static noinline void xen_flush_tlb_all(void)
2278     {
2279     struct mmuext_op *op;
2280     struct multicall_space mcs;
2281    
2282     - trace_xen_mmu_flush_tlb_all(0);
2283     -
2284     preempt_disable();
2285    
2286     mcs = xen_mc_entry(sizeof(*op));
2287     diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
2288     index aae88fec9941..ac17c206afd1 100644
2289     --- a/arch/x86/xen/mmu_pv.c
2290     +++ b/arch/x86/xen/mmu_pv.c
2291     @@ -1280,13 +1280,11 @@ unsigned long xen_read_cr2_direct(void)
2292     return this_cpu_read(xen_vcpu_info.arch.cr2);
2293     }
2294    
2295     -static void xen_flush_tlb(void)
2296     +static noinline void xen_flush_tlb(void)
2297     {
2298     struct mmuext_op *op;
2299     struct multicall_space mcs;
2300    
2301     - trace_xen_mmu_flush_tlb(0);
2302     -
2303     preempt_disable();
2304    
2305     mcs = xen_mc_entry(sizeof(*op));
2306     diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
2307     index d21a2d913107..827905794b48 100644
2308     --- a/drivers/base/cpu.c
2309     +++ b/drivers/base/cpu.c
2310     @@ -532,14 +532,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
2311     return sprintf(buf, "Not affected\n");
2312     }
2313    
2314     +ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
2315     + struct device_attribute *attr, char *buf)
2316     +{
2317     + return sprintf(buf, "Not affected\n");
2318     +}
2319     +
2320     static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
2321     static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
2322     static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
2323     +static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
2324    
2325     static struct attribute *cpu_root_vulnerabilities_attrs[] = {
2326     &dev_attr_meltdown.attr,
2327     &dev_attr_spectre_v1.attr,
2328     &dev_attr_spectre_v2.attr,
2329     + &dev_attr_spec_store_bypass.attr,
2330     NULL
2331     };
2332    
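
The weak cpu_show_spec_store_bypass() fallback plus the new attribute expose the mitigation state as /sys/devices/system/cpu/vulnerabilities/spec_store_bypass alongside the existing meltdown/spectre entries; architectures without their own handler report "Not affected". A small sketch that reads it:

#include <stdio.h>

int main(void)
{
        char line[128];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

        if (!f) {
                perror("spec_store_bypass");    /* older kernel: file absent */
                return 1;
        }
        if (fgets(line, sizeof(line), f))
                /* e.g. "Not affected", "Vulnerable" or "Mitigation: ..." */
                printf("%s", line);
        fclose(f);
        return 0;
}
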
2333     diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
2334     index fb586e09682d..a8bec064d14a 100644
2335     --- a/drivers/cpufreq/Kconfig.arm
2336     +++ b/drivers/cpufreq/Kconfig.arm
2337     @@ -20,7 +20,7 @@ config ACPI_CPPC_CPUFREQ
2338    
2339     config ARM_ARMADA_37XX_CPUFREQ
2340     tristate "Armada 37xx CPUFreq support"
2341     - depends on ARCH_MVEBU
2342     + depends on ARCH_MVEBU && CPUFREQ_DT
2343     help
2344     This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
2345     The Armada 37xx PMU supports 4 frequency and VDD levels.
2346     diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
2347     index 9acc1e157813..6d3e01b2bde9 100644
2348     --- a/drivers/gpu/drm/drm_drv.c
2349     +++ b/drivers/gpu/drm/drm_drv.c
2350     @@ -716,7 +716,7 @@ static void remove_compat_control_link(struct drm_device *dev)
2351     if (!minor)
2352     return;
2353    
2354     - name = kasprintf(GFP_KERNEL, "controlD%d", minor->index);
2355     + name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
2356     if (!name)
2357     return;
2358    
2359     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
2360     index 175d552c8bae..a2917dfd45cd 100644
2361     --- a/drivers/gpu/drm/i915/i915_reg.h
2362     +++ b/drivers/gpu/drm/i915/i915_reg.h
2363     @@ -7139,6 +7139,9 @@ enum {
2364     #define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
2365     #define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
2366    
2367     +#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
2368     +#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
2369     +
2370     /* WaCatErrorRejectionIssue */
2371     #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
2372     #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
2373     diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
2374     index fa960cfd2764..02ec078b0979 100644
2375     --- a/drivers/gpu/drm/i915/intel_engine_cs.c
2376     +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
2377     @@ -1098,6 +1098,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
2378     WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
2379     GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
2380    
2381     + /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
2382     + if (IS_GEN9_LP(dev_priv))
2383     + WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
2384     +
2385     /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
2386     ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
2387     if (ret)
2388     diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
2389     index ef23553ff5cb..001a80dfad31 100644
2390     --- a/drivers/hwmon/Kconfig
2391     +++ b/drivers/hwmon/Kconfig
2392     @@ -272,7 +272,7 @@ config SENSORS_K8TEMP
2393    
2394     config SENSORS_K10TEMP
2395     tristate "AMD Family 10h+ temperature sensor"
2396     - depends on X86 && PCI
2397     + depends on X86 && PCI && AMD_NB
2398     help
2399     If you say yes here you get support for the temperature
2400     sensor(s) inside your CPU. Supported are later revisions of
2401     diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
2402     index d2cc55e21374..3b73dee6fdc6 100644
2403     --- a/drivers/hwmon/k10temp.c
2404     +++ b/drivers/hwmon/k10temp.c
2405     @@ -23,6 +23,7 @@
2406     #include <linux/init.h>
2407     #include <linux/module.h>
2408     #include <linux/pci.h>
2409     +#include <asm/amd_nb.h>
2410     #include <asm/processor.h>
2411    
2412     MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
2413     @@ -40,8 +41,8 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
2414     #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
2415     #endif
2416    
2417     -#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
2418     -#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0
2419     +#ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3
2420     +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
2421     #endif
2422    
2423     /* CPUID function 0x80000001, ebx */
2424     @@ -63,10 +64,12 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
2425     #define NB_CAP_HTC 0x00000400
2426    
2427     /*
2428     - * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE
2429     - * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature
2430     - * Control]
2431     + * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
2432     + * and REG_REPORTED_TEMPERATURE have been moved to
2433     + * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
2434     + * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
2435     */
2436     +#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64
2437     #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
2438    
2439     /* F17h M01h Access througn SMN */
2440     @@ -74,6 +77,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
2441    
2442     struct k10temp_data {
2443     struct pci_dev *pdev;
2444     + void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
2445     void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
2446     int temp_offset;
2447     u32 temp_adjust_mask;
2448     @@ -98,6 +102,11 @@ static const struct tctl_offset tctl_offset_table[] = {
2449     { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
2450     };
2451    
2452     +static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
2453     +{
2454     + pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
2455     +}
2456     +
2457     static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
2458     {
2459     pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
2460     @@ -114,6 +123,12 @@ static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
2461     mutex_unlock(&nb_smu_ind_mutex);
2462     }
2463    
2464     +static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
2465     +{
2466     + amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
2467     + F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
2468     +}
2469     +
2470     static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
2471     {
2472     amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
2473     @@ -122,8 +137,8 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
2474    
2475     static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
2476     {
2477     - amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
2478     - F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
2479     + amd_smn_read(amd_pci_dev_to_node_id(pdev),
2480     + F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
2481     }
2482    
2483     static ssize_t temp1_input_show(struct device *dev,
2484     @@ -160,8 +175,7 @@ static ssize_t show_temp_crit(struct device *dev,
2485     u32 regval;
2486     int value;
2487    
2488     - pci_read_config_dword(data->pdev,
2489     - REG_HARDWARE_THERMAL_CONTROL, &regval);
2490     + data->read_htcreg(data->pdev, &regval);
2491     value = ((regval >> 16) & 0x7f) * 500 + 52000;
2492     if (show_hyst)
2493     value -= ((regval >> 24) & 0xf) * 500;
2494     @@ -181,13 +195,18 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
2495     struct pci_dev *pdev = data->pdev;
2496    
2497     if (index >= 2) {
2498     - u32 reg_caps, reg_htc;
2499     + u32 reg;
2500     +
2501     + if (!data->read_htcreg)
2502     + return 0;
2503    
2504     pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
2505     - &reg_caps);
2506     - pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL,
2507     - &reg_htc);
2508     - if (!(reg_caps & NB_CAP_HTC) || !(reg_htc & HTC_ENABLE))
2509     + &reg);
2510     + if (!(reg & NB_CAP_HTC))
2511     + return 0;
2512     +
2513     + data->read_htcreg(data->pdev, &reg);
2514     + if (!(reg & HTC_ENABLE))
2515     return 0;
2516     }
2517     return attr->mode;
2518     @@ -268,11 +287,13 @@ static int k10temp_probe(struct pci_dev *pdev,
2519    
2520     if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
2521     boot_cpu_data.x86_model == 0x70)) {
2522     + data->read_htcreg = read_htcreg_nb_f15;
2523     data->read_tempreg = read_tempreg_nb_f15;
2524     } else if (boot_cpu_data.x86 == 0x17) {
2525     data->temp_adjust_mask = 0x80000;
2526     data->read_tempreg = read_tempreg_nb_f17;
2527     } else {
2528     + data->read_htcreg = read_htcreg_pci;
2529     data->read_tempreg = read_tempreg_pci;
2530     }
2531    
2532     @@ -302,7 +323,7 @@ static const struct pci_device_id k10temp_id_table[] = {
2533     { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
2534     { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
2535     { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
2536     - { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
2537     + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
2538     {}
2539     };
2540     MODULE_DEVICE_TABLE(pci, k10temp_id_table);
2541     diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
2542     index 05732531829f..d64d6ee1de01 100644
2543     --- a/drivers/i2c/busses/i2c-designware-master.c
2544     +++ b/drivers/i2c/busses/i2c-designware-master.c
2545     @@ -209,7 +209,10 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
2546     i2c_dw_disable_int(dev);
2547    
2548     /* Enable the adapter */
2549     - __i2c_dw_enable_and_wait(dev, true);
2550     + __i2c_dw_enable(dev, true);
2551     +
2552     + /* Dummy read to avoid the register getting stuck on Bay Trail */
2553     + dw_readl(dev, DW_IC_ENABLE_STATUS);
2554    
2555     /* Clear and enable interrupts */
2556     dw_readl(dev, DW_IC_CLR_INTR);
2557     diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
2558     index af89408befe8..b218426a6493 100644
2559     --- a/drivers/md/bcache/debug.c
2560     +++ b/drivers/md/bcache/debug.c
2561     @@ -251,6 +251,9 @@ void bch_debug_exit(void)
2562    
2563     int __init bch_debug_init(struct kobject *kobj)
2564     {
2565     + if (!IS_ENABLED(CONFIG_DEBUG_FS))
2566     + return 0;
2567     +
2568     debug = debugfs_create_dir("bcache", NULL);
2569    
2570     return IS_ERR_OR_NULL(debug);
2571     diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
2572     index f978c06fbd7d..e81ad33ab849 100644
2573     --- a/drivers/mtd/nand/marvell_nand.c
2574     +++ b/drivers/mtd/nand/marvell_nand.c
2575     @@ -1190,11 +1190,13 @@ static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
2576     NDCB0_CMD2(NAND_CMD_READSTART);
2577    
2578     /*
2579     - * Trigger the naked read operation only on the last chunk.
2580     - * Otherwise, use monolithic read.
2581     + * Trigger the monolithic read on the first chunk, then naked read on
2582     + * intermediate chunks and finally a last naked read on the last chunk.
2583     */
2584     - if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
2585     + if (chunk == 0)
2586     nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
2587     + else if (chunk < lt->nchunks - 1)
2588     + nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
2589     else
2590     nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
2591    
2592     diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
2593     index 98f3cfdc0d02..5a116db5a65f 100644
2594     --- a/drivers/s390/cio/qdio_setup.c
2595     +++ b/drivers/s390/cio/qdio_setup.c
2596     @@ -141,7 +141,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
2597     int i;
2598    
2599     for (i = 0; i < nr_queues; i++) {
2600     - q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
2601     + q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
2602     if (!q)
2603     return -ENOMEM;
2604    
2605     @@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
2606     {
2607     struct ciw *ciw;
2608     struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
2609     - int rc;
2610    
2611     memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
2612     memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
2613     @@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
2614     ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
2615     if (!ciw) {
2616     DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
2617     - rc = -EINVAL;
2618     - goto out_err;
2619     + return -EINVAL;
2620     }
2621     irq_ptr->equeue = *ciw;
2622    
2623     ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
2624     if (!ciw) {
2625     DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
2626     - rc = -EINVAL;
2627     - goto out_err;
2628     + return -EINVAL;
2629     }
2630     irq_ptr->aqueue = *ciw;
2631    
2632     @@ -510,9 +507,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
2633     irq_ptr->orig_handler = init_data->cdev->handler;
2634     init_data->cdev->handler = qdio_int_handler;
2635     return 0;
2636     -out_err:
2637     - qdio_release_memory(irq_ptr);
2638     - return rc;
2639     }
2640    
2641     void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
2642     diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
2643     index 2c7550797ec2..dce92b2a895d 100644
2644     --- a/drivers/s390/cio/vfio_ccw_cp.c
2645     +++ b/drivers/s390/cio/vfio_ccw_cp.c
2646     @@ -715,6 +715,10 @@ void cp_free(struct channel_program *cp)
2647     * and stores the result to ccwchain list. @cp must have been
2648     * initialized by a previous call with cp_init(). Otherwise, undefined
2649     * behavior occurs.
2650     + * For each chain composing the channel program:
2651     + * - On entry ch_len holds the count of CCWs to be translated.
2652     + * - On exit ch_len is adjusted to the count of successfully translated CCWs.
2653     + * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
2654     *
2655     * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
2656     * as helpers to do ccw chain translation inside the kernel. Basically
2657     @@ -749,11 +753,18 @@ int cp_prefetch(struct channel_program *cp)
2658     for (idx = 0; idx < len; idx++) {
2659     ret = ccwchain_fetch_one(chain, idx, cp);
2660     if (ret)
2661     - return ret;
2662     + goto out_err;
2663     }
2664     }
2665    
2666     return 0;
2667     +out_err:
2668     + /* Only cleanup the chain elements that were actually translated. */
2669     + chain->ch_len = idx;
2670     + list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
2671     + chain->ch_len = 0;
2672     + }
2673     + return ret;
2674     }
2675    
2676     /**
2677     diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
2678     index a172ab299e80..ff01f865a173 100644
2679     --- a/drivers/spi/spi-bcm-qspi.c
2680     +++ b/drivers/spi/spi-bcm-qspi.c
2681     @@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
2682    
2683     static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
2684     {
2685     - if (!has_bspi(qspi) || (qspi->bspi_enabled))
2686     + if (!has_bspi(qspi))
2687     return;
2688    
2689     qspi->bspi_enabled = 1;
2690     @@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
2691    
2692     static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
2693     {
2694     - if (!has_bspi(qspi) || (!qspi->bspi_enabled))
2695     + if (!has_bspi(qspi))
2696     return;
2697    
2698     qspi->bspi_enabled = 0;
2699     @@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
2700    
2701     static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
2702     {
2703     - u32 data = 0;
2704     + u32 rd = 0;
2705     + u32 wr = 0;
2706    
2707     - if (qspi->curr_cs == cs)
2708     - return;
2709     if (qspi->base[CHIP_SELECT]) {
2710     - data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
2711     - data = (data & ~0xff) | (1 << cs);
2712     - bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
2713     + rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
2714     + wr = (rd & ~0xff) | (1 << cs);
2715     + if (rd == wr)
2716     + return;
2717     + bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
2718     usleep_range(10, 20);
2719     }
2720     +
2721     + dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
2722     qspi->curr_cs = cs;
2723     }
2724    
2725     @@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
2726     dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
2727     }
2728     mspi_cdram = MSPI_CDRAM_CONT_BIT;
2729     - mspi_cdram |= (~(1 << spi->chip_select) &
2730     - MSPI_CDRAM_PCS);
2731     +
2732     + if (has_bspi(qspi))
2733     + mspi_cdram &= ~1;
2734     + else
2735     + mspi_cdram |= (~(1 << spi->chip_select) &
2736     + MSPI_CDRAM_PCS);
2737     +
2738     mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
2739     MSPI_CDRAM_BITSE_BIT);
2740    
2741     diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
2742     index 94f7b0713281..02a8012a318a 100644
2743     --- a/drivers/spi/spi-pxa2xx.h
2744     +++ b/drivers/spi/spi-pxa2xx.h
2745     @@ -38,7 +38,7 @@ struct driver_data {
2746    
2747     /* SSP register addresses */
2748     void __iomem *ioaddr;
2749     - u32 ssdr_physical;
2750     + phys_addr_t ssdr_physical;
2751    
2752     /* SSP masks*/
2753     u32 dma_cr1;
2754     diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
2755     index 556960a1bab3..07d3be6f0780 100644
2756     --- a/drivers/tee/tee_shm.c
2757     +++ b/drivers/tee/tee_shm.c
2758     @@ -360,9 +360,10 @@ int tee_shm_get_fd(struct tee_shm *shm)
2759     if (!(shm->flags & TEE_SHM_DMA_BUF))
2760     return -EINVAL;
2761    
2762     + get_dma_buf(shm->dmabuf);
2763     fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
2764     - if (fd >= 0)
2765     - get_dma_buf(shm->dmabuf);
2766     + if (fd < 0)
2767     + dma_buf_put(shm->dmabuf);
2768     return fd;
2769     }
2770    
2771     diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
2772     index 72ebbc908e19..32cd52ca8318 100644
2773     --- a/drivers/usb/host/xhci-hub.c
2774     +++ b/drivers/usb/host/xhci-hub.c
2775     @@ -354,7 +354,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
2776    
2777     slot_id = 0;
2778     for (i = 0; i < MAX_HC_SLOTS; i++) {
2779     - if (!xhci->devs[i])
2780     + if (!xhci->devs[i] || !xhci->devs[i]->udev)
2781     continue;
2782     speed = xhci->devs[i]->udev->speed;
2783     if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
2784     diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
2785     index 14a72357800a..35618ceb2791 100644
2786     --- a/drivers/usb/usbip/stub.h
2787     +++ b/drivers/usb/usbip/stub.h
2788     @@ -73,6 +73,7 @@ struct bus_id_priv {
2789     struct stub_device *sdev;
2790     struct usb_device *udev;
2791     char shutdown_busid;
2792     + spinlock_t busid_lock;
2793     };
2794    
2795     /* stub_priv is allocated from stub_priv_cache */
2796     @@ -83,6 +84,7 @@ extern struct usb_device_driver stub_driver;
2797    
2798     /* stub_main.c */
2799     struct bus_id_priv *get_busid_priv(const char *busid);
2800     +void put_busid_priv(struct bus_id_priv *bid);
2801     int del_match_busid(char *busid);
2802     void stub_device_cleanup_urbs(struct stub_device *sdev);
2803    
2804     diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
2805     index dd8ef36ab10e..c0d6ff1baa72 100644
2806     --- a/drivers/usb/usbip/stub_dev.c
2807     +++ b/drivers/usb/usbip/stub_dev.c
2808     @@ -300,9 +300,9 @@ static int stub_probe(struct usb_device *udev)
2809     struct stub_device *sdev = NULL;
2810     const char *udev_busid = dev_name(&udev->dev);
2811     struct bus_id_priv *busid_priv;
2812     - int rc;
2813     + int rc = 0;
2814    
2815     - dev_dbg(&udev->dev, "Enter\n");
2816     + dev_dbg(&udev->dev, "Enter probe\n");
2817    
2818     /* check we should claim or not by busid_table */
2819     busid_priv = get_busid_priv(udev_busid);
2820     @@ -317,13 +317,15 @@ static int stub_probe(struct usb_device *udev)
2821     * other matched drivers by the driver core.
2822     * See driver_probe_device() in driver/base/dd.c
2823     */
2824     - return -ENODEV;
2825     + rc = -ENODEV;
2826     + goto call_put_busid_priv;
2827     }
2828    
2829     if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
2830     dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
2831     udev_busid);
2832     - return -ENODEV;
2833     + rc = -ENODEV;
2834     + goto call_put_busid_priv;
2835     }
2836    
2837     if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
2838     @@ -331,13 +333,16 @@ static int stub_probe(struct usb_device *udev)
2839     "%s is attached on vhci_hcd... skip!\n",
2840     udev_busid);
2841    
2842     - return -ENODEV;
2843     + rc = -ENODEV;
2844     + goto call_put_busid_priv;
2845     }
2846    
2847     /* ok, this is my device */
2848     sdev = stub_device_alloc(udev);
2849     - if (!sdev)
2850     - return -ENOMEM;
2851     + if (!sdev) {
2852     + rc = -ENOMEM;
2853     + goto call_put_busid_priv;
2854     + }
2855    
2856     dev_info(&udev->dev,
2857     "usbip-host: register new device (bus %u dev %u)\n",
2858     @@ -369,7 +374,9 @@ static int stub_probe(struct usb_device *udev)
2859     }
2860     busid_priv->status = STUB_BUSID_ALLOC;
2861    
2862     - return 0;
2863     + rc = 0;
2864     + goto call_put_busid_priv;
2865     +
2866     err_files:
2867     usb_hub_release_port(udev->parent, udev->portnum,
2868     (struct usb_dev_state *) udev);
2869     @@ -379,6 +386,9 @@ static int stub_probe(struct usb_device *udev)
2870    
2871     busid_priv->sdev = NULL;
2872     stub_device_free(sdev);
2873     +
2874     +call_put_busid_priv:
2875     + put_busid_priv(busid_priv);
2876     return rc;
2877     }
2878    
2879     @@ -404,7 +414,7 @@ static void stub_disconnect(struct usb_device *udev)
2880     struct bus_id_priv *busid_priv;
2881     int rc;
2882    
2883     - dev_dbg(&udev->dev, "Enter\n");
2884     + dev_dbg(&udev->dev, "Enter disconnect\n");
2885    
2886     busid_priv = get_busid_priv(udev_busid);
2887     if (!busid_priv) {
2888     @@ -417,7 +427,7 @@ static void stub_disconnect(struct usb_device *udev)
2889     /* get stub_device */
2890     if (!sdev) {
2891     dev_err(&udev->dev, "could not get device");
2892     - return;
2893     + goto call_put_busid_priv;
2894     }
2895    
2896     dev_set_drvdata(&udev->dev, NULL);
2897     @@ -432,12 +442,12 @@ static void stub_disconnect(struct usb_device *udev)
2898     (struct usb_dev_state *) udev);
2899     if (rc) {
2900     dev_dbg(&udev->dev, "unable to release port\n");
2901     - return;
2902     + goto call_put_busid_priv;
2903     }
2904    
2905     /* If usb reset is called from event handler */
2906     if (usbip_in_eh(current))
2907     - return;
2908     + goto call_put_busid_priv;
2909    
2910     /* shutdown the current connection */
2911     shutdown_busid(busid_priv);
2912     @@ -448,12 +458,11 @@ static void stub_disconnect(struct usb_device *udev)
2913     busid_priv->sdev = NULL;
2914     stub_device_free(sdev);
2915    
2916     - if (busid_priv->status == STUB_BUSID_ALLOC) {
2917     + if (busid_priv->status == STUB_BUSID_ALLOC)
2918     busid_priv->status = STUB_BUSID_ADDED;
2919     - } else {
2920     - busid_priv->status = STUB_BUSID_OTHER;
2921     - del_match_busid((char *)udev_busid);
2922     - }
2923     +
2924     +call_put_busid_priv:
2925     + put_busid_priv(busid_priv);
2926     }
2927    
2928     #ifdef CONFIG_PM
2929     diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
2930     index d41d0cdeec0f..bf8a5feb0ee9 100644
2931     --- a/drivers/usb/usbip/stub_main.c
2932     +++ b/drivers/usb/usbip/stub_main.c
2933     @@ -14,6 +14,7 @@
2934     #define DRIVER_DESC "USB/IP Host Driver"
2935    
2936     struct kmem_cache *stub_priv_cache;
2937     +
2938     /*
2939     * busid_tables defines matching busids that usbip can grab. A user can change
2940     * dynamically what device is locally used and what device is exported to a
2941     @@ -25,6 +26,8 @@ static spinlock_t busid_table_lock;
2942    
2943     static void init_busid_table(void)
2944     {
2945     + int i;
2946     +
2947     /*
2948     * This also sets the bus_table[i].status to
2949     * STUB_BUSID_OTHER, which is 0.
2950     @@ -32,6 +35,9 @@ static void init_busid_table(void)
2951     memset(busid_table, 0, sizeof(busid_table));
2952    
2953     spin_lock_init(&busid_table_lock);
2954     +
2955     + for (i = 0; i < MAX_BUSID; i++)
2956     + spin_lock_init(&busid_table[i].busid_lock);
2957     }
2958    
2959     /*
2960     @@ -43,15 +49,20 @@ static int get_busid_idx(const char *busid)
2961     int i;
2962     int idx = -1;
2963    
2964     - for (i = 0; i < MAX_BUSID; i++)
2965     + for (i = 0; i < MAX_BUSID; i++) {
2966     + spin_lock(&busid_table[i].busid_lock);
2967     if (busid_table[i].name[0])
2968     if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
2969     idx = i;
2970     + spin_unlock(&busid_table[i].busid_lock);
2971     break;
2972     }
2973     + spin_unlock(&busid_table[i].busid_lock);
2974     + }
2975     return idx;
2976     }
2977    
2978     +/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
2979     struct bus_id_priv *get_busid_priv(const char *busid)
2980     {
2981     int idx;
2982     @@ -59,13 +70,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
2983    
2984     spin_lock(&busid_table_lock);
2985     idx = get_busid_idx(busid);
2986     - if (idx >= 0)
2987     + if (idx >= 0) {
2988     bid = &(busid_table[idx]);
2989     + /* get busid_lock before returning */
2990     + spin_lock(&bid->busid_lock);
2991     + }
2992     spin_unlock(&busid_table_lock);
2993    
2994     return bid;
2995     }
2996    
2997     +void put_busid_priv(struct bus_id_priv *bid)
2998     +{
2999     + if (bid)
3000     + spin_unlock(&bid->busid_lock);
3001     +}
3002     +
3003     static int add_match_busid(char *busid)
3004     {
3005     int i;
3006     @@ -78,15 +98,19 @@ static int add_match_busid(char *busid)
3007     goto out;
3008     }
3009    
3010     - for (i = 0; i < MAX_BUSID; i++)
3011     + for (i = 0; i < MAX_BUSID; i++) {
3012     + spin_lock(&busid_table[i].busid_lock);
3013     if (!busid_table[i].name[0]) {
3014     strlcpy(busid_table[i].name, busid, BUSID_SIZE);
3015     if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
3016     (busid_table[i].status != STUB_BUSID_REMOV))
3017     busid_table[i].status = STUB_BUSID_ADDED;
3018     ret = 0;
3019     + spin_unlock(&busid_table[i].busid_lock);
3020     break;
3021     }
3022     + spin_unlock(&busid_table[i].busid_lock);
3023     + }
3024    
3025     out:
3026     spin_unlock(&busid_table_lock);
3027     @@ -107,6 +131,8 @@ int del_match_busid(char *busid)
3028     /* found */
3029     ret = 0;
3030    
3031     + spin_lock(&busid_table[idx].busid_lock);
3032     +
3033     if (busid_table[idx].status == STUB_BUSID_OTHER)
3034     memset(busid_table[idx].name, 0, BUSID_SIZE);
3035    
3036     @@ -114,6 +140,7 @@ int del_match_busid(char *busid)
3037     (busid_table[idx].status != STUB_BUSID_ADDED))
3038     busid_table[idx].status = STUB_BUSID_REMOV;
3039    
3040     + spin_unlock(&busid_table[idx].busid_lock);
3041     out:
3042     spin_unlock(&busid_table_lock);
3043    
3044     @@ -126,9 +153,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf)
3045     char *out = buf;
3046    
3047     spin_lock(&busid_table_lock);
3048     - for (i = 0; i < MAX_BUSID; i++)
3049     + for (i = 0; i < MAX_BUSID; i++) {
3050     + spin_lock(&busid_table[i].busid_lock);
3051     if (busid_table[i].name[0])
3052     out += sprintf(out, "%s ", busid_table[i].name);
3053     + spin_unlock(&busid_table[i].busid_lock);
3054     + }
3055     spin_unlock(&busid_table_lock);
3056     out += sprintf(out, "\n");
3057    
3058     @@ -169,6 +199,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf,
3059     }
3060     static DRIVER_ATTR_RW(match_busid);
3061    
3062     +static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
3063     +{
3064     + int ret;
3065     +
3066     + /* device_attach() callers should hold parent lock for USB */
3067     + if (busid_priv->udev->dev.parent)
3068     + device_lock(busid_priv->udev->dev.parent);
3069     + ret = device_attach(&busid_priv->udev->dev);
3070     + if (busid_priv->udev->dev.parent)
3071     + device_unlock(busid_priv->udev->dev.parent);
3072     + if (ret < 0) {
3073     + dev_err(&busid_priv->udev->dev, "rebind failed\n");
3074     + return ret;
3075     + }
3076     + return 0;
3077     +}
3078     +
3079     +static void stub_device_rebind(void)
3080     +{
3081     +#if IS_MODULE(CONFIG_USBIP_HOST)
3082     + struct bus_id_priv *busid_priv;
3083     + int i;
3084     +
3085     + /* update status to STUB_BUSID_OTHER so probe ignores the device */
3086     + spin_lock(&busid_table_lock);
3087     + for (i = 0; i < MAX_BUSID; i++) {
3088     + if (busid_table[i].name[0] &&
3089     + busid_table[i].shutdown_busid) {
3090     + busid_priv = &(busid_table[i]);
3091     + busid_priv->status = STUB_BUSID_OTHER;
3092     + }
3093     + }
3094     + spin_unlock(&busid_table_lock);
3095     +
3096     + /* now run rebind - no need to hold locks. driver files are removed */
3097     + for (i = 0; i < MAX_BUSID; i++) {
3098     + if (busid_table[i].name[0] &&
3099     + busid_table[i].shutdown_busid) {
3100     + busid_priv = &(busid_table[i]);
3101     + do_rebind(busid_table[i].name, busid_priv);
3102     + }
3103     + }
3104     +#endif
3105     +}
3106     +
3107     static ssize_t rebind_store(struct device_driver *dev, const char *buf,
3108     size_t count)
3109     {
3110     @@ -186,16 +261,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
3111     if (!bid)
3112     return -ENODEV;
3113    
3114     - /* device_attach() callers should hold parent lock for USB */
3115     - if (bid->udev->dev.parent)
3116     - device_lock(bid->udev->dev.parent);
3117     - ret = device_attach(&bid->udev->dev);
3118     - if (bid->udev->dev.parent)
3119     - device_unlock(bid->udev->dev.parent);
3120     - if (ret < 0) {
3121     - dev_err(&bid->udev->dev, "rebind failed\n");
3122     + /* mark the device for deletion so probe ignores it during rescan */
3123     + bid->status = STUB_BUSID_OTHER;
3124     + /* release the busid lock */
3125     + put_busid_priv(bid);
3126     +
3127     + ret = do_rebind((char *) buf, bid);
3128     + if (ret < 0)
3129     return ret;
3130     - }
3131     +
3132     + /* delete device from busid_table */
3133     + del_match_busid((char *) buf);
3134    
3135     return count;
3136     }
3137     @@ -317,6 +393,9 @@ static void __exit usbip_host_exit(void)
3138     */
3139     usb_deregister_device_driver(&stub_driver);
3140    
3141     + /* initiate scan to attach devices */
3142     + stub_device_rebind();
3143     +
3144     kmem_cache_destroy(stub_priv_cache);
3145     }
3146    
3147     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
3148     index b88a79e69ddf..d3b59f14f9c2 100644
3149     --- a/fs/btrfs/ctree.c
3150     +++ b/fs/btrfs/ctree.c
3151     @@ -2491,10 +2491,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
3152     if (p->reada != READA_NONE)
3153     reada_for_search(fs_info, p, level, slot, key->objectid);
3154    
3155     - btrfs_release_path(p);
3156     -
3157     ret = -EAGAIN;
3158     - tmp = read_tree_block(fs_info, blocknr, 0);
3159     + tmp = read_tree_block(fs_info, blocknr, gen);
3160     if (!IS_ERR(tmp)) {
3161     /*
3162     * If the read above didn't mark this buffer up to date,
3163     @@ -2508,6 +2506,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
3164     } else {
3165     ret = PTR_ERR(tmp);
3166     }
3167     +
3168     + btrfs_release_path(p);
3169     return ret;
3170     }
3171    
3172     @@ -5460,12 +5460,24 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
3173     down_read(&fs_info->commit_root_sem);
3174     left_level = btrfs_header_level(left_root->commit_root);
3175     left_root_level = left_level;
3176     - left_path->nodes[left_level] = left_root->commit_root;
3177     + left_path->nodes[left_level] =
3178     + btrfs_clone_extent_buffer(left_root->commit_root);
3179     + if (!left_path->nodes[left_level]) {
3180     + up_read(&fs_info->commit_root_sem);
3181     + ret = -ENOMEM;
3182     + goto out;
3183     + }
3184     extent_buffer_get(left_path->nodes[left_level]);
3185    
3186     right_level = btrfs_header_level(right_root->commit_root);
3187     right_root_level = right_level;
3188     - right_path->nodes[right_level] = right_root->commit_root;
3189     + right_path->nodes[right_level] =
3190     + btrfs_clone_extent_buffer(right_root->commit_root);
3191     + if (!right_path->nodes[right_level]) {
3192     + up_read(&fs_info->commit_root_sem);
3193     + ret = -ENOMEM;
3194     + goto out;
3195     + }
3196     extent_buffer_get(right_path->nodes[right_level]);
3197     up_read(&fs_info->commit_root_sem);
3198    
3199     diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
3200     index da308774b8a4..13b66ff1719c 100644
3201     --- a/fs/btrfs/ctree.h
3202     +++ b/fs/btrfs/ctree.h
3203     @@ -3153,6 +3153,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
3204     u64 *orig_start, u64 *orig_block_len,
3205     u64 *ram_bytes);
3206    
3207     +void __btrfs_del_delalloc_inode(struct btrfs_root *root,
3208     + struct btrfs_inode *inode);
3209     struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
3210     int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
3211     int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3212     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3213     index 21f34ad0d411..fea78d138073 100644
3214     --- a/fs/btrfs/disk-io.c
3215     +++ b/fs/btrfs/disk-io.c
3216     @@ -3744,6 +3744,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
3217     set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3218    
3219     btrfs_free_qgroup_config(fs_info);
3220     + ASSERT(list_empty(&fs_info->delalloc_roots));
3221    
3222     if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3223     btrfs_info(fs_info, "at unmount delalloc count %lld",
3224     @@ -4049,15 +4050,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
3225    
3226     static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
3227     {
3228     + /* cleanup FS via transaction */
3229     + btrfs_cleanup_transaction(fs_info);
3230     +
3231     mutex_lock(&fs_info->cleaner_mutex);
3232     btrfs_run_delayed_iputs(fs_info);
3233     mutex_unlock(&fs_info->cleaner_mutex);
3234    
3235     down_write(&fs_info->cleanup_work_sem);
3236     up_write(&fs_info->cleanup_work_sem);
3237     -
3238     - /* cleanup FS via transaction */
3239     - btrfs_cleanup_transaction(fs_info);
3240     }
3241    
3242     static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3243     @@ -4182,19 +4183,23 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3244     list_splice_init(&root->delalloc_inodes, &splice);
3245    
3246     while (!list_empty(&splice)) {
3247     + struct inode *inode = NULL;
3248     btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
3249     delalloc_inodes);
3250     -
3251     - list_del_init(&btrfs_inode->delalloc_inodes);
3252     - clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3253     - &btrfs_inode->runtime_flags);
3254     + __btrfs_del_delalloc_inode(root, btrfs_inode);
3255     spin_unlock(&root->delalloc_lock);
3256    
3257     - btrfs_invalidate_inodes(btrfs_inode->root);
3258     -
3259     + /*
3260     + * Make sure we get a live inode and that it'll not disappear
3261     + * meanwhile.
3262     + */
3263     + inode = igrab(&btrfs_inode->vfs_inode);
3264     + if (inode) {
3265     + invalidate_inode_pages2(inode->i_mapping);
3266     + iput(inode);
3267     + }
3268     spin_lock(&root->delalloc_lock);
3269     }
3270     -
3271     spin_unlock(&root->delalloc_lock);
3272     }
3273    
3274     @@ -4210,7 +4215,6 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
3275     while (!list_empty(&splice)) {
3276     root = list_first_entry(&splice, struct btrfs_root,
3277     delalloc_root);
3278     - list_del_init(&root->delalloc_root);
3279     root = btrfs_grab_fs_root(root);
3280     BUG_ON(!root);
3281     spin_unlock(&fs_info->delalloc_root_lock);
3282     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3283     index ef1cf323832a..f370bdc126b8 100644
3284     --- a/fs/btrfs/inode.c
3285     +++ b/fs/btrfs/inode.c
3286     @@ -1762,12 +1762,12 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
3287     spin_unlock(&root->delalloc_lock);
3288     }
3289    
3290     -static void btrfs_del_delalloc_inode(struct btrfs_root *root,
3291     - struct btrfs_inode *inode)
3292     +
3293     +void __btrfs_del_delalloc_inode(struct btrfs_root *root,
3294     + struct btrfs_inode *inode)
3295     {
3296     struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
3297    
3298     - spin_lock(&root->delalloc_lock);
3299     if (!list_empty(&inode->delalloc_inodes)) {
3300     list_del_init(&inode->delalloc_inodes);
3301     clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3302     @@ -1780,6 +1780,13 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root,
3303     spin_unlock(&fs_info->delalloc_root_lock);
3304     }
3305     }
3306     +}
3307     +
3308     +static void btrfs_del_delalloc_inode(struct btrfs_root *root,
3309     + struct btrfs_inode *inode)
3310     +{
3311     + spin_lock(&root->delalloc_lock);
3312     + __btrfs_del_delalloc_inode(root, inode);
3313     spin_unlock(&root->delalloc_lock);
3314     }
3315    
3316     diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
3317     index b30a056963ab..29619496e358 100644
3318     --- a/fs/btrfs/props.c
3319     +++ b/fs/btrfs/props.c
3320     @@ -393,6 +393,7 @@ static int prop_compression_apply(struct inode *inode,
3321     const char *value,
3322     size_t len)
3323     {
3324     + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3325     int type;
3326    
3327     if (len == 0) {
3328     @@ -403,14 +404,17 @@ static int prop_compression_apply(struct inode *inode,
3329     return 0;
3330     }
3331    
3332     - if (!strncmp("lzo", value, 3))
3333     + if (!strncmp("lzo", value, 3)) {
3334     type = BTRFS_COMPRESS_LZO;
3335     - else if (!strncmp("zlib", value, 4))
3336     + btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
3337     + } else if (!strncmp("zlib", value, 4)) {
3338     type = BTRFS_COMPRESS_ZLIB;
3339     - else if (!strncmp("zstd", value, len))
3340     + } else if (!strncmp("zstd", value, len)) {
3341     type = BTRFS_COMPRESS_ZSTD;
3342     - else
3343     + btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
3344     + } else {
3345     return -EINVAL;
3346     + }
3347    
3348     BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
3349     BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
3350     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3351     index 434457794c27..ac6ea1503cd6 100644
3352     --- a/fs/btrfs/tree-log.c
3353     +++ b/fs/btrfs/tree-log.c
3354     @@ -4749,6 +4749,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3355     struct extent_map_tree *em_tree = &inode->extent_tree;
3356     u64 logged_isize = 0;
3357     bool need_log_inode_item = true;
3358     + bool xattrs_logged = false;
3359    
3360     path = btrfs_alloc_path();
3361     if (!path)
3362     @@ -5050,6 +5051,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3363     err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
3364     if (err)
3365     goto out_unlock;
3366     + xattrs_logged = true;
3367     if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
3368     btrfs_release_path(path);
3369     btrfs_release_path(dst_path);
3370     @@ -5062,6 +5064,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3371     btrfs_release_path(dst_path);
3372     if (need_log_inode_item) {
3373     err = log_inode_item(trans, log, dst_path, inode);
3374     + if (!err && !xattrs_logged) {
3375     + err = btrfs_log_all_xattrs(trans, root, inode, path,
3376     + dst_path);
3377     + btrfs_release_path(path);
3378     + }
3379     if (err)
3380     goto out_unlock;
3381     }
3382     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3383     index b2d05c6b1c56..854cb4533f88 100644
3384     --- a/fs/btrfs/volumes.c
3385     +++ b/fs/btrfs/volumes.c
3386     @@ -4046,6 +4046,15 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3387     return 0;
3388     }
3389    
3390     + /*
3391     + * A ro->rw remount sequence should continue with the paused balance
3392     + * regardless of who pauses it, system or the user as of now, so set
3393     + * the resume flag.
3394     + */
3395     + spin_lock(&fs_info->balance_lock);
3396     + fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
3397     + spin_unlock(&fs_info->balance_lock);
3398     +
3399     tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3400     return PTR_ERR_OR_ZERO(tsk);
3401     }
3402     diff --git a/fs/proc/array.c b/fs/proc/array.c
3403     index 598803576e4c..9a517c03ac78 100644
3404     --- a/fs/proc/array.c
3405     +++ b/fs/proc/array.c
3406     @@ -85,6 +85,7 @@
3407     #include <linux/delayacct.h>
3408     #include <linux/seq_file.h>
3409     #include <linux/pid_namespace.h>
3410     +#include <linux/prctl.h>
3411     #include <linux/ptrace.h>
3412     #include <linux/tracehook.h>
3413     #include <linux/string_helpers.h>
3414     @@ -347,6 +348,30 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
3415     #ifdef CONFIG_SECCOMP
3416     seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
3417     #endif
3418     + seq_printf(m, "\nSpeculation_Store_Bypass:\t");
3419     + switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
3420     + case -EINVAL:
3421     + seq_printf(m, "unknown");
3422     + break;
3423     + case PR_SPEC_NOT_AFFECTED:
3424     + seq_printf(m, "not vulnerable");
3425     + break;
3426     + case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
3427     + seq_printf(m, "thread force mitigated");
3428     + break;
3429     + case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
3430     + seq_printf(m, "thread mitigated");
3431     + break;
3432     + case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
3433     + seq_printf(m, "thread vulnerable");
3434     + break;
3435     + case PR_SPEC_DISABLE:
3436     + seq_printf(m, "globally mitigated");
3437     + break;
3438     + default:
3439     + seq_printf(m, "vulnerable");
3440     + break;
3441     + }
3442     seq_putc(m, '\n');
3443     }
3444    
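
    [The fs/proc/array.c hunk above exposes the per-task Speculative Store Bypass state as a new
    "Speculation_Store_Bypass:" line in /proc/<pid>/status, with the value strings shown in the switch.
    The following is a minimal userspace sketch for reading that line; it is illustrative only and not
    part of the patch — only the field name and the reported strings come from the hunk above.]

    #include <stdio.h>
    #include <string.h>

    /* Print the Speculation_Store_Bypass line added to /proc/<pid>/status
     * by the hunk above.  Minimal sketch; error handling kept to the basics. */
    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/self/status", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            while (fgets(line, sizeof(line), f)) {
                    if (!strncmp(line, "Speculation_Store_Bypass:", 25))
                            fputs(line, stdout); /* e.g. "thread vulnerable" or "globally mitigated" */
            }
            fclose(f);
            return 0;
    }

    [On kernels without this patch the line is simply absent, so the loop prints nothing.]
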
3445     diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
3446     index 6b66cd1aa0b9..660a7d5e4702 100644
3447     --- a/include/linux/bpf_verifier.h
3448     +++ b/include/linux/bpf_verifier.h
3449     @@ -146,6 +146,7 @@ struct bpf_insn_aux_data {
3450     s32 call_imm; /* saved imm field of call insn */
3451     };
3452     int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
3453     + int sanitize_stack_off; /* stack slot to be cleared */
3454     bool seen; /* this insn was processed by the verifier */
3455     };
3456    
3457     diff --git a/include/linux/cpu.h b/include/linux/cpu.h
3458     index 7b01bc11c692..a97a63eef59f 100644
3459     --- a/include/linux/cpu.h
3460     +++ b/include/linux/cpu.h
3461     @@ -53,6 +53,8 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev,
3462     struct device_attribute *attr, char *buf);
3463     extern ssize_t cpu_show_spectre_v2(struct device *dev,
3464     struct device_attribute *attr, char *buf);
3465     +extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
3466     + struct device_attribute *attr, char *buf);
3467    
3468     extern __printf(4, 5)
3469     struct device *cpu_device_create(struct device *parent, void *drvdata,
3470     diff --git a/include/linux/efi.h b/include/linux/efi.h
3471     index f5083aa72eae..c2520ca123aa 100644
3472     --- a/include/linux/efi.h
3473     +++ b/include/linux/efi.h
3474     @@ -395,8 +395,8 @@ typedef struct {
3475     u32 attributes;
3476     u32 get_bar_attributes;
3477     u32 set_bar_attributes;
3478     - uint64_t romsize;
3479     - void *romimage;
3480     + u64 romsize;
3481     + u32 romimage;
3482     } efi_pci_io_protocol_32;
3483    
3484     typedef struct {
3485     @@ -415,8 +415,8 @@ typedef struct {
3486     u64 attributes;
3487     u64 get_bar_attributes;
3488     u64 set_bar_attributes;
3489     - uint64_t romsize;
3490     - void *romimage;
3491     + u64 romsize;
3492     + u64 romimage;
3493     } efi_pci_io_protocol_64;
3494    
3495     typedef struct {
3496     diff --git a/include/linux/nospec.h b/include/linux/nospec.h
3497     index e791ebc65c9c..0c5ef54fd416 100644
3498     --- a/include/linux/nospec.h
3499     +++ b/include/linux/nospec.h
3500     @@ -7,6 +7,8 @@
3501     #define _LINUX_NOSPEC_H
3502     #include <asm/barrier.h>
3503    
3504     +struct task_struct;
3505     +
3506     /**
3507     * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
3508     * @index: array element index
3509     @@ -55,4 +57,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
3510     \
3511     (typeof(_i)) (_i & _mask); \
3512     })
3513     +
3514     +/* Speculation control prctl */
3515     +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
3516     +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
3517     + unsigned long ctrl);
3518     +/* Speculation control for seccomp enforced mitigation */
3519     +void arch_seccomp_spec_mitigate(struct task_struct *task);
3520     +
3521     #endif /* _LINUX_NOSPEC_H */
3522     diff --git a/include/linux/sched.h b/include/linux/sched.h
3523     index b161ef8a902e..710508af02c8 100644
3524     --- a/include/linux/sched.h
3525     +++ b/include/linux/sched.h
3526     @@ -1365,7 +1365,8 @@ static inline bool is_percpu_thread(void)
3527     #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
3528     #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
3529     #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
3530     -
3531     +#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
3532     +#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
3533    
3534     #define TASK_PFA_TEST(name, func) \
3535     static inline bool task_##func(struct task_struct *p) \
3536     @@ -1390,6 +1391,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
3537     TASK_PFA_SET(SPREAD_SLAB, spread_slab)
3538     TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
3539    
3540     +TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
3541     +TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
3542     +TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
3543     +
3544     +TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
3545     +TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
3546     +
3547     static inline void
3548     current_restore_flags(unsigned long orig_flags, unsigned long flags)
3549     {
3550     diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
3551     index c723a5c4e3ff..e5320f6c8654 100644
3552     --- a/include/linux/seccomp.h
3553     +++ b/include/linux/seccomp.h
3554     @@ -4,8 +4,9 @@
3555    
3556     #include <uapi/linux/seccomp.h>
3557    
3558     -#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
3559     - SECCOMP_FILTER_FLAG_LOG)
3560     +#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
3561     + SECCOMP_FILTER_FLAG_LOG | \
3562     + SECCOMP_FILTER_FLAG_SPEC_ALLOW)
3563    
3564     #ifdef CONFIG_SECCOMP
3565    
3566     diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
3567     index 7dd8f34c37df..fdcf88bcf0ea 100644
3568     --- a/include/trace/events/xen.h
3569     +++ b/include/trace/events/xen.h
3570     @@ -352,22 +352,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
3571     DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
3572     DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
3573    
3574     -TRACE_EVENT(xen_mmu_flush_tlb_all,
3575     - TP_PROTO(int x),
3576     - TP_ARGS(x),
3577     - TP_STRUCT__entry(__array(char, x, 0)),
3578     - TP_fast_assign((void)x),
3579     - TP_printk("%s", "")
3580     - );
3581     -
3582     -TRACE_EVENT(xen_mmu_flush_tlb,
3583     - TP_PROTO(int x),
3584     - TP_ARGS(x),
3585     - TP_STRUCT__entry(__array(char, x, 0)),
3586     - TP_fast_assign((void)x),
3587     - TP_printk("%s", "")
3588     - );
3589     -
3590     TRACE_EVENT(xen_mmu_flush_tlb_one_user,
3591     TP_PROTO(unsigned long addr),
3592     TP_ARGS(addr),
3593     diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
3594     index af5f8c2df87a..db9f15f5db04 100644
3595     --- a/include/uapi/linux/prctl.h
3596     +++ b/include/uapi/linux/prctl.h
3597     @@ -207,4 +207,16 @@ struct prctl_mm_map {
3598     # define PR_SVE_VL_LEN_MASK 0xffff
3599     # define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
3600    
3601     +/* Per task speculation control */
3602     +#define PR_GET_SPECULATION_CTRL 52
3603     +#define PR_SET_SPECULATION_CTRL 53
3604     +/* Speculation control variants */
3605     +# define PR_SPEC_STORE_BYPASS 0
3606     +/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
3607     +# define PR_SPEC_NOT_AFFECTED 0
3608     +# define PR_SPEC_PRCTL (1UL << 0)
3609     +# define PR_SPEC_ENABLE (1UL << 1)
3610     +# define PR_SPEC_DISABLE (1UL << 2)
3611     +# define PR_SPEC_FORCE_DISABLE (1UL << 3)
3612     +
3613     #endif /* _LINUX_PRCTL_H */
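
    [The include/uapi/linux/prctl.h hunk above defines the new per-task speculation control interface:
    PR_GET_SPECULATION_CTRL / PR_SET_SPECULATION_CTRL with the PR_SPEC_STORE_BYPASS variant and the
    PR_SPEC_* result/control bits.  Below is a hedged userspace sketch of how a process might query and
    then disable Speculative Store Bypass for itself.  It is not part of the patch; the fallback defines
    only mirror the hunk for the case where the installed headers predate it.]

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SPECULATION_CTRL
    #define PR_GET_SPECULATION_CTRL 52
    #define PR_SET_SPECULATION_CTRL 53
    #define PR_SPEC_STORE_BYPASS    0
    #define PR_SPEC_PRCTL           (1UL << 0)
    #define PR_SPEC_DISABLE         (1UL << 2)
    #endif

    int main(void)
    {
            /* Query the current SSB state for this task. */
            long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

            if (state < 0) {
                    perror("PR_GET_SPECULATION_CTRL"); /* kernel or arch without this support */
                    return 1;
            }
            printf("speculation ctrl state: 0x%lx\n", state);

            /* If per-task control is available, opt this task into the mitigation. */
            if (state & PR_SPEC_PRCTL) {
                    if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                              PR_SPEC_DISABLE, 0, 0))
                            perror("PR_SET_SPECULATION_CTRL");
            }
            return 0;
    }

    [Per the flag names in the hunk, PR_SPEC_FORCE_DISABLE is the sticky variant of PR_SPEC_DISABLE;
    the plain form shown here is the one a sandbox would normally toggle.]
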
3614     diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
3615     index 2a0bd9dd104d..9efc0e73d50b 100644
3616     --- a/include/uapi/linux/seccomp.h
3617     +++ b/include/uapi/linux/seccomp.h
3618     @@ -17,8 +17,9 @@
3619     #define SECCOMP_GET_ACTION_AVAIL 2
3620    
3621     /* Valid flags for SECCOMP_SET_MODE_FILTER */
3622     -#define SECCOMP_FILTER_FLAG_TSYNC 1
3623     -#define SECCOMP_FILTER_FLAG_LOG 2
3624     +#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
3625     +#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
3626     +#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
3627    
3628     /*
3629     * All BPF programs must return a 32-bit value.
3630     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3631     index c6eff108aa99..1a17e0d84347 100644
3632     --- a/kernel/bpf/verifier.c
3633     +++ b/kernel/bpf/verifier.c
3634     @@ -970,7 +970,7 @@ static bool register_is_null(struct bpf_reg_state *reg)
3635     */
3636     static int check_stack_write(struct bpf_verifier_env *env,
3637     struct bpf_func_state *state, /* func where register points to */
3638     - int off, int size, int value_regno)
3639     + int off, int size, int value_regno, int insn_idx)
3640     {
3641     struct bpf_func_state *cur; /* state of the current function */
3642     int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
3643     @@ -1009,8 +1009,33 @@ static int check_stack_write(struct bpf_verifier_env *env,
3644     state->stack[spi].spilled_ptr = cur->regs[value_regno];
3645     state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3646    
3647     - for (i = 0; i < BPF_REG_SIZE; i++)
3648     + for (i = 0; i < BPF_REG_SIZE; i++) {
3649     + if (state->stack[spi].slot_type[i] == STACK_MISC &&
3650     + !env->allow_ptr_leaks) {
3651     + int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
3652     + int soff = (-spi - 1) * BPF_REG_SIZE;
3653     +
3654     + /* detected reuse of integer stack slot with a pointer
3655     + * which means either llvm is reusing stack slot or
3656     + * an attacker is trying to exploit CVE-2018-3639
3657     + * (speculative store bypass)
3658     + * Have to sanitize that slot with preemptive
3659     + * store of zero.
3660     + */
3661     + if (*poff && *poff != soff) {
3662     + /* disallow programs where single insn stores
3663     + * into two different stack slots, since verifier
3664     + * cannot sanitize them
3665     + */
3666     + verbose(env,
3667     + "insn %d cannot access two stack slots fp%d and fp%d",
3668     + insn_idx, *poff, soff);
3669     + return -EINVAL;
3670     + }
3671     + *poff = soff;
3672     + }
3673     state->stack[spi].slot_type[i] = STACK_SPILL;
3674     + }
3675     } else {
3676     u8 type = STACK_MISC;
3677    
3678     @@ -1685,7 +1710,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
3679    
3680     if (t == BPF_WRITE)
3681     err = check_stack_write(env, state, off, size,
3682     - value_regno);
3683     + value_regno, insn_idx);
3684     else
3685     err = check_stack_read(env, state, off, size,
3686     value_regno);
3687     @@ -5156,6 +5181,34 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3688     else
3689     continue;
3690    
3691     + if (type == BPF_WRITE &&
3692     + env->insn_aux_data[i + delta].sanitize_stack_off) {
3693     + struct bpf_insn patch[] = {
3694     + /* Sanitize suspicious stack slot with zero.
3695     + * There are no memory dependencies for this store,
3696     + * since it's only using frame pointer and immediate
3697     + * constant of zero
3698     + */
3699     + BPF_ST_MEM(BPF_DW, BPF_REG_FP,
3700     + env->insn_aux_data[i + delta].sanitize_stack_off,
3701     + 0),
3702     + /* the original STX instruction will immediately
3703     + * overwrite the same stack slot with appropriate value
3704     + */
3705     + *insn,
3706     + };
3707     +
3708     + cnt = ARRAY_SIZE(patch);
3709     + new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
3710     + if (!new_prog)
3711     + return -ENOMEM;
3712     +
3713     + delta += cnt - 1;
3714     + env->prog = new_prog;
3715     + insn = new_prog->insnsi + i + delta;
3716     + continue;
3717     + }
3718     +
3719     if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
3720     continue;
3721    
3722     diff --git a/kernel/seccomp.c b/kernel/seccomp.c
3723     index dc77548167ef..e691d9a6c58d 100644
3724     --- a/kernel/seccomp.c
3725     +++ b/kernel/seccomp.c
3726     @@ -19,6 +19,8 @@
3727     #include <linux/compat.h>
3728     #include <linux/coredump.h>
3729     #include <linux/kmemleak.h>
3730     +#include <linux/nospec.h>
3731     +#include <linux/prctl.h>
3732     #include <linux/sched.h>
3733     #include <linux/sched/task_stack.h>
3734     #include <linux/seccomp.h>
3735     @@ -227,8 +229,11 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
3736     return true;
3737     }
3738    
3739     +void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
3740     +
3741     static inline void seccomp_assign_mode(struct task_struct *task,
3742     - unsigned long seccomp_mode)
3743     + unsigned long seccomp_mode,
3744     + unsigned long flags)
3745     {
3746     assert_spin_locked(&task->sighand->siglock);
3747    
3748     @@ -238,6 +243,9 @@ static inline void seccomp_assign_mode(struct task_struct *task,
3749     * filter) is set.
3750     */
3751     smp_mb__before_atomic();
3752     + /* Assume default seccomp processes want spec flaw mitigation. */
3753     + if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
3754     + arch_seccomp_spec_mitigate(task);
3755     set_tsk_thread_flag(task, TIF_SECCOMP);
3756     }
3757    
3758     @@ -305,7 +313,7 @@ static inline pid_t seccomp_can_sync_threads(void)
3759     * without dropping the locks.
3760     *
3761     */
3762     -static inline void seccomp_sync_threads(void)
3763     +static inline void seccomp_sync_threads(unsigned long flags)
3764     {
3765     struct task_struct *thread, *caller;
3766    
3767     @@ -346,7 +354,8 @@ static inline void seccomp_sync_threads(void)
3768     * allow one thread to transition the other.
3769     */
3770     if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
3771     - seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
3772     + seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
3773     + flags);
3774     }
3775     }
3776    
3777     @@ -469,7 +478,7 @@ static long seccomp_attach_filter(unsigned int flags,
3778    
3779     /* Now that the new filter is in place, synchronize to all threads. */
3780     if (flags & SECCOMP_FILTER_FLAG_TSYNC)
3781     - seccomp_sync_threads();
3782     + seccomp_sync_threads(flags);
3783    
3784     return 0;
3785     }
3786     @@ -818,7 +827,7 @@ static long seccomp_set_mode_strict(void)
3787     #ifdef TIF_NOTSC
3788     disable_TSC();
3789     #endif
3790     - seccomp_assign_mode(current, seccomp_mode);
3791     + seccomp_assign_mode(current, seccomp_mode, 0);
3792     ret = 0;
3793    
3794     out:
3795     @@ -876,7 +885,7 @@ static long seccomp_set_mode_filter(unsigned int flags,
3796     /* Do not free the successfully attached filter. */
3797     prepared = NULL;
3798    
3799     - seccomp_assign_mode(current, seccomp_mode);
3800     + seccomp_assign_mode(current, seccomp_mode, flags);
3801     out:
3802     spin_unlock_irq(&current->sighand->siglock);
3803     if (flags & SECCOMP_FILTER_FLAG_TSYNC)
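
    [With the kernel/seccomp.c change above, installing a seccomp filter now also applies the
    Speculative Store Bypass mitigation to the task unless SECCOMP_FILTER_FLAG_SPEC_ALLOW is passed.
    The sketch below shows the opt-out from userspace via the raw seccomp(2) syscall; it is not part of
    the patch, and it assumes the C library exposes SYS_seccomp — the flag fallback mirrors the uapi hunk.]

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    #ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
    #define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
    #endif

    int main(void)
    {
            /* Allow-everything filter; a real filter would inspect seccomp_data. */
            struct sock_filter filter[] = {
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
            };
            struct sock_fprog prog = {
                    .len = sizeof(filter) / sizeof(filter[0]),
                    .filter = filter,
            };

            /* Required so an unprivileged task may install a filter. */
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    perror("PR_SET_NO_NEW_PRIVS");

            /* Opt out of the implicit SSB mitigation that a plain filter
             * install now applies (see the seccomp_assign_mode() change above). */
            if (syscall(SYS_seccomp, SECCOMP_SET_MODE_FILTER,
                        SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog))
                    perror("seccomp");
            return 0;
    }
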
3804     diff --git a/kernel/sys.c b/kernel/sys.c
3805     index f2289de20e19..9afc4cb5acf5 100644
3806     --- a/kernel/sys.c
3807     +++ b/kernel/sys.c
3808     @@ -61,6 +61,8 @@
3809     #include <linux/uidgid.h>
3810     #include <linux/cred.h>
3811    
3812     +#include <linux/nospec.h>
3813     +
3814     #include <linux/kmsg_dump.h>
3815     /* Move somewhere else to avoid recompiling? */
3816     #include <generated/utsrelease.h>
3817     @@ -2190,6 +2192,17 @@ static int propagate_has_child_subreaper(struct task_struct *p, void *data)
3818     return 1;
3819     }
3820    
3821     +int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
3822     +{
3823     + return -EINVAL;
3824     +}
3825     +
3826     +int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
3827     + unsigned long ctrl)
3828     +{
3829     + return -EINVAL;
3830     +}
3831     +
3832     SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
3833     unsigned long, arg4, unsigned long, arg5)
3834     {
3835     @@ -2398,6 +2411,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
3836     case PR_SVE_GET_VL:
3837     error = SVE_GET_VL();
3838     break;
3839     + case PR_GET_SPECULATION_CTRL:
3840     + if (arg3 || arg4 || arg5)
3841     + return -EINVAL;
3842     + error = arch_prctl_spec_ctrl_get(me, arg2);
3843     + break;
3844     + case PR_SET_SPECULATION_CTRL:
3845     + if (arg4 || arg5)
3846     + return -EINVAL;
3847     + error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
3848     + break;
3849     default:
3850     error = -EINVAL;
3851     break;
3852     diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
3853     index b398c2ea69b2..aa2094d5dd27 100644
3854     --- a/kernel/time/tick-broadcast.c
3855     +++ b/kernel/time/tick-broadcast.c
3856     @@ -612,6 +612,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
3857     now = ktime_get();
3858     /* Find all expired events */
3859     for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
3860     + /*
3861     + * Required for !SMP because for_each_cpu() reports
3862     + * unconditionally CPU0 as set on UP kernels.
3863     + */
3864     + if (!IS_ENABLED(CONFIG_SMP) &&
3865     + cpumask_empty(tick_broadcast_oneshot_mask))
3866     + break;
3867     +
3868     td = &per_cpu(tick_cpu_device, cpu);
3869     if (td->evtdev->next_event <= now) {
3870     cpumask_set_cpu(cpu, tmpmask);
3871     diff --git a/lib/radix-tree.c b/lib/radix-tree.c
3872     index 8e00138d593f..a7705b0f139c 100644
3873     --- a/lib/radix-tree.c
3874     +++ b/lib/radix-tree.c
3875     @@ -1612,11 +1612,9 @@ static void set_iter_tags(struct radix_tree_iter *iter,
3876     static void __rcu **skip_siblings(struct radix_tree_node **nodep,
3877     void __rcu **slot, struct radix_tree_iter *iter)
3878     {
3879     - void *sib = node_to_entry(slot - 1);
3880     -
3881     while (iter->index < iter->next_index) {
3882     *nodep = rcu_dereference_raw(*slot);
3883     - if (*nodep && *nodep != sib)
3884     + if (*nodep && !is_sibling_entry(iter->node, *nodep))
3885     return slot;
3886     slot++;
3887     iter->index = __radix_tree_iter_add(iter, 1);
3888     @@ -1631,7 +1629,7 @@ void __rcu **__radix_tree_next_slot(void __rcu **slot,
3889     struct radix_tree_iter *iter, unsigned flags)
3890     {
3891     unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
3892     - struct radix_tree_node *node = rcu_dereference_raw(*slot);
3893     + struct radix_tree_node *node;
3894    
3895     slot = skip_siblings(&node, slot, iter);
3896    
3897     diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
3898     index 413367cf569e..d47a802220f5 100644
3899     --- a/lib/test_bitmap.c
3900     +++ b/lib/test_bitmap.c
3901     @@ -329,23 +329,32 @@ static void noinline __init test_mem_optimisations(void)
3902     unsigned int start, nbits;
3903    
3904     for (start = 0; start < 1024; start += 8) {
3905     - memset(bmap1, 0x5a, sizeof(bmap1));
3906     - memset(bmap2, 0x5a, sizeof(bmap2));
3907     for (nbits = 0; nbits < 1024 - start; nbits += 8) {
3908     + memset(bmap1, 0x5a, sizeof(bmap1));
3909     + memset(bmap2, 0x5a, sizeof(bmap2));
3910     +
3911     bitmap_set(bmap1, start, nbits);
3912     __bitmap_set(bmap2, start, nbits);
3913     - if (!bitmap_equal(bmap1, bmap2, 1024))
3914     + if (!bitmap_equal(bmap1, bmap2, 1024)) {
3915     printk("set not equal %d %d\n", start, nbits);
3916     - if (!__bitmap_equal(bmap1, bmap2, 1024))
3917     + failed_tests++;
3918     + }
3919     + if (!__bitmap_equal(bmap1, bmap2, 1024)) {
3920     printk("set not __equal %d %d\n", start, nbits);
3921     + failed_tests++;
3922     + }
3923    
3924     bitmap_clear(bmap1, start, nbits);
3925     __bitmap_clear(bmap2, start, nbits);
3926     - if (!bitmap_equal(bmap1, bmap2, 1024))
3927     + if (!bitmap_equal(bmap1, bmap2, 1024)) {
3928     printk("clear not equal %d %d\n", start, nbits);
3929     - if (!__bitmap_equal(bmap1, bmap2, 1024))
3930     + failed_tests++;
3931     + }
3932     + if (!__bitmap_equal(bmap1, bmap2, 1024)) {
3933     printk("clear not __equal %d %d\n", start,
3934     nbits);
3935     + failed_tests++;
3936     + }
3937     }
3938     }
3939     }
3940     diff --git a/lib/vsprintf.c b/lib/vsprintf.c
3941     index 89f8a4a4b770..38b509cc6b46 100644
3942     --- a/lib/vsprintf.c
3943     +++ b/lib/vsprintf.c
3944     @@ -1659,19 +1659,22 @@ char *pointer_string(char *buf, char *end, const void *ptr,
3945     return number(buf, end, (unsigned long int)ptr, spec);
3946     }
3947    
3948     -static bool have_filled_random_ptr_key __read_mostly;
3949     +static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
3950     static siphash_key_t ptr_key __read_mostly;
3951    
3952     -static void fill_random_ptr_key(struct random_ready_callback *unused)
3953     +static void enable_ptr_key_workfn(struct work_struct *work)
3954     {
3955     get_random_bytes(&ptr_key, sizeof(ptr_key));
3956     - /*
3957     - * have_filled_random_ptr_key==true is dependent on get_random_bytes().
3958     - * ptr_to_id() needs to see have_filled_random_ptr_key==true
3959     - * after get_random_bytes() returns.
3960     - */
3961     - smp_mb();
3962     - WRITE_ONCE(have_filled_random_ptr_key, true);
3963     + /* Needs to run from preemptible context */
3964     + static_branch_disable(&not_filled_random_ptr_key);
3965     +}
3966     +
3967     +static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
3968     +
3969     +static void fill_random_ptr_key(struct random_ready_callback *unused)
3970     +{
3971     + /* This may be in an interrupt handler. */
3972     + queue_work(system_unbound_wq, &enable_ptr_key_work);
3973     }
3974    
3975     static struct random_ready_callback random_ready = {
3976     @@ -1685,7 +1688,8 @@ static int __init initialize_ptr_random(void)
3977     if (!ret) {
3978     return 0;
3979     } else if (ret == -EALREADY) {
3980     - fill_random_ptr_key(&random_ready);
3981     + /* This is in preemptible context */
3982     + enable_ptr_key_workfn(&enable_ptr_key_work);
3983     return 0;
3984     }
3985    
3986     @@ -1699,7 +1703,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
3987     unsigned long hashval;
3988     const int default_width = 2 * sizeof(ptr);
3989    
3990     - if (unlikely(!have_filled_random_ptr_key)) {
3991     + if (static_branch_unlikely(&not_filled_random_ptr_key)) {
3992     spec.field_width = default_width;
3993     /* string length must be less than default_width */
3994     return string(buf, end, "(ptrval)", spec);
3995     diff --git a/mm/Kconfig b/mm/Kconfig
3996     index c782e8fb7235..e07608f64d47 100644
3997     --- a/mm/Kconfig
3998     +++ b/mm/Kconfig
3999     @@ -644,6 +644,7 @@ config DEFERRED_STRUCT_PAGE_INIT
4000     default n
4001     depends on NO_BOOTMEM
4002     depends on !FLATMEM
4003     + depends on !NEED_PER_CPU_KM
4004     help
4005     Ordinarily all struct pages are initialised during early boot in a
4006     single thread. On very large machines this can take a considerable
4007     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4008     index 530e12ae52d7..c853386b86ff 100644
4009     --- a/net/netfilter/nf_tables_api.c
4010     +++ b/net/netfilter/nf_tables_api.c
4011     @@ -2357,41 +2357,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
4012     }
4013    
4014     if (nlh->nlmsg_flags & NLM_F_REPLACE) {
4015     - if (nft_is_active_next(net, old_rule)) {
4016     - trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
4017     - old_rule);
4018     - if (trans == NULL) {
4019     - err = -ENOMEM;
4020     - goto err2;
4021     - }
4022     - nft_deactivate_next(net, old_rule);
4023     - chain->use--;
4024     - list_add_tail_rcu(&rule->list, &old_rule->list);
4025     - } else {
4026     + if (!nft_is_active_next(net, old_rule)) {
4027     err = -ENOENT;
4028     goto err2;
4029     }
4030     - } else if (nlh->nlmsg_flags & NLM_F_APPEND)
4031     - if (old_rule)
4032     - list_add_rcu(&rule->list, &old_rule->list);
4033     - else
4034     - list_add_tail_rcu(&rule->list, &chain->rules);
4035     - else {
4036     - if (old_rule)
4037     - list_add_tail_rcu(&rule->list, &old_rule->list);
4038     - else
4039     - list_add_rcu(&rule->list, &chain->rules);
4040     - }
4041     + trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
4042     + old_rule);
4043     + if (trans == NULL) {
4044     + err = -ENOMEM;
4045     + goto err2;
4046     + }
4047     + nft_deactivate_next(net, old_rule);
4048     + chain->use--;
4049    
4050     - if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
4051     - err = -ENOMEM;
4052     - goto err3;
4053     + if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
4054     + err = -ENOMEM;
4055     + goto err2;
4056     + }
4057     +
4058     + list_add_tail_rcu(&rule->list, &old_rule->list);
4059     + } else {
4060     + if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
4061     + err = -ENOMEM;
4062     + goto err2;
4063     + }
4064     +
4065     + if (nlh->nlmsg_flags & NLM_F_APPEND) {
4066     + if (old_rule)
4067     + list_add_rcu(&rule->list, &old_rule->list);
4068     + else
4069     + list_add_tail_rcu(&rule->list, &chain->rules);
4070     + } else {
4071     + if (old_rule)
4072     + list_add_tail_rcu(&rule->list, &old_rule->list);
4073     + else
4074     + list_add_rcu(&rule->list, &chain->rules);
4075     + }
4076     }
4077     chain->use++;
4078     return 0;
4079    
4080     -err3:
4081     - list_del_rcu(&rule->list);
4082     err2:
4083     nf_tables_rule_destroy(&ctx, rule);
4084     err1:
4085     @@ -3203,18 +3208,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
4086    
4087     err = ops->init(set, &desc, nla);
4088     if (err < 0)
4089     - goto err2;
4090     + goto err3;
4091    
4092     err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
4093     if (err < 0)
4094     - goto err3;
4095     + goto err4;
4096    
4097     list_add_tail_rcu(&set->list, &table->sets);
4098     table->use++;
4099     return 0;
4100    
4101     -err3:
4102     +err4:
4103     ops->destroy(set);
4104     +err3:
4105     + kfree(set->name);
4106     err2:
4107     kvfree(set);
4108     err1:
4109     @@ -4392,9 +4399,9 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
4110     }
4111     EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
4112    
4113     -struct nft_object *nf_tables_obj_lookup_byhandle(const struct nft_table *table,
4114     - const struct nlattr *nla,
4115     - u32 objtype, u8 genmask)
4116     +static struct nft_object *nf_tables_obj_lookup_byhandle(const struct nft_table *table,
4117     + const struct nlattr *nla,
4118     + u32 objtype, u8 genmask)
4119     {
4120     struct nft_object *obj;
4121    
4122     @@ -4914,7 +4921,7 @@ struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
4123     }
4124     EXPORT_SYMBOL_GPL(nf_tables_flowtable_lookup);
4125    
4126     -struct nft_flowtable *
4127     +static struct nft_flowtable *
4128     nf_tables_flowtable_lookup_byhandle(const struct nft_table *table,
4129     const struct nlattr *nla, u8 genmask)
4130     {
4131     diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
4132     index a848836a5de0..507fd5210c1c 100644
4133     --- a/sound/core/control_compat.c
4134     +++ b/sound/core/control_compat.c
4135     @@ -396,8 +396,7 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
4136     if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
4137     copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
4138     goto error;
4139     - if (get_user(data->owner, &data32->owner) ||
4140     - get_user(data->type, &data32->type))
4141     + if (get_user(data->owner, &data32->owner))
4142     goto error;
4143     switch (data->type) {
4144     case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
4145     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4146     index 738e1fe90312..62fbdbe74b93 100644
4147     --- a/sound/pci/hda/hda_intel.c
4148     +++ b/sound/pci/hda/hda_intel.c
4149     @@ -2208,6 +2208,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
4150     SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
4151     /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
4152     SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
4153     + /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
4154     + SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
4155     /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
4156     SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
4157     {}
4158     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4159     index 2dd34dd77447..01a6643fc7d4 100644
4160     --- a/sound/pci/hda/patch_realtek.c
4161     +++ b/sound/pci/hda/patch_realtek.c
4162     @@ -2363,6 +2363,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
4163     SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
4164     SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
4165     SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
4166     + SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
4167     SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
4168     SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
4169     SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
4170     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
4171     index 06b22624ab7a..1a4c107b1940 100644
4172     --- a/sound/usb/mixer.c
4173     +++ b/sound/usb/mixer.c
4174     @@ -915,6 +915,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
4175     }
4176     break;
4177    
4178     + case USB_ID(0x0d8c, 0x0103):
4179     + if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
4180     + usb_audio_info(chip,
4181     + "set volume quirk for CM102-A+/102S+\n");
4182     + cval->min = -256;
4183     + }
4184     + break;
4185     +
4186     case USB_ID(0x0471, 0x0101):
4187     case USB_ID(0x0471, 0x0104):
4188     case USB_ID(0x0471, 0x0105):
4189     diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
4190     index 5df609950a66..e350cf3d4f90 100644
4191     --- a/tools/testing/selftests/seccomp/seccomp_bpf.c
4192     +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
4193     @@ -134,11 +134,15 @@ struct seccomp_data {
4194     #endif
4195    
4196     #ifndef SECCOMP_FILTER_FLAG_TSYNC
4197     -#define SECCOMP_FILTER_FLAG_TSYNC 1
4198     +#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
4199     #endif
4200    
4201     #ifndef SECCOMP_FILTER_FLAG_LOG
4202     -#define SECCOMP_FILTER_FLAG_LOG 2
4203     +#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
4204     +#endif
4205     +
4206     +#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
4207     +#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
4208     #endif
4209    
4210     #ifndef PTRACE_SECCOMP_GET_METADATA
4211     @@ -2072,14 +2076,26 @@ TEST(seccomp_syscall_mode_lock)
4212     TEST(detect_seccomp_filter_flags)
4213     {
4214     unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
4215     - SECCOMP_FILTER_FLAG_LOG };
4216     + SECCOMP_FILTER_FLAG_LOG,
4217     + SECCOMP_FILTER_FLAG_SPEC_ALLOW };
4218     unsigned int flag, all_flags;
4219     int i;
4220     long ret;
4221    
4222     /* Test detection of known-good filter flags */
4223     for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
4224     + int bits = 0;
4225     +
4226     flag = flags[i];
4227     + /* Make sure the flag is a single bit! */
4228     + while (flag) {
4229     + if (flag & 0x1)
4230     + bits ++;
4231     + flag >>= 1;
4232     + }
4233     + ASSERT_EQ(1, bits);
4234     + flag = flags[i];
4235     +
4236     ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
4237     ASSERT_NE(ENOSYS, errno) {
4238     TH_LOG("Kernel does not support seccomp syscall!");
4239     diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
4240     index 10b38178cff2..4ffc0b5e6105 100644
4241     --- a/virt/kvm/arm/vgic/vgic-debug.c
4242     +++ b/virt/kvm/arm/vgic/vgic-debug.c
4243     @@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
4244     struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
4245     struct vgic_irq *irq;
4246     struct kvm_vcpu *vcpu = NULL;
4247     + unsigned long flags;
4248    
4249     if (iter->dist_id == 0) {
4250     print_dist_state(s, &kvm->arch.vgic);
4251     @@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
4252     irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
4253     }
4254    
4255     - spin_lock(&irq->irq_lock);
4256     + spin_lock_irqsave(&irq->irq_lock, flags);
4257     print_irq_state(s, irq, vcpu);
4258     - spin_unlock(&irq->irq_lock);
4259     + spin_unlock_irqrestore(&irq->irq_lock, flags);
4260    
4261     return 0;
4262     }
4263     diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
4264     index a8f07243aa9f..4ed79c939fb4 100644
4265     --- a/virt/kvm/arm/vgic/vgic-its.c
4266     +++ b/virt/kvm/arm/vgic/vgic-its.c
4267     @@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
4268     {
4269     struct vgic_dist *dist = &kvm->arch.vgic;
4270     struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
4271     + unsigned long flags;
4272     int ret;
4273    
4274     /* In this case there is no put, since we keep the reference. */
4275     @@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
4276     irq->intid = intid;
4277     irq->target_vcpu = vcpu;
4278    
4279     - spin_lock(&dist->lpi_list_lock);
4280     + spin_lock_irqsave(&dist->lpi_list_lock, flags);
4281    
4282     /*
4283     * There could be a race with another vgic_add_lpi(), so we need to
4284     @@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
4285     dist->lpi_list_count++;
4286    
4287     out_unlock:
4288     - spin_unlock(&dist->lpi_list_lock);
4289     + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
4290    
4291     /*
4292     * We "cache" the configuration table entries in our struct vgic_irq's.
4293     @@ -280,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
4294     int ret;
4295     unsigned long flags;
4296    
4297     - ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
4298     - &prop, 1);
4299     + ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
4300     + &prop, 1);
4301    
4302     if (ret)
4303     return ret;
4304     @@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
4305     {
4306     struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
4307     struct vgic_irq *irq;
4308     + unsigned long flags;
4309     u32 *intids;
4310     int irq_count, i = 0;
4311    
4312     @@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
4313     if (!intids)
4314     return -ENOMEM;
4315    
4316     - spin_lock(&dist->lpi_list_lock);
4317     + spin_lock_irqsave(&dist->lpi_list_lock, flags);
4318     list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
4319     if (i == irq_count)
4320     break;
4321     @@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
4322     continue;
4323     intids[i++] = irq->intid;
4324     }
4325     - spin_unlock(&dist->lpi_list_lock);
4326     + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
4327    
4328     *intid_ptr = intids;
4329     return i;
4330     @@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
4331     static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
4332     {
4333     int ret = 0;
4334     + unsigned long flags;
4335    
4336     - spin_lock(&irq->irq_lock);
4337     + spin_lock_irqsave(&irq->irq_lock, flags);
4338     irq->target_vcpu = vcpu;
4339     - spin_unlock(&irq->irq_lock);
4340     + spin_unlock_irqrestore(&irq->irq_lock, flags);
4341    
4342     if (irq->hw) {
4343     struct its_vlpi_map map;
4344     @@ -441,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
4345     * this very same byte in the last iteration. Reuse that.
4346     */
4347     if (byte_offset != last_byte_offset) {
4348     - ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
4349     - &pendmask, 1);
4350     + ret = kvm_read_guest_lock(vcpu->kvm,
4351     + pendbase + byte_offset,
4352     + &pendmask, 1);
4353     if (ret) {
4354     kfree(intids);
4355     return ret;
4356     @@ -786,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
4357     return false;
4358    
4359     /* Each 1st level entry is represented by a 64-bit value. */
4360     - if (kvm_read_guest(its->dev->kvm,
4361     + if (kvm_read_guest_lock(its->dev->kvm,
4362     BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
4363     &indirect_ptr, sizeof(indirect_ptr)))
4364     return false;
4365     @@ -1367,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
4366     cbaser = CBASER_ADDRESS(its->cbaser);
4367    
4368     while (its->cwriter != its->creadr) {
4369     - int ret = kvm_read_guest(kvm, cbaser + its->creadr,
4370     - cmd_buf, ITS_CMD_SIZE);
4371     + int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
4372     + cmd_buf, ITS_CMD_SIZE);
4373     /*
4374     * If kvm_read_guest() fails, this could be due to the guest
4375     * programming a bogus value in CBASER or something else going
4376     @@ -1893,7 +1897,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
4377     int next_offset;
4378     size_t byte_offset;
4379    
4380     - ret = kvm_read_guest(kvm, gpa, entry, esz);
4381     + ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
4382     if (ret)
4383     return ret;
4384    
4385     @@ -2263,7 +2267,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
4386     int ret;
4387    
4388     BUG_ON(esz > sizeof(val));
4389     - ret = kvm_read_guest(kvm, gpa, &val, esz);
4390     + ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
4391     if (ret)
4392     return ret;
4393     val = le64_to_cpu(val);
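
The vgic-its.c hunks above switch every guest-memory read of the ITS tables and command queue from kvm_read_guest() to kvm_read_guest_lock(). As far as this note goes, the _lock variant is a thin wrapper that holds the kvm->srcu read-side lock around kvm_read_guest() so the memslot array can be dereferenced safely from these paths; a kernel-context sketch of such a wrapper (illustrative, not necessarily the exact in-tree definition) is:

	static inline int kvm_read_guest_lock(struct kvm *kvm, gpa_t gpa,
					      void *data, unsigned long len)
	{
		/* Protect the memslot lookup done inside kvm_read_guest(). */
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		int ret = kvm_read_guest(kvm, gpa, data, len);

		srcu_read_unlock(&kvm->srcu, srcu_idx);
		return ret;
	}
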
4394     diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
4395     index 0ff2006f3781..cf2f716532ac 100644
4396     --- a/virt/kvm/arm/vgic/vgic-v3.c
4397     +++ b/virt/kvm/arm/vgic/vgic-v3.c
4398     @@ -329,7 +329,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
4399     bit_nr = irq->intid % BITS_PER_BYTE;
4400     ptr = pendbase + byte_offset;
4401    
4402     - ret = kvm_read_guest(kvm, ptr, &val, 1);
4403     + ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
4404     if (ret)
4405     return ret;
4406    
4407     @@ -382,7 +382,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
4408     ptr = pendbase + byte_offset;
4409    
4410     if (byte_offset != last_byte_offset) {
4411     - ret = kvm_read_guest(kvm, ptr, &val, 1);
4412     + ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
4413     if (ret)
4414     return ret;
4415     last_byte_offset = byte_offset;
4416     diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
4417     index 8201899126f6..4232c40b34f8 100644
4418     --- a/virt/kvm/arm/vgic/vgic.c
4419     +++ b/virt/kvm/arm/vgic/vgic.c
4420     @@ -40,9 +40,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
4421     * kvm->lock (mutex)
4422     * its->cmd_lock (mutex)
4423     * its->its_lock (mutex)
4424     - * vgic_cpu->ap_list_lock
4425     - * kvm->lpi_list_lock
4426     - * vgic_irq->irq_lock
4427     + * vgic_cpu->ap_list_lock must be taken with IRQs disabled
4428     + * kvm->lpi_list_lock must be taken with IRQs disabled
4429     + * vgic_irq->irq_lock must be taken with IRQs disabled
4430     + *
4431     + * As the ap_list_lock might be taken from the timer interrupt handler,
4432     + * we have to disable IRQs before taking this lock and everything lower
4433     + * than it.
4434     *
4435     * If you need to take multiple locks, always take the upper lock first,
4436     * then the lower ones, e.g. first take the its_lock, then the irq_lock.
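
The updated lock-ordering comment is the rationale for every spin_lock() to spin_lock_irqsave() conversion in this patch: once ap_list_lock (and the locks below it) can be taken from the timer interrupt handler, taking them with interrupts enabled could deadlock against that handler on the same CPU. The irqsave variant records the current interrupt state, disables local interrupts for the critical section, and restores the saved state on unlock. A kernel-context sketch of the pattern, using names from the hunks above:

	/* Before: assumes no interrupt path ever takes this lock. */
	spin_lock(&irq->irq_lock);
	/* ... critical section ... */
	spin_unlock(&irq->irq_lock);

	/* After: safe even if the timer interrupt takes the same lock. */
	unsigned long flags;

	spin_lock_irqsave(&irq->irq_lock, flags);   /* IRQs off, state saved */
	/* ... critical section ... */
	spin_unlock_irqrestore(&irq->irq_lock, flags); /* saved IRQ state restored */
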
4437     @@ -69,8 +73,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
4438     {
4439     struct vgic_dist *dist = &kvm->arch.vgic;
4440     struct vgic_irq *irq = NULL;
4441     + unsigned long flags;
4442    
4443     - spin_lock(&dist->lpi_list_lock);
4444     + spin_lock_irqsave(&dist->lpi_list_lock, flags);
4445    
4446     list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
4447     if (irq->intid != intid)
4448     @@ -86,7 +91,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
4449     irq = NULL;
4450    
4451     out_unlock:
4452     - spin_unlock(&dist->lpi_list_lock);
4453     + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
4454    
4455     return irq;
4456     }
4457     @@ -127,19 +132,20 @@ static void vgic_irq_release(struct kref *ref)
4458     void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
4459     {
4460     struct vgic_dist *dist = &kvm->arch.vgic;
4461     + unsigned long flags;
4462    
4463     if (irq->intid < VGIC_MIN_LPI)
4464     return;
4465    
4466     - spin_lock(&dist->lpi_list_lock);
4467     + spin_lock_irqsave(&dist->lpi_list_lock, flags);
4468     if (!kref_put(&irq->refcount, vgic_irq_release)) {
4469     - spin_unlock(&dist->lpi_list_lock);
4470     + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
4471     return;
4472     };
4473    
4474     list_del(&irq->lpi_list);
4475     dist->lpi_list_count--;
4476     - spin_unlock(&dist->lpi_list_lock);
4477     + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
4478    
4479     kfree(irq);
4480     }