Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0285-5.4.186-all-fixes.patch



Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 73323 bytes
Log message: -add missing
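
The patch below advances the stable series from 5.4.185 to 5.4.186 (see the Makefile SUBLEVEL bump in the first hunk). As a minimal sketch of applying it — assuming a clean linux-5.4.185 source tree and that the file has been downloaded into the current directory under the name shown above:

    # Verify the patch applies cleanly before touching the tree.
    patch -p1 --dry-run -d linux-5.4.185 < 0285-5.4.186-all-fixes.patch
    # Apply it for real.
    patch -p1 -d linux-5.4.185 < 0285-5.4.186-all-fixes.patch
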
1 diff --git a/Makefile b/Makefile
2 index bd3bdf86b992e..f9054b4e8a123 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 185
10 +SUBLEVEL = 186
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
15 index 140e22d74dcfb..d393bb481e747 100644
16 --- a/arch/arm/boot/dts/rk322x.dtsi
17 +++ b/arch/arm/boot/dts/rk322x.dtsi
18 @@ -635,8 +635,8 @@
19 interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
20 assigned-clocks = <&cru SCLK_HDMI_PHY>;
21 assigned-clock-parents = <&hdmi_phy>;
22 - clocks = <&cru SCLK_HDMI_HDCP>, <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_CEC>;
23 - clock-names = "isfr", "iahb", "cec";
24 + clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>, <&cru SCLK_HDMI_CEC>;
25 + clock-names = "iahb", "isfr", "cec";
26 pinctrl-names = "default";
27 pinctrl-0 = <&hdmii2c_xfer &hdmi_hpd &hdmi_cec>;
28 resets = <&cru SRST_HDMI_P>;
29 diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
30 index 658ceb96d8bd1..7dcafd0833ba8 100644
31 --- a/arch/arm/boot/dts/rk3288.dtsi
32 +++ b/arch/arm/boot/dts/rk3288.dtsi
33 @@ -975,7 +975,7 @@
34 status = "disabled";
35 };
36
37 - crypto: cypto-controller@ff8a0000 {
38 + crypto: crypto@ff8a0000 {
39 compatible = "rockchip,rk3288-crypto";
40 reg = <0x0 0xff8a0000 0x0 0x4000>;
41 interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
42 diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
43 index 32564b017ba06..d8ac89879327d 100644
44 --- a/arch/arm/include/asm/kvm_host.h
45 +++ b/arch/arm/include/asm/kvm_host.h
46 @@ -15,6 +15,7 @@
47 #include <asm/kvm_asm.h>
48 #include <asm/kvm_mmio.h>
49 #include <asm/fpstate.h>
50 +#include <asm/spectre.h>
51 #include <kvm/arm_arch_timer.h>
52
53 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
54 @@ -424,4 +425,10 @@ static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
55
56 #define kvm_arm_vcpu_loaded(vcpu) (false)
57
58 +static inline int kvm_arm_get_spectre_bhb_state(void)
59 +{
60 + /* 32bit guests don't need firmware for this */
61 + return SPECTRE_VULNERABLE; /* aka SMCCC_RET_NOT_SUPPORTED */
62 +}
63 +
64 #endif /* __ARM_KVM_HOST_H__ */
65 diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
66 index 2769360f195ca..89b8e70068a13 100644
67 --- a/arch/arm/include/uapi/asm/kvm.h
68 +++ b/arch/arm/include/uapi/asm/kvm.h
69 @@ -227,6 +227,12 @@ struct kvm_vcpu_events {
70 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
71 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
72
73 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 KVM_REG_ARM_FW_REG(3)
74 + /* Higher values mean better protection. */
75 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL 0
76 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL 1
77 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED 2
78 +
79 /* Device Control API: ARM VGIC */
80 #define KVM_DEV_ARM_VGIC_GRP_ADDR 0
81 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
82 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
83 index 9c8ea59398658..a1a828ca188cf 100644
84 --- a/arch/arm64/Kconfig
85 +++ b/arch/arm64/Kconfig
86 @@ -1139,6 +1139,15 @@ config ARM64_SSBD
87
88 If unsure, say Y.
89
90 +config MITIGATE_SPECTRE_BRANCH_HISTORY
91 + bool "Mitigate Spectre style attacks against branch history" if EXPERT
92 + default y
93 + help
94 + Speculation attacks against some high-performance processors can
95 + make use of branch history to influence future speculation.
96 + When taking an exception from user-space, a sequence of branches
97 + or a firmware call overwrites the branch history.
98 +
99 config RODATA_FULL_DEFAULT_ENABLED
100 bool "Apply r/o permissions of VM areas also to their linear aliases"
101 default y
102 diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
103 index d911d38877e52..19f17bb29e4bd 100644
104 --- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
105 +++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
106 @@ -369,7 +369,7 @@
107 };
108
109 usb0: usb@ffb00000 {
110 - compatible = "snps,dwc2";
111 + compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2";
112 reg = <0xffb00000 0x40000>;
113 interrupts = <0 93 4>;
114 phys = <&usbphy0>;
115 @@ -381,7 +381,7 @@
116 };
117
118 usb1: usb@ffb40000 {
119 - compatible = "snps,dwc2";
120 + compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2";
121 reg = <0xffb40000 0x40000>;
122 interrupts = <0 94 4>;
123 phys = <&usbphy0>;
124 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
125 index 45b86933c6ea0..390b86ec65389 100644
126 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
127 +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
128 @@ -467,6 +467,12 @@
129 };
130
131 &sdhci {
132 + /*
133 + * Signal integrity isn't great at 200MHz but 100MHz has proven stable
134 + * enough.
135 + */
136 + max-frequency = <100000000>;
137 +
138 bus-width = <8>;
139 mmc-hs400-1_8v;
140 mmc-hs400-enhanced-strobe;
141 diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
142 index 750dad0d17400..95942d917de53 100644
143 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
144 +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
145 @@ -1746,10 +1746,10 @@
146 interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH 0>;
147 clocks = <&cru PCLK_HDMI_CTRL>,
148 <&cru SCLK_HDMI_SFR>,
149 - <&cru PLL_VPLL>,
150 + <&cru SCLK_HDMI_CEC>,
151 <&cru PCLK_VIO_GRF>,
152 - <&cru SCLK_HDMI_CEC>;
153 - clock-names = "iahb", "isfr", "vpll", "grf", "cec";
154 + <&cru PLL_VPLL>;
155 + clock-names = "iahb", "isfr", "cec", "grf", "vpll";
156 power-domains = <&power RK3399_PD_HDCP>;
157 reg-io-width = <4>;
158 rockchip,grf = <&grf>;
159 diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
160 index 4a4258f17c868..01112f9767bc3 100644
161 --- a/arch/arm64/include/asm/assembler.h
162 +++ b/arch/arm64/include/asm/assembler.h
163 @@ -110,6 +110,13 @@
164 hint #20
165 .endm
166
167 +/*
168 + * Clear Branch History instruction
169 + */
170 + .macro clearbhb
171 + hint #22
172 + .endm
173 +
174 /*
175 * Speculation barrier
176 */
177 @@ -757,4 +764,30 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
178 .Lyield_out_\@ :
179 .endm
180
181 + .macro __mitigate_spectre_bhb_loop tmp
182 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
183 +alternative_cb spectre_bhb_patch_loop_iter
184 + mov \tmp, #32 // Patched to correct the immediate
185 +alternative_cb_end
186 +.Lspectre_bhb_loop\@:
187 + b . + 4
188 + subs \tmp, \tmp, #1
189 + b.ne .Lspectre_bhb_loop\@
190 + sb
191 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
192 + .endm
193 +
194 + /* Save/restores x0-x3 to the stack */
195 + .macro __mitigate_spectre_bhb_fw
196 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
197 + stp x0, x1, [sp, #-16]!
198 + stp x2, x3, [sp, #-16]!
199 + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
200 +alternative_cb arm64_update_smccc_conduit
201 + nop // Patched to SMC/HVC #0
202 +alternative_cb_end
203 + ldp x2, x3, [sp], #16
204 + ldp x0, x1, [sp], #16
205 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
206 + .endm
207 #endif /* __ASM_ASSEMBLER_H */
208 diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
209 index d72d995b7e258..85cc06380e93e 100644
210 --- a/arch/arm64/include/asm/cpu.h
211 +++ b/arch/arm64/include/asm/cpu.h
212 @@ -25,6 +25,7 @@ struct cpuinfo_arm64 {
213 u64 reg_id_aa64dfr1;
214 u64 reg_id_aa64isar0;
215 u64 reg_id_aa64isar1;
216 + u64 reg_id_aa64isar2;
217 u64 reg_id_aa64mmfr0;
218 u64 reg_id_aa64mmfr1;
219 u64 reg_id_aa64mmfr2;
220 diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
221 index 1dc3c762fdcb9..4ffa86149d28d 100644
222 --- a/arch/arm64/include/asm/cpucaps.h
223 +++ b/arch/arm64/include/asm/cpucaps.h
224 @@ -55,7 +55,8 @@
225 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
226 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
227 #define ARM64_WORKAROUND_1542419 47
228 +#define ARM64_SPECTRE_BHB 48
229
230 -#define ARM64_NCAPS 48
231 +#define ARM64_NCAPS 49
232
233 #endif /* __ASM_CPUCAPS_H */
234 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
235 index ccae05da98a7f..f63438474dd54 100644
236 --- a/arch/arm64/include/asm/cpufeature.h
237 +++ b/arch/arm64/include/asm/cpufeature.h
238 @@ -508,6 +508,34 @@ static inline bool cpu_supports_mixed_endian_el0(void)
239 return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
240 }
241
242 +static inline bool supports_csv2p3(int scope)
243 +{
244 + u64 pfr0;
245 + u8 csv2_val;
246 +
247 + if (scope == SCOPE_LOCAL_CPU)
248 + pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
249 + else
250 + pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
251 +
252 + csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
253 + ID_AA64PFR0_CSV2_SHIFT);
254 + return csv2_val == 3;
255 +}
256 +
257 +static inline bool supports_clearbhb(int scope)
258 +{
259 + u64 isar2;
260 +
261 + if (scope == SCOPE_LOCAL_CPU)
262 + isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
263 + else
264 + isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
265 +
266 + return cpuid_feature_extract_unsigned_field(isar2,
267 + ID_AA64ISAR2_CLEARBHB_SHIFT);
268 +}
269 +
270 static inline bool system_supports_32bit_el0(void)
271 {
272 return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
273 @@ -639,6 +667,18 @@ static inline int arm64_get_ssbd_state(void)
274
275 void arm64_set_ssbd_mitigation(bool state);
276
277 +/* Watch out, ordering is important here. */
278 +enum mitigation_state {
279 + SPECTRE_UNAFFECTED,
280 + SPECTRE_MITIGATED,
281 + SPECTRE_VULNERABLE,
282 +};
283 +
284 +enum mitigation_state arm64_get_spectre_bhb_state(void);
285 +bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
286 +u8 spectre_bhb_loop_affected(int scope);
287 +void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
288 +
289 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
290
291 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
292 diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
293 index aca07c2f6e6e3..f0165df489a38 100644
294 --- a/arch/arm64/include/asm/cputype.h
295 +++ b/arch/arm64/include/asm/cputype.h
296 @@ -71,6 +71,14 @@
297 #define ARM_CPU_PART_CORTEX_A55 0xD05
298 #define ARM_CPU_PART_CORTEX_A76 0xD0B
299 #define ARM_CPU_PART_NEOVERSE_N1 0xD0C
300 +#define ARM_CPU_PART_CORTEX_A77 0xD0D
301 +#define ARM_CPU_PART_NEOVERSE_V1 0xD40
302 +#define ARM_CPU_PART_CORTEX_A78 0xD41
303 +#define ARM_CPU_PART_CORTEX_X1 0xD44
304 +#define ARM_CPU_PART_CORTEX_A710 0xD47
305 +#define ARM_CPU_PART_CORTEX_X2 0xD48
306 +#define ARM_CPU_PART_NEOVERSE_N2 0xD49
307 +#define ARM_CPU_PART_CORTEX_A78C 0xD4B
308
309 #define APM_CPU_PART_POTENZA 0x000
310
311 @@ -102,6 +110,14 @@
312 #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
313 #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
314 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
315 +#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
316 +#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
317 +#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
318 +#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
319 +#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
320 +#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
321 +#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
322 +#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
323 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
324 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
325 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
326 diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
327 index f987b8a8f325e..928a96b9b1617 100644
328 --- a/arch/arm64/include/asm/fixmap.h
329 +++ b/arch/arm64/include/asm/fixmap.h
330 @@ -63,9 +63,11 @@ enum fixed_addresses {
331 #endif /* CONFIG_ACPI_APEI_GHES */
332
333 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
334 + FIX_ENTRY_TRAMP_TEXT3,
335 + FIX_ENTRY_TRAMP_TEXT2,
336 + FIX_ENTRY_TRAMP_TEXT1,
337 FIX_ENTRY_TRAMP_DATA,
338 - FIX_ENTRY_TRAMP_TEXT,
339 -#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
340 +#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
341 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
342 __end_of_permanent_fixed_addresses,
343
344 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
345 index 697702a1a1ff1..e6efdbe88c0a9 100644
346 --- a/arch/arm64/include/asm/kvm_host.h
347 +++ b/arch/arm64/include/asm/kvm_host.h
348 @@ -684,4 +684,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
349
350 #define kvm_arm_vcpu_loaded(vcpu) ((vcpu)->arch.sysregs_loaded_on_cpu)
351
352 +static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void)
353 +{
354 + return arm64_get_spectre_bhb_state();
355 +}
356 +
357 #endif /* __ARM64_KVM_HOST_H__ */
358 diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
359 index befe37d4bc0e5..78d110667c0c7 100644
360 --- a/arch/arm64/include/asm/kvm_mmu.h
361 +++ b/arch/arm64/include/asm/kvm_mmu.h
362 @@ -478,7 +478,8 @@ static inline void *kvm_get_hyp_vector(void)
363 void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
364 int slot = -1;
365
366 - if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
367 + if ((cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) ||
368 + cpus_have_const_cap(ARM64_SPECTRE_BHB)) && data->template_start) {
369 vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
370 slot = data->hyp_vectors_slot;
371 }
372 @@ -507,7 +508,8 @@ static inline int kvm_map_vectors(void)
373 * !HBP + HEL2 -> allocate one vector slot and use exec mapping
374 * HBP + HEL2 -> use hardened vertors and use exec mapping
375 */
376 - if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
377 + if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) ||
378 + cpus_have_const_cap(ARM64_SPECTRE_BHB)) {
379 __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
380 __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
381 }
382 diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
383 index f217e32929193..1b9e49fb0e1b7 100644
384 --- a/arch/arm64/include/asm/mmu.h
385 +++ b/arch/arm64/include/asm/mmu.h
386 @@ -29,7 +29,7 @@ typedef struct {
387 */
388 #define ASID(mm) ((mm)->context.id.counter & 0xffff)
389
390 -static inline bool arm64_kernel_unmapped_at_el0(void)
391 +static __always_inline bool arm64_kernel_unmapped_at_el0(void)
392 {
393 return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
394 cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
395 @@ -82,6 +82,12 @@ typedef void (*bp_hardening_cb_t)(void);
396 struct bp_hardening_data {
397 int hyp_vectors_slot;
398 bp_hardening_cb_t fn;
399 +
400 + /*
401 + * template_start is only used by the BHB mitigation to identify the
402 + * hyp_vectors_slot sequence.
403 + */
404 + const char *template_start;
405 };
406
407 #if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
408 diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
409 index 25a73aab438f9..a75f2882cc7cb 100644
410 --- a/arch/arm64/include/asm/sections.h
411 +++ b/arch/arm64/include/asm/sections.h
412 @@ -20,4 +20,9 @@ extern char __irqentry_text_start[], __irqentry_text_end[];
413 extern char __mmuoff_data_start[], __mmuoff_data_end[];
414 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
415
416 +static inline size_t entry_tramp_text_size(void)
417 +{
418 + return __entry_tramp_text_end - __entry_tramp_text_start;
419 +}
420 +
421 #endif /* __ASM_SECTIONS_H */
422 diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
423 index 9b68f1b3915ec..5b3bdad66b27e 100644
424 --- a/arch/arm64/include/asm/sysreg.h
425 +++ b/arch/arm64/include/asm/sysreg.h
426 @@ -165,6 +165,7 @@
427
428 #define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
429 #define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
430 +#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2)
431
432 #define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
433 #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
434 @@ -575,6 +576,21 @@
435 #define ID_AA64ISAR1_GPI_NI 0x0
436 #define ID_AA64ISAR1_GPI_IMP_DEF 0x1
437
438 +/* id_aa64isar2 */
439 +#define ID_AA64ISAR2_CLEARBHB_SHIFT 28
440 +#define ID_AA64ISAR2_RPRES_SHIFT 4
441 +#define ID_AA64ISAR2_WFXT_SHIFT 0
442 +
443 +#define ID_AA64ISAR2_RPRES_8BIT 0x0
444 +#define ID_AA64ISAR2_RPRES_12BIT 0x1
445 +/*
446 + * Value 0x1 has been removed from the architecture, and is
447 + * reserved, but has not yet been removed from the ARM ARM
448 + * as of ARM DDI 0487G.b.
449 + */
450 +#define ID_AA64ISAR2_WFXT_NI 0x0
451 +#define ID_AA64ISAR2_WFXT_SUPPORTED 0x2
452 +
453 /* id_aa64pfr0 */
454 #define ID_AA64PFR0_CSV3_SHIFT 60
455 #define ID_AA64PFR0_CSV2_SHIFT 56
456 @@ -646,6 +662,7 @@
457 #endif
458
459 /* id_aa64mmfr1 */
460 +#define ID_AA64MMFR1_ECBHB_SHIFT 60
461 #define ID_AA64MMFR1_PAN_SHIFT 20
462 #define ID_AA64MMFR1_LOR_SHIFT 16
463 #define ID_AA64MMFR1_HPD_SHIFT 12
464 diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h
465 new file mode 100644
466 index 0000000000000..f64613a96d530
467 --- /dev/null
468 +++ b/arch/arm64/include/asm/vectors.h
469 @@ -0,0 +1,73 @@
470 +/* SPDX-License-Identifier: GPL-2.0-only */
471 +/*
472 + * Copyright (C) 2022 ARM Ltd.
473 + */
474 +#ifndef __ASM_VECTORS_H
475 +#define __ASM_VECTORS_H
476 +
477 +#include <linux/bug.h>
478 +#include <linux/percpu.h>
479 +
480 +#include <asm/fixmap.h>
481 +
482 +extern char vectors[];
483 +extern char tramp_vectors[];
484 +extern char __bp_harden_el1_vectors[];
485 +
486 +/*
487 + * Note: the order of this enum corresponds to two arrays in entry.S:
488 + * tramp_vecs and __bp_harden_el1_vectors. By default the canonical
489 + * 'full fat' vectors are used directly.
490 + */
491 +enum arm64_bp_harden_el1_vectors {
492 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
493 + /*
494 + * Perform the BHB loop mitigation, before branching to the canonical
495 + * vectors.
496 + */
497 + EL1_VECTOR_BHB_LOOP,
498 +
499 + /*
500 + * Make the SMC call for firmware mitigation, before branching to the
501 + * canonical vectors.
502 + */
503 + EL1_VECTOR_BHB_FW,
504 +
505 + /*
506 + * Use the ClearBHB instruction, before branching to the canonical
507 + * vectors.
508 + */
509 + EL1_VECTOR_BHB_CLEAR_INSN,
510 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
511 +
512 + /*
513 + * Remap the kernel before branching to the canonical vectors.
514 + */
515 + EL1_VECTOR_KPTI,
516 +};
517 +
518 +#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
519 +#define EL1_VECTOR_BHB_LOOP -1
520 +#define EL1_VECTOR_BHB_FW -1
521 +#define EL1_VECTOR_BHB_CLEAR_INSN -1
522 +#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
523 +
524 +/* The vectors to use on return from EL0. e.g. to remap the kernel */
525 +DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
526 +
527 +#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
528 +#define TRAMP_VALIAS 0
529 +#endif
530 +
531 +static inline const char *
532 +arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
533 +{
534 + if (arm64_kernel_unmapped_at_el0())
535 + return (char *)TRAMP_VALIAS + SZ_2K * slot;
536 +
537 + WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);
538 +
539 + return __bp_harden_el1_vectors + SZ_2K * slot;
540 +}
541 +
542 +#endif /* __ASM_VECTORS_H */
543 diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
544 index 67c21f9bdbad2..08440ce57a1c2 100644
545 --- a/arch/arm64/include/uapi/asm/kvm.h
546 +++ b/arch/arm64/include/uapi/asm/kvm.h
547 @@ -240,6 +240,11 @@ struct kvm_vcpu_events {
548 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
549 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
550
551 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 KVM_REG_ARM_FW_REG(3)
552 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL 0
553 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL 1
554 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED 2
555 +
556 /* SVE registers */
557 #define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
558
559 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
560 index 1e16c4e00e771..33b33416fea42 100644
561 --- a/arch/arm64/kernel/cpu_errata.c
562 +++ b/arch/arm64/kernel/cpu_errata.c
563 @@ -13,6 +13,7 @@
564 #include <asm/cputype.h>
565 #include <asm/cpufeature.h>
566 #include <asm/smp_plat.h>
567 +#include <asm/vectors.h>
568
569 static bool __maybe_unused
570 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
571 @@ -116,6 +117,16 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
572 #ifdef CONFIG_KVM_INDIRECT_VECTORS
573 extern char __smccc_workaround_1_smc_start[];
574 extern char __smccc_workaround_1_smc_end[];
575 +extern char __smccc_workaround_3_smc_start[];
576 +extern char __smccc_workaround_3_smc_end[];
577 +extern char __spectre_bhb_loop_k8_start[];
578 +extern char __spectre_bhb_loop_k8_end[];
579 +extern char __spectre_bhb_loop_k24_start[];
580 +extern char __spectre_bhb_loop_k24_end[];
581 +extern char __spectre_bhb_loop_k32_start[];
582 +extern char __spectre_bhb_loop_k32_end[];
583 +extern char __spectre_bhb_clearbhb_start[];
584 +extern char __spectre_bhb_clearbhb_end[];
585
586 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
587 const char *hyp_vecs_end)
588 @@ -129,11 +140,11 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
589 __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
590 }
591
592 +static DEFINE_RAW_SPINLOCK(bp_lock);
593 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
594 const char *hyp_vecs_start,
595 const char *hyp_vecs_end)
596 {
597 - static DEFINE_RAW_SPINLOCK(bp_lock);
598 int cpu, slot = -1;
599
600 /*
601 @@ -161,6 +172,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
602
603 __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
604 __this_cpu_write(bp_hardening_data.fn, fn);
605 + __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
606 raw_spin_unlock(&bp_lock);
607 }
608 #else
609 @@ -927,6 +939,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
610 .cpu_enable = cpu_enable_ssbd_mitigation,
611 .midr_range_list = arm64_ssb_cpus,
612 },
613 + {
614 + .desc = "Spectre-BHB",
615 + .capability = ARM64_SPECTRE_BHB,
616 + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
617 + .matches = is_spectre_bhb_affected,
618 + .cpu_enable = spectre_bhb_enable_mitigation,
619 + },
620 #ifdef CONFIG_ARM64_ERRATUM_1418040
621 {
622 .desc = "ARM erratum 1418040",
623 @@ -989,15 +1008,41 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
624 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
625 }
626
627 +static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
628 +{
629 + switch (bhb_state) {
630 + case SPECTRE_UNAFFECTED:
631 + return "";
632 + default:
633 + case SPECTRE_VULNERABLE:
634 + return ", but not BHB";
635 + case SPECTRE_MITIGATED:
636 + return ", BHB";
637 + }
638 +}
639 +
640 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
641 char *buf)
642 {
643 + enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
644 + const char *bhb_str = get_bhb_affected_string(bhb_state);
645 + const char *v2_str = "Branch predictor hardening";
646 +
647 switch (get_spectre_v2_workaround_state()) {
648 case ARM64_BP_HARDEN_NOT_REQUIRED:
649 - return sprintf(buf, "Not affected\n");
650 - case ARM64_BP_HARDEN_WA_NEEDED:
651 - return sprintf(buf, "Mitigation: Branch predictor hardening\n");
652 - case ARM64_BP_HARDEN_UNKNOWN:
653 + if (bhb_state == SPECTRE_UNAFFECTED)
654 + return sprintf(buf, "Not affected\n");
655 +
656 + /*
657 + * Platforms affected by Spectre-BHB can't report
658 + * "Not affected" for Spectre-v2.
659 + */
660 + v2_str = "CSV2";
661 + fallthrough;
662 + case ARM64_BP_HARDEN_WA_NEEDED:
663 + return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
664 + case ARM64_BP_HARDEN_UNKNOWN:
665 + fallthrough;
666 default:
667 return sprintf(buf, "Vulnerable\n");
668 }
669 @@ -1019,3 +1064,333 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev,
670
671 return sprintf(buf, "Vulnerable\n");
672 }
673 +
674 +/*
675 + * We try to ensure that the mitigation state can never change as the result of
676 + * onlining a late CPU.
677 + */
678 +static void update_mitigation_state(enum mitigation_state *oldp,
679 + enum mitigation_state new)
680 +{
681 + enum mitigation_state state;
682 +
683 + do {
684 + state = READ_ONCE(*oldp);
685 + if (new <= state)
686 + break;
687 + } while (cmpxchg_relaxed(oldp, state, new) != state);
688 +}
689 +
690 +/*
691 + * Spectre BHB.
692 + *
693 + * A CPU is either:
694 + * - Mitigated by a branchy loop a CPU specific number of times, and listed
695 + * in our "loop mitigated list".
696 + * - Mitigated in software by the firmware Spectre v2 call.
697 + * - Has the ClearBHB instruction to perform the mitigation.
698 + * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
699 + * software mitigation in the vectors is needed.
700 + * - Has CSV2.3, so is unaffected.
701 + */
702 +static enum mitigation_state spectre_bhb_state;
703 +
704 +enum mitigation_state arm64_get_spectre_bhb_state(void)
705 +{
706 + return spectre_bhb_state;
707 +}
708 +
709 +/*
710 + * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
711 + * SCOPE_SYSTEM call will give the right answer.
712 + */
713 +u8 spectre_bhb_loop_affected(int scope)
714 +{
715 + u8 k = 0;
716 + static u8 max_bhb_k;
717 +
718 + if (scope == SCOPE_LOCAL_CPU) {
719 + static const struct midr_range spectre_bhb_k32_list[] = {
720 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
721 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
722 + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
723 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
724 + MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
725 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
726 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
727 + {},
728 + };
729 + static const struct midr_range spectre_bhb_k24_list[] = {
730 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
731 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
732 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
733 + {},
734 + };
735 + static const struct midr_range spectre_bhb_k8_list[] = {
736 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
737 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
738 + {},
739 + };
740 +
741 + if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
742 + k = 32;
743 + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
744 + k = 24;
745 + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
746 + k = 8;
747 +
748 + max_bhb_k = max(max_bhb_k, k);
749 + } else {
750 + k = max_bhb_k;
751 + }
752 +
753 + return k;
754 +}
755 +
756 +static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
757 +{
758 + int ret;
759 + struct arm_smccc_res res;
760 +
761 + if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
762 + return SPECTRE_VULNERABLE;
763 +
764 + switch (psci_ops.conduit) {
765 + case PSCI_CONDUIT_HVC:
766 + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
767 + ARM_SMCCC_ARCH_WORKAROUND_3, &res);
768 + break;
769 +
770 + case PSCI_CONDUIT_SMC:
771 + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
772 + ARM_SMCCC_ARCH_WORKAROUND_3, &res);
773 + break;
774 +
775 + default:
776 + return SPECTRE_VULNERABLE;
777 + }
778 +
779 + ret = res.a0;
780 + switch (ret) {
781 + case SMCCC_RET_SUCCESS:
782 + return SPECTRE_MITIGATED;
783 + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
784 + return SPECTRE_UNAFFECTED;
785 + default:
786 + fallthrough;
787 + case SMCCC_RET_NOT_SUPPORTED:
788 + return SPECTRE_VULNERABLE;
789 + }
790 +}
791 +
792 +static bool is_spectre_bhb_fw_affected(int scope)
793 +{
794 + static bool system_affected;
795 + enum mitigation_state fw_state;
796 + bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
797 + static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
798 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
799 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
800 + {},
801 + };
802 + bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
803 + spectre_bhb_firmware_mitigated_list);
804 +
805 + if (scope != SCOPE_LOCAL_CPU)
806 + return system_affected;
807 +
808 + fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
809 + if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
810 + system_affected = true;
811 + return true;
812 + }
813 +
814 + return false;
815 +}
816 +
817 +static bool supports_ecbhb(int scope)
818 +{
819 + u64 mmfr1;
820 +
821 + if (scope == SCOPE_LOCAL_CPU)
822 + mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
823 + else
824 + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
825 +
826 + return cpuid_feature_extract_unsigned_field(mmfr1,
827 + ID_AA64MMFR1_ECBHB_SHIFT);
828 +}
829 +
830 +bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
831 + int scope)
832 +{
833 + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
834 +
835 + if (supports_csv2p3(scope))
836 + return false;
837 +
838 + if (supports_clearbhb(scope))
839 + return true;
840 +
841 + if (spectre_bhb_loop_affected(scope))
842 + return true;
843 +
844 + if (is_spectre_bhb_fw_affected(scope))
845 + return true;
846 +
847 + return false;
848 +}
849 +
850 +static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
851 +{
852 + const char *v = arm64_get_bp_hardening_vector(slot);
853 +
854 + if (slot < 0)
855 + return;
856 +
857 + __this_cpu_write(this_cpu_vector, v);
858 +
859 + /*
860 + * When KPTI is in use, the vectors are switched when exiting to
861 + * user-space.
862 + */
863 + if (arm64_kernel_unmapped_at_el0())
864 + return;
865 +
866 + write_sysreg(v, vbar_el1);
867 + isb();
868 +}
869 +
870 +#ifdef CONFIG_KVM_INDIRECT_VECTORS
871 +static const char *kvm_bhb_get_vecs_end(const char *start)
872 +{
873 + if (start == __smccc_workaround_3_smc_start)
874 + return __smccc_workaround_3_smc_end;
875 + else if (start == __spectre_bhb_loop_k8_start)
876 + return __spectre_bhb_loop_k8_end;
877 + else if (start == __spectre_bhb_loop_k24_start)
878 + return __spectre_bhb_loop_k24_end;
879 + else if (start == __spectre_bhb_loop_k32_start)
880 + return __spectre_bhb_loop_k32_end;
881 + else if (start == __spectre_bhb_clearbhb_start)
882 + return __spectre_bhb_clearbhb_end;
883 +
884 + return NULL;
885 +}
886 +
887 +static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
888 +{
889 + int cpu, slot = -1;
890 + const char *hyp_vecs_end;
891 +
892 + if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
893 + return;
894 +
895 + hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
896 + if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
897 + return;
898 +
899 + raw_spin_lock(&bp_lock);
900 + for_each_possible_cpu(cpu) {
901 + if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
902 + slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
903 + break;
904 + }
905 + }
906 +
907 + if (slot == -1) {
908 + slot = atomic_inc_return(&arm64_el2_vector_last_slot);
909 + BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
910 + __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
911 + }
912 +
913 + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
914 + __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
915 + raw_spin_unlock(&bp_lock);
916 +}
917 +#else
918 +#define __smccc_workaround_3_smc_start NULL
919 +#define __spectre_bhb_loop_k8_start NULL
920 +#define __spectre_bhb_loop_k24_start NULL
921 +#define __spectre_bhb_loop_k32_start NULL
922 +#define __spectre_bhb_clearbhb_start NULL
923 +
924 +static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
925 +#endif
926 +
927 +void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
928 +{
929 + enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
930 +
931 + if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
932 + return;
933 +
934 + if (get_spectre_v2_workaround_state() == ARM64_BP_HARDEN_UNKNOWN) {
935 + /* No point mitigating Spectre-BHB alone. */
936 + } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
937 + pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
938 + } else if (cpu_mitigations_off()) {
939 + pr_info_once("spectre-bhb mitigation disabled by command line option\n");
940 + } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
941 + state = SPECTRE_MITIGATED;
942 + } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
943 + kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
944 + this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
945 +
946 + state = SPECTRE_MITIGATED;
947 + } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
948 + switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
949 + case 8:
950 + kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
951 + break;
952 + case 24:
953 + kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
954 + break;
955 + case 32:
956 + kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
957 + break;
958 + default:
959 + WARN_ON_ONCE(1);
960 + }
961 + this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
962 +
963 + state = SPECTRE_MITIGATED;
964 + } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
965 + fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
966 + if (fw_state == SPECTRE_MITIGATED) {
967 + kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
968 + this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
969 +
970 + /*
971 + * With WA3 in the vectors, the WA1 calls can be
972 + * removed.
973 + */
974 + __this_cpu_write(bp_hardening_data.fn, NULL);
975 +
976 + state = SPECTRE_MITIGATED;
977 + }
978 + }
979 +
980 + update_mitigation_state(&spectre_bhb_state, state);
981 +}
982 +
983 +/* Patched to correct the immediate */
984 +void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
985 + __le32 *origptr, __le32 *updptr, int nr_inst)
986 +{
987 + u8 rd;
988 + u32 insn;
989 + u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
990 +
991 + BUG_ON(nr_inst != 1); /* MOV -> MOV */
992 +
993 + if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
994 + return;
995 +
996 + insn = le32_to_cpu(*origptr);
997 + rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
998 + insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
999 + AARCH64_INSN_VARIANT_64BIT,
1000 + AARCH64_INSN_MOVEWIDE_ZERO);
1001 + *updptr++ = cpu_to_le32(insn);
1002 +}
1003 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
1004 index acdef8d76c64d..d07dadd6b8ff7 100644
1005 --- a/arch/arm64/kernel/cpufeature.c
1006 +++ b/arch/arm64/kernel/cpufeature.c
1007 @@ -10,11 +10,13 @@
1008 #include <linux/bsearch.h>
1009 #include <linux/cpumask.h>
1010 #include <linux/crash_dump.h>
1011 +#include <linux/percpu.h>
1012 #include <linux/sort.h>
1013 #include <linux/stop_machine.h>
1014 #include <linux/types.h>
1015 #include <linux/mm.h>
1016 #include <linux/cpu.h>
1017 +
1018 #include <asm/cpu.h>
1019 #include <asm/cpufeature.h>
1020 #include <asm/cpu_ops.h>
1021 @@ -23,6 +25,7 @@
1022 #include <asm/processor.h>
1023 #include <asm/sysreg.h>
1024 #include <asm/traps.h>
1025 +#include <asm/vectors.h>
1026 #include <asm/virt.h>
1027
1028 /* Kernel representation of AT_HWCAP and AT_HWCAP2 */
1029 @@ -45,6 +48,8 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM6
1030 /* Need also bit for ARM64_CB_PATCH */
1031 DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
1032
1033 +DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
1034 +
1035 /*
1036 * Flag to indicate if we have computed the system wide
1037 * capabilities based on the boot time active CPUs. This
1038 @@ -150,6 +155,11 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
1039 ARM64_FTR_END,
1040 };
1041
1042 +static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
1043 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
1044 + ARM64_FTR_END,
1045 +};
1046 +
1047 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
1048 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
1049 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
1050 @@ -410,6 +420,7 @@ static const struct __ftr_reg_entry {
1051 /* Op1 = 0, CRn = 0, CRm = 6 */
1052 ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
1053 ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
1054 + ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
1055
1056 /* Op1 = 0, CRn = 0, CRm = 7 */
1057 ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
1058 @@ -581,6 +592,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
1059 init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
1060 init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
1061 init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
1062 + init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
1063 init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
1064 init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
1065 init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
1066 @@ -704,6 +716,8 @@ void update_cpu_features(int cpu,
1067 info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
1068 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
1069 info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
1070 + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
1071 + info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
1072
1073 /*
1074 * Differing PARange support is fine as long as all peripherals and
1075 @@ -838,6 +852,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
1076 read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
1077 read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
1078 read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
1079 + read_sysreg_case(SYS_ID_AA64ISAR2_EL1);
1080
1081 read_sysreg_case(SYS_CNTFRQ_EL0);
1082 read_sysreg_case(SYS_CTR_EL0);
1083 @@ -1038,6 +1053,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
1084 static bool kpti_applied = false;
1085 int cpu = smp_processor_id();
1086
1087 + if (__this_cpu_read(this_cpu_vector) == vectors) {
1088 + const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
1089 +
1090 + __this_cpu_write(this_cpu_vector, v);
1091 + }
1092 +
1093 /*
1094 * We don't need to rewrite the page-tables if either we've done
1095 * it already or we have KASLR enabled and therefore have not
1096 diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
1097 index 05933c065732b..90b35011a22f8 100644
1098 --- a/arch/arm64/kernel/cpuinfo.c
1099 +++ b/arch/arm64/kernel/cpuinfo.c
1100 @@ -344,6 +344,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
1101 info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
1102 info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
1103 info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
1104 + info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1);
1105 info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
1106 info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
1107 info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
1108 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
1109 index db137746c6fa3..8a4c108a0c0b6 100644
1110 --- a/arch/arm64/kernel/entry.S
1111 +++ b/arch/arm64/kernel/entry.S
1112 @@ -59,18 +59,21 @@
1113
1114 .macro kernel_ventry, el, label, regsize = 64
1115 .align 7
1116 -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1117 -alternative_if ARM64_UNMAP_KERNEL_AT_EL0
1118 +.Lventry_start\@:
1119 .if \el == 0
1120 + /*
1121 + * This must be the first instruction of the EL0 vector entries. It is
1122 + * skipped by the trampoline vectors, to trigger the cleanup.
1123 + */
1124 + b .Lskip_tramp_vectors_cleanup\@
1125 .if \regsize == 64
1126 mrs x30, tpidrro_el0
1127 msr tpidrro_el0, xzr
1128 .else
1129 mov x30, xzr
1130 .endif
1131 +.Lskip_tramp_vectors_cleanup\@:
1132 .endif
1133 -alternative_else_nop_endif
1134 -#endif
1135
1136 sub sp, sp, #S_FRAME_SIZE
1137 #ifdef CONFIG_VMAP_STACK
1138 @@ -116,11 +119,15 @@ alternative_else_nop_endif
1139 mrs x0, tpidrro_el0
1140 #endif
1141 b el\()\el\()_\label
1142 +.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
1143 .endm
1144
1145 - .macro tramp_alias, dst, sym
1146 + .macro tramp_alias, dst, sym, tmp
1147 mov_q \dst, TRAMP_VALIAS
1148 - add \dst, \dst, #(\sym - .entry.tramp.text)
1149 + adr_l \tmp, \sym
1150 + add \dst, \dst, \tmp
1151 + adr_l \tmp, .entry.tramp.text
1152 + sub \dst, \dst, \tmp
1153 .endm
1154
1155 // This macro corrupts x0-x3. It is the caller's duty
1156 @@ -361,21 +368,25 @@ alternative_else_nop_endif
1157 ldp x24, x25, [sp, #16 * 12]
1158 ldp x26, x27, [sp, #16 * 13]
1159 ldp x28, x29, [sp, #16 * 14]
1160 - ldr lr, [sp, #S_LR]
1161 - add sp, sp, #S_FRAME_SIZE // restore sp
1162
1163 .if \el == 0
1164 -alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
1165 +alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1166 + ldr lr, [sp, #S_LR]
1167 + add sp, sp, #S_FRAME_SIZE // restore sp
1168 + eret
1169 +alternative_else_nop_endif
1170 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1171 bne 5f
1172 - msr far_el1, x30
1173 - tramp_alias x30, tramp_exit_native
1174 + msr far_el1, x29
1175 + tramp_alias x30, tramp_exit_native, x29
1176 br x30
1177 5:
1178 - tramp_alias x30, tramp_exit_compat
1179 + tramp_alias x30, tramp_exit_compat, x29
1180 br x30
1181 #endif
1182 .else
1183 + ldr lr, [sp, #S_LR]
1184 + add sp, sp, #S_FRAME_SIZE // restore sp
1185 eret
1186 .endif
1187 sb
1188 @@ -1012,12 +1023,6 @@ ENDPROC(el0_svc)
1189
1190 .popsection // .entry.text
1191
1192 -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1193 -/*
1194 - * Exception vectors trampoline.
1195 - */
1196 - .pushsection ".entry.tramp.text", "ax"
1197 -
1198 // Move from tramp_pg_dir to swapper_pg_dir
1199 .macro tramp_map_kernel, tmp
1200 mrs \tmp, ttbr1_el1
1201 @@ -1051,12 +1056,47 @@ alternative_else_nop_endif
1202 */
1203 .endm
1204
1205 - .macro tramp_ventry, regsize = 64
1206 + .macro tramp_data_page dst
1207 + adr_l \dst, .entry.tramp.text
1208 + sub \dst, \dst, PAGE_SIZE
1209 + .endm
1210 +
1211 + .macro tramp_data_read_var dst, var
1212 +#ifdef CONFIG_RANDOMIZE_BASE
1213 + tramp_data_page \dst
1214 + add \dst, \dst, #:lo12:__entry_tramp_data_\var
1215 + ldr \dst, [\dst]
1216 +#else
1217 + ldr \dst, =\var
1218 +#endif
1219 + .endm
1220 +
1221 +#define BHB_MITIGATION_NONE 0
1222 +#define BHB_MITIGATION_LOOP 1
1223 +#define BHB_MITIGATION_FW 2
1224 +#define BHB_MITIGATION_INSN 3
1225 +
1226 + .macro tramp_ventry, vector_start, regsize, kpti, bhb
1227 .align 7
1228 1:
1229 .if \regsize == 64
1230 msr tpidrro_el0, x30 // Restored in kernel_ventry
1231 .endif
1232 +
1233 + .if \bhb == BHB_MITIGATION_LOOP
1234 + /*
1235 + * This sequence must appear before the first indirect branch. i.e. the
1236 + * ret out of tramp_ventry. It appears here because x30 is free.
1237 + */
1238 + __mitigate_spectre_bhb_loop x30
1239 + .endif // \bhb == BHB_MITIGATION_LOOP
1240 +
1241 + .if \bhb == BHB_MITIGATION_INSN
1242 + clearbhb
1243 + isb
1244 + .endif // \bhb == BHB_MITIGATION_INSN
1245 +
1246 + .if \kpti == 1
1247 /*
1248 * Defend against branch aliasing attacks by pushing a dummy
1249 * entry onto the return stack and using a RET instruction to
1250 @@ -1066,46 +1106,79 @@ alternative_else_nop_endif
1251 b .
1252 2:
1253 tramp_map_kernel x30
1254 -#ifdef CONFIG_RANDOMIZE_BASE
1255 - adr x30, tramp_vectors + PAGE_SIZE
1256 alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
1257 - ldr x30, [x30]
1258 -#else
1259 - ldr x30, =vectors
1260 -#endif
1261 + tramp_data_read_var x30, vectors
1262 alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
1263 - prfm plil1strm, [x30, #(1b - tramp_vectors)]
1264 + prfm plil1strm, [x30, #(1b - \vector_start)]
1265 alternative_else_nop_endif
1266 +
1267 msr vbar_el1, x30
1268 - add x30, x30, #(1b - tramp_vectors)
1269 isb
1270 + .else
1271 + ldr x30, =vectors
1272 + .endif // \kpti == 1
1273 +
1274 + .if \bhb == BHB_MITIGATION_FW
1275 + /*
1276 + * The firmware sequence must appear before the first indirect branch.
1277 + * i.e. the ret out of tramp_ventry. But it also needs the stack to be
1278 + * mapped to save/restore the registers the SMC clobbers.
1279 + */
1280 + __mitigate_spectre_bhb_fw
1281 + .endif // \bhb == BHB_MITIGATION_FW
1282 +
1283 + add x30, x30, #(1b - \vector_start + 4)
1284 ret
1285 +.org 1b + 128 // Did we overflow the ventry slot?
1286 .endm
1287
1288 .macro tramp_exit, regsize = 64
1289 - adr x30, tramp_vectors
1290 + tramp_data_read_var x30, this_cpu_vector
1291 +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
1292 + mrs x29, tpidr_el1
1293 +alternative_else
1294 + mrs x29, tpidr_el2
1295 +alternative_endif
1296 + ldr x30, [x30, x29]
1297 +
1298 msr vbar_el1, x30
1299 - tramp_unmap_kernel x30
1300 + ldr lr, [sp, #S_LR]
1301 + tramp_unmap_kernel x29
1302 .if \regsize == 64
1303 - mrs x30, far_el1
1304 + mrs x29, far_el1
1305 .endif
1306 + add sp, sp, #S_FRAME_SIZE // restore sp
1307 eret
1308 sb
1309 .endm
1310
1311 - .align 11
1312 -ENTRY(tramp_vectors)
1313 + .macro generate_tramp_vector, kpti, bhb
1314 +.Lvector_start\@:
1315 .space 0x400
1316
1317 - tramp_ventry
1318 - tramp_ventry
1319 - tramp_ventry
1320 - tramp_ventry
1321 + .rept 4
1322 + tramp_ventry .Lvector_start\@, 64, \kpti, \bhb
1323 + .endr
1324 + .rept 4
1325 + tramp_ventry .Lvector_start\@, 32, \kpti, \bhb
1326 + .endr
1327 + .endm
1328
1329 - tramp_ventry 32
1330 - tramp_ventry 32
1331 - tramp_ventry 32
1332 - tramp_ventry 32
1333 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1334 +/*
1335 + * Exception vectors trampoline.
1336 + * The order must match __bp_harden_el1_vectors and the
1337 + * arm64_bp_harden_el1_vectors enum.
1338 + */
1339 + .pushsection ".entry.tramp.text", "ax"
1340 + .align 11
1341 +ENTRY(tramp_vectors)
1342 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
1343 + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
1344 + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
1345 + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN
1346 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
1347 + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
1348 END(tramp_vectors)
1349
1350 ENTRY(tramp_exit_native)
1351 @@ -1123,11 +1196,55 @@ END(tramp_exit_compat)
1352 .align PAGE_SHIFT
1353 .globl __entry_tramp_data_start
1354 __entry_tramp_data_start:
1355 +__entry_tramp_data_vectors:
1356 .quad vectors
1357 +#ifdef CONFIG_ARM_SDE_INTERFACE
1358 +__entry_tramp_data___sdei_asm_handler:
1359 + .quad __sdei_asm_handler
1360 +#endif /* CONFIG_ARM_SDE_INTERFACE */
1361 +__entry_tramp_data_this_cpu_vector:
1362 + .quad this_cpu_vector
1363 .popsection // .rodata
1364 #endif /* CONFIG_RANDOMIZE_BASE */
1365 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1366
1367 +/*
1368 + * Exception vectors for spectre mitigations on entry from EL1 when
1369 + * kpti is not in use.
1370 + */
1371 + .macro generate_el1_vector, bhb
1372 +.Lvector_start\@:
1373 + kernel_ventry 1, sync_invalid // Synchronous EL1t
1374 + kernel_ventry 1, irq_invalid // IRQ EL1t
1375 + kernel_ventry 1, fiq_invalid // FIQ EL1t
1376 + kernel_ventry 1, error_invalid // Error EL1t
1377 +
1378 + kernel_ventry 1, sync // Synchronous EL1h
1379 + kernel_ventry 1, irq // IRQ EL1h
1380 + kernel_ventry 1, fiq_invalid // FIQ EL1h
1381 + kernel_ventry 1, error // Error EL1h
1382 +
1383 + .rept 4
1384 + tramp_ventry .Lvector_start\@, 64, 0, \bhb
1385 + .endr
1386 + .rept 4
1387 + tramp_ventry .Lvector_start\@, 32, 0, \bhb
1388 + .endr
1389 + .endm
1390 +
1391 +/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
1392 + .pushsection ".entry.text", "ax"
1393 + .align 11
1394 +SYM_CODE_START(__bp_harden_el1_vectors)
1395 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
1396 + generate_el1_vector bhb=BHB_MITIGATION_LOOP
1397 + generate_el1_vector bhb=BHB_MITIGATION_FW
1398 + generate_el1_vector bhb=BHB_MITIGATION_INSN
1399 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
1400 +SYM_CODE_END(__bp_harden_el1_vectors)
1401 + .popsection
1402 +
1403 +
1404 /*
1405 * Register switch for AArch64. The callee-saved registers need to be saved
1406 * and restored. On entry:
1407 @@ -1214,13 +1331,7 @@ ENTRY(__sdei_asm_entry_trampoline)
1408 */
1409 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1410
1411 -#ifdef CONFIG_RANDOMIZE_BASE
1412 - adr x4, tramp_vectors + PAGE_SIZE
1413 - add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1414 - ldr x4, [x4]
1415 -#else
1416 - ldr x4, =__sdei_asm_handler
1417 -#endif
1418 + tramp_data_read_var x4, __sdei_asm_handler
1419 br x4
1420 ENDPROC(__sdei_asm_entry_trampoline)
1421 NOKPROBE(__sdei_asm_entry_trampoline)
1422 @@ -1243,12 +1354,6 @@ ENDPROC(__sdei_asm_exit_trampoline)
1423 NOKPROBE(__sdei_asm_exit_trampoline)
1424 .ltorg
1425 .popsection // .entry.tramp.text
1426 -#ifdef CONFIG_RANDOMIZE_BASE
1427 -.pushsection ".rodata", "a"
1428 -__sdei_asm_trampoline_next_handler:
1429 - .quad __sdei_asm_handler
1430 -.popsection // .rodata
1431 -#endif /* CONFIG_RANDOMIZE_BASE */
1432 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1433
1434 /*
1435 @@ -1344,7 +1449,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1436 alternative_else_nop_endif
1437
1438 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1439 - tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
1440 + tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
1441 br x5
1442 #endif
1443 ENDPROC(__sdei_asm_handler)
1444 diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
1445 index 1f82cf631c3c4..fbab044b3a39a 100644
1446 --- a/arch/arm64/kernel/vmlinux.lds.S
1447 +++ b/arch/arm64/kernel/vmlinux.lds.S
1448 @@ -276,7 +276,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
1449 <= SZ_4K, "Hibernate exit text too big or misaligned")
1450 #endif
1451 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1452 -ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
1453 +ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
1454 "Entry trampoline text too big")
1455 #endif
1456 /*
1457 diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
1458 index f36aad0f207bb..99b8ecaae8109 100644
1459 --- a/arch/arm64/kvm/hyp/hyp-entry.S
1460 +++ b/arch/arm64/kvm/hyp/hyp-entry.S
1461 @@ -113,6 +113,10 @@ el1_hvc_guest:
1462 /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
1463 eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
1464 ARM_SMCCC_ARCH_WORKAROUND_2)
1465 + cbz w1, wa_epilogue
1466 +
1467 + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
1468 + ARM_SMCCC_ARCH_WORKAROUND_3)
1469 cbnz w1, el1_trap
1470
1471 #ifdef CONFIG_ARM64_SSBD
1472 @@ -347,4 +351,64 @@ ENTRY(__smccc_workaround_1_smc_start)
1473 ldp x0, x1, [sp, #(8 * 2)]
1474 add sp, sp, #(8 * 4)
1475 ENTRY(__smccc_workaround_1_smc_end)
1476 +
1477 +ENTRY(__smccc_workaround_3_smc_start)
1478 + esb
1479 + sub sp, sp, #(8 * 4)
1480 + stp x2, x3, [sp, #(8 * 0)]
1481 + stp x0, x1, [sp, #(8 * 2)]
1482 + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
1483 + smc #0
1484 + ldp x2, x3, [sp, #(8 * 0)]
1485 + ldp x0, x1, [sp, #(8 * 2)]
1486 + add sp, sp, #(8 * 4)
1487 +ENTRY(__smccc_workaround_3_smc_end)
1488 +
1489 +ENTRY(__spectre_bhb_loop_k8_start)
1490 + esb
1491 + sub sp, sp, #(8 * 2)
1492 + stp x0, x1, [sp, #(8 * 0)]
1493 + mov x0, #8
1494 +2: b . + 4
1495 + subs x0, x0, #1
1496 + b.ne 2b
1497 + dsb nsh
1498 + isb
1499 + ldp x0, x1, [sp, #(8 * 0)]
1500 + add sp, sp, #(8 * 2)
1501 +ENTRY(__spectre_bhb_loop_k8_end)
1502 +
1503 +ENTRY(__spectre_bhb_loop_k24_start)
1504 + esb
1505 + sub sp, sp, #(8 * 2)
1506 + stp x0, x1, [sp, #(8 * 0)]
1507 + mov x0, #24
1508 +2: b . + 4
1509 + subs x0, x0, #1
1510 + b.ne 2b
1511 + dsb nsh
1512 + isb
1513 + ldp x0, x1, [sp, #(8 * 0)]
1514 + add sp, sp, #(8 * 2)
1515 +ENTRY(__spectre_bhb_loop_k24_end)
1516 +
1517 +ENTRY(__spectre_bhb_loop_k32_start)
1518 + esb
1519 + sub sp, sp, #(8 * 2)
1520 + stp x0, x1, [sp, #(8 * 0)]
1521 + mov x0, #32
1522 +2: b . + 4
1523 + subs x0, x0, #1
1524 + b.ne 2b
1525 + dsb nsh
1526 + isb
1527 + ldp x0, x1, [sp, #(8 * 0)]
1528 + add sp, sp, #(8 * 2)
1529 +ENTRY(__spectre_bhb_loop_k32_end)
1530 +
1531 +ENTRY(__spectre_bhb_clearbhb_start)
1532 + esb
1533 + clearbhb
1534 + isb
1535 +ENTRY(__spectre_bhb_clearbhb_end)
1536 #endif
1537 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
1538 index 14607fac7ca38..768983bd23261 100644
1539 --- a/arch/arm64/kvm/hyp/switch.c
1540 +++ b/arch/arm64/kvm/hyp/switch.c
1541 @@ -25,6 +25,7 @@
1542 #include <asm/debug-monitors.h>
1543 #include <asm/processor.h>
1544 #include <asm/thread_info.h>
1545 +#include <asm/vectors.h>
1546
1547 extern struct exception_table_entry __start___kvm_ex_table;
1548 extern struct exception_table_entry __stop___kvm_ex_table;
1549 @@ -152,7 +153,7 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
1550
1551 static void deactivate_traps_vhe(void)
1552 {
1553 - extern char vectors[]; /* kernel exception vectors */
1554 + const char *host_vectors = vectors;
1555 write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
1556
1557 /*
1558 @@ -163,7 +164,10 @@ static void deactivate_traps_vhe(void)
1559 asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
1560
1561 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
1562 - write_sysreg(vectors, vbar_el1);
1563 +
1564 + if (!arm64_kernel_unmapped_at_el0())
1565 + host_vectors = __this_cpu_read(this_cpu_vector);
1566 + write_sysreg(host_vectors, vbar_el1);
1567 }
1568 NOKPROBE_SYMBOL(deactivate_traps_vhe);
1569
1570 diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
1571 index da649e90240c8..a25f737dfa0bb 100644
1572 --- a/arch/arm64/kvm/sys_regs.c
1573 +++ b/arch/arm64/kvm/sys_regs.c
1574 @@ -1454,7 +1454,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
1575 /* CRm=6 */
1576 ID_SANITISED(ID_AA64ISAR0_EL1),
1577 ID_SANITISED(ID_AA64ISAR1_EL1),
1578 - ID_UNALLOCATED(6,2),
1579 + ID_SANITISED(ID_AA64ISAR2_EL1),
1580 ID_UNALLOCATED(6,3),
1581 ID_UNALLOCATED(6,4),
1582 ID_UNALLOCATED(6,5),
1583 diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
1584 index 99bc0289ab2b6..5cf575f23af28 100644
1585 --- a/arch/arm64/mm/mmu.c
1586 +++ b/arch/arm64/mm/mmu.c
1587 @@ -583,6 +583,8 @@ early_param("rodata", parse_rodata);
1588 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1589 static int __init map_entry_trampoline(void)
1590 {
1591 + int i;
1592 +
1593 pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
1594 phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
1595
1596 @@ -591,11 +593,15 @@ static int __init map_entry_trampoline(void)
1597
1598 /* Map only the text into the trampoline page table */
1599 memset(tramp_pg_dir, 0, PGD_SIZE);
1600 - __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
1601 - prot, __pgd_pgtable_alloc, 0);
1602 + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
1603 + entry_tramp_text_size(), prot,
1604 + __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);
1605
1606 /* Map both the text and data into the kernel page table */
1607 - __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
1608 + for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
1609 + __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
1610 + pa_start + i * PAGE_SIZE, prot);
1611 +
1612 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
1613 extern char __entry_tramp_data_start[];
1614
1615 diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
1616 index f510c00bda882..c563b03bdccc1 100644
1617 --- a/arch/mips/kernel/smp.c
1618 +++ b/arch/mips/kernel/smp.c
1619 @@ -361,6 +361,9 @@ asmlinkage void start_secondary(void)
1620 cpu = smp_processor_id();
1621 cpu_data[cpu].udelay_val = loops_per_jiffy;
1622
1623 + set_cpu_sibling_map(cpu);
1624 + set_cpu_core_map(cpu);
1625 +
1626 cpumask_set_cpu(cpu, &cpu_coherent_mask);
1627 notify_cpu_starting(cpu);
1628
1629 @@ -372,9 +375,6 @@ asmlinkage void start_secondary(void)
1630 /* The CPU is running and counters synchronised, now mark it online */
1631 set_cpu_online(cpu, true);
1632
1633 - set_cpu_sibling_map(cpu);
1634 - set_cpu_core_map(cpu);
1635 -
1636 calculate_cpu_foreign_map();
1637
1638 /*
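
[Note on the smp.c hunk above] Moving set_cpu_sibling_map()/set_cpu_core_map() ahead of set_cpu_online() closes a window in which other CPUs could observe the new CPU as online while its topology masks were still empty. The general pattern is "fill in the data, then publish the flag". A standalone C11 model of that ordering — the kernel uses cpumask operations rather than C11 atomics, so this is illustrative only:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct cpu_info {
        int sibling_mask;           /* stand-in for the topology masks */
        atomic_bool online;
    };

    static void bring_up(struct cpu_info *cpu)
    {
        cpu->sibling_mask = 0x3;    /* 1. fill in topology first ...            */
        /* 2. ... then publish: a reader that sees online == true               */
        /*    is guaranteed (release ordering) to also see the masks filled in. */
        atomic_store_explicit(&cpu->online, true, memory_order_release);
    }

    int main(void)
    {
        struct cpu_info cpu = { 0, false };
        bring_up(&cpu);
        printf("online=%d mask=%#x\n", atomic_load(&cpu.online), cpu.sibling_mask);
        return 0;
    }
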
1639 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
1640 index 5acb459856752..8995c39330fac 100644
1641 --- a/drivers/atm/firestream.c
1642 +++ b/drivers/atm/firestream.c
1643 @@ -1677,6 +1677,8 @@ static int fs_init(struct fs_dev *dev)
1644 dev->hw_base = pci_resource_start(pci_dev, 0);
1645
1646 dev->base = ioremap(dev->hw_base, 0x1000);
1647 + if (!dev->base)
1648 + return 1;
1649
1650 reset_chip (dev);
1651
1652 diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
1653 index 2337b3827e6a4..11a81e8ba9639 100644
1654 --- a/drivers/gpu/drm/drm_connector.c
1655 +++ b/drivers/gpu/drm/drm_connector.c
1656 @@ -1984,6 +1984,9 @@ EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
1657 void drm_connector_set_vrr_capable_property(
1658 struct drm_connector *connector, bool capable)
1659 {
1660 + if (!connector->vrr_capable_property)
1661 + return;
1662 +
1663 drm_object_property_set_value(&connector->base,
1664 connector->vrr_capable_property,
1665 capable);
1666 diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
1667 index edaa1ca972c15..d4e9815ca26ff 100644
1668 --- a/drivers/net/can/rcar/rcar_canfd.c
1669 +++ b/drivers/net/can/rcar/rcar_canfd.c
1670 @@ -1598,15 +1598,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
1671
1672 netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
1673 RCANFD_NAPI_WEIGHT);
1674 + spin_lock_init(&priv->tx_lock);
1675 + devm_can_led_init(ndev);
1676 + gpriv->ch[priv->channel] = priv;
1677 err = register_candev(ndev);
1678 if (err) {
1679 dev_err(&pdev->dev,
1680 "register_candev() failed, error %d\n", err);
1681 goto fail_candev;
1682 }
1683 - spin_lock_init(&priv->tx_lock);
1684 - devm_can_led_init(ndev);
1685 - gpriv->ch[priv->channel] = priv;
1686 dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
1687 return 0;
1688
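
[Note on the rcar_canfd hunk above] register_candev() makes the interface immediately usable, so everything the datapath touches — the tx_lock spinlock and the gpriv->ch[] back-pointer — must exist before registration; the hunk hoists those three lines above the register call. A small userspace model of the same init-before-register rule, using pthreads purely as a stand-in for the kernel spinlock:

    #include <pthread.h>
    #include <stdio.h>

    struct priv {
        pthread_mutex_t tx_lock;
        int registered;
    };

    /* Once "registered", other contexts may take tx_lock at any time, */
    /* so the lock must be initialized before registration.            */
    static int register_dev(struct priv *p) { p->registered = 1; return 0; }

    int main(void)
    {
        struct priv p = { .registered = 0 };

        pthread_mutex_init(&p.tx_lock, NULL);  /* init first ...   */
        if (register_dev(&p))                  /* ... then go live */
            return 1;
        printf("registered with lock ready\n");
        return 0;
    }
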
1689 diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
1690 index 2713300343c7f..ec551def58359 100644
1691 --- a/drivers/net/ethernet/sfc/mcdi.c
1692 +++ b/drivers/net/ethernet/sfc/mcdi.c
1693 @@ -163,9 +163,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
1694 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
1695 spin_lock_bh(&mcdi->iface_lock);
1696 ++mcdi->seqno;
1697 + seqno = mcdi->seqno & SEQ_MASK;
1698 spin_unlock_bh(&mcdi->iface_lock);
1699
1700 - seqno = mcdi->seqno & SEQ_MASK;
1701 xflags = 0;
1702 if (mcdi->mode == MCDI_MODE_EVENTS)
1703 xflags |= MCDI_HEADER_XFLAGS_EVREQ;
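
[Note on the mcdi.c hunk above] mcdi->seqno is incremented under iface_lock, but the masked copy used in the request header was previously taken after the unlock, so two concurrent callers could end up with the same sequence number. The hunk moves the snapshot inside the critical section. A compilable model of the fixed pattern, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    #define SEQ_MASK 0xff
    static pthread_mutex_t iface_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int seqno;

    static unsigned int next_seqno(void)
    {
        unsigned int snap;

        pthread_mutex_lock(&iface_lock);
        ++seqno;
        snap = seqno & SEQ_MASK;   /* sample inside the critical section */
        pthread_mutex_unlock(&iface_lock);
        return snap;               /* unique even under concurrency */
    }

    int main(void)
    {
        printf("seq=%u seq=%u\n", next_seqno(), next_seqno());
        return 0;
    }
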
1704 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
1705 index 022f2faccab41..4a433e34ee7a1 100644
1706 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
1707 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
1708 @@ -519,8 +519,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
1709 .has_he = true,
1710 .he_cap_elem = {
1711 .mac_cap_info[0] =
1712 - IEEE80211_HE_MAC_CAP0_HTC_HE |
1713 - IEEE80211_HE_MAC_CAP0_TWT_REQ,
1714 + IEEE80211_HE_MAC_CAP0_HTC_HE,
1715 .mac_cap_info[1] =
1716 IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
1717 IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
1718 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1719 index 29ad7804d77aa..3c523774ef0e6 100644
1720 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1721 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1722 @@ -343,7 +343,6 @@ const static u8 he_if_types_ext_capa_sta[] = {
1723 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
1724 [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
1725 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
1726 - [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
1727 };
1728
1729 const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
1730 diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
1731 index 4e97ba64dbb42..3e6ef64e74d3d 100644
1732 --- a/include/linux/arm-smccc.h
1733 +++ b/include/linux/arm-smccc.h
1734 @@ -76,6 +76,11 @@
1735 ARM_SMCCC_SMC_32, \
1736 0, 0x7fff)
1737
1738 +#define ARM_SMCCC_ARCH_WORKAROUND_3 \
1739 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
1740 + ARM_SMCCC_SMC_32, \
1741 + 0, 0x3fff)
1742 +
1743 #define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1
1744
1745 #ifndef __ASSEMBLY__
1746 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
1747 index 614f19bbad74f..96faf09186945 100644
1748 --- a/include/net/xfrm.h
1749 +++ b/include/net/xfrm.h
1750 @@ -1663,14 +1663,15 @@ int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1751 const struct xfrm_migrate *m, int num_bundles,
1752 const struct xfrm_kmaddress *k,
1753 const struct xfrm_encap_tmpl *encap);
1754 -struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
1755 +struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
1756 + u32 if_id);
1757 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1758 struct xfrm_migrate *m,
1759 struct xfrm_encap_tmpl *encap);
1760 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1761 struct xfrm_migrate *m, int num_bundles,
1762 struct xfrm_kmaddress *k, struct net *net,
1763 - struct xfrm_encap_tmpl *encap);
1764 + struct xfrm_encap_tmpl *encap, u32 if_id);
1765 #endif
1766
1767 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1768 diff --git a/lib/Kconfig b/lib/Kconfig
1769 index 3321d04dfa5a5..fa129b5c4320d 100644
1770 --- a/lib/Kconfig
1771 +++ b/lib/Kconfig
1772 @@ -42,7 +42,6 @@ config BITREVERSE
1773 config HAVE_ARCH_BITREVERSE
1774 bool
1775 default n
1776 - depends on BITREVERSE
1777 help
1778 This option enables the use of hardware bit-reversal instructions on
1779 architectures which support such operations.
1780 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1781 index 9f53d25e047e3..4815cf72569e0 100644
1782 --- a/net/ipv4/tcp.c
1783 +++ b/net/ipv4/tcp.c
1784 @@ -1652,11 +1652,13 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1785 if (!copied)
1786 copied = used;
1787 break;
1788 - } else if (used <= len) {
1789 - seq += used;
1790 - copied += used;
1791 - offset += used;
1792 }
1793 + if (WARN_ON_ONCE(used > len))
1794 + used = len;
1795 + seq += used;
1796 + copied += used;
1797 + offset += used;
1798 +
1799 /* If recv_actor drops the lock (e.g. TCP splice
1800 * receive) the skb pointer might be invalid when
1801 * getting here: tcp_collapse might have deleted it
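
[Note on the tcp.c hunk above] A recv_actor callback may report consuming more bytes than it was offered; the old else-if silently skipped the bookkeeping in that case, desynchronizing seq/copied/offset. The fix warns once and clamps used to len so the cursors always advance consistently. A self-contained sketch of that clamp-and-warn pattern (a simplified model, not the kernel function):

    #include <stdio.h>

    /* Never let a callback's return value push the cursors past */
    /* the data it was actually given.                            */
    static size_t consume(size_t used, size_t len, size_t *seq)
    {
        if (used > len) {
            fprintf(stderr, "warn: actor consumed %zu > %zu\n", used, len);
            used = len;               /* clamp instead of desyncing */
        }
        *seq += used;
        return used;
    }

    int main(void)
    {
        size_t seq = 100;
        consume(12, 8, &seq);         /* misbehaving actor */
        printf("seq=%zu\n", seq);     /* advanced by 8, not 12 */
        return 0;
    }
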
1802 diff --git a/net/key/af_key.c b/net/key/af_key.c
1803 index 907d04a474597..406e13478b01b 100644
1804 --- a/net/key/af_key.c
1805 +++ b/net/key/af_key.c
1806 @@ -2627,7 +2627,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
1807 }
1808
1809 return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i,
1810 - kma ? &k : NULL, net, NULL);
1811 + kma ? &k : NULL, net, NULL, 0);
1812
1813 out:
1814 return err;
1815 diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
1816 index f140c2b94b2c6..f30cdd7f3a73a 100644
1817 --- a/net/mac80211/agg-tx.c
1818 +++ b/net/mac80211/agg-tx.c
1819 @@ -9,7 +9,7 @@
1820 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
1821 * Copyright 2007-2010, Intel Corporation
1822 * Copyright(c) 2015-2017 Intel Deutschland GmbH
1823 - * Copyright (C) 2018 - 2021 Intel Corporation
1824 + * Copyright (C) 2018 - 2022 Intel Corporation
1825 */
1826
1827 #include <linux/ieee80211.h>
1828 @@ -615,6 +615,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
1829 return -EINVAL;
1830 }
1831
1832 + if (test_sta_flag(sta, WLAN_STA_MFP) &&
1833 + !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1834 + ht_dbg(sdata,
1835 + "MFP STA not authorized - deny BA session request %pM tid %d\n",
1836 + sta->sta.addr, tid);
1837 + return -EINVAL;
1838 + }
1839 +
1840 /*
1841 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
1842 * member of an IBSS, and has no other existing Block Ack agreement
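
[Note on the agg-tx hunk above] For a station using management frame protection, starting a block-ack session before the station is authorized would exchange action frames before the protection keys are usable, so the new guard rejects the request early. A minimal model of that early-reject guard clause (illustrative struct and return values, not mac80211's):

    #include <stdbool.h>
    #include <stdio.h>

    struct sta { bool mfp; bool authorized; };

    /* MFP stations must be fully authorized before a BA session may start. */
    static int start_ba_session(const struct sta *s)
    {
        if (s->mfp && !s->authorized)
            return -1;   /* -EINVAL in the kernel */
        return 0;
    }

    int main(void)
    {
        struct sta s = { .mfp = true, .authorized = false };
        printf("ba=%d\n", start_ba_session(&s));  /* denied: -1 */
        return 0;
    }
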
1843 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
1844 index 7c6dcbc8e98ba..1d2f633c6c7c3 100644
1845 --- a/net/sctp/sm_statefuns.c
1846 +++ b/net/sctp/sm_statefuns.c
1847 @@ -149,6 +149,12 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort(
1848 void *arg,
1849 struct sctp_cmd_seq *commands);
1850
1851 +static enum sctp_disposition
1852 +__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
1853 + const struct sctp_association *asoc,
1854 + const union sctp_subtype type, void *arg,
1855 + struct sctp_cmd_seq *commands);
1856 +
1857 /* Small helper function that checks if the chunk length
1858 * is of the appropriate length. The 'required_length' argument
1859 * is set to be the size of a specific chunk we are testing.
1860 @@ -330,6 +336,14 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
1861 if (!chunk->singleton)
1862 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1863
1864 + /* Make sure that the INIT chunk has a valid length.
1865 + * Normally, this would cause an ABORT with a Protocol Violation
1866 + * error, but since we don't have an association, we'll
1867 + * just discard the packet.
1868 + */
1869 + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
1870 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1871 +
1872 /* If the packet is an OOTB packet which is temporarily on the
1873 * control endpoint, respond with an ABORT.
1874 */
1875 @@ -344,14 +358,6 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
1876 if (chunk->sctp_hdr->vtag != 0)
1877 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
1878
1879 - /* Make sure that the INIT chunk has a valid length.
1880 - * Normally, this would cause an ABORT with a Protocol Violation
1881 - * error, but since we don't have an association, we'll
1882 - * just discard the packet.
1883 - */
1884 - if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
1885 - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1886 -
1887 /* If the INIT is coming toward a closing socket, we'll send back
1888 * and ABORT. Essentially, this catches the race of INIT being
1889 * backloged to the socket at the same time as the user isses close().
1890 @@ -1484,19 +1490,16 @@ static enum sctp_disposition sctp_sf_do_unexpected_init(
1891 if (!chunk->singleton)
1892 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1893
1894 + /* Make sure that the INIT chunk has a valid length. */
1895 + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
1896 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1897 +
1898 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
1899 * Tag.
1900 */
1901 if (chunk->sctp_hdr->vtag != 0)
1902 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
1903
1904 - /* Make sure that the INIT chunk has a valid length.
1905 - * In this case, we generate a protocol violation since we have
1906 - * an association established.
1907 - */
1908 - if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
1909 - return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
1910 - commands);
1911 /* Grab the INIT header. */
1912 chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
1913
1914 @@ -1814,9 +1817,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
1915 * its peer.
1916 */
1917 if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
1918 - disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
1919 - SCTP_ST_CHUNK(chunk->chunk_hdr->type),
1920 - chunk, commands);
1921 + disposition = __sctp_sf_do_9_2_reshutack(net, ep, asoc,
1922 + SCTP_ST_CHUNK(chunk->chunk_hdr->type),
1923 + chunk, commands);
1924 if (SCTP_DISPOSITION_NOMEM == disposition)
1925 goto nomem;
1926
1927 @@ -2915,13 +2918,11 @@ enum sctp_disposition sctp_sf_do_9_2_shut_ctsn(
1928 * that belong to this association, it should discard the INIT chunk and
1929 * retransmit the SHUTDOWN ACK chunk.
1930 */
1931 -enum sctp_disposition sctp_sf_do_9_2_reshutack(
1932 - struct net *net,
1933 - const struct sctp_endpoint *ep,
1934 - const struct sctp_association *asoc,
1935 - const union sctp_subtype type,
1936 - void *arg,
1937 - struct sctp_cmd_seq *commands)
1938 +static enum sctp_disposition
1939 +__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
1940 + const struct sctp_association *asoc,
1941 + const union sctp_subtype type, void *arg,
1942 + struct sctp_cmd_seq *commands)
1943 {
1944 struct sctp_chunk *chunk = arg;
1945 struct sctp_chunk *reply;
1946 @@ -2955,6 +2956,26 @@ nomem:
1947 return SCTP_DISPOSITION_NOMEM;
1948 }
1949
1950 +enum sctp_disposition
1951 +sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
1952 + const struct sctp_association *asoc,
1953 + const union sctp_subtype type, void *arg,
1954 + struct sctp_cmd_seq *commands)
1955 +{
1956 + struct sctp_chunk *chunk = arg;
1957 +
1958 + if (!chunk->singleton)
1959 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1960 +
1961 + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
1962 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1963 +
1964 + if (chunk->sctp_hdr->vtag != 0)
1965 + return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
1966 +
1967 + return __sctp_sf_do_9_2_reshutack(net, ep, asoc, type, arg, commands);
1968 +}
1969 +
1970 /*
1971 * sctp_sf_do_ecn_cwr
1972 *
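
[Note on the sm_statefuns.c hunks above] The fix splits sctp_sf_do_9_2_reshutack() into a validating public entry point — singleton, chunk-length, and zero-vtag checks, mirroring the reordered INIT handlers earlier in the same file — and an internal __ helper with the original body. The duplicate-cookie path calls the helper directly because its chunk has already been validated. A compilable sketch of the validate-then-delegate split; the struct and the length threshold are placeholders, not SCTP's real definitions:

    #include <stdbool.h>
    #include <stdio.h>

    struct chunk { bool singleton; size_t len; unsigned vtag; };

    static int __handle(const struct chunk *c)  /* no re-checks here */
    {
        printf("handled len=%zu\n", c->len);
        return 0;
    }

    /* Public entry: validate first, then delegate. */
    static int handle(const struct chunk *c)
    {
        if (!c->singleton) return -1;   /* discard bundled chunk    */
        if (c->len < 20)   return -1;   /* placeholder INIT minimum */
        if (c->vtag != 0)  return -2;   /* abort: vtag must be zero */
        return __handle(c);
    }

    int main(void)
    {
        struct chunk ok = { true, 24, 0 };
        return handle(&ok);   /* pre-validated callers use __handle() directly */
    }
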
1973 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
1974 index f2bc465de2845..d3e2b97d5d051 100644
1975 --- a/net/wireless/nl80211.c
1976 +++ b/net/wireless/nl80211.c
1977 @@ -16314,7 +16314,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
1978 wdev->chandef = *chandef;
1979 wdev->preset_chandef = *chandef;
1980
1981 - if (wdev->iftype == NL80211_IFTYPE_STATION &&
1982 + if ((wdev->iftype == NL80211_IFTYPE_STATION ||
1983 + wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
1984 !WARN_ON(!wdev->current_bss))
1985 cfg80211_update_assoc_bss_entry(wdev, chandef->chan);
1986
1987 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
1988 index 404823c5eb7d5..3ecb77c58c44e 100644
1989 --- a/net/xfrm/xfrm_policy.c
1990 +++ b/net/xfrm/xfrm_policy.c
1991 @@ -4271,7 +4271,7 @@ static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
1992 }
1993
1994 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
1995 - u8 dir, u8 type, struct net *net)
1996 + u8 dir, u8 type, struct net *net, u32 if_id)
1997 {
1998 struct xfrm_policy *pol, *ret = NULL;
1999 struct hlist_head *chain;
2000 @@ -4280,7 +4280,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
2001 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2002 chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
2003 hlist_for_each_entry(pol, chain, bydst) {
2004 - if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2005 + if ((if_id == 0 || pol->if_id == if_id) &&
2006 + xfrm_migrate_selector_match(sel, &pol->selector) &&
2007 pol->type == type) {
2008 ret = pol;
2009 priority = ret->priority;
2010 @@ -4292,7 +4293,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
2011 if ((pol->priority >= priority) && ret)
2012 break;
2013
2014 - if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2015 + if ((if_id == 0 || pol->if_id == if_id) &&
2016 + xfrm_migrate_selector_match(sel, &pol->selector) &&
2017 pol->type == type) {
2018 ret = pol;
2019 break;
2020 @@ -4408,7 +4410,7 @@ static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
2021 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2022 struct xfrm_migrate *m, int num_migrate,
2023 struct xfrm_kmaddress *k, struct net *net,
2024 - struct xfrm_encap_tmpl *encap)
2025 + struct xfrm_encap_tmpl *encap, u32 if_id)
2026 {
2027 int i, err, nx_cur = 0, nx_new = 0;
2028 struct xfrm_policy *pol = NULL;
2029 @@ -4427,14 +4429,14 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2030 }
2031
2032 /* Stage 1 - find policy */
2033 - if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
2034 + if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) {
2035 err = -ENOENT;
2036 goto out;
2037 }
2038
2039 /* Stage 2 - find and update state(s) */
2040 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
2041 - if ((x = xfrm_migrate_state_find(mp, net))) {
2042 + if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
2043 x_cur[nx_cur] = x;
2044 nx_cur++;
2045 xc = xfrm_state_migrate(x, mp, encap);
2046 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
2047 index 1423e2b7cb42a..268bba29bb603 100644
2048 --- a/net/xfrm/xfrm_state.c
2049 +++ b/net/xfrm/xfrm_state.c
2050 @@ -1539,9 +1539,6 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
2051 memcpy(&x->mark, &orig->mark, sizeof(x->mark));
2052 memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));
2053
2054 - if (xfrm_init_state(x) < 0)
2055 - goto error;
2056 -
2057 x->props.flags = orig->props.flags;
2058 x->props.extra_flags = orig->props.extra_flags;
2059
2060 @@ -1563,7 +1560,8 @@ out:
2061 return NULL;
2062 }
2063
2064 -struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
2065 +struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
2066 + u32 if_id)
2067 {
2068 unsigned int h;
2069 struct xfrm_state *x = NULL;
2070 @@ -1579,6 +1577,8 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
2071 continue;
2072 if (m->reqid && x->props.reqid != m->reqid)
2073 continue;
2074 + if (if_id != 0 && x->if_id != if_id)
2075 + continue;
2076 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
2077 m->old_family) ||
2078 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
2079 @@ -1594,6 +1594,8 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
2080 if (x->props.mode != m->mode ||
2081 x->id.proto != m->proto)
2082 continue;
2083 + if (if_id != 0 && x->if_id != if_id)
2084 + continue;
2085 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
2086 m->old_family) ||
2087 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
2088 @@ -1620,6 +1622,11 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
2089 if (!xc)
2090 return NULL;
2091
2092 + xc->props.family = m->new_family;
2093 +
2094 + if (xfrm_init_state(xc) < 0)
2095 + goto error;
2096 +
2097 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
2098 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
2099
2100 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
2101 index 42ff32700d68b..bd44a800e7db7 100644
2102 --- a/net/xfrm/xfrm_user.c
2103 +++ b/net/xfrm/xfrm_user.c
2104 @@ -621,13 +621,8 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
2105
2106 xfrm_smark_init(attrs, &x->props.smark);
2107
2108 - if (attrs[XFRMA_IF_ID]) {
2109 + if (attrs[XFRMA_IF_ID])
2110 x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
2111 - if (!x->if_id) {
2112 - err = -EINVAL;
2113 - goto error;
2114 - }
2115 - }
2116
2117 err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
2118 if (err)
2119 @@ -1333,13 +1328,8 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
2120
2121 mark = xfrm_mark_get(attrs, &m);
2122
2123 - if (attrs[XFRMA_IF_ID]) {
2124 + if (attrs[XFRMA_IF_ID])
2125 if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
2126 - if (!if_id) {
2127 - err = -EINVAL;
2128 - goto out_noput;
2129 - }
2130 - }
2131
2132 if (p->info.seq) {
2133 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
2134 @@ -1641,13 +1631,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
2135
2136 xfrm_mark_get(attrs, &xp->mark);
2137
2138 - if (attrs[XFRMA_IF_ID]) {
2139 + if (attrs[XFRMA_IF_ID])
2140 xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
2141 - if (!xp->if_id) {
2142 - err = -EINVAL;
2143 - goto error;
2144 - }
2145 - }
2146
2147 return xp;
2148 error:
2149 @@ -2389,6 +2374,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2150 int n = 0;
2151 struct net *net = sock_net(skb->sk);
2152 struct xfrm_encap_tmpl *encap = NULL;
2153 + u32 if_id = 0;
2154
2155 if (attrs[XFRMA_MIGRATE] == NULL)
2156 return -EINVAL;
2157 @@ -2413,7 +2399,10 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2158 return 0;
2159 }
2160
2161 - err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap);
2162 + if (attrs[XFRMA_IF_ID])
2163 + if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
2164 +
2165 + err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, if_id);
2166
2167 kfree(encap);
2168
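
[Note on the xfrm hunks above] The migrate path now threads an if_id from the netlink attribute through xfrm_migrate() into both the policy and state lookups: 0 keeps the old match-anything behavior, while a nonzero value restricts migration to objects bound to that interface id. Consistently, the xfrm_user.c hunks stop rejecting XFRMA_IF_ID == 0, so zero remains a usable value. The filter predicate, extracted into a compilable sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* if_id filter used in both lookups: 0 acts as a wildcard,      */
    /* any other value must match the object's interface id exactly. */
    static bool if_id_matches(unsigned int want, unsigned int have)
    {
        return want == 0 || want == have;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               if_id_matches(0, 7),   /* 1: wildcard    */
               if_id_matches(7, 7),   /* 1: exact match */
               if_id_matches(7, 9));  /* 0: filtered    */
        return 0;
    }
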
2169 diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
2170 index 9ba7feffe344b..9814a0a15ba78 100644
2171 --- a/tools/testing/selftests/vm/userfaultfd.c
2172 +++ b/tools/testing/selftests/vm/userfaultfd.c
2173 @@ -46,6 +46,7 @@
2174 #include <signal.h>
2175 #include <poll.h>
2176 #include <string.h>
2177 +#include <linux/mman.h>
2178 #include <sys/mman.h>
2179 #include <sys/syscall.h>
2180 #include <sys/ioctl.h>
2181 diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
2182 index 48fde38d64c37..2f5dc7fb437bd 100644
2183 --- a/virt/kvm/arm/psci.c
2184 +++ b/virt/kvm/arm/psci.c
2185 @@ -426,6 +426,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
2186 break;
2187 }
2188 break;
2189 + case ARM_SMCCC_ARCH_WORKAROUND_3:
2190 + switch (kvm_arm_get_spectre_bhb_state()) {
2191 + case SPECTRE_VULNERABLE:
2192 + break;
2193 + case SPECTRE_MITIGATED:
2194 + val = SMCCC_RET_SUCCESS;
2195 + break;
2196 + case SPECTRE_UNAFFECTED:
2197 + val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
2198 + break;
2199 + }
2200 + break;
2201 }
2202 break;
2203 default:
2204 @@ -438,7 +450,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
2205
2206 int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
2207 {
2208 - return 3; /* PSCI version and two workaround registers */
2209 + return 4; /* PSCI version and three workaround registers */
2210 }
2211
2212 int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2213 @@ -452,6 +464,9 @@ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2214 if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
2215 return -EFAULT;
2216
2217 + if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3, uindices++))
2218 + return -EFAULT;
2219 +
2220 return 0;
2221 }
2222
2223 @@ -486,9 +501,20 @@ static int get_kernel_wa_level(u64 regid)
2224 return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
2225 case KVM_SSBD_UNKNOWN:
2226 default:
2227 - return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
2228 + break;
2229 }
2230 - }
2231 + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
2232 + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
2233 + switch (kvm_arm_get_spectre_bhb_state()) {
2234 + case SPECTRE_VULNERABLE:
2235 + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
2236 + case SPECTRE_MITIGATED:
2237 + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
2238 + case SPECTRE_UNAFFECTED:
2239 + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
2240 + }
2241 + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
2242 + }
2243
2244 return -EINVAL;
2245 }
2246 @@ -503,6 +529,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2247 val = kvm_psci_version(vcpu, vcpu->kvm);
2248 break;
2249 case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
2250 + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
2251 val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
2252 break;
2253 case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
2254 @@ -555,6 +582,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2255 }
2256
2257 case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
2258 + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
2259 if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
2260 return -EINVAL;
2261