Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0178-4.19.79-all-fixes.patch

Revision 3475
Tue Oct 29 10:31:34 2019 UTC by niro
File size: 143853 bytes
-linux-4.19.79
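
The headline change in this patch is arm64 Speculative Store Bypass Safe (SSBS) support: it adds an HWCAP_SSBS auxv bit for userspace detection and wires the arm64 Spectre/SSB mitigation state into the generic sysfs vulnerability files. As a minimal sketch of how userspace would consume both (plain C; the HWCAP_SSBS value of 1 << 28 and the sysfs path are taken straight from the hunks below, everything else is standard glibc):

#include <stdio.h>
#include <sys/auxv.h>

/* Value from the patched arch/arm64/include/uapi/asm/hwcap.h hunk below. */
#ifndef HWCAP_SSBS
#define HWCAP_SSBS (1 << 28)
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	/* Bit set => PSTATE.SSBS is directly controllable from EL0. */
	printf("HWCAP_SSBS: %s\n",
	       (hwcaps & HWCAP_SSBS) ? "present" : "absent");

	/* The patch also reports overall mitigation state via sysfs. */
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/"
			"spec_store_bypass", "r");
	if (f) {
		char line[128];

		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}
	return 0;
}

On a CPU or kernel without SSBS the hwcap bit simply reads back as absent, and the sysfs file reports one of the "Not affected"/"Mitigation: ..."/"Vulnerable" strings added to cpu_errata.c by this patch.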
1 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2 index e8ddf0ef232e..16607b178b47 100644
3 --- a/Documentation/admin-guide/kernel-parameters.txt
4 +++ b/Documentation/admin-guide/kernel-parameters.txt
5 @@ -2503,8 +2503,8 @@
6 http://repo.or.cz/w/linux-2.6/mini2440.git
7
8 mitigations=
9 - [X86,PPC,S390] Control optional mitigations for CPU
10 - vulnerabilities. This is a set of curated,
11 + [X86,PPC,S390,ARM64] Control optional mitigations for
12 + CPU vulnerabilities. This is a set of curated,
13 arch-independent options, each of which is an
14 aggregation of existing arch-specific options.
15
16 @@ -2513,12 +2513,14 @@
17 improves system performance, but it may also
18 expose users to several CPU vulnerabilities.
19 Equivalent to: nopti [X86,PPC]
20 + kpti=0 [ARM64]
21 nospectre_v1 [PPC]
22 nobp=0 [S390]
23 nospectre_v1 [X86]
24 - nospectre_v2 [X86,PPC,S390]
25 + nospectre_v2 [X86,PPC,S390,ARM64]
26 spectre_v2_user=off [X86]
27 spec_store_bypass_disable=off [X86,PPC]
28 + ssbd=force-off [ARM64]
29 l1tf=off [X86]
30 mds=off [X86]
31
32 @@ -2866,10 +2868,10 @@
33 (bounds check bypass). With this option data leaks
34 are possible in the system.
35
36 - nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
37 - (indirect branch prediction) vulnerability. System may
38 - allow data leaks with this option, which is equivalent
39 - to spectre_v2=off.
40 + nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
41 + the Spectre variant 2 (indirect branch prediction)
42 + vulnerability. System may allow data leaks with this
43 + option.
44
45 nospec_store_bypass_disable
46 [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
47 diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
48 index d6aff2c5e9e2..6feaffe90e22 100644
49 --- a/Documentation/arm64/elf_hwcaps.txt
50 +++ b/Documentation/arm64/elf_hwcaps.txt
51 @@ -178,3 +178,7 @@ HWCAP_ILRCPC
52 HWCAP_FLAGM
53
54 Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
55 +
56 +HWCAP_SSBS
57 +
58 + Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010.
59 diff --git a/Makefile b/Makefile
60 index 440c5b5c4f4b..4d29c7370b46 100644
61 --- a/Makefile
62 +++ b/Makefile
63 @@ -1,7 +1,7 @@
64 # SPDX-License-Identifier: GPL-2.0
65 VERSION = 4
66 PATCHLEVEL = 19
67 -SUBLEVEL = 78
68 +SUBLEVEL = 79
69 EXTRAVERSION =
70 NAME = "People's Front"
71
72 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
73 index e3ebece79617..51fe21f5d078 100644
74 --- a/arch/arm64/Kconfig
75 +++ b/arch/arm64/Kconfig
76 @@ -84,6 +84,7 @@ config ARM64
77 select GENERIC_CLOCKEVENTS
78 select GENERIC_CLOCKEVENTS_BROADCAST
79 select GENERIC_CPU_AUTOPROBE
80 + select GENERIC_CPU_VULNERABILITIES
81 select GENERIC_EARLY_IOREMAP
82 select GENERIC_IDLE_POLL_SETUP
83 select GENERIC_IRQ_MULTI_HANDLER
84 diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
85 index 25ce9056cf64..c3de0bbf0e9a 100644
86 --- a/arch/arm64/include/asm/cpucaps.h
87 +++ b/arch/arm64/include/asm/cpucaps.h
88 @@ -52,7 +52,8 @@
89 #define ARM64_MISMATCHED_CACHE_TYPE 31
90 #define ARM64_HAS_STAGE2_FWB 32
91 #define ARM64_WORKAROUND_1463225 33
92 +#define ARM64_SSBS 34
93
94 -#define ARM64_NCAPS 34
95 +#define ARM64_NCAPS 35
96
97 #endif /* __ASM_CPUCAPS_H */
98 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
99 index 510f687d269a..dda6e5056810 100644
100 --- a/arch/arm64/include/asm/cpufeature.h
101 +++ b/arch/arm64/include/asm/cpufeature.h
102 @@ -525,11 +525,7 @@ static inline int arm64_get_ssbd_state(void)
103 #endif
104 }
105
106 -#ifdef CONFIG_ARM64_SSBD
107 void arm64_set_ssbd_mitigation(bool state);
108 -#else
109 -static inline void arm64_set_ssbd_mitigation(bool state) {}
110 -#endif
111
112 #endif /* __ASSEMBLY__ */
113
114 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
115 index 6abe4002945f..367b2e0b6d76 100644
116 --- a/arch/arm64/include/asm/kvm_host.h
117 +++ b/arch/arm64/include/asm/kvm_host.h
118 @@ -398,6 +398,8 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
119
120 DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
121
122 +void __kvm_enable_ssbs(void);
123 +
124 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
125 unsigned long hyp_stack_ptr,
126 unsigned long vector_ptr)
127 @@ -418,6 +420,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
128 */
129 BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
130 __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
131 +
132 + /*
133 + * Disabling SSBD on a non-VHE system requires us to enable SSBS
134 + * at EL2.
135 + */
136 + if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
137 + arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
138 + kvm_call_hyp(__kvm_enable_ssbs);
139 + }
140 }
141
142 static inline bool kvm_arch_check_sve_has_vhe(void)
143 diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
144 index def5a5e807f0..773ea8e0e442 100644
145 --- a/arch/arm64/include/asm/processor.h
146 +++ b/arch/arm64/include/asm/processor.h
147 @@ -177,11 +177,25 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
148 regs->pc = pc;
149 }
150
151 +static inline void set_ssbs_bit(struct pt_regs *regs)
152 +{
153 + regs->pstate |= PSR_SSBS_BIT;
154 +}
155 +
156 +static inline void set_compat_ssbs_bit(struct pt_regs *regs)
157 +{
158 + regs->pstate |= PSR_AA32_SSBS_BIT;
159 +}
160 +
161 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
162 unsigned long sp)
163 {
164 start_thread_common(regs, pc);
165 regs->pstate = PSR_MODE_EL0t;
166 +
167 + if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
168 + set_ssbs_bit(regs);
169 +
170 regs->sp = sp;
171 }
172
173 @@ -198,6 +212,9 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
174 regs->pstate |= PSR_AA32_E_BIT;
175 #endif
176
177 + if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
178 + set_compat_ssbs_bit(regs);
179 +
180 regs->compat_sp = sp;
181 }
182 #endif
183 diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
184 index 177b851ca6d9..6bc43889d11e 100644
185 --- a/arch/arm64/include/asm/ptrace.h
186 +++ b/arch/arm64/include/asm/ptrace.h
187 @@ -50,6 +50,7 @@
188 #define PSR_AA32_I_BIT 0x00000080
189 #define PSR_AA32_A_BIT 0x00000100
190 #define PSR_AA32_E_BIT 0x00000200
191 +#define PSR_AA32_SSBS_BIT 0x00800000
192 #define PSR_AA32_DIT_BIT 0x01000000
193 #define PSR_AA32_Q_BIT 0x08000000
194 #define PSR_AA32_V_BIT 0x10000000
195 diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
196 index c1470931b897..3091ae5975a3 100644
197 --- a/arch/arm64/include/asm/sysreg.h
198 +++ b/arch/arm64/include/asm/sysreg.h
199 @@ -86,11 +86,14 @@
200
201 #define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
202 #define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
203 +#define REG_PSTATE_SSBS_IMM sys_reg(0, 3, 4, 0, 1)
204
205 #define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
206 (!!x)<<8 | 0x1f)
207 #define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
208 (!!x)<<8 | 0x1f)
209 +#define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM | \
210 + (!!x)<<8 | 0x1f)
211
212 #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
213 #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
214 @@ -419,6 +422,7 @@
215 #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
216
217 /* Common SCTLR_ELx flags. */
218 +#define SCTLR_ELx_DSSBS (1UL << 44)
219 #define SCTLR_ELx_EE (1 << 25)
220 #define SCTLR_ELx_IESB (1 << 21)
221 #define SCTLR_ELx_WXN (1 << 19)
222 @@ -439,7 +443,7 @@
223 (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
224 (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
225 (1 << 27) | (1 << 30) | (1 << 31) | \
226 - (0xffffffffUL << 32))
227 + (0xffffefffUL << 32))
228
229 #ifdef CONFIG_CPU_BIG_ENDIAN
230 #define ENDIAN_SET_EL2 SCTLR_ELx_EE
231 @@ -453,7 +457,7 @@
232 #define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
233 #define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
234 SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
235 - ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
236 + SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
237
238 #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
239 #error "Inconsistent SCTLR_EL2 set/clear bits"
240 @@ -477,7 +481,7 @@
241 (1 << 29))
242 #define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
243 (1 << 27) | (1 << 30) | (1 << 31) | \
244 - (0xffffffffUL << 32))
245 + (0xffffefffUL << 32))
246
247 #ifdef CONFIG_CPU_BIG_ENDIAN
248 #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
249 @@ -494,7 +498,7 @@
250 ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
251 #define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
252 SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
253 - SCTLR_EL1_RES0)
254 + SCTLR_ELx_DSSBS | SCTLR_EL1_RES0)
255
256 #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
257 #error "Inconsistent SCTLR_EL1 set/clear bits"
258 @@ -544,6 +548,13 @@
259 #define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
260 #define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
261
262 +/* id_aa64pfr1 */
263 +#define ID_AA64PFR1_SSBS_SHIFT 4
264 +
265 +#define ID_AA64PFR1_SSBS_PSTATE_NI 0
266 +#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
267 +#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
268 +
269 /* id_aa64mmfr0 */
270 #define ID_AA64MMFR0_TGRAN4_SHIFT 28
271 #define ID_AA64MMFR0_TGRAN64_SHIFT 24
272 diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
273 index 17c65c8f33cb..2bcd6e4f3474 100644
274 --- a/arch/arm64/include/uapi/asm/hwcap.h
275 +++ b/arch/arm64/include/uapi/asm/hwcap.h
276 @@ -48,5 +48,6 @@
277 #define HWCAP_USCAT (1 << 25)
278 #define HWCAP_ILRCPC (1 << 26)
279 #define HWCAP_FLAGM (1 << 27)
280 +#define HWCAP_SSBS (1 << 28)
281
282 #endif /* _UAPI__ASM_HWCAP_H */
283 diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
284 index 5dff8eccd17d..b0fd1d300154 100644
285 --- a/arch/arm64/include/uapi/asm/ptrace.h
286 +++ b/arch/arm64/include/uapi/asm/ptrace.h
287 @@ -46,6 +46,7 @@
288 #define PSR_I_BIT 0x00000080
289 #define PSR_A_BIT 0x00000100
290 #define PSR_D_BIT 0x00000200
291 +#define PSR_SSBS_BIT 0x00001000
292 #define PSR_PAN_BIT 0x00400000
293 #define PSR_UAO_BIT 0x00800000
294 #define PSR_V_BIT 0x10000000
295 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
296 index dc6c535cbd13..9ccf16939d13 100644
297 --- a/arch/arm64/kernel/cpu_errata.c
298 +++ b/arch/arm64/kernel/cpu_errata.c
299 @@ -19,6 +19,7 @@
300 #include <linux/arm-smccc.h>
301 #include <linux/psci.h>
302 #include <linux/types.h>
303 +#include <linux/cpu.h>
304 #include <asm/cpu.h>
305 #include <asm/cputype.h>
306 #include <asm/cpufeature.h>
307 @@ -87,7 +88,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
308
309 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
310
311 -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
312 #include <asm/mmu_context.h>
313 #include <asm/cacheflush.h>
314
315 @@ -109,9 +109,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
316 __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
317 }
318
319 -static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
320 - const char *hyp_vecs_start,
321 - const char *hyp_vecs_end)
322 +static void install_bp_hardening_cb(bp_hardening_cb_t fn,
323 + const char *hyp_vecs_start,
324 + const char *hyp_vecs_end)
325 {
326 static DEFINE_SPINLOCK(bp_lock);
327 int cpu, slot = -1;
328 @@ -138,7 +138,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
329 #define __smccc_workaround_1_smc_start NULL
330 #define __smccc_workaround_1_smc_end NULL
331
332 -static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
333 +static void install_bp_hardening_cb(bp_hardening_cb_t fn,
334 const char *hyp_vecs_start,
335 const char *hyp_vecs_end)
336 {
337 @@ -146,23 +146,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
338 }
339 #endif /* CONFIG_KVM_INDIRECT_VECTORS */
340
341 -static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
342 - bp_hardening_cb_t fn,
343 - const char *hyp_vecs_start,
344 - const char *hyp_vecs_end)
345 -{
346 - u64 pfr0;
347 -
348 - if (!entry->matches(entry, SCOPE_LOCAL_CPU))
349 - return;
350 -
351 - pfr0 = read_cpuid(ID_AA64PFR0_EL1);
352 - if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
353 - return;
354 -
355 - __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
356 -}
357 -
358 #include <uapi/linux/psci.h>
359 #include <linux/arm-smccc.h>
360 #include <linux/psci.h>
361 @@ -189,60 +172,83 @@ static void qcom_link_stack_sanitization(void)
362 : "=&r" (tmp));
363 }
364
365 -static void
366 -enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
367 +static bool __nospectre_v2;
368 +static int __init parse_nospectre_v2(char *str)
369 +{
370 + __nospectre_v2 = true;
371 + return 0;
372 +}
373 +early_param("nospectre_v2", parse_nospectre_v2);
374 +
375 +/*
376 + * -1: No workaround
377 + * 0: No workaround required
378 + * 1: Workaround installed
379 + */
380 +static int detect_harden_bp_fw(void)
381 {
382 bp_hardening_cb_t cb;
383 void *smccc_start, *smccc_end;
384 struct arm_smccc_res res;
385 u32 midr = read_cpuid_id();
386
387 - if (!entry->matches(entry, SCOPE_LOCAL_CPU))
388 - return;
389 -
390 if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
391 - return;
392 + return -1;
393
394 switch (psci_ops.conduit) {
395 case PSCI_CONDUIT_HVC:
396 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
397 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
398 - if ((int)res.a0 < 0)
399 - return;
400 - cb = call_hvc_arch_workaround_1;
401 - /* This is a guest, no need to patch KVM vectors */
402 - smccc_start = NULL;
403 - smccc_end = NULL;
404 + switch ((int)res.a0) {
405 + case 1:
406 + /* Firmware says we're just fine */
407 + return 0;
408 + case 0:
409 + cb = call_hvc_arch_workaround_1;
410 + /* This is a guest, no need to patch KVM vectors */
411 + smccc_start = NULL;
412 + smccc_end = NULL;
413 + break;
414 + default:
415 + return -1;
416 + }
417 break;
418
419 case PSCI_CONDUIT_SMC:
420 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
421 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
422 - if ((int)res.a0 < 0)
423 - return;
424 - cb = call_smc_arch_workaround_1;
425 - smccc_start = __smccc_workaround_1_smc_start;
426 - smccc_end = __smccc_workaround_1_smc_end;
427 + switch ((int)res.a0) {
428 + case 1:
429 + /* Firmware says we're just fine */
430 + return 0;
431 + case 0:
432 + cb = call_smc_arch_workaround_1;
433 + smccc_start = __smccc_workaround_1_smc_start;
434 + smccc_end = __smccc_workaround_1_smc_end;
435 + break;
436 + default:
437 + return -1;
438 + }
439 break;
440
441 default:
442 - return;
443 + return -1;
444 }
445
446 if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
447 ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
448 cb = qcom_link_stack_sanitization;
449
450 - install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
451 + if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
452 + install_bp_hardening_cb(cb, smccc_start, smccc_end);
453
454 - return;
455 + return 1;
456 }
457 -#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
458
459 -#ifdef CONFIG_ARM64_SSBD
460 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
461
462 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
463 +static bool __ssb_safe = true;
464
465 static const struct ssbd_options {
466 const char *str;
467 @@ -312,6 +318,19 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
468
469 void arm64_set_ssbd_mitigation(bool state)
470 {
471 + if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
472 + pr_info_once("SSBD disabled by kernel configuration\n");
473 + return;
474 + }
475 +
476 + if (this_cpu_has_cap(ARM64_SSBS)) {
477 + if (state)
478 + asm volatile(SET_PSTATE_SSBS(0));
479 + else
480 + asm volatile(SET_PSTATE_SSBS(1));
481 + return;
482 + }
483 +
484 switch (psci_ops.conduit) {
485 case PSCI_CONDUIT_HVC:
486 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
487 @@ -333,11 +352,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
488 struct arm_smccc_res res;
489 bool required = true;
490 s32 val;
491 + bool this_cpu_safe = false;
492
493 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
494
495 + if (cpu_mitigations_off())
496 + ssbd_state = ARM64_SSBD_FORCE_DISABLE;
497 +
498 + /* delay setting __ssb_safe until we get a firmware response */
499 + if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
500 + this_cpu_safe = true;
501 +
502 + if (this_cpu_has_cap(ARM64_SSBS)) {
503 + if (!this_cpu_safe)
504 + __ssb_safe = false;
505 + required = false;
506 + goto out_printmsg;
507 + }
508 +
509 if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
510 ssbd_state = ARM64_SSBD_UNKNOWN;
511 + if (!this_cpu_safe)
512 + __ssb_safe = false;
513 return false;
514 }
515
516 @@ -354,6 +390,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
517
518 default:
519 ssbd_state = ARM64_SSBD_UNKNOWN;
520 + if (!this_cpu_safe)
521 + __ssb_safe = false;
522 return false;
523 }
524
525 @@ -362,14 +400,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
526 switch (val) {
527 case SMCCC_RET_NOT_SUPPORTED:
528 ssbd_state = ARM64_SSBD_UNKNOWN;
529 + if (!this_cpu_safe)
530 + __ssb_safe = false;
531 return false;
532
533 + /* machines with mixed mitigation requirements must not return this */
534 case SMCCC_RET_NOT_REQUIRED:
535 pr_info_once("%s mitigation not required\n", entry->desc);
536 ssbd_state = ARM64_SSBD_MITIGATED;
537 return false;
538
539 case SMCCC_RET_SUCCESS:
540 + __ssb_safe = false;
541 required = true;
542 break;
543
544 @@ -379,12 +421,13 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
545
546 default:
547 WARN_ON(1);
548 + if (!this_cpu_safe)
549 + __ssb_safe = false;
550 return false;
551 }
552
553 switch (ssbd_state) {
554 case ARM64_SSBD_FORCE_DISABLE:
555 - pr_info_once("%s disabled from command-line\n", entry->desc);
556 arm64_set_ssbd_mitigation(false);
557 required = false;
558 break;
559 @@ -397,7 +440,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
560 break;
561
562 case ARM64_SSBD_FORCE_ENABLE:
563 - pr_info_once("%s forced from command-line\n", entry->desc);
564 arm64_set_ssbd_mitigation(true);
565 required = true;
566 break;
567 @@ -407,9 +449,27 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
568 break;
569 }
570
571 +out_printmsg:
572 + switch (ssbd_state) {
573 + case ARM64_SSBD_FORCE_DISABLE:
574 + pr_info_once("%s disabled from command-line\n", entry->desc);
575 + break;
576 +
577 + case ARM64_SSBD_FORCE_ENABLE:
578 + pr_info_once("%s forced from command-line\n", entry->desc);
579 + break;
580 + }
581 +
582 return required;
583 }
584 -#endif /* CONFIG_ARM64_SSBD */
585 +
586 +/* known invulnerable cores */
587 +static const struct midr_range arm64_ssb_cpus[] = {
588 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
589 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
590 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
591 + {},
592 +};
593
594 #ifdef CONFIG_ARM64_ERRATUM_1463225
595 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
596 @@ -464,6 +524,10 @@ has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
597 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
598 CAP_MIDR_RANGE_LIST(midr_list)
599
600 +/* Track overall mitigation state. We are only mitigated if all cores are ok */
601 +static bool __hardenbp_enab = true;
602 +static bool __spectrev2_safe = true;
603 +
604 /*
605 * Generic helper for handling capabilties with multiple (match,enable) pairs
606 * of call backs, sharing the same capability bit.
607 @@ -496,26 +560,63 @@ multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
608 caps->cpu_enable(caps);
609 }
610
611 -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
612 -
613 /*
614 - * List of CPUs where we need to issue a psci call to
615 - * harden the branch predictor.
616 + * List of CPUs that do not need any Spectre-v2 mitigation at all.
617 */
618 -static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
619 - MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
620 - MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
621 - MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
622 - MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
623 - MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
624 - MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
625 - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
626 - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
627 - MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
628 - {},
629 +static const struct midr_range spectre_v2_safe_list[] = {
630 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
631 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
632 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
633 + { /* sentinel */ }
634 };
635
636 -#endif
637 +/*
638 + * Track overall bp hardening for all heterogeneous cores in the machine.
639 + * We are only considered "safe" if all booted cores are known safe.
640 + */
641 +static bool __maybe_unused
642 +check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
643 +{
644 + int need_wa;
645 +
646 + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
647 +
648 + /* If the CPU has CSV2 set, we're safe */
649 + if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
650 + ID_AA64PFR0_CSV2_SHIFT))
651 + return false;
652 +
653 + /* Alternatively, we have a list of unaffected CPUs */
654 + if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
655 + return false;
656 +
657 + /* Fallback to firmware detection */
658 + need_wa = detect_harden_bp_fw();
659 + if (!need_wa)
660 + return false;
661 +
662 + __spectrev2_safe = false;
663 +
664 + if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
665 + pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
666 + __hardenbp_enab = false;
667 + return false;
668 + }
669 +
670 + /* forced off */
671 + if (__nospectre_v2 || cpu_mitigations_off()) {
672 + pr_info_once("spectrev2 mitigation disabled by command line option\n");
673 + __hardenbp_enab = false;
674 + return false;
675 + }
676 +
677 + if (need_wa < 0) {
678 + pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
679 + __hardenbp_enab = false;
680 + }
681 +
682 + return (need_wa > 0);
683 +}
684
685 #ifdef CONFIG_HARDEN_EL2_VECTORS
686
687 @@ -674,13 +775,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
688 ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
689 },
690 #endif
691 -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
692 {
693 .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
694 - .cpu_enable = enable_smccc_arch_workaround_1,
695 - ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
696 + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
697 + .matches = check_branch_predictor,
698 },
699 -#endif
700 #ifdef CONFIG_HARDEN_EL2_VECTORS
701 {
702 .desc = "EL2 vector hardening",
703 @@ -688,14 +787,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
704 ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
705 },
706 #endif
707 -#ifdef CONFIG_ARM64_SSBD
708 {
709 .desc = "Speculative Store Bypass Disable",
710 .capability = ARM64_SSBD,
711 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
712 .matches = has_ssbd_mitigation,
713 + .midr_range_list = arm64_ssb_cpus,
714 },
715 -#endif
716 #ifdef CONFIG_ARM64_ERRATUM_1463225
717 {
718 .desc = "ARM erratum 1463225",
719 @@ -707,3 +805,38 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
720 {
721 }
722 };
723 +
724 +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
725 + char *buf)
726 +{
727 + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
728 +}
729 +
730 +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
731 + char *buf)
732 +{
733 + if (__spectrev2_safe)
734 + return sprintf(buf, "Not affected\n");
735 +
736 + if (__hardenbp_enab)
737 + return sprintf(buf, "Mitigation: Branch predictor hardening\n");
738 +
739 + return sprintf(buf, "Vulnerable\n");
740 +}
741 +
742 +ssize_t cpu_show_spec_store_bypass(struct device *dev,
743 + struct device_attribute *attr, char *buf)
744 +{
745 + if (__ssb_safe)
746 + return sprintf(buf, "Not affected\n");
747 +
748 + switch (ssbd_state) {
749 + case ARM64_SSBD_KERNEL:
750 + case ARM64_SSBD_FORCE_ENABLE:
751 + if (IS_ENABLED(CONFIG_ARM64_SSBD))
752 + return sprintf(buf,
753 + "Mitigation: Speculative Store Bypass disabled via prctl\n");
754 + }
755 +
756 + return sprintf(buf, "Vulnerable\n");
757 +}
758 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
759 index a897efdb3ddd..ff5beb59b3dc 100644
760 --- a/arch/arm64/kernel/cpufeature.c
761 +++ b/arch/arm64/kernel/cpufeature.c
762 @@ -24,6 +24,7 @@
763 #include <linux/stop_machine.h>
764 #include <linux/types.h>
765 #include <linux/mm.h>
766 +#include <linux/cpu.h>
767 #include <asm/cpu.h>
768 #include <asm/cpufeature.h>
769 #include <asm/cpu_ops.h>
770 @@ -164,6 +165,11 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
771 ARM64_FTR_END,
772 };
773
774 +static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
775 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
776 + ARM64_FTR_END,
777 +};
778 +
779 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
780 /*
781 * We already refuse to boot CPUs that don't support our configured
782 @@ -379,7 +385,7 @@ static const struct __ftr_reg_entry {
783
784 /* Op1 = 0, CRn = 0, CRm = 4 */
785 ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
786 - ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
787 + ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
788 ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
789
790 /* Op1 = 0, CRn = 0, CRm = 5 */
791 @@ -669,7 +675,6 @@ void update_cpu_features(int cpu,
792
793 /*
794 * EL3 is not our concern.
795 - * ID_AA64PFR1 is currently RES0.
796 */
797 taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
798 info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
799 @@ -885,7 +890,7 @@ static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
800 return ctr & BIT(CTR_DIC_SHIFT);
801 }
802
803 -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
804 +static bool __meltdown_safe = true;
805 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
806
807 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
808 @@ -903,7 +908,17 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
809 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
810 { /* sentinel */ }
811 };
812 - char const *str = "command line option";
813 + char const *str = "kpti command line option";
814 + bool meltdown_safe;
815 +
816 + meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
817 +
818 + /* Defer to CPU feature registers */
819 + if (has_cpuid_feature(entry, scope))
820 + meltdown_safe = true;
821 +
822 + if (!meltdown_safe)
823 + __meltdown_safe = false;
824
825 /*
826 * For reasons that aren't entirely clear, enabling KPTI on Cavium
827 @@ -915,6 +930,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
828 __kpti_forced = -1;
829 }
830
831 + /* Useful for KASLR robustness */
832 + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
833 + if (!__kpti_forced) {
834 + str = "KASLR";
835 + __kpti_forced = 1;
836 + }
837 + }
838 +
839 + if (cpu_mitigations_off() && !__kpti_forced) {
840 + str = "mitigations=off";
841 + __kpti_forced = -1;
842 + }
843 +
844 + if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
845 + pr_info_once("kernel page table isolation disabled by kernel configuration\n");
846 + return false;
847 + }
848 +
849 /* Forced? */
850 if (__kpti_forced) {
851 pr_info_once("kernel page table isolation forced %s by %s\n",
852 @@ -922,18 +955,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
853 return __kpti_forced > 0;
854 }
855
856 - /* Useful for KASLR robustness */
857 - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
858 - return true;
859 -
860 - /* Don't force KPTI for CPUs that are not vulnerable */
861 - if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
862 - return false;
863 -
864 - /* Defer to CPU feature registers */
865 - return !has_cpuid_feature(entry, scope);
866 + return !meltdown_safe;
867 }
868
869 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
870 static void
871 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
872 {
873 @@ -958,6 +983,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
874
875 return;
876 }
877 +#else
878 +static void
879 +kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
880 +{
881 +}
882 +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
883
884 static int __init parse_kpti(char *str)
885 {
886 @@ -971,7 +1002,6 @@ static int __init parse_kpti(char *str)
887 return 0;
888 }
889 early_param("kpti", parse_kpti);
890 -#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
891
892 #ifdef CONFIG_ARM64_HW_AFDBM
893 static inline void __cpu_enable_hw_dbm(void)
894 @@ -1067,6 +1097,48 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
895 WARN_ON(val & (7 << 27 | 7 << 21));
896 }
897
898 +#ifdef CONFIG_ARM64_SSBD
899 +static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
900 +{
901 + if (user_mode(regs))
902 + return 1;
903 +
904 + if (instr & BIT(CRm_shift))
905 + regs->pstate |= PSR_SSBS_BIT;
906 + else
907 + regs->pstate &= ~PSR_SSBS_BIT;
908 +
909 + arm64_skip_faulting_instruction(regs, 4);
910 + return 0;
911 +}
912 +
913 +static struct undef_hook ssbs_emulation_hook = {
914 + .instr_mask = ~(1U << CRm_shift),
915 + .instr_val = 0xd500001f | REG_PSTATE_SSBS_IMM,
916 + .fn = ssbs_emulation_handler,
917 +};
918 +
919 +static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
920 +{
921 + static bool undef_hook_registered = false;
922 + static DEFINE_SPINLOCK(hook_lock);
923 +
924 + spin_lock(&hook_lock);
925 + if (!undef_hook_registered) {
926 + register_undef_hook(&ssbs_emulation_hook);
927 + undef_hook_registered = true;
928 + }
929 + spin_unlock(&hook_lock);
930 +
931 + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
932 + sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
933 + arm64_set_ssbd_mitigation(false);
934 + } else {
935 + arm64_set_ssbd_mitigation(true);
936 + }
937 +}
938 +#endif /* CONFIG_ARM64_SSBD */
939 +
940 static const struct arm64_cpu_capabilities arm64_features[] = {
941 {
942 .desc = "GIC system register CPU interface",
943 @@ -1150,7 +1222,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
944 .field_pos = ID_AA64PFR0_EL0_SHIFT,
945 .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
946 },
947 -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
948 {
949 .desc = "Kernel page table isolation (KPTI)",
950 .capability = ARM64_UNMAP_KERNEL_AT_EL0,
951 @@ -1166,7 +1237,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
952 .matches = unmap_kernel_at_el0,
953 .cpu_enable = kpti_install_ng_mappings,
954 },
955 -#endif
956 {
957 /* FP/SIMD is not implemented */
958 .capability = ARM64_HAS_NO_FPSIMD,
959 @@ -1253,6 +1323,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
960 .matches = has_hw_dbm,
961 .cpu_enable = cpu_enable_hw_dbm,
962 },
963 +#endif
964 +#ifdef CONFIG_ARM64_SSBD
965 + {
966 + .desc = "Speculative Store Bypassing Safe (SSBS)",
967 + .capability = ARM64_SSBS,
968 + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
969 + .matches = has_cpuid_feature,
970 + .sys_reg = SYS_ID_AA64PFR1_EL1,
971 + .field_pos = ID_AA64PFR1_SSBS_SHIFT,
972 + .sign = FTR_UNSIGNED,
973 + .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
974 + .cpu_enable = cpu_enable_ssbs,
975 + },
976 #endif
977 {},
978 };
979 @@ -1299,6 +1382,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
980 #ifdef CONFIG_ARM64_SVE
981 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
982 #endif
983 + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
984 {},
985 };
986
987 @@ -1793,3 +1877,15 @@ void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
988 /* Firmware may have left a deferred SError in this register. */
989 write_sysreg_s(0, SYS_DISR_EL1);
990 }
991 +
992 +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
993 + char *buf)
994 +{
995 + if (__meltdown_safe)
996 + return sprintf(buf, "Not affected\n");
997 +
998 + if (arm64_kernel_unmapped_at_el0())
999 + return sprintf(buf, "Mitigation: PTI\n");
1000 +
1001 + return sprintf(buf, "Vulnerable\n");
1002 +}
1003 diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
1004 index e9ab7b3ed317..dce971f2c167 100644
1005 --- a/arch/arm64/kernel/cpuinfo.c
1006 +++ b/arch/arm64/kernel/cpuinfo.c
1007 @@ -81,6 +81,7 @@ static const char *const hwcap_str[] = {
1008 "uscat",
1009 "ilrcpc",
1010 "flagm",
1011 + "ssbs",
1012 NULL
1013 };
1014
1015 diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
1016 index 7f1628effe6d..bc2226608e13 100644
1017 --- a/arch/arm64/kernel/process.c
1018 +++ b/arch/arm64/kernel/process.c
1019 @@ -358,6 +358,10 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
1020 if (IS_ENABLED(CONFIG_ARM64_UAO) &&
1021 cpus_have_const_cap(ARM64_HAS_UAO))
1022 childregs->pstate |= PSR_UAO_BIT;
1023 +
1024 + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
1025 + set_ssbs_bit(childregs);
1026 +
1027 p->thread.cpu_context.x19 = stack_start;
1028 p->thread.cpu_context.x20 = stk_sz;
1029 }
1030 @@ -397,6 +401,32 @@ void uao_thread_switch(struct task_struct *next)
1031 }
1032 }
1033
1034 +/*
1035 + * Force SSBS state on context-switch, since it may be lost after migrating
1036 + * from a CPU which treats the bit as RES0 in a heterogeneous system.
1037 + */
1038 +static void ssbs_thread_switch(struct task_struct *next)
1039 +{
1040 + struct pt_regs *regs = task_pt_regs(next);
1041 +
1042 + /*
1043 + * Nothing to do for kernel threads, but 'regs' may be junk
1044 + * (e.g. idle task) so check the flags and bail early.
1045 + */
1046 + if (unlikely(next->flags & PF_KTHREAD))
1047 + return;
1048 +
1049 + /* If the mitigation is enabled, then we leave SSBS clear. */
1050 + if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
1051 + test_tsk_thread_flag(next, TIF_SSBD))
1052 + return;
1053 +
1054 + if (compat_user_mode(regs))
1055 + set_compat_ssbs_bit(regs);
1056 + else if (user_mode(regs))
1057 + set_ssbs_bit(regs);
1058 +}
1059 +
1060 /*
1061 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
1062 * shadow copy so that we can restore this upon entry from userspace.
1063 @@ -425,6 +455,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
1064 contextidr_thread_switch(next);
1065 entry_task_switch(next);
1066 uao_thread_switch(next);
1067 + ssbs_thread_switch(next);
1068
1069 /*
1070 * Complete any pending TLB or cache maintenance on this CPU in case
1071 diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
1072 index 6219486fa25f..0211c3c7533b 100644
1073 --- a/arch/arm64/kernel/ptrace.c
1074 +++ b/arch/arm64/kernel/ptrace.c
1075 @@ -1666,19 +1666,20 @@ void syscall_trace_exit(struct pt_regs *regs)
1076 }
1077
1078 /*
1079 - * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
1080 - * We also take into account DIT (bit 24), which is not yet documented, and
1081 - * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
1082 - * allocated an EL0 meaning in future.
1083 + * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
1084 + * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
1085 + * not described in ARM DDI 0487D.a.
1086 + * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
1087 + * be allocated an EL0 meaning in future.
1088 * Userspace cannot use these until they have an architectural meaning.
1089 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
1090 * We also reserve IL for the kernel; SS is handled dynamically.
1091 */
1092 #define SPSR_EL1_AARCH64_RES0_BITS \
1093 - (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
1094 - GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
1095 + (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
1096 + GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
1097 #define SPSR_EL1_AARCH32_RES0_BITS \
1098 - (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
1099 + (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
1100
1101 static int valid_compat_regs(struct user_pt_regs *regs)
1102 {
1103 diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
1104 index 388f8fc13080..f496fb2f7122 100644
1105 --- a/arch/arm64/kernel/ssbd.c
1106 +++ b/arch/arm64/kernel/ssbd.c
1107 @@ -3,13 +3,31 @@
1108 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
1109 */
1110
1111 +#include <linux/compat.h>
1112 #include <linux/errno.h>
1113 #include <linux/prctl.h>
1114 #include <linux/sched.h>
1115 +#include <linux/sched/task_stack.h>
1116 #include <linux/thread_info.h>
1117
1118 #include <asm/cpufeature.h>
1119
1120 +static void ssbd_ssbs_enable(struct task_struct *task)
1121 +{
1122 + u64 val = is_compat_thread(task_thread_info(task)) ?
1123 + PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
1124 +
1125 + task_pt_regs(task)->pstate |= val;
1126 +}
1127 +
1128 +static void ssbd_ssbs_disable(struct task_struct *task)
1129 +{
1130 + u64 val = is_compat_thread(task_thread_info(task)) ?
1131 + PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
1132 +
1133 + task_pt_regs(task)->pstate &= ~val;
1134 +}
1135 +
1136 /*
1137 * prctl interface for SSBD
1138 * FIXME: Drop the below ifdefery once merged in 4.18.
1139 @@ -47,12 +65,14 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
1140 return -EPERM;
1141 task_clear_spec_ssb_disable(task);
1142 clear_tsk_thread_flag(task, TIF_SSBD);
1143 + ssbd_ssbs_enable(task);
1144 break;
1145 case PR_SPEC_DISABLE:
1146 if (state == ARM64_SSBD_FORCE_DISABLE)
1147 return -EPERM;
1148 task_set_spec_ssb_disable(task);
1149 set_tsk_thread_flag(task, TIF_SSBD);
1150 + ssbd_ssbs_disable(task);
1151 break;
1152 case PR_SPEC_FORCE_DISABLE:
1153 if (state == ARM64_SSBD_FORCE_DISABLE)
1154 @@ -60,6 +80,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
1155 task_set_spec_ssb_disable(task);
1156 task_set_spec_ssb_force_disable(task);
1157 set_tsk_thread_flag(task, TIF_SSBD);
1158 + ssbd_ssbs_disable(task);
1159 break;
1160 default:
1161 return -ERANGE;
1162 diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
1163 index 963d669ae3a2..7414b76191c2 100644
1164 --- a/arch/arm64/kvm/hyp/sysreg-sr.c
1165 +++ b/arch/arm64/kvm/hyp/sysreg-sr.c
1166 @@ -293,3 +293,14 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
1167
1168 vcpu->arch.sysregs_loaded_on_cpu = false;
1169 }
1170 +
1171 +void __hyp_text __kvm_enable_ssbs(void)
1172 +{
1173 + u64 tmp;
1174 +
1175 + asm volatile(
1176 + "mrs %0, sctlr_el2\n"
1177 + "orr %0, %0, %1\n"
1178 + "msr sctlr_el2, %0"
1179 + : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
1180 +}
1181 diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
1182 index 0edba3e75747..4e2ee743088f 100644
1183 --- a/arch/mips/include/asm/cpu-features.h
1184 +++ b/arch/mips/include/asm/cpu-features.h
1185 @@ -387,6 +387,22 @@
1186 #define cpu_has_dsp3 __ase(MIPS_ASE_DSP3)
1187 #endif
1188
1189 +#ifndef cpu_has_loongson_mmi
1190 +#define cpu_has_loongson_mmi __ase(MIPS_ASE_LOONGSON_MMI)
1191 +#endif
1192 +
1193 +#ifndef cpu_has_loongson_cam
1194 +#define cpu_has_loongson_cam __ase(MIPS_ASE_LOONGSON_CAM)
1195 +#endif
1196 +
1197 +#ifndef cpu_has_loongson_ext
1198 +#define cpu_has_loongson_ext __ase(MIPS_ASE_LOONGSON_EXT)
1199 +#endif
1200 +
1201 +#ifndef cpu_has_loongson_ext2
1202 +#define cpu_has_loongson_ext2 __ase(MIPS_ASE_LOONGSON_EXT2)
1203 +#endif
1204 +
1205 #ifndef cpu_has_mipsmt
1206 #define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
1207 #endif
1208 diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
1209 index dacbdb84516a..2b4b14a56575 100644
1210 --- a/arch/mips/include/asm/cpu.h
1211 +++ b/arch/mips/include/asm/cpu.h
1212 @@ -436,5 +436,9 @@ enum cpu_type_enum {
1213 #define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
1214 #define MIPS_ASE_DSP3 0x00000200 /* Signal Processing ASE Rev 3*/
1215 #define MIPS_ASE_MIPS16E2 0x00000400 /* MIPS16e2 */
1216 +#define MIPS_ASE_LOONGSON_MMI 0x00000800 /* Loongson MultiMedia extensions Instructions */
1217 +#define MIPS_ASE_LOONGSON_CAM 0x00001000 /* Loongson CAM */
1218 +#define MIPS_ASE_LOONGSON_EXT 0x00002000 /* Loongson EXTensions */
1219 +#define MIPS_ASE_LOONGSON_EXT2 0x00004000 /* Loongson EXTensions R2 */
1220
1221 #endif /* _ASM_CPU_H */
1222 diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
1223 index 25cd8737e7fe..958b627592c3 100644
1224 --- a/arch/mips/kernel/cpu-probe.c
1225 +++ b/arch/mips/kernel/cpu-probe.c
1226 @@ -1489,6 +1489,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
1227 __cpu_name[cpu] = "ICT Loongson-3";
1228 set_elf_platform(cpu, "loongson3a");
1229 set_isa(c, MIPS_CPU_ISA_M64R1);
1230 + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
1231 + MIPS_ASE_LOONGSON_EXT);
1232 break;
1233 case PRID_REV_LOONGSON3B_R1:
1234 case PRID_REV_LOONGSON3B_R2:
1235 @@ -1496,6 +1498,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
1236 __cpu_name[cpu] = "ICT Loongson-3";
1237 set_elf_platform(cpu, "loongson3b");
1238 set_isa(c, MIPS_CPU_ISA_M64R1);
1239 + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
1240 + MIPS_ASE_LOONGSON_EXT);
1241 break;
1242 }
1243
1244 @@ -1861,6 +1865,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
1245 decode_configs(c);
1246 c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
1247 c->writecombine = _CACHE_UNCACHED_ACCELERATED;
1248 + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
1249 + MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
1250 break;
1251 default:
1252 panic("Unknown Loongson Processor ID!");
1253 diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
1254 index b2de408a259e..f8d36710cd58 100644
1255 --- a/arch/mips/kernel/proc.c
1256 +++ b/arch/mips/kernel/proc.c
1257 @@ -124,6 +124,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1258 if (cpu_has_eva) seq_printf(m, "%s", " eva");
1259 if (cpu_has_htw) seq_printf(m, "%s", " htw");
1260 if (cpu_has_xpa) seq_printf(m, "%s", " xpa");
1261 + if (cpu_has_loongson_mmi) seq_printf(m, "%s", " loongson-mmi");
1262 + if (cpu_has_loongson_cam) seq_printf(m, "%s", " loongson-cam");
1263 + if (cpu_has_loongson_ext) seq_printf(m, "%s", " loongson-ext");
1264 + if (cpu_has_loongson_ext2) seq_printf(m, "%s", " loongson-ext2");
1265 seq_printf(m, "\n");
1266
1267 if (cpu_has_mmips) {
1268 diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
1269 index 29f49a35d6ee..6a6804c2e1b0 100644
1270 --- a/arch/powerpc/include/asm/cputable.h
1271 +++ b/arch/powerpc/include/asm/cputable.h
1272 @@ -212,7 +212,7 @@ static inline void cpu_feature_keys_init(void) { }
1273 #define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000)
1274 #define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000)
1275 #define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000)
1276 -#define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x0000400000000000)
1277 +#define CPU_FTR_P9_TLBIE_STQ_BUG LONG_ASM_CONST(0x0000400000000000)
1278 #define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000)
1279
1280 #ifndef __ASSEMBLY__
1281 @@ -460,7 +460,7 @@ static inline void cpu_feature_keys_init(void) { }
1282 CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
1283 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
1284 CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
1285 - CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
1286 + CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TIDR)
1287 #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
1288 #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
1289 #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
1290 diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
1291 index f432054234a4..f3b8e04eca9c 100644
1292 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c
1293 +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
1294 @@ -694,9 +694,35 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
1295 return true;
1296 }
1297
1298 +/*
1299 + * Handle POWER9 broadcast tlbie invalidation issue using
1300 + * cpu feature flag.
1301 + */
1302 +static __init void update_tlbie_feature_flag(unsigned long pvr)
1303 +{
1304 + if (PVR_VER(pvr) == PVR_POWER9) {
1305 + /*
1306 + * Set the tlbie feature flag for anything below
1307 + * Nimbus DD 2.3 and Cumulus DD 1.3
1308 + */
1309 + if ((pvr & 0xe000) == 0) {
1310 + /* Nimbus */
1311 + if ((pvr & 0xfff) < 0x203)
1312 + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
1313 + } else if ((pvr & 0xc000) == 0) {
1314 + /* Cumulus */
1315 + if ((pvr & 0xfff) < 0x103)
1316 + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
1317 + } else {
1318 + WARN_ONCE(1, "Unknown PVR");
1319 + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
1320 + }
1321 + }
1322 +}
1323 +
1324 static __init void cpufeatures_cpu_quirks(void)
1325 {
1326 - int version = mfspr(SPRN_PVR);
1327 + unsigned long version = mfspr(SPRN_PVR);
1328
1329 /*
1330 * Not all quirks can be derived from the cpufeatures device tree.
1331 @@ -715,10 +741,10 @@ static __init void cpufeatures_cpu_quirks(void)
1332
1333 if ((version & 0xffff0000) == 0x004e0000) {
1334 cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
1335 - cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
1336 cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
1337 }
1338
1339 + update_tlbie_feature_flag(version);
1340 /*
1341 * PKEY was not in the initial base or feature node
1342 * specification, but it should become optional in the next
1343 diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
1344 index efdd16a79075..93e06778b136 100644
1345 --- a/arch/powerpc/kernel/mce.c
1346 +++ b/arch/powerpc/kernel/mce.c
1347 @@ -45,6 +45,7 @@ static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
1348 mce_ue_event_queue);
1349
1350 static void machine_check_process_queued_event(struct irq_work *work);
1351 +static void machine_check_ue_irq_work(struct irq_work *work);
1352 void machine_check_ue_event(struct machine_check_event *evt);
1353 static void machine_process_ue_event(struct work_struct *work);
1354
1355 @@ -52,6 +53,10 @@ static struct irq_work mce_event_process_work = {
1356 .func = machine_check_process_queued_event,
1357 };
1358
1359 +static struct irq_work mce_ue_event_irq_work = {
1360 + .func = machine_check_ue_irq_work,
1361 +};
1362 +
1363 DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
1364
1365 static void mce_set_error_info(struct machine_check_event *mce,
1366 @@ -208,6 +213,10 @@ void release_mce_event(void)
1367 get_mce_event(NULL, true);
1368 }
1369
1370 +static void machine_check_ue_irq_work(struct irq_work *work)
1371 +{
1372 + schedule_work(&mce_ue_event_work);
1373 +}
1374
1375 /*
1376 * Queue up the MCE event which then can be handled later.
1377 @@ -225,7 +234,7 @@ void machine_check_ue_event(struct machine_check_event *evt)
1378 memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
1379
1380 /* Queue work to process this event later. */
1381 - schedule_work(&mce_ue_event_work);
1382 + irq_work_queue(&mce_ue_event_irq_work);
1383 }
1384
1385 /*
1386 diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
1387 index 3022d67f0c48..37a110b8e7e1 100644
1388 --- a/arch/powerpc/kernel/mce_power.c
1389 +++ b/arch/powerpc/kernel/mce_power.c
1390 @@ -39,6 +39,7 @@
1391 static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
1392 {
1393 pte_t *ptep;
1394 + unsigned int shift;
1395 unsigned long flags;
1396 struct mm_struct *mm;
1397
1398 @@ -48,13 +49,18 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
1399 mm = &init_mm;
1400
1401 local_irq_save(flags);
1402 - if (mm == current->mm)
1403 - ptep = find_current_mm_pte(mm->pgd, addr, NULL, NULL);
1404 - else
1405 - ptep = find_init_mm_pte(addr, NULL);
1406 + ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
1407 local_irq_restore(flags);
1408 +
1409 if (!ptep || pte_special(*ptep))
1410 return ULONG_MAX;
1411 +
1412 + if (shift > PAGE_SHIFT) {
1413 + unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
1414 +
1415 + return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
1416 + }
1417 +
1418 return pte_pfn(*ptep);
1419 }
1420
1421 @@ -339,7 +345,7 @@ static const struct mce_derror_table mce_p9_derror_table[] = {
1422 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
1423 { 0, false, 0, 0, 0, 0 } };
1424
1425 -static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr,
1426 +static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
1427 uint64_t *phys_addr)
1428 {
1429 /*
1430 @@ -530,7 +536,8 @@ static int mce_handle_derror(struct pt_regs *regs,
1431 * kernel/exception-64s.h
1432 */
1433 if (get_paca()->in_mce < MAX_MCE_DEPTH)
1434 - mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
1435 + mce_find_instr_ea_and_phys(regs, addr,
1436 + phys_addr);
1437 }
1438 found = 1;
1439 }
1440 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
1441 index 05b32cc12e41..3ae3e8d141e3 100644
1442 --- a/arch/powerpc/kvm/book3s_hv.c
1443 +++ b/arch/powerpc/kvm/book3s_hv.c
1444 @@ -1407,7 +1407,14 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1445 *val = get_reg_val(id, vcpu->arch.pspb);
1446 break;
1447 case KVM_REG_PPC_DPDES:
1448 - *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
1449 + /*
1450 + * On POWER9, where we are emulating msgsndp etc.,
1451 + * we return 1 bit for each vcpu, which can come from
1452 + * either vcore->dpdes or doorbell_request.
1453 + * On POWER8, doorbell_request is 0.
1454 + */
1455 + *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
1456 + vcpu->arch.doorbell_request);
1457 break;
1458 case KVM_REG_PPC_VTB:
1459 *val = get_reg_val(id, vcpu->arch.vcore->vtb);
1460 @@ -2550,7 +2557,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
1461 if (!spin_trylock(&pvc->lock))
1462 continue;
1463 prepare_threads(pvc);
1464 - if (!pvc->n_runnable) {
1465 + if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
1466 list_del_init(&pvc->preempt_list);
1467 if (pvc->runner == NULL) {
1468 pvc->vcore_state = VCORE_INACTIVE;
1469 @@ -2571,15 +2578,20 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
1470 spin_unlock(&lp->lock);
1471 }
1472
1473 -static bool recheck_signals(struct core_info *cip)
1474 +static bool recheck_signals_and_mmu(struct core_info *cip)
1475 {
1476 int sub, i;
1477 struct kvm_vcpu *vcpu;
1478 + struct kvmppc_vcore *vc;
1479
1480 - for (sub = 0; sub < cip->n_subcores; ++sub)
1481 - for_each_runnable_thread(i, vcpu, cip->vc[sub])
1482 + for (sub = 0; sub < cip->n_subcores; ++sub) {
1483 + vc = cip->vc[sub];
1484 + if (!vc->kvm->arch.mmu_ready)
1485 + return true;
1486 + for_each_runnable_thread(i, vcpu, vc)
1487 if (signal_pending(vcpu->arch.run_task))
1488 return true;
1489 + }
1490 return false;
1491 }
1492
1493 @@ -2800,7 +2812,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
1494 local_irq_disable();
1495 hard_irq_disable();
1496 if (lazy_irq_pending() || need_resched() ||
1497 - recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
1498 + recheck_signals_and_mmu(&core_info)) {
1499 local_irq_enable();
1500 vc->vcore_state = VCORE_INACTIVE;
1501 /* Unlock all except the primary vcore */
1502 diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
1503 index a67cf1cdeda4..7c68d834c94a 100644
1504 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
1505 +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
1506 @@ -452,7 +452,7 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
1507 "r" (rbvalues[i]), "r" (kvm->arch.lpid));
1508 }
1509
1510 - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
1511 + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
1512 /*
1513 * Need the extra ptesync to make sure we don't
1514 * re-order the tlbie
1515 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
1516 index 68c7591f2b5f..f1878e13dd56 100644
1517 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
1518 +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
1519 @@ -2903,29 +2903,39 @@ kvm_cede_prodded:
1520 kvm_cede_exit:
1521 ld r9, HSTATE_KVM_VCPU(r13)
1522 #ifdef CONFIG_KVM_XICS
1523 - /* Abort if we still have a pending escalation */
1524 + /* are we using XIVE with single escalation? */
1525 + ld r10, VCPU_XIVE_ESC_VADDR(r9)
1526 + cmpdi r10, 0
1527 + beq 3f
1528 + li r6, XIVE_ESB_SET_PQ_00
1529 + /*
1530 + * If we still have a pending escalation, abort the cede,
1531 + * and we must set PQ to 10 rather than 00 so that we don't
1532 + * potentially end up with two entries for the escalation
1533 + * interrupt in the XIVE interrupt queue. In that case
1534 + * we also don't want to set xive_esc_on to 1 here in
1535 + * case we race with xive_esc_irq().
1536 + */
1537 lbz r5, VCPU_XIVE_ESC_ON(r9)
1538 cmpwi r5, 0
1539 - beq 1f
1540 + beq 4f
1541 li r0, 0
1542 stb r0, VCPU_CEDED(r9)
1543 -1: /* Enable XIVE escalation */
1544 - li r5, XIVE_ESB_SET_PQ_00
1545 + li r6, XIVE_ESB_SET_PQ_10
1546 + b 5f
1547 +4: li r0, 1
1548 + stb r0, VCPU_XIVE_ESC_ON(r9)
1549 + /* make sure store to xive_esc_on is seen before xive_esc_irq runs */
1550 + sync
1551 +5: /* Enable XIVE escalation */
1552 mfmsr r0
1553 andi. r0, r0, MSR_DR /* in real mode? */
1554 beq 1f
1555 - ld r10, VCPU_XIVE_ESC_VADDR(r9)
1556 - cmpdi r10, 0
1557 - beq 3f
1558 - ldx r0, r10, r5
1559 + ldx r0, r10, r6
1560 b 2f
1561 1: ld r10, VCPU_XIVE_ESC_RADDR(r9)
1562 - cmpdi r10, 0
1563 - beq 3f
1564 - ldcix r0, r10, r5
1565 + ldcix r0, r10, r6
1566 2: sync
1567 - li r0, 1
1568 - stb r0, VCPU_XIVE_ESC_ON(r9)
1569 #endif /* CONFIG_KVM_XICS */
1570 3: b guest_exit_cont
1571
1572 diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
1573 index aae34f218ab4..031f07f048af 100644
1574 --- a/arch/powerpc/kvm/book3s_xive.c
1575 +++ b/arch/powerpc/kvm/book3s_xive.c
1576 @@ -1037,20 +1037,22 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1577 /* Mask the VP IPI */
1578 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1579
1580 - /* Disable the VP */
1581 - xive_native_disable_vp(xc->vp_id);
1582 -
1583 - /* Free the queues & associated interrupts */
1584 + /* Free escalations */
1585 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1586 - struct xive_q *q = &xc->queues[i];
1587 -
1588 - /* Free the escalation irq */
1589 if (xc->esc_virq[i]) {
1590 free_irq(xc->esc_virq[i], vcpu);
1591 irq_dispose_mapping(xc->esc_virq[i]);
1592 kfree(xc->esc_virq_names[i]);
1593 }
1594 - /* Free the queue */
1595 + }
1596 +
1597 + /* Disable the VP */
1598 + xive_native_disable_vp(xc->vp_id);
1599 +
1600 + /* Free the queues */
1601 + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1602 + struct xive_q *q = &xc->queues[i];
1603 +
1604 xive_native_disable_queue(xc->vp_id, q, i);
1605 if (q->qpage) {
1606 free_pages((unsigned long)q->qpage,
1607 diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
1608 index aaa28fd918fe..0c13561d8b80 100644
1609 --- a/arch/powerpc/mm/hash_native_64.c
1610 +++ b/arch/powerpc/mm/hash_native_64.c
1611 @@ -203,7 +203,7 @@ static inline unsigned long ___tlbie(unsigned long vpn, int psize,
1612
1613 static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
1614 {
1615 - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
1616 + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
1617 /* Need the extra ptesync to ensure we don't reorder tlbie*/
1618 asm volatile("ptesync": : :"memory");
1619 ___tlbie(vpn, psize, apsize, ssize);
1620 diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
1621 index 29fd8940867e..b1007e9a31ba 100644
1622 --- a/arch/powerpc/mm/hash_utils_64.c
1623 +++ b/arch/powerpc/mm/hash_utils_64.c
1624 @@ -37,6 +37,7 @@
1625 #include <linux/context_tracking.h>
1626 #include <linux/libfdt.h>
1627 #include <linux/pkeys.h>
1628 +#include <linux/cpu.h>
1629
1630 #include <asm/debugfs.h>
1631 #include <asm/processor.h>
1632 @@ -1891,10 +1892,16 @@ static int hpt_order_get(void *data, u64 *val)
1633
1634 static int hpt_order_set(void *data, u64 val)
1635 {
1636 + int ret;
1637 +
1638 if (!mmu_hash_ops.resize_hpt)
1639 return -ENODEV;
1640
1641 - return mmu_hash_ops.resize_hpt(val);
1642 + cpus_read_lock();
1643 + ret = mmu_hash_ops.resize_hpt(val);
1644 + cpus_read_unlock();
1645 +
1646 + return ret;
1647 }
1648
1649 DEFINE_SIMPLE_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n");
1650 diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
1651 index fef3e1eb3a19..0cddae4263f9 100644
1652 --- a/arch/powerpc/mm/tlb-radix.c
1653 +++ b/arch/powerpc/mm/tlb-radix.c
1654 @@ -220,7 +220,7 @@ static inline void fixup_tlbie(void)
1655 unsigned long pid = 0;
1656 unsigned long va = ((1UL << 52) - 1);
1657
1658 - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
1659 + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
1660 asm volatile("ptesync": : :"memory");
1661 __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
1662 }
1663 @@ -230,7 +230,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
1664 {
1665 unsigned long va = ((1UL << 52) - 1);
1666
1667 - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
1668 + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
1669 asm volatile("ptesync": : :"memory");
1670 __tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
1671 }
1672 diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
1673 index 38fe4087484a..edf9032e2e5c 100644
1674 --- a/arch/powerpc/platforms/powernv/opal.c
1675 +++ b/arch/powerpc/platforms/powernv/opal.c
1676 @@ -680,7 +680,10 @@ static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
1677 bin_attr->size);
1678 }
1679
1680 -static BIN_ATTR_RO(symbol_map, 0);
1681 +static struct bin_attribute symbol_map_attr = {
1682 + .attr = {.name = "symbol_map", .mode = 0400},
1683 + .read = symbol_map_read
1684 +};
1685
1686 static void opal_export_symmap(void)
1687 {
1688 @@ -697,10 +700,10 @@ static void opal_export_symmap(void)
1689 return;
1690
1691 /* Setup attributes */
1692 - bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
1693 - bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
1694 + symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
1695 + symbol_map_attr.size = be64_to_cpu(syms[1]);
1696
1697 - rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
1698 + rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
1699 if (rc)
1700 pr_warn("Error %d creating OPAL symbols file\n", rc);
1701 }
1702 diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1703 index 29e66d6e5763..15a567128c0f 100644
1704 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1705 +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1706 @@ -49,6 +49,9 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
1707 return addr;
1708 }
1709
1710 +static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
1711 + unsigned long size, unsigned int levels);
1712 +
1713 static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
1714 {
1715 __be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
1716 @@ -58,9 +61,9 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
1717
1718 while (level) {
1719 int n = (idx & mask) >> (level * shift);
1720 - unsigned long tce;
1721 + unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n]));
1722
1723 - if (tmp[n] == 0) {
1724 + if (!tce) {
1725 __be64 *tmp2;
1726
1727 if (!alloc)
1728 @@ -71,10 +74,15 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
1729 if (!tmp2)
1730 return NULL;
1731
1732 - tmp[n] = cpu_to_be64(__pa(tmp2) |
1733 - TCE_PCI_READ | TCE_PCI_WRITE);
1734 + tce = __pa(tmp2) | TCE_PCI_READ | TCE_PCI_WRITE;
1735 + oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0,
1736 + cpu_to_be64(tce)));
1737 + if (oldtce) {
1738 + pnv_pci_ioda2_table_do_free_pages(tmp2,
1739 + ilog2(tbl->it_level_size) + 3, 1);
1740 + tce = oldtce;
1741 + }
1742 }
1743 - tce = be64_to_cpu(tmp[n]);
1744
1745 tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
1746 idx &= ~mask;
1747 diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
1748 index 9e52b686a8fa..ea602f7f97ce 100644
1749 --- a/arch/powerpc/platforms/pseries/lpar.c
1750 +++ b/arch/powerpc/platforms/pseries/lpar.c
1751 @@ -647,7 +647,10 @@ static int pseries_lpar_resize_hpt_commit(void *data)
1752 return 0;
1753 }
1754
1755 -/* Must be called in user context */
1756 +/*
1757 + * Must be called in process context. The caller must hold the
1758 + * cpus_lock.
1759 + */
1760 static int pseries_lpar_resize_hpt(unsigned long shift)
1761 {
1762 struct hpt_resize_state state = {
1763 @@ -699,7 +702,8 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
1764
1765 t1 = ktime_get();
1766
1767 - rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);
1768 + rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
1769 + &state, NULL);
1770
1771 t2 = ktime_get();
1772
1773 diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
1774 index fa2c08e3c05e..a03821b2656a 100644
1775 --- a/arch/riscv/kernel/entry.S
1776 +++ b/arch/riscv/kernel/entry.S
1777 @@ -171,9 +171,13 @@ ENTRY(handle_exception)
1778 move a1, s4 /* scause */
1779 tail do_IRQ
1780 1:
1781 - /* Exceptions run with interrupts enabled */
1782 + /* Exceptions run with interrupts enabled or disabled
1783 + depending on the state of sstatus.SR_SPIE */
1784 + andi t0, s1, SR_SPIE
1785 + beqz t0, 1f
1786 csrs sstatus, SR_SIE
1787
1788 +1:
1789 /* Handle syscalls */
1790 li t0, EXC_SYSCALL
1791 beq s4, t0, handle_syscall
1792 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
1793 index 6e758bb6cd29..99ef537e548a 100644
1794 --- a/arch/s390/kernel/process.c
1795 +++ b/arch/s390/kernel/process.c
1796 @@ -183,20 +183,30 @@ unsigned long get_wchan(struct task_struct *p)
1797
1798 if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
1799 return 0;
1800 +
1801 + if (!try_get_task_stack(p))
1802 + return 0;
1803 +
1804 low = task_stack_page(p);
1805 high = (struct stack_frame *) task_pt_regs(p);
1806 sf = (struct stack_frame *) p->thread.ksp;
1807 - if (sf <= low || sf > high)
1808 - return 0;
1809 + if (sf <= low || sf > high) {
1810 + return_address = 0;
1811 + goto out;
1812 + }
1813 for (count = 0; count < 16; count++) {
1814 sf = (struct stack_frame *) sf->back_chain;
1815 - if (sf <= low || sf > high)
1816 - return 0;
1817 + if (sf <= low || sf > high) {
1818 + return_address = 0;
1819 + goto out;
1820 + }
1821 return_address = sf->gprs[8];
1822 if (!in_sched_functions(return_address))
1823 - return return_address;
1824 + goto out;
1825 }
1826 - return 0;
1827 +out:
1828 + put_task_stack(p);
1829 + return return_address;
1830 }
1831
1832 unsigned long arch_align_stack(unsigned long sp)
1833 diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
1834 index e8184a15578a..7b96888974db 100644
1835 --- a/arch/s390/kernel/topology.c
1836 +++ b/arch/s390/kernel/topology.c
1837 @@ -311,7 +311,8 @@ int arch_update_cpu_topology(void)
1838 on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
1839 for_each_online_cpu(cpu) {
1840 dev = get_cpu_device(cpu);
1841 - kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1842 + if (dev)
1843 + kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1844 }
1845 return rc;
1846 }
1847 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1848 index e0551c948c59..fac1d4eaa426 100644
1849 --- a/arch/s390/kvm/kvm-s390.c
1850 +++ b/arch/s390/kvm/kvm-s390.c
1851 @@ -3890,7 +3890,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
1852 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
1853 | KVM_S390_MEMOP_F_CHECK_ONLY;
1854
1855 - if (mop->flags & ~supported_flags)
1856 + if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
1857 return -EINVAL;
1858
1859 if (mop->size > MEM_OP_MAX_SIZE)
1860 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1861 index e83f4f6bfdac..6f7b3acdab26 100644
1862 --- a/arch/x86/kvm/vmx.c
1863 +++ b/arch/x86/kvm/vmx.c
1864 @@ -8801,7 +8801,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
1865 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
1866 if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
1867 (is_long_mode(vcpu) ? 8 : 4),
1868 - NULL))
1869 + &e))
1870 kvm_inject_page_fault(vcpu, &e);
1871 }
1872
1873 @@ -12574,7 +12574,7 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
1874
1875 /* VM-entry exception error code */
1876 if (has_error_code &&
1877 - vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
1878 + vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))
1879 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
1880
1881 /* VM-entry interruption-info field: reserved bits */
1882 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1883 index 05cb5855255e..6ae8a013af31 100644
1884 --- a/arch/x86/kvm/x86.c
1885 +++ b/arch/x86/kvm/x86.c
1886 @@ -791,34 +791,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
1887 }
1888 EXPORT_SYMBOL_GPL(kvm_set_xcr);
1889
1890 -int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1891 +static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1892 {
1893 - unsigned long old_cr4 = kvm_read_cr4(vcpu);
1894 - unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
1895 - X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
1896 -
1897 if (cr4 & CR4_RESERVED_BITS)
1898 - return 1;
1899 + return -EINVAL;
1900
1901 if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
1902 - return 1;
1903 + return -EINVAL;
1904
1905 if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
1906 - return 1;
1907 + return -EINVAL;
1908
1909 if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
1910 - return 1;
1911 + return -EINVAL;
1912
1913 if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
1914 - return 1;
1915 + return -EINVAL;
1916
1917 if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
1918 - return 1;
1919 + return -EINVAL;
1920
1921 if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
1922 - return 1;
1923 + return -EINVAL;
1924
1925 if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
1926 + return -EINVAL;
1927 +
1928 + return 0;
1929 +}
1930 +
1931 +int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1932 +{
1933 + unsigned long old_cr4 = kvm_read_cr4(vcpu);
1934 + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
1935 + X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
1936 +
1937 + if (kvm_valid_cr4(vcpu, cr4))
1938 return 1;
1939
1940 if (is_long_mode(vcpu)) {
1941 @@ -8237,10 +8245,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
1942
1943 static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1944 {
1945 - if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
1946 - (sregs->cr4 & X86_CR4_OSXSAVE))
1947 - return -EINVAL;
1948 -
1949 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
1950 /*
1951 * When EFER.LME and CR0.PG are set, the processor is in
1952 @@ -8259,7 +8263,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1953 return -EINVAL;
1954 }
1955
1956 - return 0;
1957 + return kvm_valid_cr4(vcpu, sregs->cr4);
1958 }
1959
1960 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1961 diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
1962 index 10fb42da0007..b81b5172cf99 100644
1963 --- a/arch/x86/purgatory/Makefile
1964 +++ b/arch/x86/purgatory/Makefile
1965 @@ -23,6 +23,7 @@ KCOV_INSTRUMENT := n
1966
1967 PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
1968 PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
1969 +PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
1970
1971 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
1972 # in turn leaves some undefined symbols like __fentry__ in purgatory and not
1973 diff --git a/crypto/skcipher.c b/crypto/skcipher.c
1974 index b664cf867f5f..a8750b4ebf26 100644
1975 --- a/crypto/skcipher.c
1976 +++ b/crypto/skcipher.c
1977 @@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
1978 return max(start, end_page);
1979 }
1980
1981 -static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
1982 +static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
1983 {
1984 u8 *addr;
1985
1986 @@ -103,19 +103,21 @@ static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
1987 addr = skcipher_get_spot(addr, bsize);
1988 scatterwalk_copychunks(addr, &walk->out, bsize,
1989 (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
1990 + return 0;
1991 }
1992
1993 int skcipher_walk_done(struct skcipher_walk *walk, int err)
1994 {
1995 - unsigned int n; /* bytes processed */
1996 - bool more;
1997 + unsigned int n = walk->nbytes;
1998 + unsigned int nbytes = 0;
1999
2000 - if (unlikely(err < 0))
2001 + if (!n)
2002 goto finish;
2003
2004 - n = walk->nbytes - err;
2005 - walk->total -= n;
2006 - more = (walk->total != 0);
2007 + if (likely(err >= 0)) {
2008 + n -= err;
2009 + nbytes = walk->total - n;
2010 + }
2011
2012 if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
2013 SKCIPHER_WALK_SLOW |
2014 @@ -131,7 +133,7 @@ unmap_src:
2015 memcpy(walk->dst.virt.addr, walk->page, n);
2016 skcipher_unmap_dst(walk);
2017 } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
2018 - if (err) {
2019 + if (err > 0) {
2020 /*
2021 * Didn't process all bytes. Either the algorithm is
2022 * broken, or this was the last step and it turned out
2023 @@ -139,27 +141,29 @@ unmap_src:
2024 * the algorithm requires it.
2025 */
2026 err = -EINVAL;
2027 - goto finish;
2028 - }
2029 - skcipher_done_slow(walk, n);
2030 - goto already_advanced;
2031 + nbytes = 0;
2032 + } else
2033 + n = skcipher_done_slow(walk, n);
2034 }
2035
2036 + if (err > 0)
2037 + err = 0;
2038 +
2039 + walk->total = nbytes;
2040 + walk->nbytes = 0;
2041 +
2042 scatterwalk_advance(&walk->in, n);
2043 scatterwalk_advance(&walk->out, n);
2044 -already_advanced:
2045 - scatterwalk_done(&walk->in, 0, more);
2046 - scatterwalk_done(&walk->out, 1, more);
2047 + scatterwalk_done(&walk->in, 0, nbytes);
2048 + scatterwalk_done(&walk->out, 1, nbytes);
2049
2050 - if (more) {
2051 + if (nbytes) {
2052 crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
2053 CRYPTO_TFM_REQ_MAY_SLEEP : 0);
2054 return skcipher_walk_next(walk);
2055 }
2056 - err = 0;
2057 -finish:
2058 - walk->nbytes = 0;
2059
2060 +finish:
2061 /* Short-circuit for the common/fast path. */
2062 if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
2063 goto out;
2064 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
2065 index b1c7009de1f4..bc2fa4e85f0c 100644
2066 --- a/drivers/block/nbd.c
2067 +++ b/drivers/block/nbd.c
2068 @@ -106,6 +106,7 @@ struct nbd_device {
2069 struct nbd_config *config;
2070 struct mutex config_lock;
2071 struct gendisk *disk;
2072 + struct workqueue_struct *recv_workq;
2073
2074 struct list_head list;
2075 struct task_struct *task_recv;
2076 @@ -132,9 +133,10 @@ static struct dentry *nbd_dbg_dir;
2077
2078 #define NBD_MAGIC 0x68797548
2079
2080 +#define NBD_DEF_BLKSIZE 1024
2081 +
2082 static unsigned int nbds_max = 16;
2083 static int max_part = 16;
2084 -static struct workqueue_struct *recv_workqueue;
2085 static int part_shift;
2086
2087 static int nbd_dev_dbg_init(struct nbd_device *nbd);
2088 @@ -1025,7 +1027,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
2089 /* We take the tx_mutex in an error path in the recv_work, so we
2090 * need to queue_work outside of the tx_mutex.
2091 */
2092 - queue_work(recv_workqueue, &args->work);
2093 + queue_work(nbd->recv_workq, &args->work);
2094
2095 atomic_inc(&config->live_connections);
2096 wake_up(&config->conn_wait);
2097 @@ -1126,6 +1128,10 @@ static void nbd_config_put(struct nbd_device *nbd)
2098 kfree(nbd->config);
2099 nbd->config = NULL;
2100
2101 + if (nbd->recv_workq)
2102 + destroy_workqueue(nbd->recv_workq);
2103 + nbd->recv_workq = NULL;
2104 +
2105 nbd->tag_set.timeout = 0;
2106 nbd->disk->queue->limits.discard_granularity = 0;
2107 nbd->disk->queue->limits.discard_alignment = 0;
2108 @@ -1154,6 +1160,14 @@ static int nbd_start_device(struct nbd_device *nbd)
2109 return -EINVAL;
2110 }
2111
2112 + nbd->recv_workq = alloc_workqueue("knbd%d-recv",
2113 + WQ_MEM_RECLAIM | WQ_HIGHPRI |
2114 + WQ_UNBOUND, 0, nbd->index);
2115 + if (!nbd->recv_workq) {
2116 + dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
2117 + return -ENOMEM;
2118 + }
2119 +
2120 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
2121 nbd->task_recv = current;
2122
2123 @@ -1184,7 +1198,7 @@ static int nbd_start_device(struct nbd_device *nbd)
2124 INIT_WORK(&args->work, recv_work);
2125 args->nbd = nbd;
2126 args->index = i;
2127 - queue_work(recv_workqueue, &args->work);
2128 + queue_work(nbd->recv_workq, &args->work);
2129 }
2130 nbd_size_update(nbd);
2131 return error;
2132 @@ -1204,8 +1218,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
2133 mutex_unlock(&nbd->config_lock);
2134 ret = wait_event_interruptible(config->recv_wq,
2135 atomic_read(&config->recv_threads) == 0);
2136 - if (ret)
2137 + if (ret) {
2138 sock_shutdown(nbd);
2139 + flush_workqueue(nbd->recv_workq);
2140 + }
2141 mutex_lock(&nbd->config_lock);
2142 nbd_bdev_reset(bdev);
2143 /* user requested, ignore socket errors */
2144 @@ -1227,6 +1243,14 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
2145 nbd_config_put(nbd);
2146 }
2147
2148 +static bool nbd_is_valid_blksize(unsigned long blksize)
2149 +{
2150 + if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
2151 + blksize > PAGE_SIZE)
2152 + return false;
2153 + return true;
2154 +}
2155 +
2156 /* Must be called with config_lock held */
2157 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
2158 unsigned int cmd, unsigned long arg)
2159 @@ -1242,8 +1266,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
2160 case NBD_SET_SOCK:
2161 return nbd_add_socket(nbd, arg, false);
2162 case NBD_SET_BLKSIZE:
2163 - if (!arg || !is_power_of_2(arg) || arg < 512 ||
2164 - arg > PAGE_SIZE)
2165 + if (!arg)
2166 + arg = NBD_DEF_BLKSIZE;
2167 + if (!nbd_is_valid_blksize(arg))
2168 return -EINVAL;
2169 nbd_size_set(nbd, arg,
2170 div_s64(config->bytesize, arg));
2171 @@ -1323,7 +1348,7 @@ static struct nbd_config *nbd_alloc_config(void)
2172 atomic_set(&config->recv_threads, 0);
2173 init_waitqueue_head(&config->recv_wq);
2174 init_waitqueue_head(&config->conn_wait);
2175 - config->blksize = 1024;
2176 + config->blksize = NBD_DEF_BLKSIZE;
2177 atomic_set(&config->live_connections, 0);
2178 try_module_get(THIS_MODULE);
2179 return config;
2180 @@ -1759,6 +1784,12 @@ again:
2181 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
2182 u64 bsize =
2183 nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
2184 + if (!bsize)
2185 + bsize = NBD_DEF_BLKSIZE;
2186 + if (!nbd_is_valid_blksize(bsize)) {
2187 + ret = -EINVAL;
2188 + goto out;
2189 + }
2190 nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
2191 }
2192 if (info->attrs[NBD_ATTR_TIMEOUT]) {
2193 @@ -1835,6 +1866,12 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
2194 nbd_disconnect(nbd);
2195 nbd_clear_sock(nbd);
2196 mutex_unlock(&nbd->config_lock);
2197 + /*
2198 + * Make sure recv thread has finished, so it does not drop the last
2199 + * config ref and try to destroy the workqueue from inside the work
2200 + * queue.
2201 + */
2202 + flush_workqueue(nbd->recv_workq);
2203 if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
2204 &nbd->config->runtime_flags))
2205 nbd_config_put(nbd);
2206 @@ -2215,20 +2252,12 @@ static int __init nbd_init(void)
2207
2208 if (nbds_max > 1UL << (MINORBITS - part_shift))
2209 return -EINVAL;
2210 - recv_workqueue = alloc_workqueue("knbd-recv",
2211 - WQ_MEM_RECLAIM | WQ_HIGHPRI |
2212 - WQ_UNBOUND, 0);
2213 - if (!recv_workqueue)
2214 - return -ENOMEM;
2215
2216 - if (register_blkdev(NBD_MAJOR, "nbd")) {
2217 - destroy_workqueue(recv_workqueue);
2218 + if (register_blkdev(NBD_MAJOR, "nbd"))
2219 return -EIO;
2220 - }
2221
2222 if (genl_register_family(&nbd_genl_family)) {
2223 unregister_blkdev(NBD_MAJOR, "nbd");
2224 - destroy_workqueue(recv_workqueue);
2225 return -EINVAL;
2226 }
2227 nbd_dbg_init();
2228 @@ -2270,7 +2299,6 @@ static void __exit nbd_cleanup(void)
2229
2230 idr_destroy(&nbd_index_idr);
2231 genl_unregister_family(&nbd_genl_family);
2232 - destroy_workqueue(recv_workqueue);
2233 unregister_blkdev(NBD_MAJOR, "nbd");
2234 }
2235
2236 diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
2237 index a408edd84f34..edacf9b39b63 100644
2238 --- a/drivers/crypto/caam/caamalg_desc.c
2239 +++ b/drivers/crypto/caam/caamalg_desc.c
2240 @@ -509,6 +509,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
2241 const bool is_qi, int era)
2242 {
2243 u32 geniv, moveiv;
2244 + u32 *wait_cmd;
2245
2246 /* Note: Context registers are saved. */
2247 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2248 @@ -604,6 +605,14 @@ copy_iv:
2249
2250 /* Will read cryptlen */
2251 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
2252 +
2253 + /*
2254 + * Wait for IV transfer (ofifo -> class2) to finish before starting
2255 + * ciphertext transfer (ofifo -> external memory).
2256 + */
2257 + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
2258 + set_jump_tgt_here(desc, wait_cmd);
2259 +
2260 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
2261 FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
2262 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
2263 diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
2264 index a917af5776ce..05516b0a4240 100644
2265 --- a/drivers/crypto/caam/caamalg_desc.h
2266 +++ b/drivers/crypto/caam/caamalg_desc.h
2267 @@ -12,7 +12,7 @@
2268 #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
2269 #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
2270 #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
2271 -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
2272 +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ)
2273 #define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
2274 #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
2275 #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
2276 diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
2277 index 6183f9128a8a..ea901bc5733c 100644
2278 --- a/drivers/crypto/cavium/zip/zip_main.c
2279 +++ b/drivers/crypto/cavium/zip/zip_main.c
2280 @@ -593,6 +593,7 @@ static const struct file_operations zip_stats_fops = {
2281 .owner = THIS_MODULE,
2282 .open = zip_stats_open,
2283 .read = seq_read,
2284 + .release = single_release,
2285 };
2286
2287 static int zip_clear_open(struct inode *inode, struct file *file)
2288 @@ -604,6 +605,7 @@ static const struct file_operations zip_clear_fops = {
2289 .owner = THIS_MODULE,
2290 .open = zip_clear_open,
2291 .read = seq_read,
2292 + .release = single_release,
2293 };
2294
2295 static int zip_regs_open(struct inode *inode, struct file *file)
2296 @@ -615,6 +617,7 @@ static const struct file_operations zip_regs_fops = {
2297 .owner = THIS_MODULE,
2298 .open = zip_regs_open,
2299 .read = seq_read,
2300 + .release = single_release,
2301 };
2302
2303 /* Root directory for thunderx_zip debugfs entry */
2304 diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
2305 index 0669033f5be5..aa6b45bc13b9 100644
2306 --- a/drivers/crypto/ccree/cc_aead.c
2307 +++ b/drivers/crypto/ccree/cc_aead.c
2308 @@ -227,7 +227,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
2309 /* In case of payload authentication failure, MUST NOT
2310 * revealed the decrypted message --> zero its memory.
2311 */
2312 - cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
2313 + cc_zero_sgl(areq->dst, areq->cryptlen);
2314 err = -EBADMSG;
2315 }
2316 } else { /*ENCRYPT*/
2317 diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
2318 index 09f708f6418e..bac278d274b0 100644
2319 --- a/drivers/crypto/ccree/cc_fips.c
2320 +++ b/drivers/crypto/ccree/cc_fips.c
2321 @@ -21,7 +21,13 @@ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
2322 u32 reg;
2323
2324 reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
2325 - return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
2326 + /* Did the TEE report status? */
2327 + if (reg & CC_FIPS_SYNC_TEE_STATUS)
2328 + /* Yes. Is it OK? */
2329 + return (reg & CC_FIPS_SYNC_MODULE_OK);
2330 +
2331 + /* No. It's either not in use or will be reported later */
2332 + return true;
2333 }
2334
2335 /*
2336 diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
2337 index 5c4c0a253129..d78f8d5c89c3 100644
2338 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h
2339 +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
2340 @@ -95,7 +95,7 @@ struct service_hndl {
2341
2342 static inline int get_current_node(void)
2343 {
2344 - return topology_physical_package_id(smp_processor_id());
2345 + return topology_physical_package_id(raw_smp_processor_id());
2346 }
2347
2348 int adf_service_register(struct service_hndl *service);
2349 diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
2350 index c59d2eee5d30..06768074d2d8 100644
2351 --- a/drivers/devfreq/tegra-devfreq.c
2352 +++ b/drivers/devfreq/tegra-devfreq.c
2353 @@ -486,11 +486,11 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
2354 {
2355 struct tegra_devfreq *tegra = dev_get_drvdata(dev);
2356 struct dev_pm_opp *opp;
2357 - unsigned long rate = *freq * KHZ;
2358 + unsigned long rate;
2359
2360 - opp = devfreq_recommended_opp(dev, &rate, flags);
2361 + opp = devfreq_recommended_opp(dev, freq, flags);
2362 if (IS_ERR(opp)) {
2363 - dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
2364 + dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
2365 return PTR_ERR(opp);
2366 }
2367 rate = dev_pm_opp_get_freq(opp);
2368 @@ -499,8 +499,6 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
2369 clk_set_min_rate(tegra->emc_clock, rate);
2370 clk_set_rate(tegra->emc_clock, 0);
2371
2372 - *freq = rate;
2373 -
2374 return 0;
2375 }
2376
2377 @@ -510,7 +508,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
2378 struct tegra_devfreq *tegra = dev_get_drvdata(dev);
2379 struct tegra_devfreq_device *actmon_dev;
2380
2381 - stat->current_frequency = tegra->cur_freq;
2382 + stat->current_frequency = tegra->cur_freq * KHZ;
2383
2384 /* To be used by the tegra governor */
2385 stat->private_data = tegra;
2386 @@ -565,7 +563,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
2387 target_freq = max(target_freq, dev->target_freq);
2388 }
2389
2390 - *freq = target_freq;
2391 + *freq = target_freq * KHZ;
2392
2393 return 0;
2394 }
2395 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
2396 index 51b5e977ca88..f4e9d1b10e3e 100644
2397 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
2398 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
2399 @@ -139,7 +139,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
2400 /* ring tests don't use a job */
2401 if (job) {
2402 vm = job->vm;
2403 - fence_ctx = job->base.s_fence->scheduled.context;
2404 + fence_ctx = job->base.s_fence ?
2405 + job->base.s_fence->scheduled.context : 0;
2406 } else {
2407 vm = NULL;
2408 fence_ctx = 0;
2409 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
2410 index c0396e83f352..fc93b103f777 100644
2411 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
2412 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
2413 @@ -562,6 +562,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
2414 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
2415 sh_num = 0xffffffff;
2416
2417 + if (info->read_mmr_reg.count > 128)
2418 + return -EINVAL;
2419 +
2420 regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
2421 if (!regs)
2422 return -ENOMEM;
2423 diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
2424 index 663a7c9ca3d3..d0e216d85a22 100644
2425 --- a/drivers/gpu/drm/i915/gvt/scheduler.c
2426 +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
2427 @@ -1276,9 +1276,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
2428 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
2429 ((a)->lrca == (b)->lrca))
2430
2431 -#define get_last_workload(q) \
2432 - (list_empty(q) ? NULL : container_of(q->prev, \
2433 - struct intel_vgpu_workload, list))
2434 /**
2435 * intel_vgpu_create_workload - create a vGPU workload
2436 * @vgpu: a vGPU
2437 @@ -1297,7 +1294,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
2438 {
2439 struct intel_vgpu_submission *s = &vgpu->submission;
2440 struct list_head *q = workload_q_head(vgpu, ring_id);
2441 - struct intel_vgpu_workload *last_workload = get_last_workload(q);
2442 + struct intel_vgpu_workload *last_workload = NULL;
2443 struct intel_vgpu_workload *workload = NULL;
2444 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
2445 u64 ring_context_gpa;
2446 @@ -1320,15 +1317,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
2447 head &= RB_HEAD_OFF_MASK;
2448 tail &= RB_TAIL_OFF_MASK;
2449
2450 - if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
2451 - gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
2452 - gvt_dbg_el("ctx head %x real head %lx\n", head,
2453 - last_workload->rb_tail);
2454 - /*
2455 - * cannot use guest context head pointer here,
2456 - * as it might not be updated at this time
2457 - */
2458 - head = last_workload->rb_tail;
2459 + list_for_each_entry_reverse(last_workload, q, list) {
2460 +
2461 + if (same_context(&last_workload->ctx_desc, desc)) {
2462 + gvt_dbg_el("ring id %d cur workload == last\n",
2463 + ring_id);
2464 + gvt_dbg_el("ctx head %x real head %lx\n", head,
2465 + last_workload->rb_tail);
2466 + /*
2467 + * cannot use guest context head pointer here,
2468 + * as it might not be updated at this time
2469 + */
2470 + head = last_workload->rb_tail;
2471 + break;
2472 + }
2473 }
2474
2475 gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
2476 diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
2477 index 96fb5f635314..cc4ea5502d6c 100644
2478 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c
2479 +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
2480 @@ -429,15 +429,15 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
2481 }
2482
2483 msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
2484 - if (!msm_host->byte_clk_src) {
2485 - ret = -ENODEV;
2486 + if (IS_ERR(msm_host->byte_clk_src)) {
2487 + ret = PTR_ERR(msm_host->byte_clk_src);
2488 pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
2489 goto exit;
2490 }
2491
2492 msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
2493 - if (!msm_host->pixel_clk_src) {
2494 - ret = -ENODEV;
2495 + if (IS_ERR(msm_host->pixel_clk_src)) {
2496 + ret = PTR_ERR(msm_host->pixel_clk_src);
2497 pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
2498 goto exit;
2499 }
2500 diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2501 index 5e01bfb69d7a..10107e551fac 100644
2502 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
2503 +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2504 @@ -1517,7 +1517,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
2505 nv_encoder->aux = aux;
2506 }
2507
2508 - if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
2509 + if (nv_connector->type != DCB_CONNECTOR_eDP &&
2510 + (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
2511 ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
2512 ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
2513 nv_connector->base.base.id,
2514 diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
2515 index cb80ddaa19d2..7e9e2f064454 100644
2516 --- a/drivers/gpu/drm/omapdrm/dss/dss.c
2517 +++ b/drivers/gpu/drm/omapdrm/dss/dss.c
2518 @@ -1110,7 +1110,7 @@ static const struct dss_features omap34xx_dss_feats = {
2519
2520 static const struct dss_features omap3630_dss_feats = {
2521 .model = DSS_MODEL_OMAP3,
2522 - .fck_div_max = 32,
2523 + .fck_div_max = 31,
2524 .fck_freq_max = 173000000,
2525 .dss_fck_multiplier = 1,
2526 .parent_clk_name = "dpll4_ck",
2527 diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
2528 index 25b5407c74b5..d83310751a8e 100644
2529 --- a/drivers/gpu/drm/radeon/radeon_drv.c
2530 +++ b/drivers/gpu/drm/radeon/radeon_drv.c
2531 @@ -340,8 +340,39 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
2532 static int radeon_pci_probe(struct pci_dev *pdev,
2533 const struct pci_device_id *ent)
2534 {
2535 + unsigned long flags = 0;
2536 int ret;
2537
2538 + if (!ent)
2539 + return -ENODEV; /* Avoid NULL-ptr deref in drm_get_pci_dev */
2540 +
2541 + flags = ent->driver_data;
2542 +
2543 + if (!radeon_si_support) {
2544 + switch (flags & RADEON_FAMILY_MASK) {
2545 + case CHIP_TAHITI:
2546 + case CHIP_PITCAIRN:
2547 + case CHIP_VERDE:
2548 + case CHIP_OLAND:
2549 + case CHIP_HAINAN:
2550 + dev_info(&pdev->dev,
2551 + "SI support disabled by module param\n");
2552 + return -ENODEV;
2553 + }
2554 + }
2555 + if (!radeon_cik_support) {
2556 + switch (flags & RADEON_FAMILY_MASK) {
2557 + case CHIP_KAVERI:
2558 + case CHIP_BONAIRE:
2559 + case CHIP_HAWAII:
2560 + case CHIP_KABINI:
2561 + case CHIP_MULLINS:
2562 + dev_info(&pdev->dev,
2563 + "CIK support disabled by module param\n");
2564 + return -ENODEV;
2565 + }
2566 + }
2567 +
2568 if (vga_switcheroo_client_probe_defer(pdev))
2569 return -EPROBE_DEFER;
2570
2571 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
2572 index 6a8fb6fd183c..3ff835767ac5 100644
2573 --- a/drivers/gpu/drm/radeon/radeon_kms.c
2574 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
2575 @@ -95,31 +95,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
2576 struct radeon_device *rdev;
2577 int r, acpi_status;
2578
2579 - if (!radeon_si_support) {
2580 - switch (flags & RADEON_FAMILY_MASK) {
2581 - case CHIP_TAHITI:
2582 - case CHIP_PITCAIRN:
2583 - case CHIP_VERDE:
2584 - case CHIP_OLAND:
2585 - case CHIP_HAINAN:
2586 - dev_info(dev->dev,
2587 - "SI support disabled by module param\n");
2588 - return -ENODEV;
2589 - }
2590 - }
2591 - if (!radeon_cik_support) {
2592 - switch (flags & RADEON_FAMILY_MASK) {
2593 - case CHIP_KAVERI:
2594 - case CHIP_BONAIRE:
2595 - case CHIP_HAWAII:
2596 - case CHIP_KABINI:
2597 - case CHIP_MULLINS:
2598 - dev_info(dev->dev,
2599 - "CIK support disabled by module param\n");
2600 - return -ENODEV;
2601 - }
2602 - }
2603 -
2604 rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
2605 if (rdev == NULL) {
2606 return -ENOMEM;
2607 diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
2608 index 2bce7cf0b0af..e45b5ec2f451 100644
2609 --- a/drivers/hwtracing/coresight/coresight-etm4x.c
2610 +++ b/drivers/hwtracing/coresight/coresight-etm4x.c
2611 @@ -174,6 +174,12 @@ static void etm4_enable_hw(void *info)
2612 if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
2613 dev_err(drvdata->dev,
2614 "timeout while waiting for Idle Trace Status\n");
2615 + /*
2616 + * As recommended by section 4.3.7 ("Synchronization when using the
2617 + * memory-mapped interface") of ARM IHI 0064D
2618 + */
2619 + dsb(sy);
2620 + isb();
2621
2622 CS_LOCK(drvdata->base);
2623
2624 @@ -324,8 +330,12 @@ static void etm4_disable_hw(void *info)
2625 /* EN, bit[0] Trace unit enable bit */
2626 control &= ~0x1;
2627
2628 - /* make sure everything completes before disabling */
2629 - mb();
2630 + /*
2631 + * Make sure everything completes before disabling, as recommended
2632 + * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
2633 + * SSTATUS") of ARM IHI 0064D
2634 + */
2635 + dsb(sy);
2636 isb();
2637 writel_relaxed(control, drvdata->base + TRCPRGCTLR);
2638
2639 diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
2640 index e5c598ae5f24..6627523e728b 100644
2641 --- a/drivers/mmc/host/sdhci-of-esdhc.c
2642 +++ b/drivers/mmc/host/sdhci-of-esdhc.c
2643 @@ -480,7 +480,12 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
2644 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
2645
2646 value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
2647 - value |= ESDHC_DMA_SNOOP;
2648 +
2649 + if (of_dma_is_coherent(dev->of_node))
2650 + value |= ESDHC_DMA_SNOOP;
2651 + else
2652 + value &= ~ESDHC_DMA_SNOOP;
2653 +
2654 sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
2655 return 0;
2656 }
2657 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2658 index eb33b892b484..e99d5632d8fa 100644
2659 --- a/drivers/mmc/host/sdhci.c
2660 +++ b/drivers/mmc/host/sdhci.c
2661 @@ -2720,6 +2720,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
2662 static void sdhci_adma_show_error(struct sdhci_host *host)
2663 {
2664 void *desc = host->adma_table;
2665 + dma_addr_t dma = host->adma_addr;
2666
2667 sdhci_dumpregs(host);
2668
2669 @@ -2727,18 +2728,21 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
2670 struct sdhci_adma2_64_desc *dma_desc = desc;
2671
2672 if (host->flags & SDHCI_USE_64_BIT_DMA)
2673 - DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2674 - desc, le32_to_cpu(dma_desc->addr_hi),
2675 + SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2676 + (unsigned long long)dma,
2677 + le32_to_cpu(dma_desc->addr_hi),
2678 le32_to_cpu(dma_desc->addr_lo),
2679 le16_to_cpu(dma_desc->len),
2680 le16_to_cpu(dma_desc->cmd));
2681 else
2682 - DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2683 - desc, le32_to_cpu(dma_desc->addr_lo),
2684 + SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2685 + (unsigned long long)dma,
2686 + le32_to_cpu(dma_desc->addr_lo),
2687 le16_to_cpu(dma_desc->len),
2688 le16_to_cpu(dma_desc->cmd));
2689
2690 desc += host->desc_sz;
2691 + dma += host->desc_sz;
2692
2693 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2694 break;
2695 @@ -2814,7 +2818,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2696 != MMC_BUS_TEST_R)
2697 host->data->error = -EILSEQ;
2698 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2699 - pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2700 + pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
2701 + intmask);
2702 sdhci_adma_show_error(host);
2703 host->data->error = -EIO;
2704 if (host->ops->adma_workaround)
2705 diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
2706 index fccb6bf21fad..de8d9dceb123 100644
2707 --- a/drivers/net/can/spi/mcp251x.c
2708 +++ b/drivers/net/can/spi/mcp251x.c
2709 @@ -626,7 +626,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
2710 static int mcp251x_hw_reset(struct spi_device *spi)
2711 {
2712 struct mcp251x_priv *priv = spi_get_drvdata(spi);
2713 - u8 reg;
2714 + unsigned long timeout;
2715 int ret;
2716
2717 /* Wait for oscillator startup timer after power up */
2718 @@ -640,10 +640,19 @@ static int mcp251x_hw_reset(struct spi_device *spi)
2719 /* Wait for oscillator startup timer after reset */
2720 mdelay(MCP251X_OST_DELAY_MS);
2721
2722 - reg = mcp251x_read_reg(spi, CANSTAT);
2723 - if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
2724 - return -ENODEV;
2725 -
2726 + /* Wait for reset to finish */
2727 + timeout = jiffies + HZ;
2728 + while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
2729 + CANCTRL_REQOP_CONF) {
2730 + usleep_range(MCP251X_OST_DELAY_MS * 1000,
2731 + MCP251X_OST_DELAY_MS * 1000 * 2);
2732 +
2733 + if (time_after(jiffies, timeout)) {
2734 + dev_err(&spi->dev,
2735 + "MCP251x didn't enter in conf mode after reset\n");
2736 + return -EBUSY;
2737 + }
2738 + }
2739 return 0;
2740 }
2741
2742 diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
2743 index 22c572a09b32..c19e88efe958 100644
2744 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c
2745 +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
2746 @@ -272,6 +272,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
2747 port = nfp_port_alloc(app, port_type, repr);
2748 if (IS_ERR(port)) {
2749 err = PTR_ERR(port);
2750 + kfree(repr_priv);
2751 nfp_repr_free(repr);
2752 goto err_reprs_clean;
2753 }
2754 diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
2755 index 4f684cbcdc57..078027bbe002 100644
2756 --- a/drivers/net/ieee802154/atusb.c
2757 +++ b/drivers/net/ieee802154/atusb.c
2758 @@ -1140,10 +1140,11 @@ static void atusb_disconnect(struct usb_interface *interface)
2759
2760 ieee802154_unregister_hw(atusb->hw);
2761
2762 + usb_put_dev(atusb->usb_dev);
2763 +
2764 ieee802154_free_hw(atusb->hw);
2765
2766 usb_set_intfdata(interface, NULL);
2767 - usb_put_dev(atusb->usb_dev);
2768
2769 pr_debug("%s done\n", __func__);
2770 }
2771 diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
2772 index 2a9d6b0d1f19..80508da3c8b5 100644
2773 --- a/drivers/ntb/test/ntb_perf.c
2774 +++ b/drivers/ntb/test/ntb_perf.c
2775 @@ -1373,7 +1373,7 @@ static int perf_setup_peer_mw(struct perf_peer *peer)
2776 int ret;
2777
2778 /* Get outbound MW parameters and map it */
2779 - ret = ntb_peer_mw_get_addr(perf->ntb, peer->gidx, &phys_addr,
2780 + ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
2781 &peer->outbuf_size);
2782 if (ret)
2783 return ret;
2784 diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
2785 index 2ba22cd1331b..54a633e8cb5d 100644
2786 --- a/drivers/nvdimm/bus.c
2787 +++ b/drivers/nvdimm/bus.c
2788 @@ -189,7 +189,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
2789 sector_t sector;
2790
2791 /* make sure device is a region */
2792 - if (!is_nd_pmem(dev))
2793 + if (!is_memory(dev))
2794 return 0;
2795
2796 nd_region = to_nd_region(dev);
2797 diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
2798 index f9130cc157e8..22224b21c34d 100644
2799 --- a/drivers/nvdimm/region.c
2800 +++ b/drivers/nvdimm/region.c
2801 @@ -42,7 +42,7 @@ static int nd_region_probe(struct device *dev)
2802 if (rc)
2803 return rc;
2804
2805 - if (is_nd_pmem(&nd_region->dev)) {
2806 + if (is_memory(&nd_region->dev)) {
2807 struct resource ndr_res;
2808
2809 if (devm_init_badblocks(dev, &nd_region->bb))
2810 @@ -131,7 +131,7 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
2811 struct nd_region *nd_region = to_nd_region(dev);
2812 struct resource res;
2813
2814 - if (is_nd_pmem(&nd_region->dev)) {
2815 + if (is_memory(&nd_region->dev)) {
2816 res.start = nd_region->ndr_start;
2817 res.end = nd_region->ndr_start +
2818 nd_region->ndr_size - 1;
2819 diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
2820 index 0303296e6d5b..609fc450522a 100644
2821 --- a/drivers/nvdimm/region_devs.c
2822 +++ b/drivers/nvdimm/region_devs.c
2823 @@ -633,11 +633,11 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
2824 if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
2825 return 0;
2826
2827 - if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
2828 + if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
2829 return 0;
2830
2831 if (a == &dev_attr_resource.attr) {
2832 - if (is_nd_pmem(dev))
2833 + if (is_memory(dev))
2834 return 0400;
2835 else
2836 return 0;
2837 diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
2838 index fd2dbd7eed7b..52d4fa4161dc 100644
2839 --- a/drivers/pci/controller/vmd.c
2840 +++ b/drivers/pci/controller/vmd.c
2841 @@ -31,6 +31,9 @@
2842 #define PCI_REG_VMLOCK 0x70
2843 #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
2844
2845 +#define MB2_SHADOW_OFFSET 0x2000
2846 +#define MB2_SHADOW_SIZE 16
2847 +
2848 enum vmd_features {
2849 /*
2850 * Device may contain registers which hint the physical location of the
2851 @@ -600,7 +603,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
2852 u32 vmlock;
2853 int ret;
2854
2855 - membar2_offset = 0x2018;
2856 + membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
2857 ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
2858 if (ret || vmlock == ~0)
2859 return -ENODEV;
2860 @@ -612,9 +615,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
2861 if (!membar2)
2862 return -ENOMEM;
2863 offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
2864 - readq(membar2 + 0x2008);
2865 + readq(membar2 + MB2_SHADOW_OFFSET);
2866 offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
2867 - readq(membar2 + 0x2010);
2868 + readq(membar2 + MB2_SHADOW_OFFSET + 8);
2869 pci_iounmap(vmd->dev, membar2);
2870 }
2871 }
2872 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2873 index c65465385d8c..6384930a6749 100644
2874 --- a/drivers/pci/pci.c
2875 +++ b/drivers/pci/pci.c
2876 @@ -1366,7 +1366,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
2877 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
2878 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
2879 res = pdev->resource + bar_idx;
2880 - size = order_base_2((resource_size(res) >> 20) | 1) - 1;
2881 + size = ilog2(resource_size(res)) - 20;
2882 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
2883 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
2884 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
2885 diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
2886 index 8ba6abf584de..3958ee03eec1 100644
2887 --- a/drivers/power/supply/sbs-battery.c
2888 +++ b/drivers/power/supply/sbs-battery.c
2889 @@ -323,17 +323,22 @@ static int sbs_get_battery_presence_and_health(
2890 {
2891 int ret;
2892
2893 - if (psp == POWER_SUPPLY_PROP_PRESENT) {
2894 - /* Dummy command; if it succeeds, battery is present. */
2895 - ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
2896 - if (ret < 0)
2897 - val->intval = 0; /* battery disconnected */
2898 - else
2899 - val->intval = 1; /* battery present */
2900 - } else { /* POWER_SUPPLY_PROP_HEALTH */
2901 + /* Dummy command; if it succeeds, battery is present. */
2902 + ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
2903 +
2904 + if (ret < 0) { /* battery not present*/
2905 + if (psp == POWER_SUPPLY_PROP_PRESENT) {
2906 + val->intval = 0;
2907 + return 0;
2908 + }
2909 + return ret;
2910 + }
2911 +
2912 + if (psp == POWER_SUPPLY_PROP_PRESENT)
2913 + val->intval = 1; /* battery present */
2914 + else /* POWER_SUPPLY_PROP_HEALTH */
2915 /* SBS spec doesn't have a general health command. */
2916 val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
2917 - }
2918
2919 return 0;
2920 }
2921 @@ -629,12 +634,14 @@ static int sbs_get_property(struct power_supply *psy,
2922 switch (psp) {
2923 case POWER_SUPPLY_PROP_PRESENT:
2924 case POWER_SUPPLY_PROP_HEALTH:
2925 - if (client->flags & SBS_FLAGS_TI_BQ20Z75)
2926 + if (chip->flags & SBS_FLAGS_TI_BQ20Z75)
2927 ret = sbs_get_ti_battery_presence_and_health(client,
2928 psp, val);
2929 else
2930 ret = sbs_get_battery_presence_and_health(client, psp,
2931 val);
2932 +
2933 + /* this can only be true if no gpio is used */
2934 if (psp == POWER_SUPPLY_PROP_PRESENT)
2935 return 0;
2936 break;
2937 diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
2938 index 0059b24cfdc3..28e1f6413476 100644
2939 --- a/drivers/pwm/pwm-stm32-lp.c
2940 +++ b/drivers/pwm/pwm-stm32-lp.c
2941 @@ -58,6 +58,12 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
2942 /* Calculate the period and prescaler value */
2943 div = (unsigned long long)clk_get_rate(priv->clk) * state->period;
2944 do_div(div, NSEC_PER_SEC);
2945 + if (!div) {
2946 + /* Clock is too slow to achieve requested period. */
2947 + dev_dbg(priv->chip.dev, "Can't reach %u ns\n", state->period);
2948 + return -EINVAL;
2949 + }
2950 +
2951 prd = div;
2952 while (div > STM32_LPTIM_MAX_ARR) {
2953 presc++;
2954 diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
2955 index 93b2862bd3fa..674d848e377c 100644
2956 --- a/drivers/s390/cio/ccwgroup.c
2957 +++ b/drivers/s390/cio/ccwgroup.c
2958 @@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
2959 goto error;
2960 }
2961 /* Check for trailing stuff. */
2962 - if (i == num_devices && strlen(buf) > 0) {
2963 + if (i == num_devices && buf && strlen(buf) > 0) {
2964 rc = -EINVAL;
2965 goto error;
2966 }
2967 diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
2968 index aea502922646..df09ed53ab45 100644
2969 --- a/drivers/s390/cio/css.c
2970 +++ b/drivers/s390/cio/css.c
2971 @@ -1213,6 +1213,8 @@ device_initcall(cio_settle_init);
2972
2973 int sch_is_pseudo_sch(struct subchannel *sch)
2974 {
2975 + if (!sch->dev.parent)
2976 + return 0;
2977 return sch == to_css(sch->dev.parent)->pseudo_subchannel;
2978 }
2979
2980 diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
2981 index 0a089cf5c78f..fe6683effd05 100644
2982 --- a/drivers/staging/erofs/dir.c
2983 +++ b/drivers/staging/erofs/dir.c
2984 @@ -100,8 +100,15 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
2985 unsigned nameoff, maxsize;
2986
2987 dentry_page = read_mapping_page(mapping, i, NULL);
2988 - if (IS_ERR(dentry_page))
2989 - continue;
2990 + if (dentry_page == ERR_PTR(-ENOMEM)) {
2991 + err = -ENOMEM;
2992 + break;
2993 + } else if (IS_ERR(dentry_page)) {
2994 + errln("fail to readdir of logical block %u of nid %llu",
2995 + i, EROFS_V(dir)->nid);
2996 + err = PTR_ERR(dentry_page);
2997 + break;
2998 + }
2999
3000 lock_page(dentry_page);
3001 de = (struct erofs_dirent *)kmap(dentry_page);
3002 diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
3003 index ad6fe6d9d00a..0f1558c6747e 100644
3004 --- a/drivers/staging/erofs/unzip_vle.c
3005 +++ b/drivers/staging/erofs/unzip_vle.c
3006 @@ -311,7 +311,11 @@ z_erofs_vle_work_lookup(struct super_block *sb,
3007 /* if multiref is disabled, `primary' is always true */
3008 primary = true;
3009
3010 - DBG_BUGON(work->pageofs != pageofs);
3011 + if (work->pageofs != pageofs) {
3012 + DBG_BUGON(1);
3013 + erofs_workgroup_put(egrp);
3014 + return ERR_PTR(-EIO);
3015 + }
3016
3017 /*
3018 * lock must be taken first to avoid grp->next == NIL between
3019 @@ -853,6 +857,7 @@ repeat:
3020 for (i = 0; i < nr_pages; ++i)
3021 pages[i] = NULL;
3022
3023 + err = 0;
3024 z_erofs_pagevec_ctor_init(&ctor,
3025 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
3026
3027 @@ -874,8 +879,17 @@ repeat:
3028 pagenr = z_erofs_onlinepage_index(page);
3029
3030 DBG_BUGON(pagenr >= nr_pages);
3031 - DBG_BUGON(pages[pagenr]);
3032
3033 + /*
3034 + * currently EROFS doesn't support multiref(dedup),
3035 + * so here erroring out one multiref page.
3036 + */
3037 + if (pages[pagenr]) {
3038 + DBG_BUGON(1);
3039 + SetPageError(pages[pagenr]);
3040 + z_erofs_onlinepage_endio(pages[pagenr]);
3041 + err = -EIO;
3042 + }
3043 pages[pagenr] = page;
3044 }
3045 sparsemem_pages = i;
3046 @@ -885,7 +899,6 @@ repeat:
3047 overlapped = false;
3048 compressed_pages = grp->compressed_pages;
3049
3050 - err = 0;
3051 for (i = 0; i < clusterpages; ++i) {
3052 unsigned pagenr;
3053
3054 @@ -911,7 +924,12 @@ repeat:
3055 pagenr = z_erofs_onlinepage_index(page);
3056
3057 DBG_BUGON(pagenr >= nr_pages);
3058 - DBG_BUGON(pages[pagenr]);
3059 + if (pages[pagenr]) {
3060 + DBG_BUGON(1);
3061 + SetPageError(pages[pagenr]);
3062 + z_erofs_onlinepage_endio(pages[pagenr]);
3063 + err = -EIO;
3064 + }
3065 ++sparsemem_pages;
3066 pages[pagenr] = page;
3067
3068 @@ -1335,19 +1353,18 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
3069 err = z_erofs_do_read_page(&f, page, &pagepool);
3070 (void)z_erofs_vle_work_iter_end(&f.builder);
3071
3072 - if (err) {
3073 + /* if some compressed cluster ready, need submit them anyway */
3074 + z_erofs_submit_and_unzip(&f, &pagepool, true);
3075 +
3076 + if (err)
3077 errln("%s, failed to read, err [%d]", __func__, err);
3078 - goto out;
3079 - }
3080
3081 - z_erofs_submit_and_unzip(&f, &pagepool, true);
3082 -out:
3083 if (f.m_iter.mpage != NULL)
3084 put_page(f.m_iter.mpage);
3085
3086 /* clean up the remaining free pages */
3087 put_pages_list(&pagepool);
3088 - return 0;
3089 + return err;
3090 }
3091
3092 static inline int __z_erofs_vle_normalaccess_readpages(
3093 diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
3094 index bf9721fc2824..be3eafc7682b 100644
3095 --- a/drivers/thermal/thermal_core.c
3096 +++ b/drivers/thermal/thermal_core.c
3097 @@ -296,7 +296,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
3098 mod_delayed_work(system_freezable_wq, &tz->poll_queue,
3099 msecs_to_jiffies(delay));
3100 else
3101 - cancel_delayed_work(&tz->poll_queue);
3102 + cancel_delayed_work_sync(&tz->poll_queue);
3103 }
3104
3105 static void monitor_thermal_zone(struct thermal_zone_device *tz)
3106 diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
3107 index 40c69a533b24..dd5d8ee37928 100644
3108 --- a/drivers/thermal/thermal_hwmon.c
3109 +++ b/drivers/thermal/thermal_hwmon.c
3110 @@ -87,13 +87,17 @@ static struct thermal_hwmon_device *
3111 thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
3112 {
3113 struct thermal_hwmon_device *hwmon;
3114 + char type[THERMAL_NAME_LENGTH];
3115
3116 mutex_lock(&thermal_hwmon_list_lock);
3117 - list_for_each_entry(hwmon, &thermal_hwmon_list, node)
3118 - if (!strcmp(hwmon->type, tz->type)) {
3119 + list_for_each_entry(hwmon, &thermal_hwmon_list, node) {
3120 + strcpy(type, tz->type);
3121 + strreplace(type, '-', '_');
3122 + if (!strcmp(hwmon->type, type)) {
3123 mutex_unlock(&thermal_hwmon_list_lock);
3124 return hwmon;
3125 }
3126 + }
3127 mutex_unlock(&thermal_hwmon_list_lock);
3128
3129 return NULL;
3130 diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
3131 index 1abe4d021fd2..ffde179a9bb2 100644
3132 --- a/drivers/watchdog/aspeed_wdt.c
3133 +++ b/drivers/watchdog/aspeed_wdt.c
3134 @@ -38,6 +38,7 @@ static const struct aspeed_wdt_config ast2500_config = {
3135 static const struct of_device_id aspeed_wdt_of_table[] = {
3136 { .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
3137 { .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
3138 + { .compatible = "aspeed,ast2600-wdt", .data = &ast2500_config },
3139 { },
3140 };
3141 MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
3142 @@ -264,7 +265,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
3143 set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
3144 }
3145
3146 - if (of_device_is_compatible(np, "aspeed,ast2500-wdt")) {
3147 + if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
3148 + (of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
3149 u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
3150
3151 reg &= config->ext_pulse_width_mask;
3152 diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
3153 index 7e7bdcbbc741..9f3123b04536 100644
3154 --- a/drivers/watchdog/imx2_wdt.c
3155 +++ b/drivers/watchdog/imx2_wdt.c
3156 @@ -55,7 +55,7 @@
3157
3158 #define IMX2_WDT_WMCR 0x08 /* Misc Register */
3159
3160 -#define IMX2_WDT_MAX_TIME 128
3161 +#define IMX2_WDT_MAX_TIME 128U
3162 #define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
3163
3164 #define WDOG_SEC_TO_COUNT(s) ((s * 2 - 1) << 8)
3165 @@ -180,7 +180,7 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
3166 {
3167 unsigned int actual;
3168
3169 - actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
3170 + actual = min(new_timeout, IMX2_WDT_MAX_TIME);
3171 __imx2_wdt_set_timeout(wdog, actual);
3172 wdog->timeout = new_timeout;
3173 return 0;
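
The old clamp compared a value in seconds against max_hw_heartbeat_ms * 1000, so it never triggered; the fix clamps against the 128-second hardware limit, and the U suffix keeps both min() arguments the same type for the kernel's type-checking min() macro. A small sketch of the unit mismatch, with hypothetical timeout values and plain C in place of min():

    #include <stdio.h>

    #define IMX2_WDT_MAX_TIME 128U  /* seconds; U matches unsigned new_timeout */

    int main(void)
    {
            unsigned int max_hw_heartbeat_ms = 128U * 1000U;
            unsigned int new_timeout = 300; /* seconds, beyond the hw limit */

            /* old clamp: seconds vs. ms * 1000, so it never fires */
            unsigned int old = new_timeout < max_hw_heartbeat_ms * 1000 ?
                               new_timeout : max_hw_heartbeat_ms * 1000;
            /* fixed clamp: both sides in seconds */
            unsigned int fixed = new_timeout < IMX2_WDT_MAX_TIME ?
                                 new_timeout : IMX2_WDT_MAX_TIME;

            printf("old=%u fixed=%u\n", old, fixed); /* old=300 fixed=128 */
            return 0;
    }
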
3174 diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
3175 index 7494dbeb4409..db58aaa4dc59 100644
3176 --- a/drivers/xen/pci.c
3177 +++ b/drivers/xen/pci.c
3178 @@ -29,6 +29,8 @@
3179 #include "../pci/pci.h"
3180 #ifdef CONFIG_PCI_MMCONFIG
3181 #include <asm/pci_x86.h>
3182 +
3183 +static int xen_mcfg_late(void);
3184 #endif
3185
3186 static bool __read_mostly pci_seg_supported = true;
3187 @@ -40,7 +42,18 @@ static int xen_add_device(struct device *dev)
3188 #ifdef CONFIG_PCI_IOV
3189 struct pci_dev *physfn = pci_dev->physfn;
3190 #endif
3191 -
3192 +#ifdef CONFIG_PCI_MMCONFIG
3193 + static bool pci_mcfg_reserved = false;
3194 + /*
3195 +	 * Reserve MCFG areas in Xen on first invocation, since this can be
3196 +	 * called from inside acpi_init immediately after the MCFG table
3197 +	 * has been parsed.
3198 + */
3199 + if (!pci_mcfg_reserved) {
3200 + xen_mcfg_late();
3201 + pci_mcfg_reserved = true;
3202 + }
3203 +#endif
3204 if (pci_seg_supported) {
3205 struct {
3206 struct physdev_pci_device_add add;
3207 @@ -213,7 +226,7 @@ static int __init register_xen_pci_notifier(void)
3208 arch_initcall(register_xen_pci_notifier);
3209
3210 #ifdef CONFIG_PCI_MMCONFIG
3211 -static int __init xen_mcfg_late(void)
3212 +static int xen_mcfg_late(void)
3213 {
3214 struct pci_mmcfg_region *cfg;
3215 int rc;
3216 @@ -252,8 +265,4 @@ static int __init xen_mcfg_late(void)
3217 }
3218 return 0;
3219 }
3220 -/*
3221 - * Needs to be done after acpi_init which are subsys_initcall.
3222 - */
3223 -subsys_initcall_sync(xen_mcfg_late);
3224 #endif
3225 diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
3226 index 39c63152a358..454c6826abdb 100644
3227 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c
3228 +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
3229 @@ -55,6 +55,7 @@
3230 #include <linux/string.h>
3231 #include <linux/slab.h>
3232 #include <linux/miscdevice.h>
3233 +#include <linux/workqueue.h>
3234
3235 #include <xen/xenbus.h>
3236 #include <xen/xen.h>
3237 @@ -116,6 +117,8 @@ struct xenbus_file_priv {
3238 wait_queue_head_t read_waitq;
3239
3240 struct kref kref;
3241 +
3242 + struct work_struct wq;
3243 };
3244
3245 /* Read out any raw xenbus messages queued up. */
3246 @@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_watch *watch,
3247 mutex_unlock(&adap->dev_data->reply_mutex);
3248 }
3249
3250 -static void xenbus_file_free(struct kref *kref)
3251 +static void xenbus_worker(struct work_struct *wq)
3252 {
3253 struct xenbus_file_priv *u;
3254 struct xenbus_transaction_holder *trans, *tmp;
3255 struct watch_adapter *watch, *tmp_watch;
3256 struct read_buffer *rb, *tmp_rb;
3257
3258 - u = container_of(kref, struct xenbus_file_priv, kref);
3259 + u = container_of(wq, struct xenbus_file_priv, wq);
3260
3261 /*
3262 * No need for locking here because there are no other users,
3263 @@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref *kref)
3264 kfree(u);
3265 }
3266
3267 +static void xenbus_file_free(struct kref *kref)
3268 +{
3269 + struct xenbus_file_priv *u;
3270 +
3271 + /*
3272 + * We might be called in xenbus_thread().
3273 + * Use workqueue to avoid deadlock.
3274 + */
3275 + u = container_of(kref, struct xenbus_file_priv, kref);
3276 + schedule_work(&u->wq);
3277 +}
3278 +
3279 static struct xenbus_transaction_holder *xenbus_get_transaction(
3280 struct xenbus_file_priv *u, uint32_t tx_id)
3281 {
3282 @@ -652,6 +667,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
3283 INIT_LIST_HEAD(&u->watches);
3284 INIT_LIST_HEAD(&u->read_buffers);
3285 init_waitqueue_head(&u->read_waitq);
3286 + INIT_WORK(&u->wq, xenbus_worker);
3287
3288 mutex_init(&u->reply_mutex);
3289 mutex_init(&u->msgbuffer_mutex);
3290 diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
3291 index 05454a7e22dc..550d0b169d7c 100644
3292 --- a/fs/9p/vfs_file.c
3293 +++ b/fs/9p/vfs_file.c
3294 @@ -528,6 +528,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
3295 v9inode = V9FS_I(inode);
3296 mutex_lock(&v9inode->v_mutex);
3297 if (!v9inode->writeback_fid &&
3298 + (vma->vm_flags & VM_SHARED) &&
3299 (vma->vm_flags & VM_WRITE)) {
3300 /*
3301 * clone a fid and add it to writeback_fid
3302 @@ -629,6 +630,8 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
3303 (vma->vm_end - vma->vm_start - 1),
3304 };
3305
3306 + if (!(vma->vm_flags & VM_SHARED))
3307 + return;
3308
3309 p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
3310
3311 diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
3312 index c06845237cba..8196c21d8623 100644
3313 --- a/fs/ceph/inode.c
3314 +++ b/fs/ceph/inode.c
3315 @@ -807,7 +807,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
3316
3317 /* update inode */
3318 inode->i_rdev = le32_to_cpu(info->rdev);
3319 - inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
3320 + /* directories have fl_stripe_unit set to zero */
3321 + if (le32_to_cpu(info->layout.fl_stripe_unit))
3322 + inode->i_blkbits =
3323 + fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
3324 + else
3325 + inode->i_blkbits = CEPH_BLOCK_SHIFT;
3326
3327 __ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
3328
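
fls() returns the 1-based index of the highest set bit and fls(0) == 0, so the old code set i_blkbits to -1 for directories, whose fl_stripe_unit is zero. A userspace sketch of the before/after arithmetic (local fls() stand-in; CEPH_BLOCK_SHIFT taken as 22, the 4 MB default in the ceph sources):

    #include <stdio.h>

    /* userspace stand-in for the kernel's fls(): position of the highest
     * set bit, 1-based; fls(0) == 0 */
    static int fls(unsigned int x)
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    #define CEPH_BLOCK_SHIFT 22     /* assumed: 4 MB ceph block */

    int main(void)
    {
            unsigned int file_stripe = 1U << 22; /* 4 MB stripe unit */
            unsigned int dir_stripe = 0;         /* directories report 0 */

            printf("file: %d\n", fls(file_stripe) - 1);          /* 22 */
            printf("dir, old: %d\n", fls(dir_stripe) - 1);       /* -1 */
            printf("dir, fixed: %d\n",
                   dir_stripe ? fls(dir_stripe) - 1 : CEPH_BLOCK_SHIFT); /* 22 */
            return 0;
    }
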
3329 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
3330 index bfcf11c70bfa..09db6d08614d 100644
3331 --- a/fs/ceph/mds_client.c
3332 +++ b/fs/ceph/mds_client.c
3333 @@ -3640,7 +3640,9 @@ static void delayed_work(struct work_struct *work)
3334 pr_info("mds%d hung\n", s->s_mds);
3335 }
3336 }
3337 - if (s->s_state < CEPH_MDS_SESSION_OPEN) {
3338 + if (s->s_state == CEPH_MDS_SESSION_NEW ||
3339 + s->s_state == CEPH_MDS_SESSION_RESTARTING ||
3340 + s->s_state == CEPH_MDS_SESSION_REJECTED) {
3341 /* this mds is failed or recovering, just wait */
3342 ceph_put_mds_session(s);
3343 continue;
3344 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
3345 index 8f68181256c0..f057c213c453 100644
3346 --- a/fs/fuse/cuse.c
3347 +++ b/fs/fuse/cuse.c
3348 @@ -518,6 +518,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
3349 rc = cuse_send_init(cc);
3350 if (rc) {
3351 fuse_dev_free(fud);
3352 + fuse_conn_put(&cc->fc);
3353 return rc;
3354 }
3355 file->private_data = fud;
3356 diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
3357 index b7bde12d8cd5..1c0227c78a7b 100644
3358 --- a/fs/nfs/nfs4xdr.c
3359 +++ b/fs/nfs/nfs4xdr.c
3360 @@ -1171,7 +1171,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
3361 } else
3362 *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
3363 }
3364 - if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
3365 + if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
3366 *p++ = cpu_to_be32(label->lfs);
3367 *p++ = cpu_to_be32(label->pi);
3368 *p++ = cpu_to_be32(label->len);
3369 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
3370 index 4931c3a75f03..c818f9886f61 100644
3371 --- a/fs/nfs/pnfs.c
3372 +++ b/fs/nfs/pnfs.c
3373 @@ -1426,10 +1426,15 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
3374 const nfs4_stateid *res_stateid = NULL;
3375 struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
3376
3377 - if (ret == 0) {
3378 - arg_stateid = &args->stateid;
3379 + switch (ret) {
3380 + case -NFS4ERR_NOMATCHING_LAYOUT:
3381 + break;
3382 + case 0:
3383 if (res->lrs_present)
3384 res_stateid = &res->stateid;
3385 + /* Fallthrough */
3386 + default:
3387 + arg_stateid = &args->stateid;
3388 }
3389 pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
3390 res_stateid);
3391 diff --git a/fs/statfs.c b/fs/statfs.c
3392 index f0216629621d..56f655f757ff 100644
3393 --- a/fs/statfs.c
3394 +++ b/fs/statfs.c
3395 @@ -304,19 +304,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
3396 static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
3397 {
3398 struct compat_statfs64 buf;
3399 - if (sizeof(ubuf->f_bsize) == 4) {
3400 - if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
3401 - kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
3402 - return -EOVERFLOW;
3403 - /* f_files and f_ffree may be -1; it's okay
3404 - * to stuff that into 32 bits */
3405 - if (kbuf->f_files != 0xffffffffffffffffULL
3406 - && (kbuf->f_files & 0xffffffff00000000ULL))
3407 - return -EOVERFLOW;
3408 - if (kbuf->f_ffree != 0xffffffffffffffffULL
3409 - && (kbuf->f_ffree & 0xffffffff00000000ULL))
3410 - return -EOVERFLOW;
3411 - }
3412 +
3413 + if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
3414 + return -EOVERFLOW;
3415 +
3416 memset(&buf, 0, sizeof(struct compat_statfs64));
3417 buf.f_type = kbuf->f_type;
3418 buf.f_bsize = kbuf->f_bsize;
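
The rewrite collapses the per-field checks into a single test: only block sizes that do not fit in 32 bits need to be rejected, and ORing the candidates lets one mask cover both fields. A trivial sketch of the combined overflow test, with made-up values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long f_bsize = 4096, f_frsize = 1ULL << 33;

            /* one combined test: do either value's high 32 bits carry data? */
            if ((f_bsize | f_frsize) & 0xffffffff00000000ULL)
                    printf("-EOVERFLOW\n");
            return 0;
    }
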
3419 diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
3420 index 9c03a7d5e400..c83478271c2e 100644
3421 --- a/include/linux/ieee80211.h
3422 +++ b/include/linux/ieee80211.h
3423 @@ -3185,4 +3185,57 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
3424 return true;
3425 }
3426
3427 +struct element {
3428 + u8 id;
3429 + u8 datalen;
3430 + u8 data[];
3431 +} __packed;
3432 +
3433 +/* element iteration helpers */
3434 +#define for_each_element(_elem, _data, _datalen) \
3435 + for (_elem = (const struct element *)(_data); \
3436 + (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
3437 + (int)sizeof(*_elem) && \
3438 + (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
3439 + (int)sizeof(*_elem) + _elem->datalen; \
3440 + _elem = (const struct element *)(_elem->data + _elem->datalen))
3441 +
3442 +#define for_each_element_id(element, _id, data, datalen) \
3443 + for_each_element(element, data, datalen) \
3444 + if (element->id == (_id))
3445 +
3446 +#define for_each_element_extid(element, extid, data, datalen) \
3447 + for_each_element(element, data, datalen) \
3448 + if (element->id == WLAN_EID_EXTENSION && \
3449 + element->datalen > 0 && \
3450 + element->data[0] == (extid))
3451 +
3452 +#define for_each_subelement(sub, element) \
3453 + for_each_element(sub, (element)->data, (element)->datalen)
3454 +
3455 +#define for_each_subelement_id(sub, id, element) \
3456 + for_each_element_id(sub, id, (element)->data, (element)->datalen)
3457 +
3458 +#define for_each_subelement_extid(sub, extid, element) \
3459 + for_each_element_extid(sub, extid, (element)->data, (element)->datalen)
3460 +
3461 +/**
3462 + * for_each_element_completed - determine if element parsing consumed all data
3463 + * @element: element pointer after for_each_element() or friends
3464 + * @data: same data pointer as passed to for_each_element() or friends
3465 + * @datalen: same data length as passed to for_each_element() or friends
3466 + *
3467 + * This function returns %true if all the data was parsed or considered
3468 + * while walking the elements. Only use this if your for_each_element()
3469 + * loop cannot be broken out of, otherwise it always returns %false.
3470 + *
3471 + * If some data was malformed, this returns %false since the last parsed
3472 + * element will not fill the whole remaining data.
3473 + */
3474 +static inline bool for_each_element_completed(const struct element *element,
3475 + const void *data, size_t datalen)
3476 +{
3477 + return (const u8 *)element == (const u8 *)data + datalen;
3478 +}
3479 +
3480 #endif /* LINUX_IEEE80211_H */
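
These helpers validate an element's 2-byte header and its declared payload against the remaining buffer before anything dereferences it. A self-contained userspace version of the same walk, with the for_each_element() macro copied from the hunk and a helper mirroring for_each_element_completed():

    #include <stdio.h>
    #include <stddef.h>

    typedef unsigned char u8;

    struct element {
            u8 id;
            u8 datalen;
            u8 data[];
    } __attribute__((packed));

    /* copied from the hunk above */
    #define for_each_element(_elem, _data, _datalen)                     \
            for (_elem = (const struct element *)(_data);                \
                 (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
                    (int)sizeof(*_elem) &&                               \
                 (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
                    (int)sizeof(*_elem) + _elem->datalen;                \
                 _elem = (const struct element *)(_elem->data + _elem->datalen))

    /* mirrors for_each_element_completed(): was everything consumed? */
    static int buffer_is_valid(const u8 *data, size_t len)
    {
            const struct element *elem;

            for_each_element(elem, data, len)
                    ; /* walking is the validation */
            return (const u8 *)elem == data + len;
    }

    int main(void)
    {
            const u8 good[] = { 0x00, 0x03, 'f', 'o', 'o', 0x01, 0x00 };
            const u8 bad[]  = { 0x00, 0x09, 'f', 'o', 'o' }; /* overruns */

            printf("good=%d bad=%d\n",
                   buffer_is_valid(good, sizeof(good)),
                   buffer_is_valid(bad, sizeof(bad))); /* good=1 bad=0 */
            return 0;
    }
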
3481 diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
3482 index 0d10b7ce0da7..e9d4e389aed9 100644
3483 --- a/include/linux/sched/mm.h
3484 +++ b/include/linux/sched/mm.h
3485 @@ -330,6 +330,8 @@ enum {
3486
3487 static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
3488 {
3489 + if (current->mm != mm)
3490 + return;
3491 if (likely(!(atomic_read(&mm->membarrier_state) &
3492 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
3493 return;
3494 diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
3495 index fdaaafdc7a00..5165e3b30899 100644
3496 --- a/include/sound/soc-dapm.h
3497 +++ b/include/sound/soc-dapm.h
3498 @@ -353,6 +353,8 @@ struct device;
3499 #define SND_SOC_DAPM_WILL_PMD 0x80 /* called at start of sequence */
3500 #define SND_SOC_DAPM_PRE_POST_PMD \
3501 (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
3502 +#define SND_SOC_DAPM_PRE_POST_PMU \
3503 + (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
3504
3505 /* convenience event type detection */
3506 #define SND_SOC_DAPM_EVENT_ON(e) \
3507 diff --git a/kernel/elfcore.c b/kernel/elfcore.c
3508 index fc482c8e0bd8..57fb4dcff434 100644
3509 --- a/kernel/elfcore.c
3510 +++ b/kernel/elfcore.c
3511 @@ -3,6 +3,7 @@
3512 #include <linux/fs.h>
3513 #include <linux/mm.h>
3514 #include <linux/binfmts.h>
3515 +#include <linux/elfcore.h>
3516
3517 Elf_Half __weak elf_core_extra_phdrs(void)
3518 {
3519 diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
3520 index 5a0cf5f9008c..82104d3dd18e 100644
3521 --- a/kernel/locking/qspinlock_paravirt.h
3522 +++ b/kernel/locking/qspinlock_paravirt.h
3523 @@ -271,7 +271,7 @@ pv_wait_early(struct pv_node *prev, int loop)
3524 if ((loop & PV_PREV_CHECK_MASK) != 0)
3525 return false;
3526
3527 - return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
3528 + return READ_ONCE(prev->state) != vcpu_running;
3529 }
3530
3531 /*
3532 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
3533 index f4e050681ba1..78ecdfae25b6 100644
3534 --- a/kernel/sched/core.c
3535 +++ b/kernel/sched/core.c
3536 @@ -1077,7 +1077,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
3537 if (cpumask_equal(&p->cpus_allowed, new_mask))
3538 goto out;
3539
3540 - if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
3541 + dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
3542 + if (dest_cpu >= nr_cpu_ids) {
3543 ret = -EINVAL;
3544 goto out;
3545 }
3546 @@ -1098,7 +1099,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
3547 if (cpumask_test_cpu(task_cpu(p), new_mask))
3548 goto out;
3549
3550 - dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
3551 if (task_running(rq, p) || p->state == TASK_WAKING) {
3552 struct migration_arg arg = { p, dest_cpu };
3553 /* Need help from migration thread: drop lock and wait. */
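
cpumask_any_and() returns a CPU number below nr_cpu_ids when the masks intersect and nr_cpu_ids or more when they do not, so computing dest_cpu once both validates the new mask and picks the migration target. A userspace sketch of that convention, using toy 8-bit masks and a local stand-in for cpumask_any_and():

    #include <stdio.h>

    #define NR_BITS 8

    /* stand-in for cpumask_any_and(): first bit set in both masks,
     * or NR_BITS if the masks do not intersect */
    static unsigned int any_and(unsigned int a, unsigned int b,
                                unsigned int nbits)
    {
            unsigned int common = a & b, bit;

            for (bit = 0; bit < nbits; bit++)
                    if (common & (1U << bit))
                            return bit;
            return nbits;
    }

    int main(void)
    {
            unsigned int valid = 0x0F, new_mask = 0x30; /* disjoint */
            unsigned int dest = any_and(valid, new_mask, NR_BITS);

            if (dest >= NR_BITS)
                    printf("-EINVAL\n"); /* replaces the intersects() check */
            return 0;
    }
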
3554 diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
3555 index 76e0eaf4654e..dd27e632b1ba 100644
3556 --- a/kernel/sched/membarrier.c
3557 +++ b/kernel/sched/membarrier.c
3558 @@ -235,7 +235,7 @@ static int membarrier_register_private_expedited(int flags)
3559 * groups, which use the same mm. (CLONE_VM but not
3560 * CLONE_THREAD).
3561 */
3562 - if (atomic_read(&mm->membarrier_state) & state)
3563 + if ((atomic_read(&mm->membarrier_state) & state) == state)
3564 return 0;
3565 atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
3566 if (flags & MEMBARRIER_FLAG_SYNC_CORE)
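
With a multi-bit state, `&` alone answers "is any requested bit set?", which let a later SYNC_CORE registration return early when only the plain expedited bit was recorded; the fix asks "are all requested bits set?". A trivial sketch with hypothetical flag values:

    #include <stdio.h>

    #define STATE_EXPEDITED 0x1 /* hypothetical stand-ins for the */
    #define STATE_SYNC_CORE 0x2 /* MEMBARRIER_STATE_* bits */

    int main(void)
    {
            unsigned int mm_state = STATE_EXPEDITED; /* one bit recorded */
            unsigned int want = STATE_EXPEDITED | STATE_SYNC_CORE;

            /* old test: true if ANY wanted bit is set -> early return */
            printf("any-bit: %d\n", (mm_state & want) != 0);     /* 1 */
            /* fixed test: true only if ALL wanted bits are set */
            printf("all-bits: %d\n", (mm_state & want) == want); /* 0 */
            return 0;
    }
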
3567 diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
3568 index a59641fb88b6..a836efd34589 100644
3569 --- a/kernel/time/tick-broadcast-hrtimer.c
3570 +++ b/kernel/time/tick-broadcast-hrtimer.c
3571 @@ -44,34 +44,39 @@ static int bc_shutdown(struct clock_event_device *evt)
3572 */
3573 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
3574 {
3575 - int bc_moved;
3576 /*
3577 - * We try to cancel the timer first. If the callback is on
3578 - * flight on some other cpu then we let it handle it. If we
3579 - * were able to cancel the timer nothing can rearm it as we
3580 - * own broadcast_lock.
3581 + * This is called either from enter/exit idle code or from the
3582 + * broadcast handler. In all cases tick_broadcast_lock is held.
3583 *
3584 - * However we can also be called from the event handler of
3585 - * ce_broadcast_hrtimer itself when it expires. We cannot
3586 - * restart the timer because we are in the callback, but we
3587 - * can set the expiry time and let the callback return
3588 - * HRTIMER_RESTART.
3589 +	 * hrtimer_cancel() can be called here neither from the
3590 +	 * broadcast handler nor from the enter/exit idle code. The idle
3591 + * code can run into the problem described in bc_shutdown() and the
3592 + * broadcast handler cannot wait for itself to complete for obvious
3593 + * reasons.
3594 *
3595 - * Since we are in the idle loop at this point and because
3596 - * hrtimer_{start/cancel} functions call into tracing,
3597 - * calls to these functions must be bound within RCU_NONIDLE.
3598 + * Each caller tries to arm the hrtimer on its own CPU, but if the
3599 +	 * hrtimer callback function is currently running, then
3600 + * hrtimer_start() cannot move it and the timer stays on the CPU on
3601 + * which it is assigned at the moment.
3602 + *
3603 + * As this can be called from idle code, the hrtimer_start()
3604 + * invocation has to be wrapped with RCU_NONIDLE() as
3605 + * hrtimer_start() can call into tracing.
3606 */
3607 - RCU_NONIDLE({
3608 - bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
3609 - if (bc_moved)
3610 - hrtimer_start(&bctimer, expires,
3611 - HRTIMER_MODE_ABS_PINNED);});
3612 - if (bc_moved) {
3613 - /* Bind the "device" to the cpu */
3614 - bc->bound_on = smp_processor_id();
3615 - } else if (bc->bound_on == smp_processor_id()) {
3616 - hrtimer_set_expires(&bctimer, expires);
3617 - }
3618 + RCU_NONIDLE( {
3619 + hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
3620 + /*
3621 + * The core tick broadcast mode expects bc->bound_on to be set
3622 + * correctly to prevent a CPU which has the broadcast hrtimer
3623 + * armed from going deep idle.
3624 + *
3625 + * As tick_broadcast_lock is held, nothing can change the cpu
3626 + * base which was just established in hrtimer_start() above. So
3627 + * the below access is safe even without holding the hrtimer
3628 + * base lock.
3629 + */
3630 + bc->bound_on = bctimer.base->cpu_base->cpu;
3631 + } );
3632 return 0;
3633 }
3634
3635 @@ -97,10 +102,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
3636 {
3637 ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
3638
3639 - if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
3640 - if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
3641 - return HRTIMER_RESTART;
3642 -
3643 return HRTIMER_NORESTART;
3644 }
3645
3646 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
3647 index fa49cd753dea..ae64cb819a9a 100644
3648 --- a/kernel/time/timer.c
3649 +++ b/kernel/time/timer.c
3650 @@ -1590,24 +1590,26 @@ void timer_clear_idle(void)
3651 static int collect_expired_timers(struct timer_base *base,
3652 struct hlist_head *heads)
3653 {
3654 + unsigned long now = READ_ONCE(jiffies);
3655 +
3656 /*
3657 * NOHZ optimization. After a long idle sleep we need to forward the
3658 * base to current jiffies. Avoid a loop by searching the bitfield for
3659 * the next expiring timer.
3660 */
3661 - if ((long)(jiffies - base->clk) > 2) {
3662 + if ((long)(now - base->clk) > 2) {
3663 unsigned long next = __next_timer_interrupt(base);
3664
3665 /*
3666 * If the next timer is ahead of time forward to current
3667 * jiffies, otherwise forward to the next expiry time:
3668 */
3669 - if (time_after(next, jiffies)) {
3670 + if (time_after(next, now)) {
3671 /*
3672 * The call site will increment base->clk and then
3673 * terminate the expiry loop immediately.
3674 */
3675 - base->clk = jiffies;
3676 + base->clk = now;
3677 return 0;
3678 }
3679 base->clk = next;
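
Snapshotting jiffies once keeps the forward-distance check and the time_after() comparison consistent even while the tick keeps advancing jiffies concurrently. A minimal sketch of the single-snapshot pattern, with a userspace READ_ONCE() stand-in and made-up values:

    #include <stdio.h>

    static volatile unsigned long jiffies; /* advanced by the tick in the kernel */

    #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
            unsigned long base_clk = 0, next = 5;
            unsigned long now = READ_ONCE(jiffies); /* one snapshot */

            /* both tests see the same instant; re-reading 'jiffies' for
             * each could let the value advance in between */
            if ((long)(now - base_clk) > 2 && next > now)
                    base_clk = now;
            printf("base_clk=%lu\n", base_clk);
            return 0;
    }
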
3680 diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
3681 index 3f34cfb66a85..bdf104596d12 100644
3682 --- a/kernel/trace/trace_events_hist.c
3683 +++ b/kernel/trace/trace_events_hist.c
3684 @@ -2526,6 +2526,8 @@ static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
3685 return NULL;
3686 }
3687
3688 + alias->var_ref_idx = var_ref->var_ref_idx;
3689 +
3690 return alias;
3691 }
3692
3693 diff --git a/mm/usercopy.c b/mm/usercopy.c
3694 index 51411f9c4068..e81d11715d95 100644
3695 --- a/mm/usercopy.c
3696 +++ b/mm/usercopy.c
3697 @@ -15,6 +15,7 @@
3698 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3699
3700 #include <linux/mm.h>
3701 +#include <linux/highmem.h>
3702 #include <linux/slab.h>
3703 #include <linux/sched.h>
3704 #include <linux/sched/task.h>
3705 @@ -231,7 +232,12 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
3706 if (!virt_addr_valid(ptr))
3707 return;
3708
3709 - page = virt_to_head_page(ptr);
3710 + /*
3711 + * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
3712 + * highmem page or fallback to virt_to_page(). The following
3713 + * is effectively a highmem-aware virt_to_head_page().
3714 + */
3715 + page = compound_head(kmap_to_page((void *)ptr));
3716
3717 if (PageSlab(page)) {
3718 /* Check slab allocator for flags and size. */
3719 diff --git a/net/9p/client.c b/net/9p/client.c
3720 index b615aae5a0f8..d62f83f93d7b 100644
3721 --- a/net/9p/client.c
3722 +++ b/net/9p/client.c
3723 @@ -296,6 +296,7 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
3724
3725 p9pdu_reset(&req->tc);
3726 p9pdu_reset(&req->rc);
3727 + req->t_err = 0;
3728 req->status = REQ_STATUS_ALLOC;
3729 init_waitqueue_head(&req->wq);
3730 INIT_LIST_HEAD(&req->req_list);
3731 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
3732 index 2145581d7b3d..24fddf032279 100644
3733 --- a/net/netfilter/nf_tables_api.c
3734 +++ b/net/netfilter/nf_tables_api.c
3735 @@ -3429,8 +3429,11 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
3736 NFT_SET_OBJECT))
3737 return -EINVAL;
3738 /* Only one of these operations is supported */
3739 - if ((flags & (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT)) ==
3740 - (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT))
3741 + if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
3742 + (NFT_SET_MAP | NFT_SET_OBJECT))
3743 + return -EOPNOTSUPP;
3744 + if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
3745 + (NFT_SET_EVAL | NFT_SET_OBJECT))
3746 return -EOPNOTSUPP;
3747 }
3748
3749 diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
3750 index 161c3451a747..55754d9939b5 100644
3751 --- a/net/netfilter/nft_lookup.c
3752 +++ b/net/netfilter/nft_lookup.c
3753 @@ -76,9 +76,6 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
3754 if (IS_ERR(set))
3755 return PTR_ERR(set);
3756
3757 - if (set->flags & NFT_SET_EVAL)
3758 - return -EOPNOTSUPP;
3759 -
3760 priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
3761 err = nft_validate_register_load(priv->sreg, set->klen);
3762 if (err < 0)
3763 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3764 index 6168db3c35e4..334e3181f1c5 100644
3765 --- a/net/wireless/nl80211.c
3766 +++ b/net/wireless/nl80211.c
3767 @@ -200,6 +200,38 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
3768 return __cfg80211_rdev_from_attrs(netns, info->attrs);
3769 }
3770
3771 +static int validate_beacon_head(const struct nlattr *attr,
3772 + struct netlink_ext_ack *extack)
3773 +{
3774 + const u8 *data = nla_data(attr);
3775 + unsigned int len = nla_len(attr);
3776 + const struct element *elem;
3777 + const struct ieee80211_mgmt *mgmt = (void *)data;
3778 + unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
3779 + u.beacon.variable);
3780 +
3781 + if (len < fixedlen)
3782 + goto err;
3783 +
3784 + if (ieee80211_hdrlen(mgmt->frame_control) !=
3785 + offsetof(struct ieee80211_mgmt, u.beacon))
3786 + goto err;
3787 +
3788 + data += fixedlen;
3789 + len -= fixedlen;
3790 +
3791 + for_each_element(elem, data, len) {
3792 + /* nothing */
3793 + }
3794 +
3795 + if (for_each_element_completed(elem, data, len))
3796 + return 0;
3797 +
3798 +err:
3799 + NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head");
3800 + return -EINVAL;
3801 +}
3802 +
3803 /* policy for the attributes */
3804 static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
3805 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
3806 @@ -2299,6 +2331,8 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
3807
3808 control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
3809
3810 + memset(chandef, 0, sizeof(*chandef));
3811 +
3812 chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
3813 chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
3814 chandef->center_freq1 = control_freq;
3815 @@ -2819,7 +2853,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
3816
3817 if (rdev->ops->get_channel) {
3818 int ret;
3819 - struct cfg80211_chan_def chandef;
3820 + struct cfg80211_chan_def chandef = {};
3821
3822 ret = rdev_get_channel(rdev, wdev, &chandef);
3823 if (ret == 0) {
3824 @@ -4014,6 +4048,12 @@ static int nl80211_parse_beacon(struct nlattr *attrs[],
3825 memset(bcn, 0, sizeof(*bcn));
3826
3827 if (attrs[NL80211_ATTR_BEACON_HEAD]) {
3828 + int ret = validate_beacon_head(attrs[NL80211_ATTR_BEACON_HEAD],
3829 + NULL);
3830 +
3831 + if (ret)
3832 + return ret;
3833 +
3834 bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]);
3835 bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]);
3836 if (!bcn->head_len)
3837 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
3838 index d8ebf4f0ef6e..cccbf845079c 100644
3839 --- a/net/wireless/reg.c
3840 +++ b/net/wireless/reg.c
3841 @@ -2095,7 +2095,7 @@ static void reg_call_notifier(struct wiphy *wiphy,
3842
3843 static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
3844 {
3845 - struct cfg80211_chan_def chandef;
3846 + struct cfg80211_chan_def chandef = {};
3847 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
3848 enum nl80211_iftype iftype;
3849
3850 diff --git a/net/wireless/scan.c b/net/wireless/scan.c
3851 index d0e7472dd9fd..e5d61ba837ad 100644
3852 --- a/net/wireless/scan.c
3853 +++ b/net/wireless/scan.c
3854 @@ -484,6 +484,8 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
3855 const u8 *match, int match_len,
3856 int match_offset)
3857 {
3858 + const struct element *elem;
3859 +
3860 /* match_offset can't be smaller than 2, unless match_len is
3861 * zero, in which case match_offset must be zero as well.
3862 */
3863 @@ -491,14 +493,10 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
3864 (!match_len && match_offset)))
3865 return NULL;
3866
3867 - while (len >= 2 && len >= ies[1] + 2) {
3868 - if ((ies[0] == eid) &&
3869 - (ies[1] + 2 >= match_offset + match_len) &&
3870 - !memcmp(ies + match_offset, match, match_len))
3871 - return ies;
3872 -
3873 - len -= ies[1] + 2;
3874 - ies += ies[1] + 2;
3875 + for_each_element_id(elem, eid, ies, len) {
3876 + if (elem->datalen >= match_offset - 2 + match_len &&
3877 + !memcmp(elem->data + match_offset - 2, match, match_len))
3878 + return (void *)elem;
3879 }
3880
3881 return NULL;
3882 diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
3883 index 06943d9c9835..4f0cfb8cc682 100644
3884 --- a/net/wireless/wext-compat.c
3885 +++ b/net/wireless/wext-compat.c
3886 @@ -800,7 +800,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
3887 {
3888 struct wireless_dev *wdev = dev->ieee80211_ptr;
3889 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
3890 - struct cfg80211_chan_def chandef;
3891 + struct cfg80211_chan_def chandef = {};
3892 int ret;
3893
3894 switch (wdev->iftype) {
3895 diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
3896 index d9e7728027c6..f63b4bd45d60 100644
3897 --- a/security/integrity/ima/ima_crypto.c
3898 +++ b/security/integrity/ima/ima_crypto.c
3899 @@ -271,8 +271,16 @@ static int ima_calc_file_hash_atfm(struct file *file,
3900 rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
3901 rc = integrity_kernel_read(file, offset, rbuf[active],
3902 rbuf_len);
3903 - if (rc != rbuf_len)
3904 + if (rc != rbuf_len) {
3905 + if (rc >= 0)
3906 + rc = -EINVAL;
3907 + /*
3908 + * Forward current rc, do not overwrite with return value
3909 + * from ahash_wait()
3910 + */
3911 + ahash_wait(ahash_rc, &wait);
3912 goto out3;
3913 + }
3914
3915 if (rbuf[1] && offset) {
3916 /* Using two buffers, and it is not the first
3917 diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
3918 index 18cddf1729a6..64a52d495b1f 100644
3919 --- a/sound/soc/codecs/sgtl5000.c
3920 +++ b/sound/soc/codecs/sgtl5000.c
3921 @@ -31,6 +31,13 @@
3922 #define SGTL5000_DAP_REG_OFFSET 0x0100
3923 #define SGTL5000_MAX_REG_OFFSET 0x013A
3924
3925 +/* Delay for the VAG ramp up */
3926 +#define SGTL5000_VAG_POWERUP_DELAY 500 /* ms */
3927 +/* Delay for the VAG ramp down */
3928 +#define SGTL5000_VAG_POWERDOWN_DELAY 500 /* ms */
3929 +
3930 +#define SGTL5000_OUTPUTS_MUTE (SGTL5000_HP_MUTE | SGTL5000_LINE_OUT_MUTE)
3931 +
3932 /* default value of sgtl5000 registers */
3933 static const struct reg_default sgtl5000_reg_defaults[] = {
3934 { SGTL5000_CHIP_DIG_POWER, 0x0000 },
3935 @@ -116,6 +123,13 @@ enum {
3936 I2S_LRCLK_STRENGTH_HIGH,
3937 };
3938
3939 +enum {
3940 + HP_POWER_EVENT,
3941 + DAC_POWER_EVENT,
3942 + ADC_POWER_EVENT,
3943 + LAST_POWER_EVENT = ADC_POWER_EVENT
3944 +};
3945 +
3946 /* sgtl5000 private structure in codec */
3947 struct sgtl5000_priv {
3948 int sysclk; /* sysclk rate */
3949 @@ -129,8 +143,109 @@ struct sgtl5000_priv {
3950 u8 micbias_resistor;
3951 u8 micbias_voltage;
3952 u8 lrclk_strength;
3953 + u16 mute_state[LAST_POWER_EVENT + 1];
3954 };
3955
3956 +static inline int hp_sel_input(struct snd_soc_component *component)
3957 +{
3958 + return (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_CTRL) &
3959 + SGTL5000_HP_SEL_MASK) >> SGTL5000_HP_SEL_SHIFT;
3960 +}
3961 +
3962 +static inline u16 mute_output(struct snd_soc_component *component,
3963 + u16 mute_mask)
3964 +{
3965 + u16 mute_reg = snd_soc_component_read32(component,
3966 + SGTL5000_CHIP_ANA_CTRL);
3967 +
3968 + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
3969 + mute_mask, mute_mask);
3970 + return mute_reg;
3971 +}
3972 +
3973 +static inline void restore_output(struct snd_soc_component *component,
3974 + u16 mute_mask, u16 mute_reg)
3975 +{
3976 + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
3977 + mute_mask, mute_reg);
3978 +}
3979 +
3980 +static void vag_power_on(struct snd_soc_component *component, u32 source)
3981 +{
3982 + if (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
3983 + SGTL5000_VAG_POWERUP)
3984 + return;
3985 +
3986 + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
3987 + SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
3988 +
3989 +	/* When VAG is powering on to drive the local loop from Line-In,
3990 +	 * a sleep is required to avoid a loud pop.
3991 + */
3992 + if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN &&
3993 + source == HP_POWER_EVENT)
3994 + msleep(SGTL5000_VAG_POWERUP_DELAY);
3995 +}
3996 +
3997 +static int vag_power_consumers(struct snd_soc_component *component,
3998 + u16 ana_pwr_reg, u32 source)
3999 +{
4000 + int consumers = 0;
4001 +
4002 +	/* count dac/adc consumers unconditionally */
4003 + if (ana_pwr_reg & SGTL5000_DAC_POWERUP)
4004 + consumers++;
4005 + if (ana_pwr_reg & SGTL5000_ADC_POWERUP)
4006 + consumers++;
4007 +
4008 + /*
4009 + * If the event comes from HP and Line-In is selected,
4010 +	 * the current action is 'DAC to be powered down'.
4011 +	 * As HP_POWERUP is not set when HP is muxed to line-in,
4012 + * we need to keep VAG power ON.
4013 + */
4014 + if (source == HP_POWER_EVENT) {
4015 + if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN)
4016 + consumers++;
4017 + } else {
4018 + if (ana_pwr_reg & SGTL5000_HP_POWERUP)
4019 + consumers++;
4020 + }
4021 +
4022 + return consumers;
4023 +}
4024 +
4025 +static void vag_power_off(struct snd_soc_component *component, u32 source)
4026 +{
4027 + u16 ana_pwr = snd_soc_component_read32(component,
4028 + SGTL5000_CHIP_ANA_POWER);
4029 +
4030 + if (!(ana_pwr & SGTL5000_VAG_POWERUP))
4031 + return;
4032 +
4033 + /*
4034 +	 * This function is called when one of the VAG power consumers is
4035 +	 * going away. Thus, if more than one consumer is present at the
4036 +	 * moment, at least one consumer will definitely remain after the
4037 +	 * current event ends.
4038 +	 * Don't clear VAG_POWERUP if 2 or more VAG consumers are present:
4039 + * - LINE_IN (for HP events) / HP (for DAC/ADC events)
4040 + * - DAC
4041 + * - ADC
4042 + * (the current consumer is disappearing right now)
4043 + */
4044 + if (vag_power_consumers(component, ana_pwr, source) >= 2)
4045 + return;
4046 +
4047 + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
4048 + SGTL5000_VAG_POWERUP, 0);
4049 +	/* In the power-down case, we need to wait 400-1000 ms for VAG
4050 +	 * to ramp down fully.
4051 +	 * The longer we wait, the smaller the pop.
4052 + */
4053 + msleep(SGTL5000_VAG_POWERDOWN_DELAY);
4054 +}
4055 +
4056 /*
4057 * mic_bias power on/off share the same register bits with
4058 * output impedance of mic bias, when power on mic bias, we
4059 @@ -162,36 +277,46 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
4060 return 0;
4061 }
4062
4063 -/*
4064 - * As manual described, ADC/DAC only works when VAG powerup,
4065 - * So enabled VAG before ADC/DAC up.
4066 - * In power down case, we need wait 400ms when vag fully ramped down.
4067 - */
4068 -static int power_vag_event(struct snd_soc_dapm_widget *w,
4069 - struct snd_kcontrol *kcontrol, int event)
4070 +static int vag_and_mute_control(struct snd_soc_component *component,
4071 + int event, int event_source)
4072 {
4073 - struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
4074 - const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
4075 + static const u16 mute_mask[] = {
4076 + /*
4077 + * Mask for HP_POWER_EVENT.
4078 +		 * Muxing the headphones has to be wrapped with mute/unmute
4079 +		 * of the headphones only.
4080 + */
4081 + SGTL5000_HP_MUTE,
4082 + /*
4083 + * Masks for DAC_POWER_EVENT/ADC_POWER_EVENT.
4084 +		 * Muxing the DAC or ADC block has to be wrapped with
4085 +		 * mute/unmute of both headphones and line-out.
4086 + */
4087 + SGTL5000_OUTPUTS_MUTE,
4088 + SGTL5000_OUTPUTS_MUTE
4089 + };
4090 +
4091 + struct sgtl5000_priv *sgtl5000 =
4092 + snd_soc_component_get_drvdata(component);
4093
4094 switch (event) {
4095 + case SND_SOC_DAPM_PRE_PMU:
4096 + sgtl5000->mute_state[event_source] =
4097 + mute_output(component, mute_mask[event_source]);
4098 + break;
4099 case SND_SOC_DAPM_POST_PMU:
4100 - snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
4101 - SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
4102 - msleep(400);
4103 + vag_power_on(component, event_source);
4104 + restore_output(component, mute_mask[event_source],
4105 + sgtl5000->mute_state[event_source]);
4106 break;
4107 -
4108 case SND_SOC_DAPM_PRE_PMD:
4109 - /*
4110 - * Don't clear VAG_POWERUP, when both DAC and ADC are
4111 - * operational to prevent inadvertently starving the
4112 - * other one of them.
4113 - */
4114 - if ((snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
4115 - mask) != mask) {
4116 - snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
4117 - SGTL5000_VAG_POWERUP, 0);
4118 - msleep(400);
4119 - }
4120 + sgtl5000->mute_state[event_source] =
4121 + mute_output(component, mute_mask[event_source]);
4122 + vag_power_off(component, event_source);
4123 + break;
4124 + case SND_SOC_DAPM_POST_PMD:
4125 + restore_output(component, mute_mask[event_source],
4126 + sgtl5000->mute_state[event_source]);
4127 break;
4128 default:
4129 break;
4130 @@ -200,6 +325,41 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
4131 return 0;
4132 }
4133
4134 +/*
4135 + * Mute the headphone while powering it up/down.
4136 + * Control VAG power on HP power path.
4137 + */
4138 +static int headphone_pga_event(struct snd_soc_dapm_widget *w,
4139 + struct snd_kcontrol *kcontrol, int event)
4140 +{
4141 + struct snd_soc_component *component =
4142 + snd_soc_dapm_to_component(w->dapm);
4143 +
4144 + return vag_and_mute_control(component, event, HP_POWER_EVENT);
4145 +}
4146 +
4147 +/* As the manual describes, powering the ADC/DAC up/down requires
4148 + * muting the outputs to avoid pops.
4149 + * Control VAG power on ADC/DAC power path.
4150 + */
4151 +static int adc_updown_depop(struct snd_soc_dapm_widget *w,
4152 + struct snd_kcontrol *kcontrol, int event)
4153 +{
4154 + struct snd_soc_component *component =
4155 + snd_soc_dapm_to_component(w->dapm);
4156 +
4157 + return vag_and_mute_control(component, event, ADC_POWER_EVENT);
4158 +}
4159 +
4160 +static int dac_updown_depop(struct snd_soc_dapm_widget *w,
4161 + struct snd_kcontrol *kcontrol, int event)
4162 +{
4163 + struct snd_soc_component *component =
4164 + snd_soc_dapm_to_component(w->dapm);
4165 +
4166 + return vag_and_mute_control(component, event, DAC_POWER_EVENT);
4167 +}
4168 +
4169 /* input sources for ADC */
4170 static const char *adc_mux_text[] = {
4171 "MIC_IN", "LINE_IN"
4172 @@ -272,7 +432,10 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
4173 mic_bias_event,
4174 SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
4175
4176 - SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
4177 + SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
4178 + headphone_pga_event,
4179 + SND_SOC_DAPM_PRE_POST_PMU |
4180 + SND_SOC_DAPM_PRE_POST_PMD),
4181 SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
4182
4183 SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
4184 @@ -293,11 +456,12 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
4185 0, SGTL5000_CHIP_DIG_POWER,
4186 1, 0),
4187
4188 - SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
4189 - SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
4190 -
4191 - SND_SOC_DAPM_PRE("VAG_POWER_PRE", power_vag_event),
4192 - SND_SOC_DAPM_POST("VAG_POWER_POST", power_vag_event),
4193 + SND_SOC_DAPM_ADC_E("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0,
4194 + adc_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
4195 + SND_SOC_DAPM_PRE_POST_PMD),
4196 + SND_SOC_DAPM_DAC_E("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0,
4197 + dac_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
4198 + SND_SOC_DAPM_PRE_POST_PMD),
4199 };
4200
4201 /* routes for sgtl5000 */
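
The VAG rail may only be cut when at most one consumer (the one going away right now) still holds it; vag_power_consumers() counts DAC, ADC and HP (or Line-In for HP events) from the analog power register. A toy sketch of the counting rule, using hypothetical bit values rather than the real register layout:

    #include <stdio.h>

    #define DAC_POWERUP 0x1 /* hypothetical stand-ins for the */
    #define ADC_POWERUP 0x2 /* SGTL5000_*_POWERUP bits */
    #define HP_POWERUP  0x4

    /* mirrors the idea of vag_power_consumers(): how many blocks
     * still need VAG, given the analog power register */
    static int vag_consumers(unsigned int ana_pwr)
    {
            return !!(ana_pwr & DAC_POWERUP) + !!(ana_pwr & ADC_POWERUP) +
                   !!(ana_pwr & HP_POWERUP);
    }

    int main(void)
    {
            /* DAC going down while ADC and HP stay up: 3 consumers now,
             * so at least 2 remain afterwards and VAG must stay on */
            unsigned int ana_pwr = DAC_POWERUP | ADC_POWERUP | HP_POWERUP;

            printf("keep VAG on: %d\n", vag_consumers(ana_pwr) >= 2); /* 1 */
            return 0;
    }
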
4202 diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
4203 index 95a43ccb6dd0..bca0c9e5452c 100644
4204 --- a/tools/lib/traceevent/Makefile
4205 +++ b/tools/lib/traceevent/Makefile
4206 @@ -259,8 +259,8 @@ endef
4207
4208 define do_generate_dynamic_list_file
4209 symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
4210 - xargs echo "U W w" | tr ' ' '\n' | sort -u | xargs echo`;\
4211 - if [ "$$symbol_type" = "U W w" ];then \
4212 + xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
4213 + if [ "$$symbol_type" = "U W" ];then \
4214 (echo '{'; \
4215 $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
4216 echo '};'; \
4217 diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
4218 index 6ccfd13d5cf9..382e476629fb 100644
4219 --- a/tools/lib/traceevent/event-parse.c
4220 +++ b/tools/lib/traceevent/event-parse.c
4221 @@ -254,10 +254,10 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
4222 errno = ENOMEM;
4223 return -1;
4224 }
4225 + pevent->cmdlines = cmdlines;
4226
4227 cmdlines[pevent->cmdline_count].comm = strdup(comm);
4228 if (!cmdlines[pevent->cmdline_count].comm) {
4229 - free(cmdlines);
4230 errno = ENOMEM;
4231 return -1;
4232 }
4233 @@ -268,7 +268,6 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
4234 pevent->cmdline_count++;
4235
4236 qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
4237 - pevent->cmdlines = cmdlines;
4238
4239 return 0;
4240 }
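
The fix publishes the realloc()'d array back to the handle before the strdup() that can still fail; the old code instead free()d the freshly grown block on that path while pevent->cmdlines kept pointing at stale memory. A userspace sketch of the grow-then-publish pattern, with simplified types and trimmed error handling:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cmdline { char *comm; int pid; };

    struct handle {
            struct cmdline *cmdlines;
            int cmdline_count;
    };

    static int add_comm(struct handle *h, const char *comm, int pid)
    {
            struct cmdline *cmdlines;

            cmdlines = realloc(h->cmdlines,
                               (h->cmdline_count + 1) * sizeof(*cmdlines));
            if (!cmdlines)
                    return -1;
            h->cmdlines = cmdlines; /* publish before anything else can fail */

            cmdlines[h->cmdline_count].comm = strdup(comm);
            if (!cmdlines[h->cmdline_count].comm)
                    return -1; /* old code free()d 'cmdlines' here, leaving
                                * h->cmdlines dangling */
            cmdlines[h->cmdline_count].pid = pid;
            h->cmdline_count++;
            return 0;
    }

    int main(void)
    {
            struct handle h = { NULL, 0 };

            add_comm(&h, "bash", 42);
            printf("%s %d\n", h.cmdlines[0].comm, h.cmdlines[0].pid);
            return 0;
    }
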
4241 diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
4242 index 849b3be15bd8..510caedd7319 100644
4243 --- a/tools/perf/Makefile.config
4244 +++ b/tools/perf/Makefile.config
4245 @@ -837,7 +837,7 @@ ifndef NO_JVMTI
4246 JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
4247 else
4248 ifneq (,$(wildcard /usr/sbin/alternatives))
4249 - JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
4250 + JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed -e 's%/jre/bin/java.%%g' -e 's%/bin/java.%%g')
4251 endif
4252 endif
4253 ifndef JDIR
4254 diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
4255 index 05920e3edf7a..47357973b55b 100644
4256 --- a/tools/perf/arch/x86/util/unwind-libunwind.c
4257 +++ b/tools/perf/arch/x86/util/unwind-libunwind.c
4258 @@ -1,11 +1,11 @@
4259 // SPDX-License-Identifier: GPL-2.0
4260
4261 #include <errno.h>
4262 +#include "../../util/debug.h"
4263 #ifndef REMOTE_UNWIND_LIBUNWIND
4264 #include <libunwind.h>
4265 #include "perf_regs.h"
4266 #include "../../util/unwind.h"
4267 -#include "../../util/debug.h"
4268 #endif
4269
4270 #ifdef HAVE_ARCH_X86_64_SUPPORT
4271 diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
4272 index 789962565c9c..6aae10ff954c 100644
4273 --- a/tools/perf/builtin-stat.c
4274 +++ b/tools/perf/builtin-stat.c
4275 @@ -3090,8 +3090,11 @@ int cmd_stat(int argc, const char **argv)
4276 fprintf(output, "[ perf stat: executing run #%d ... ]\n",
4277 run_idx + 1);
4278
4279 + if (run_idx != 0)
4280 + perf_evlist__reset_prev_raw_counts(evsel_list);
4281 +
4282 status = run_perf_stat(argc, argv, run_idx);
4283 - if (forever && status != -1) {
4284 + if (forever && status != -1 && !interval) {
4285 print_counters(NULL, argc, argv);
4286 perf_stat__reset_stats();
4287 }
4288 diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
4289 index 0c70788593c8..3c0d74fc1ff2 100644
4290 --- a/tools/perf/util/header.c
4291 +++ b/tools/perf/util/header.c
4292 @@ -1114,7 +1114,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
4293
4294 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
4295 if (sysfs__read_str(file, &cache->map, &len)) {
4296 - free(cache->map);
4297 + free(cache->size);
4298 free(cache->type);
4299 return -1;
4300 }
4301 diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
4302 index a0061e0b0fad..6917ba8a0024 100644
4303 --- a/tools/perf/util/stat.c
4304 +++ b/tools/perf/util/stat.c
4305 @@ -154,6 +154,15 @@ static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
4306 evsel->prev_raw_counts = NULL;
4307 }
4308
4309 +static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
4310 +{
4311 + if (evsel->prev_raw_counts) {
4312 + evsel->prev_raw_counts->aggr.val = 0;
4313 + evsel->prev_raw_counts->aggr.ena = 0;
4314 + evsel->prev_raw_counts->aggr.run = 0;
4315 + }
4316 +}
4317 +
4318 static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
4319 {
4320 int ncpus = perf_evsel__nr_cpus(evsel);
4321 @@ -204,6 +213,14 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist)
4322 }
4323 }
4324
4325 +void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
4326 +{
4327 + struct perf_evsel *evsel;
4328 +
4329 + evlist__for_each_entry(evlist, evsel)
4330 + perf_evsel__reset_prev_raw_counts(evsel);
4331 +}
4332 +
4333 static void zero_per_pkg(struct perf_evsel *counter)
4334 {
4335 if (counter->per_pkg_mask)
4336 diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
4337 index 36efb986f7fc..e19abb1635c4 100644
4338 --- a/tools/perf/util/stat.h
4339 +++ b/tools/perf/util/stat.h
4340 @@ -158,6 +158,7 @@ void perf_stat__collect_metric_expr(struct perf_evlist *);
4341 int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
4342 void perf_evlist__free_stats(struct perf_evlist *evlist);
4343 void perf_evlist__reset_stats(struct perf_evlist *evlist);
4344 +void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist);
4345
4346 int perf_stat_process_counter(struct perf_stat_config *config,
4347 struct perf_evsel *counter);
4348 diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
4349 index 33752e06ff8d..3de57cc8716b 100644
4350 --- a/tools/testing/nvdimm/test/nfit_test.h
4351 +++ b/tools/testing/nvdimm/test/nfit_test.h
4352 @@ -12,6 +12,7 @@
4353 */
4354 #ifndef __NFIT_TEST_H__
4355 #define __NFIT_TEST_H__
4356 +#include <linux/acpi.h>
4357 #include <linux/list.h>
4358 #include <linux/uuid.h>
4359 #include <linux/ioport.h>
4360 @@ -234,9 +235,6 @@ struct nd_intel_lss {
4361 __u32 status;
4362 } __packed;
4363
4364 -union acpi_object;
4365 -typedef void *acpi_handle;
4366 -
4367 typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
4368 typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
4369 const guid_t *guid, u64 rev, u64 func,