Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0213-4.9.114-all-fixes.patch

Parent Directory | Revision Log


Revision 3190
Wed Aug 8 14:17:40 2018 UTC by niro
File size: 100718 byte(s)
-linux-4.9.114
1 niro 3190 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2     index 52240a63132e..a16f87e4dd10 100644
3     --- a/Documentation/kernel-parameters.txt
4     +++ b/Documentation/kernel-parameters.txt
5     @@ -4023,6 +4023,23 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6     spia_pedr=
7     spia_peddr=
8    
9     + ssbd= [ARM64,HW]
10     + Speculative Store Bypass Disable control
11     +
12     + On CPUs that are vulnerable to the Speculative
13     + Store Bypass vulnerability and offer a
14     + firmware based mitigation, this parameter
15     + indicates how the mitigation should be used:
16     +
17     + force-on: Unconditionally enable mitigation for
18     + both kernel and userspace
19     + force-off: Unconditionally disable mitigation for
20     + both kernel and userspace
21     + kernel: Always enable mitigation in the
22     + kernel, and offer a prctl interface
23     + to allow userspace to register its
24     + interest in being mitigated too.
25     +
26     stack_guard_gap= [MM]
27     override the default stack gap protection. The value
28     is in page units and it defines how many pages prior
29     diff --git a/Makefile b/Makefile
30     index 3884afb2850f..f4cd42c9b940 100644
31     --- a/Makefile
32     +++ b/Makefile
33     @@ -1,6 +1,6 @@
34     VERSION = 4
35     PATCHLEVEL = 9
36     -SUBLEVEL = 113
37     +SUBLEVEL = 114
38     EXTRAVERSION =
39     NAME = Roaring Lionus
40    
41     diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
42     index f4dab20ac9f3..0833d8a1dbbb 100644
43     --- a/arch/arm/include/asm/kvm_host.h
44     +++ b/arch/arm/include/asm/kvm_host.h
45     @@ -327,4 +327,16 @@ static inline bool kvm_arm_harden_branch_predictor(void)
46     return false;
47     }
48    
49     +#define KVM_SSBD_UNKNOWN -1
50     +#define KVM_SSBD_FORCE_DISABLE 0
51     +#define KVM_SSBD_KERNEL 1
52     +#define KVM_SSBD_FORCE_ENABLE 2
53     +#define KVM_SSBD_MITIGATED 3
54     +
55     +static inline int kvm_arm_have_ssbd(void)
56     +{
57     + /* No way to detect it yet, pretend it is not there. */
58     + return KVM_SSBD_UNKNOWN;
59     +}
60     +
61     #endif /* __ARM_KVM_HOST_H__ */
62     diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
63     index 7f66b1b3aca1..e2f05cedaf97 100644
64     --- a/arch/arm/include/asm/kvm_mmu.h
65     +++ b/arch/arm/include/asm/kvm_mmu.h
66     @@ -28,6 +28,13 @@
67     */
68     #define kern_hyp_va(kva) (kva)
69    
70     +/* Contrary to arm64, there is no need to generate a PC-relative address */
71     +#define hyp_symbol_addr(s) \
72     + ({ \
73     + typeof(s) *addr = &(s); \
74     + addr; \
75     + })
76     +
77     /*
78     * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
79     */
80     @@ -249,6 +256,11 @@ static inline int kvm_map_vectors(void)
81     return 0;
82     }
83    
84     +static inline int hyp_map_aux_data(void)
85     +{
86     + return 0;
87     +}
88     +
89     #endif /* !__ASSEMBLY__ */
90    
91     #endif /* __ARM_KVM_MMU_H__ */
92     diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
93     index ef6595c7d697..20436972537f 100644
94     --- a/arch/arm/kvm/arm.c
95     +++ b/arch/arm/kvm/arm.c
96     @@ -51,8 +51,8 @@
97     __asm__(".arch_extension virt");
98     #endif
99    
100     +DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
101     static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
102     -static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
103     static unsigned long hyp_default_vectors;
104    
105     /* Per-CPU variable containing the currently running vcpu. */
106     @@ -338,7 +338,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
107     }
108    
109     vcpu->cpu = cpu;
110     - vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
111     + vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
112    
113     kvm_arm_set_running_vcpu(vcpu);
114     }
115     @@ -1199,19 +1199,8 @@ static inline void hyp_cpu_pm_exit(void)
116     }
117     #endif
118    
119     -static void teardown_common_resources(void)
120     -{
121     - free_percpu(kvm_host_cpu_state);
122     -}
123     -
124     static int init_common_resources(void)
125     {
126     - kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
127     - if (!kvm_host_cpu_state) {
128     - kvm_err("Cannot allocate host CPU state\n");
129     - return -ENOMEM;
130     - }
131     -
132     /* set size of VMID supported by CPU */
133     kvm_vmid_bits = kvm_get_vmid_bits();
134     kvm_info("%d-bit VMID\n", kvm_vmid_bits);
135     @@ -1369,7 +1358,7 @@ static int init_hyp_mode(void)
136     for_each_possible_cpu(cpu) {
137     kvm_cpu_context_t *cpu_ctxt;
138    
139     - cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
140     + cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
141     err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
142    
143     if (err) {
144     @@ -1378,6 +1367,12 @@ static int init_hyp_mode(void)
145     }
146     }
147    
148     + err = hyp_map_aux_data();
149     + if (err) {
150     + kvm_err("Cannot map host auxiliary data: %d\n", err);
151     + goto out_err;
152     + }
153     +
154     kvm_info("Hyp mode initialized successfully\n");
155    
156     return 0;
157     @@ -1447,7 +1442,6 @@ int kvm_arch_init(void *opaque)
158     out_hyp:
159     teardown_hyp_mode();
160     out_err:
161     - teardown_common_resources();
162     return err;
163     }
164    
165     diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
166     index 8a9c654f4f87..83365bec04b6 100644
167     --- a/arch/arm/kvm/psci.c
168     +++ b/arch/arm/kvm/psci.c
169     @@ -403,7 +403,7 @@ static int kvm_psci_call(struct kvm_vcpu *vcpu)
170     int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
171     {
172     u32 func_id = smccc_get_function(vcpu);
173     - u32 val = PSCI_RET_NOT_SUPPORTED;
174     + u32 val = SMCCC_RET_NOT_SUPPORTED;
175     u32 feature;
176    
177     switch (func_id) {
178     @@ -415,7 +415,21 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
179     switch(feature) {
180     case ARM_SMCCC_ARCH_WORKAROUND_1:
181     if (kvm_arm_harden_branch_predictor())
182     - val = 0;
183     + val = SMCCC_RET_SUCCESS;
184     + break;
185     + case ARM_SMCCC_ARCH_WORKAROUND_2:
186     + switch (kvm_arm_have_ssbd()) {
187     + case KVM_SSBD_FORCE_DISABLE:
188     + case KVM_SSBD_UNKNOWN:
189     + break;
190     + case KVM_SSBD_KERNEL:
191     + val = SMCCC_RET_SUCCESS;
192     + break;
193     + case KVM_SSBD_FORCE_ENABLE:
194     + case KVM_SSBD_MITIGATED:
195     + val = SMCCC_RET_NOT_REQUIRED;
196     + break;
197     + }
198     break;
199     }
200     break;
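The handler above is the hypervisor side of the discovery protocol: a guest probes for ARCH_WORKAROUND_2 through ARM_SMCCC_ARCH_FEATURES_FUNC_ID before invoking it, exactly as has_ssbd_mitigation() later in this series does against firmware. A sketch of that guest-side query, using the arm_smccc_1_1_hvc() helper from <linux/arm-smccc.h> (error handling elided):

    struct arm_smccc_res res;

    arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                      ARM_SMCCC_ARCH_WORKAROUND_2, &res);
    switch ((int)res.a0) {
    case SMCCC_RET_SUCCESS:        /* dynamic mitigation, WORKAROUND_2 toggles it */
        break;
    case SMCCC_RET_NOT_REQUIRED:   /* CPU unaffected or already mitigated */
        break;
    case SMCCC_RET_NOT_SUPPORTED:  /* no firmware support, state unknown */
    default:
        break;
    }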
201     diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
202     index d0df3611d1e2..3e43874568f9 100644
203     --- a/arch/arm64/Kconfig
204     +++ b/arch/arm64/Kconfig
205     @@ -776,6 +776,15 @@ config HARDEN_BRANCH_PREDICTOR
206    
207     If unsure, say Y.
208    
209     +config ARM64_SSBD
210     + bool "Speculative Store Bypass Disable" if EXPERT
211     + default y
212     + help
213     + This enables mitigation of the bypassing of previous stores
214     + by speculative loads.
215     +
216     + If unsure, say Y.
217     +
218     menuconfig ARMV8_DEPRECATED
219     bool "Emulate deprecated/obsolete ARMv8 instructions"
220     depends on COMPAT
221     diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
222     index 6e1cb8c5af4d..7e842dcae450 100644
223     --- a/arch/arm64/include/asm/alternative.h
224     +++ b/arch/arm64/include/asm/alternative.h
225     @@ -4,6 +4,8 @@
226     #include <asm/cpucaps.h>
227     #include <asm/insn.h>
228    
229     +#define ARM64_CB_PATCH ARM64_NCAPS
230     +
231     #ifndef __ASSEMBLY__
232    
233     #include <linux/init.h>
234     @@ -11,6 +13,8 @@
235     #include <linux/stddef.h>
236     #include <linux/stringify.h>
237    
238     +extern int alternatives_applied;
239     +
240     struct alt_instr {
241     s32 orig_offset; /* offset to original instruction */
242     s32 alt_offset; /* offset to replacement instruction */
243     @@ -19,12 +23,19 @@ struct alt_instr {
244     u8 alt_len; /* size of new instruction(s), <= orig_len */
245     };
246    
247     +typedef void (*alternative_cb_t)(struct alt_instr *alt,
248     + __le32 *origptr, __le32 *updptr, int nr_inst);
249     +
250     void __init apply_alternatives_all(void);
251     void apply_alternatives(void *start, size_t length);
252    
253     -#define ALTINSTR_ENTRY(feature) \
254     +#define ALTINSTR_ENTRY(feature,cb) \
255     " .word 661b - .\n" /* label */ \
256     + " .if " __stringify(cb) " == 0\n" \
257     " .word 663f - .\n" /* new instruction */ \
258     + " .else\n" \
259     + " .word " __stringify(cb) "- .\n" /* callback */ \
260     + " .endif\n" \
261     " .hword " __stringify(feature) "\n" /* feature bit */ \
262     " .byte 662b-661b\n" /* source len */ \
263     " .byte 664f-663f\n" /* replacement len */
264     @@ -42,15 +53,18 @@ void apply_alternatives(void *start, size_t length);
265     * but most assemblers die if insn1 or insn2 have a .inst. This should
266     * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
267     * containing commit 4e4d08cf7399b606 or c1baaddf8861).
268     + *
269     + * Alternatives with callbacks do not generate replacement instructions.
270     */
271     -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
272     +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \
273     ".if "__stringify(cfg_enabled)" == 1\n" \
274     "661:\n\t" \
275     oldinstr "\n" \
276     "662:\n" \
277     ".pushsection .altinstructions,\"a\"\n" \
278     - ALTINSTR_ENTRY(feature) \
279     + ALTINSTR_ENTRY(feature,cb) \
280     ".popsection\n" \
281     + " .if " __stringify(cb) " == 0\n" \
282     ".pushsection .altinstr_replacement, \"a\"\n" \
283     "663:\n\t" \
284     newinstr "\n" \
285     @@ -58,11 +72,17 @@ void apply_alternatives(void *start, size_t length);
286     ".popsection\n\t" \
287     ".org . - (664b-663b) + (662b-661b)\n\t" \
288     ".org . - (662b-661b) + (664b-663b)\n" \
289     + ".else\n\t" \
290     + "663:\n\t" \
291     + "664:\n\t" \
292     + ".endif\n" \
293     ".endif\n"
294    
295     #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
296     - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
297     + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
298    
299     +#define ALTERNATIVE_CB(oldinstr, cb) \
300     + __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
301     #else
302    
303     #include <asm/assembler.h>
304     @@ -129,6 +149,14 @@ void apply_alternatives(void *start, size_t length);
305     661:
306     .endm
307    
308     +.macro alternative_cb cb
309     + .set .Lasm_alt_mode, 0
310     + .pushsection .altinstructions, "a"
311     + altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
312     + .popsection
313     +661:
314     +.endm
315     +
316     /*
317     * Provide the other half of the alternative code sequence.
318     */
319     @@ -154,6 +182,13 @@ void apply_alternatives(void *start, size_t length);
320     .org . - (662b-661b) + (664b-663b)
321     .endm
322    
323     +/*
324     + * Callback-based alternative epilogue
325     + */
326     +.macro alternative_cb_end
327     +662:
328     +.endm
329     +
330     /*
331     * Provides a trivial alternative or default sequence consisting solely
332     * of NOPs. The number of NOPs is chosen automatically to match the
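To summarize the mechanism added above: when alt->cpufeature is ARM64_CB_PATCH, __apply_alternatives() (reworked later in this series in alternative.c) calls the function recorded in the entry instead of copying replacement instructions, and the callback writes the final instruction(s) through updptr. A hypothetical C-side user, modeled on arm64_update_smccc_conduit() from cpu_errata.c below (the callback name here is made up for illustration):

    void __init patch_in_nop(struct alt_instr *alt,
                             __le32 *origptr, __le32 *updptr, int nr_inst)
    {
        /* This series only patches single-instruction slots. */
        BUG_ON(nr_inst != 1);
        *updptr = cpu_to_le32(aarch64_insn_gen_nop());
    }

    /* In inline assembly, the slot to be patched is declared with: */
    asm volatile(ALTERNATIVE_CB("nop", patch_in_nop));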
333     diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
334     index bfcfec3590f6..3f85bbcd7e40 100644
335     --- a/arch/arm64/include/asm/assembler.h
336     +++ b/arch/arm64/include/asm/assembler.h
337     @@ -239,14 +239,33 @@ lr .req x30 // link register
338     .endm
339    
340     /*
341     + * @dst: Result of per_cpu(sym, smp_processor_id())
342     * @sym: The name of the per-cpu variable
343     - * @reg: Result of per_cpu(sym, smp_processor_id())
344     * @tmp: scratch register
345     */
346     - .macro this_cpu_ptr, sym, reg, tmp
347     - adr_l \reg, \sym
348     + .macro adr_this_cpu, dst, sym, tmp
349     + adr_l \dst, \sym
350     +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
351     mrs \tmp, tpidr_el1
352     - add \reg, \reg, \tmp
353     +alternative_else
354     + mrs \tmp, tpidr_el2
355     +alternative_endif
356     + add \dst, \dst, \tmp
357     + .endm
358     +
359     + /*
360     + * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
361     + * @sym: The name of the per-cpu variable
362     + * @tmp: scratch register
363     + */
364     + .macro ldr_this_cpu dst, sym, tmp
365     + adr_l \dst, \sym
366     +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
367     + mrs \tmp, tpidr_el1
368     +alternative_else
369     + mrs \tmp, tpidr_el2
370     +alternative_endif
371     + ldr \dst, [\dst, \tmp]
372     .endm
373    
374     /*
375     diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
376     index ce67bf6a0886..7010779a1429 100644
377     --- a/arch/arm64/include/asm/cpucaps.h
378     +++ b/arch/arm64/include/asm/cpucaps.h
379     @@ -36,7 +36,8 @@
380     #define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
381     #define ARM64_UNMAP_KERNEL_AT_EL0 16
382     #define ARM64_HARDEN_BRANCH_PREDICTOR 17
383     +#define ARM64_SSBD 18
384    
385     -#define ARM64_NCAPS 18
386     +#define ARM64_NCAPS 19
387    
388     #endif /* __ASM_CPUCAPS_H */
389     diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
390     index 4ea85ebdf4df..15868eca58de 100644
391     --- a/arch/arm64/include/asm/cpufeature.h
392     +++ b/arch/arm64/include/asm/cpufeature.h
393     @@ -221,6 +221,28 @@ static inline bool system_supports_mixed_endian_el0(void)
394     return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
395     }
396    
397     +#define ARM64_SSBD_UNKNOWN -1
398     +#define ARM64_SSBD_FORCE_DISABLE 0
399     +#define ARM64_SSBD_KERNEL 1
400     +#define ARM64_SSBD_FORCE_ENABLE 2
401     +#define ARM64_SSBD_MITIGATED 3
402     +
403     +static inline int arm64_get_ssbd_state(void)
404     +{
405     +#ifdef CONFIG_ARM64_SSBD
406     + extern int ssbd_state;
407     + return ssbd_state;
408     +#else
409     + return ARM64_SSBD_UNKNOWN;
410     +#endif
411     +}
412     +
413     +#ifdef CONFIG_ARM64_SSBD
414     +void arm64_set_ssbd_mitigation(bool state);
415     +#else
416     +static inline void arm64_set_ssbd_mitigation(bool state) {}
417     +#endif
418     +
419     #endif /* __ASSEMBLY__ */
420    
421     #endif
422     diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
423     index ec3553eb9349..8f5cf83b2339 100644
424     --- a/arch/arm64/include/asm/kvm_asm.h
425     +++ b/arch/arm64/include/asm/kvm_asm.h
426     @@ -33,6 +33,10 @@
427     #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
428     #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
429    
430     +#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
431     +#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
432     +
433     +/* Translate a kernel address of @sym into its equivalent linear mapping */
434     #define kvm_ksym_ref(sym) \
435     ({ \
436     void *val = &sym; \
437     @@ -65,6 +69,43 @@ extern u32 __kvm_get_mdcr_el2(void);
438    
439     extern u32 __init_stage2_translation(void);
440    
441     +/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
442     +#define __hyp_this_cpu_ptr(sym) \
443     + ({ \
444     + void *__ptr = hyp_symbol_addr(sym); \
445     + __ptr += read_sysreg(tpidr_el2); \
446     + (typeof(&sym))__ptr; \
447     + })
448     +
449     +#define __hyp_this_cpu_read(sym) \
450     + ({ \
451     + *__hyp_this_cpu_ptr(sym); \
452     + })
453     +
454     +#else /* __ASSEMBLY__ */
455     +
456     +.macro hyp_adr_this_cpu reg, sym, tmp
457     + adr_l \reg, \sym
458     + mrs \tmp, tpidr_el2
459     + add \reg, \reg, \tmp
460     +.endm
461     +
462     +.macro hyp_ldr_this_cpu reg, sym, tmp
463     + adr_l \reg, \sym
464     + mrs \tmp, tpidr_el2
465     + ldr \reg, [\reg, \tmp]
466     +.endm
467     +
468     +.macro get_host_ctxt reg, tmp
469     + hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
470     +.endm
471     +
472     +.macro get_vcpu_ptr vcpu, ctxt
473     + get_host_ctxt \ctxt, \vcpu
474     + ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
475     + kern_hyp_va \vcpu
476     +.endm
477     +
478     #endif
479    
480     #endif /* __ARM_KVM_ASM_H__ */
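The C helpers above give HYP code per-CPU access keyed off tpidr_el2; later in this series the world-switch code uses them exactly like the regular per-CPU API, as in __set_guest_arch_workaround_state() in switch.c below:

    if (__hyp_this_cpu_read(arm64_ssbd_callback_required))
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);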
481     diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
482     index 2abb4493f4f6..4cdfbd01b2de 100644
483     --- a/arch/arm64/include/asm/kvm_host.h
484     +++ b/arch/arm64/include/asm/kvm_host.h
485     @@ -197,6 +197,8 @@ struct kvm_cpu_context {
486     u64 sys_regs[NR_SYS_REGS];
487     u32 copro[NR_COPRO_REGS];
488     };
489     +
490     + struct kvm_vcpu *__hyp_running_vcpu;
491     };
492    
493     typedef struct kvm_cpu_context kvm_cpu_context_t;
494     @@ -211,6 +213,9 @@ struct kvm_vcpu_arch {
495     /* Exception Information */
496     struct kvm_vcpu_fault_info fault;
497    
498     + /* State of various workarounds, see kvm_asm.h for bit assignment */
499     + u64 workaround_flags;
500     +
501     /* Guest debug state */
502     u64 debug_flags;
503    
504     @@ -354,10 +359,15 @@ int kvm_perf_teardown(void);
505    
506     struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
507    
508     +void __kvm_set_tpidr_el2(u64 tpidr_el2);
509     +DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
510     +
511     static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
512     unsigned long hyp_stack_ptr,
513     unsigned long vector_ptr)
514     {
515     + u64 tpidr_el2;
516     +
517     /*
518     * Call initialization code, and switch to the full blown HYP code.
519     * If the cpucaps haven't been finalized yet, something has gone very
520     @@ -366,6 +376,16 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
521     */
522     BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
523     __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
524     +
525     + /*
526     + * Calculate the raw per-cpu offset without a translation from the
527     + * kernel's mapping to the linear mapping, and store it in tpidr_el2
528     + * so that we can use adr_l to access per-cpu variables in EL2.
529     + */
530     + tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
531     + - (u64)kvm_ksym_ref(kvm_host_cpu_state);
532     +
533     + kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
534     }
535    
536     void __kvm_hyp_teardown(void);
537     @@ -405,4 +425,27 @@ static inline bool kvm_arm_harden_branch_predictor(void)
538     return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
539     }
540    
541     +#define KVM_SSBD_UNKNOWN -1
542     +#define KVM_SSBD_FORCE_DISABLE 0
543     +#define KVM_SSBD_KERNEL 1
544     +#define KVM_SSBD_FORCE_ENABLE 2
545     +#define KVM_SSBD_MITIGATED 3
546     +
547     +static inline int kvm_arm_have_ssbd(void)
548     +{
549     + switch (arm64_get_ssbd_state()) {
550     + case ARM64_SSBD_FORCE_DISABLE:
551     + return KVM_SSBD_FORCE_DISABLE;
552     + case ARM64_SSBD_KERNEL:
553     + return KVM_SSBD_KERNEL;
554     + case ARM64_SSBD_FORCE_ENABLE:
555     + return KVM_SSBD_FORCE_ENABLE;
556     + case ARM64_SSBD_MITIGATED:
557     + return KVM_SSBD_MITIGATED;
558     + case ARM64_SSBD_UNKNOWN:
559     + default:
560     + return KVM_SSBD_UNKNOWN;
561     + }
562     +}
563     +
564     #endif /* __ARM64_KVM_HOST_H__ */
565     diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
566     index 824c83db9b47..547519abc751 100644
567     --- a/arch/arm64/include/asm/kvm_mmu.h
568     +++ b/arch/arm64/include/asm/kvm_mmu.h
569     @@ -130,6 +130,26 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
570    
571     #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
572    
573     +/*
574     + * Obtain the PC-relative address of a kernel symbol
575     + * s: symbol
576     + *
577     + * The goal of this macro is to return a symbol's address based on a
578     + * PC-relative computation, as opposed to loading the VA from a
579     + * constant pool or something similar. This works well for HYP, as an
580     + * absolute VA is guaranteed to be wrong. Only use this if trying to
581     + * obtain the address of a symbol (i.e. not something you obtained by
582     + * following a pointer).
583     + */
584     +#define hyp_symbol_addr(s) \
585     + ({ \
586     + typeof(s) *addr; \
587     + asm("adrp %0, %1\n" \
588     + "add %0, %0, :lo12:%1\n" \
589     + : "=r" (addr) : "S" (&s)); \
590     + addr; \
591     + })
592     +
593     /*
594     * We currently only support a 40bit IPA.
595     */
596     @@ -367,5 +387,29 @@ static inline int kvm_map_vectors(void)
597     }
598     #endif
599    
600     +#ifdef CONFIG_ARM64_SSBD
601     +DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
602     +
603     +static inline int hyp_map_aux_data(void)
604     +{
605     + int cpu, err;
606     +
607     + for_each_possible_cpu(cpu) {
608     + u64 *ptr;
609     +
610     + ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
611     + err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
612     + if (err)
613     + return err;
614     + }
615     + return 0;
616     +}
617     +#else
618     +static inline int hyp_map_aux_data(void)
619     +{
620     + return 0;
621     +}
622     +#endif
623     +
624     #endif /* __ASSEMBLY__ */
625     #endif /* __ARM64_KVM_MMU_H__ */
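hyp_symbol_addr() is what makes __hyp_this_cpu_ptr() from kvm_asm.h above safe at EL2. A minimal sketch of the distinction (some_hyp_symbol is hypothetical):

    extern unsigned long some_hyp_symbol;

    unsigned long *good = hyp_symbol_addr(some_hyp_symbol); /* PC-relative, valid at HYP */
    unsigned long *bad  = &some_hyp_symbol;                 /* absolute kernel VA, wrong at HYP */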
626     diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
627     index 5394c8405e66..0d551576eb57 100644
628     --- a/arch/arm64/include/asm/percpu.h
629     +++ b/arch/arm64/include/asm/percpu.h
630     @@ -16,9 +16,14 @@
631     #ifndef __ASM_PERCPU_H
632     #define __ASM_PERCPU_H
633    
634     +#include <asm/alternative.h>
635     +
636     static inline void set_my_cpu_offset(unsigned long off)
637     {
638     - asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
639     + asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
640     + "msr tpidr_el2, %0",
641     + ARM64_HAS_VIRT_HOST_EXTN)
642     + :: "r" (off) : "memory");
643     }
644    
645     static inline unsigned long __my_cpu_offset(void)
646     @@ -29,7 +34,10 @@ static inline unsigned long __my_cpu_offset(void)
647     * We want to allow caching the value, so avoid using volatile and
648     * instead use a fake stack read to hazard against barrier().
649     */
650     - asm("mrs %0, tpidr_el1" : "=r" (off) :
651     + asm(ALTERNATIVE("mrs %0, tpidr_el1",
652     + "mrs %0, tpidr_el2",
653     + ARM64_HAS_VIRT_HOST_EXTN)
654     + : "=r" (off) :
655     "Q" (*(const unsigned long *)current_stack_pointer));
656    
657     return off;
658     diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
659     index e9ea5a6bd449..0dd1bc13f942 100644
660     --- a/arch/arm64/include/asm/thread_info.h
661     +++ b/arch/arm64/include/asm/thread_info.h
662     @@ -122,6 +122,7 @@ static inline struct thread_info *current_thread_info(void)
663     #define TIF_RESTORE_SIGMASK 20
664     #define TIF_SINGLESTEP 21
665     #define TIF_32BIT 22 /* 32bit process */
666     +#define TIF_SSBD 23 /* Wants SSB mitigation */
667    
668     #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
669     #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
670     diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
671     index 74b8fd860714..6dadaaee796d 100644
672     --- a/arch/arm64/kernel/Makefile
673     +++ b/arch/arm64/kernel/Makefile
674     @@ -50,6 +50,7 @@ arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
675     arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
676     arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
677     cpu-reset.o
678     +arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
679    
680     ifeq ($(CONFIG_KVM),y)
681     arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
682     diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
683     index 06d650f61da7..091748095140 100644
684     --- a/arch/arm64/kernel/alternative.c
685     +++ b/arch/arm64/kernel/alternative.c
686     @@ -28,10 +28,12 @@
687     #include <asm/sections.h>
688     #include <linux/stop_machine.h>
689    
690     -#define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f)
691     +#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f)
692     #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
693     #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
694    
695     +int alternatives_applied;
696     +
697     struct alt_region {
698     struct alt_instr *begin;
699     struct alt_instr *end;
700     @@ -105,31 +107,52 @@ static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
701     return insn;
702     }
703    
704     +static void patch_alternative(struct alt_instr *alt,
705     + __le32 *origptr, __le32 *updptr, int nr_inst)
706     +{
707     + __le32 *replptr;
708     + int i;
709     +
710     + replptr = ALT_REPL_PTR(alt);
711     + for (i = 0; i < nr_inst; i++) {
712     + u32 insn;
713     +
714     + insn = get_alt_insn(alt, origptr + i, replptr + i);
715     + updptr[i] = cpu_to_le32(insn);
716     + }
717     +}
718     +
719     static void __apply_alternatives(void *alt_region)
720     {
721     struct alt_instr *alt;
722     struct alt_region *region = alt_region;
723     - u32 *origptr, *replptr;
724     + __le32 *origptr;
725     + alternative_cb_t alt_cb;
726    
727     for (alt = region->begin; alt < region->end; alt++) {
728     - u32 insn;
729     - int i, nr_inst;
730     + int nr_inst;
731    
732     - if (!cpus_have_cap(alt->cpufeature))
733     + /* Use ARM64_CB_PATCH as an unconditional patch */
734     + if (alt->cpufeature < ARM64_CB_PATCH &&
735     + !cpus_have_cap(alt->cpufeature))
736     continue;
737    
738     - BUG_ON(alt->alt_len != alt->orig_len);
739     + if (alt->cpufeature == ARM64_CB_PATCH)
740     + BUG_ON(alt->alt_len != 0);
741     + else
742     + BUG_ON(alt->alt_len != alt->orig_len);
743    
744     pr_info_once("patching kernel code\n");
745    
746     origptr = ALT_ORIG_PTR(alt);
747     - replptr = ALT_REPL_PTR(alt);
748     - nr_inst = alt->alt_len / sizeof(insn);
749     + nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
750    
751     - for (i = 0; i < nr_inst; i++) {
752     - insn = get_alt_insn(alt, origptr + i, replptr + i);
753     - *(origptr + i) = cpu_to_le32(insn);
754     - }
755     + if (alt->cpufeature < ARM64_CB_PATCH)
756     + alt_cb = patch_alternative;
757     + else
758     + alt_cb = ALT_REPL_PTR(alt);
759     +
760     + alt_cb(alt, origptr, origptr, nr_inst);
761    
762     flush_icache_range((uintptr_t)origptr,
763     (uintptr_t)(origptr + nr_inst));
764     @@ -142,7 +165,6 @@ static void __apply_alternatives(void *alt_region)
765     */
766     static int __apply_alternatives_multi_stop(void *unused)
767     {
768     - static int patched = 0;
769     struct alt_region region = {
770     .begin = (struct alt_instr *)__alt_instructions,
771     .end = (struct alt_instr *)__alt_instructions_end,
772     @@ -150,14 +172,14 @@ static int __apply_alternatives_multi_stop(void *unused)
773    
774     /* We always have a CPU 0 at this point (__init) */
775     if (smp_processor_id()) {
776     - while (!READ_ONCE(patched))
777     + while (!READ_ONCE(alternatives_applied))
778     cpu_relax();
779     isb();
780     } else {
781     - BUG_ON(patched);
782     + BUG_ON(alternatives_applied);
783     __apply_alternatives(&region);
784     /* Barriers provided by the cache flushing */
785     - WRITE_ONCE(patched, 1);
786     + WRITE_ONCE(alternatives_applied, 1);
787     }
788    
789     return 0;
790     diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
791     index 5f4bf3c6f016..bd239b1b7a68 100644
792     --- a/arch/arm64/kernel/asm-offsets.c
793     +++ b/arch/arm64/kernel/asm-offsets.c
794     @@ -127,11 +127,13 @@ int main(void)
795     BLANK();
796     #ifdef CONFIG_KVM_ARM_HOST
797     DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
798     + DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
799     DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
800     DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
801     DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
802     DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
803     DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
804     + DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
805     #endif
806     #ifdef CONFIG_CPU_PM
807     DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
808     diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
809     index 2de62aa91303..1db97ad7b58b 100644
810     --- a/arch/arm64/kernel/cpu_errata.c
811     +++ b/arch/arm64/kernel/cpu_errata.c
812     @@ -187,6 +187,178 @@ static int enable_smccc_arch_workaround_1(void *data)
813     }
814     #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
815    
816     +#ifdef CONFIG_ARM64_SSBD
817     +DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
818     +
819     +int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
820     +
821     +static const struct ssbd_options {
822     + const char *str;
823     + int state;
824     +} ssbd_options[] = {
825     + { "force-on", ARM64_SSBD_FORCE_ENABLE, },
826     + { "force-off", ARM64_SSBD_FORCE_DISABLE, },
827     + { "kernel", ARM64_SSBD_KERNEL, },
828     +};
829     +
830     +static int __init ssbd_cfg(char *buf)
831     +{
832     + int i;
833     +
834     + if (!buf || !buf[0])
835     + return -EINVAL;
836     +
837     + for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
838     + int len = strlen(ssbd_options[i].str);
839     +
840     + if (strncmp(buf, ssbd_options[i].str, len))
841     + continue;
842     +
843     + ssbd_state = ssbd_options[i].state;
844     + return 0;
845     + }
846     +
847     + return -EINVAL;
848     +}
849     +early_param("ssbd", ssbd_cfg);
850     +
851     +void __init arm64_update_smccc_conduit(struct alt_instr *alt,
852     + __le32 *origptr, __le32 *updptr,
853     + int nr_inst)
854     +{
855     + u32 insn;
856     +
857     + BUG_ON(nr_inst != 1);
858     +
859     + switch (psci_ops.conduit) {
860     + case PSCI_CONDUIT_HVC:
861     + insn = aarch64_insn_get_hvc_value();
862     + break;
863     + case PSCI_CONDUIT_SMC:
864     + insn = aarch64_insn_get_smc_value();
865     + break;
866     + default:
867     + return;
868     + }
869     +
870     + *updptr = cpu_to_le32(insn);
871     +}
872     +
873     +void __init arm64_enable_wa2_handling(struct alt_instr *alt,
874     + __le32 *origptr, __le32 *updptr,
875     + int nr_inst)
876     +{
877     + BUG_ON(nr_inst != 1);
878     + /*
879     + * Only allow mitigation on EL1 entry/exit and guest
880     + * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
881     + * be flipped.
882     + */
883     + if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
884     + *updptr = cpu_to_le32(aarch64_insn_gen_nop());
885     +}
886     +
887     +void arm64_set_ssbd_mitigation(bool state)
888     +{
889     + switch (psci_ops.conduit) {
890     + case PSCI_CONDUIT_HVC:
891     + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
892     + break;
893     +
894     + case PSCI_CONDUIT_SMC:
895     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
896     + break;
897     +
898     + default:
899     + WARN_ON_ONCE(1);
900     + break;
901     + }
902     +}
903     +
904     +static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
905     + int scope)
906     +{
907     + struct arm_smccc_res res;
908     + bool required = true;
909     + s32 val;
910     +
911     + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
912     +
913     + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
914     + ssbd_state = ARM64_SSBD_UNKNOWN;
915     + return false;
916     + }
917     +
918     + switch (psci_ops.conduit) {
919     + case PSCI_CONDUIT_HVC:
920     + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
921     + ARM_SMCCC_ARCH_WORKAROUND_2, &res);
922     + break;
923     +
924     + case PSCI_CONDUIT_SMC:
925     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
926     + ARM_SMCCC_ARCH_WORKAROUND_2, &res);
927     + break;
928     +
929     + default:
930     + ssbd_state = ARM64_SSBD_UNKNOWN;
931     + return false;
932     + }
933     +
934     + val = (s32)res.a0;
935     +
936     + switch (val) {
937     + case SMCCC_RET_NOT_SUPPORTED:
938     + ssbd_state = ARM64_SSBD_UNKNOWN;
939     + return false;
940     +
941     + case SMCCC_RET_NOT_REQUIRED:
942     + pr_info_once("%s mitigation not required\n", entry->desc);
943     + ssbd_state = ARM64_SSBD_MITIGATED;
944     + return false;
945     +
946     + case SMCCC_RET_SUCCESS:
947     + required = true;
948     + break;
949     +
950     + case 1: /* Mitigation not required on this CPU */
951     + required = false;
952     + break;
953     +
954     + default:
955     + WARN_ON(1);
956     + return false;
957     + }
958     +
959     + switch (ssbd_state) {
960     + case ARM64_SSBD_FORCE_DISABLE:
961     + pr_info_once("%s disabled from command-line\n", entry->desc);
962     + arm64_set_ssbd_mitigation(false);
963     + required = false;
964     + break;
965     +
966     + case ARM64_SSBD_KERNEL:
967     + if (required) {
968     + __this_cpu_write(arm64_ssbd_callback_required, 1);
969     + arm64_set_ssbd_mitigation(true);
970     + }
971     + break;
972     +
973     + case ARM64_SSBD_FORCE_ENABLE:
974     + pr_info_once("%s forced from command-line\n", entry->desc);
975     + arm64_set_ssbd_mitigation(true);
976     + required = true;
977     + break;
978     +
979     + default:
980     + WARN_ON(1);
981     + break;
982     + }
983     +
984     + return required;
985     +}
986     +#endif /* CONFIG_ARM64_SSBD */
987     +
988     #define MIDR_RANGE(model, min, max) \
989     .def_scope = SCOPE_LOCAL_CPU, \
990     .matches = is_affected_midr_range, \
991     @@ -309,6 +481,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
992     MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
993     .enable = enable_smccc_arch_workaround_1,
994     },
995     +#endif
996     +#ifdef CONFIG_ARM64_SSBD
997     + {
998     + .desc = "Speculative Store Bypass Disable",
999     + .def_scope = SCOPE_LOCAL_CPU,
1000     + .capability = ARM64_SSBD,
1001     + .matches = has_ssbd_mitigation,
1002     + },
1003     #endif
1004     {
1005     }
1006     diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
1007     index 625c2b240ffb..ab15747a49d4 100644
1008     --- a/arch/arm64/kernel/cpufeature.c
1009     +++ b/arch/arm64/kernel/cpufeature.c
1010     @@ -829,6 +829,22 @@ static int __init parse_kpti(char *str)
1011     early_param("kpti", parse_kpti);
1012     #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1013    
1014     +static int cpu_copy_el2regs(void *__unused)
1015     +{
1016     + /*
1017     + * Copy register values that aren't redirected by hardware.
1018     + *
1019     + * Before code patching, we only set tpidr_el1; all CPUs need to copy
1020     + * this value to tpidr_el2 before we patch the code. Once we've done
1021     + * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
1022     + * do anything here.
1023     + */
1024     + if (!alternatives_applied)
1025     + write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
1026     +
1027     + return 0;
1028     +}
1029     +
1030     static const struct arm64_cpu_capabilities arm64_features[] = {
1031     {
1032     .desc = "GIC system register CPU interface",
1033     @@ -895,6 +911,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
1034     .capability = ARM64_HAS_VIRT_HOST_EXTN,
1035     .def_scope = SCOPE_SYSTEM,
1036     .matches = runs_at_el2,
1037     + .enable = cpu_copy_el2regs,
1038     },
1039     {
1040     .desc = "32-bit EL0 Support",
1041     diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
1042     index b79e302d2a3e..ca978d7d98eb 100644
1043     --- a/arch/arm64/kernel/entry.S
1044     +++ b/arch/arm64/kernel/entry.S
1045     @@ -18,6 +18,7 @@
1046     * along with this program. If not, see <http://www.gnu.org/licenses/>.
1047     */
1048    
1049     +#include <linux/arm-smccc.h>
1050     #include <linux/init.h>
1051     #include <linux/linkage.h>
1052    
1053     @@ -95,6 +96,25 @@ alternative_else_nop_endif
1054     add \dst, \dst, #(\sym - .entry.tramp.text)
1055     .endm
1056    
1057     + // This macro corrupts x0-x3. It is the caller's duty
1058     + // to save/restore them if required.
1059     + .macro apply_ssbd, state, targ, tmp1, tmp2
1060     +#ifdef CONFIG_ARM64_SSBD
1061     +alternative_cb arm64_enable_wa2_handling
1062     + b \targ
1063     +alternative_cb_end
1064     + ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
1065     + cbz \tmp2, \targ
1066     + ldr \tmp2, [tsk, #TI_FLAGS]
1067     + tbnz \tmp2, #TIF_SSBD, \targ
1068     + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
1069     + mov w1, #\state
1070     +alternative_cb arm64_update_smccc_conduit
1071     + nop // Patched to SMC/HVC #0
1072     +alternative_cb_end
1073     +#endif
1074     + .endm
1075     +
1076     .macro kernel_entry, el, regsize = 64
1077     .if \regsize == 32
1078     mov w0, w0 // zero upper 32 bits of x0
1079     @@ -122,6 +142,14 @@ alternative_else_nop_endif
1080     ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
1081     disable_step_tsk x19, x20 // exceptions when scheduling.
1082    
1083     + apply_ssbd 1, 1f, x22, x23
1084     +
1085     +#ifdef CONFIG_ARM64_SSBD
1086     + ldp x0, x1, [sp, #16 * 0]
1087     + ldp x2, x3, [sp, #16 * 1]
1088     +#endif
1089     +1:
1090     +
1091     mov x29, xzr // fp pointed to user-space
1092     .else
1093     add x21, sp, #S_FRAME_SIZE
1094     @@ -190,6 +218,8 @@ alternative_if ARM64_WORKAROUND_845719
1095     alternative_else_nop_endif
1096     #endif
1097     3:
1098     + apply_ssbd 0, 5f, x0, x1
1099     +5:
1100     .endif
1101     msr elr_el1, x21 // set up the return data
1102     msr spsr_el1, x22
1103     @@ -243,7 +273,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
1104     cmp x25, tsk
1105     b.ne 9998f
1106    
1107     - this_cpu_ptr irq_stack, x25, x26
1108     + adr_this_cpu x25, irq_stack, x26
1109     mov x26, #IRQ_STACK_START_SP
1110     add x26, x25, x26
1111    
1112     diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
1113     index d55a7b09959b..f6e71c73cceb 100644
1114     --- a/arch/arm64/kernel/hibernate.c
1115     +++ b/arch/arm64/kernel/hibernate.c
1116     @@ -308,6 +308,17 @@ int swsusp_arch_suspend(void)
1117    
1118     sleep_cpu = -EINVAL;
1119     __cpu_suspend_exit();
1120     +
1121     + /*
1122     + * Just in case the boot kernel did turn the SSBD
1123     + * mitigation off behind our back, let's set the state
1124     + * to what we expect it to be.
1125     + */
1126     + switch (arm64_get_ssbd_state()) {
1127     + case ARM64_SSBD_FORCE_ENABLE:
1128     + case ARM64_SSBD_KERNEL:
1129     + arm64_set_ssbd_mitigation(true);
1130     + }
1131     }
1132    
1133     local_dbg_restore(flags);
1134     diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
1135     new file mode 100644
1136     index 000000000000..0560738c1d5c
1137     --- /dev/null
1138     +++ b/arch/arm64/kernel/ssbd.c
1139     @@ -0,0 +1,108 @@
1140     +// SPDX-License-Identifier: GPL-2.0
1141     +/*
1142     + * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
1143     + */
1144     +
1145     +#include <linux/errno.h>
1146     +#include <linux/prctl.h>
1147     +#include <linux/sched.h>
1148     +#include <linux/thread_info.h>
1149     +
1150     +#include <asm/cpufeature.h>
1151     +
1152     +/*
1153     + * prctl interface for SSBD
1154     + */
1155     +static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
1156     +{
1157     + int state = arm64_get_ssbd_state();
1158     +
1159     + /* Unsupported */
1160     + if (state == ARM64_SSBD_UNKNOWN)
1161     + return -EINVAL;
1162     +
1163     + /* Treat the unaffected/mitigated state separately */
1164     + if (state == ARM64_SSBD_MITIGATED) {
1165     + switch (ctrl) {
1166     + case PR_SPEC_ENABLE:
1167     + return -EPERM;
1168     + case PR_SPEC_DISABLE:
1169     + case PR_SPEC_FORCE_DISABLE:
1170     + return 0;
1171     + }
1172     + }
1173     +
1174     + /*
1175     + * Things are a bit backward here: the arm64 internal API
1176     + * *enables the mitigation* when the userspace API *disables
1177     + * speculation*. So much fun.
1178     + */
1179     + switch (ctrl) {
1180     + case PR_SPEC_ENABLE:
1181     + /* If speculation is force disabled, enable is not allowed */
1182     + if (state == ARM64_SSBD_FORCE_ENABLE ||
1183     + task_spec_ssb_force_disable(task))
1184     + return -EPERM;
1185     + task_clear_spec_ssb_disable(task);
1186     + clear_tsk_thread_flag(task, TIF_SSBD);
1187     + break;
1188     + case PR_SPEC_DISABLE:
1189     + if (state == ARM64_SSBD_FORCE_DISABLE)
1190     + return -EPERM;
1191     + task_set_spec_ssb_disable(task);
1192     + set_tsk_thread_flag(task, TIF_SSBD);
1193     + break;
1194     + case PR_SPEC_FORCE_DISABLE:
1195     + if (state == ARM64_SSBD_FORCE_DISABLE)
1196     + return -EPERM;
1197     + task_set_spec_ssb_disable(task);
1198     + task_set_spec_ssb_force_disable(task);
1199     + set_tsk_thread_flag(task, TIF_SSBD);
1200     + break;
1201     + default:
1202     + return -ERANGE;
1203     + }
1204     +
1205     + return 0;
1206     +}
1207     +
1208     +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1209     + unsigned long ctrl)
1210     +{
1211     + switch (which) {
1212     + case PR_SPEC_STORE_BYPASS:
1213     + return ssbd_prctl_set(task, ctrl);
1214     + default:
1215     + return -ENODEV;
1216     + }
1217     +}
1218     +
1219     +static int ssbd_prctl_get(struct task_struct *task)
1220     +{
1221     + switch (arm64_get_ssbd_state()) {
1222     + case ARM64_SSBD_UNKNOWN:
1223     + return -EINVAL;
1224     + case ARM64_SSBD_FORCE_ENABLE:
1225     + return PR_SPEC_DISABLE;
1226     + case ARM64_SSBD_KERNEL:
1227     + if (task_spec_ssb_force_disable(task))
1228     + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1229     + if (task_spec_ssb_disable(task))
1230     + return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1231     + return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1232     + case ARM64_SSBD_FORCE_DISABLE:
1233     + return PR_SPEC_ENABLE;
1234     + default:
1235     + return PR_SPEC_NOT_AFFECTED;
1236     + }
1237     +}
1238     +
1239     +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
1240     +{
1241     + switch (which) {
1242     + case PR_SPEC_STORE_BYPASS:
1243     + return ssbd_prctl_get(task);
1244     + default:
1245     + return -ENODEV;
1246     + }
1247     +}
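For reference, the userspace side of the interface added above is the generic speculation-control prctl; a minimal sketch, assuming kernel headers new enough to define PR_SET_SPECULATION_CTRL:

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
        /* Register interest in the SSB mitigation (routes to ssbd_prctl_set() above). */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
            perror("PR_SET_SPECULATION_CTRL");

        /* Query the per-task state (routes to ssbd_prctl_get() above). */
        printf("state: 0x%lx\n",
               (long)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
        return 0;
    }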
1248     diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
1249     index bb0cd787a9d3..1dbf6099e2a5 100644
1250     --- a/arch/arm64/kernel/suspend.c
1251     +++ b/arch/arm64/kernel/suspend.c
1252     @@ -67,6 +67,14 @@ void notrace __cpu_suspend_exit(void)
1253     */
1254     if (hw_breakpoint_restore)
1255     hw_breakpoint_restore(cpu);
1256     +
1257     + /*
1258     + * On resume, firmware implementing dynamic mitigation will
1259     + * have turned the mitigation on. If the user has forcefully
1260     + * disabled it, make sure their wishes are obeyed.
1261     + */
1262     + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
1263     + arm64_set_ssbd_mitigation(false);
1264     }
1265    
1266     /*
1267     diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
1268     index 4bbff904169d..db5efaf2a985 100644
1269     --- a/arch/arm64/kvm/hyp-init.S
1270     +++ b/arch/arm64/kvm/hyp-init.S
1271     @@ -118,6 +118,10 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
1272     kern_hyp_va x2
1273     msr vbar_el2, x2
1274    
1275     + /* copy tpidr_el1 into tpidr_el2 for use by HYP */
1276     + mrs x1, tpidr_el1
1277     + msr tpidr_el2, x1
1278     +
1279     /* Hello, World! */
1280     eret
1281     ENDPROC(__kvm_hyp_init)
1282     diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
1283     index 12ee62d6d410..a360ac6e89e9 100644
1284     --- a/arch/arm64/kvm/hyp/entry.S
1285     +++ b/arch/arm64/kvm/hyp/entry.S
1286     @@ -62,9 +62,6 @@ ENTRY(__guest_enter)
1287     // Store the host regs
1288     save_callee_saved_regs x1
1289    
1290     - // Store the host_ctxt for use at exit time
1291     - str x1, [sp, #-16]!
1292     -
1293     add x18, x0, #VCPU_CONTEXT
1294    
1295     // Restore guest regs x0-x17
1296     @@ -118,8 +115,7 @@ ENTRY(__guest_exit)
1297     // Store the guest regs x19-x29, lr
1298     save_callee_saved_regs x1
1299    
1300     - // Restore the host_ctxt from the stack
1301     - ldr x2, [sp], #16
1302     + get_host_ctxt x2, x3
1303    
1304     // Now restore the host regs
1305     restore_callee_saved_regs x2
1306     @@ -159,6 +155,10 @@ abort_guest_exit_end:
1307     ENDPROC(__guest_exit)
1308    
1309     ENTRY(__fpsimd_guest_restore)
1310     + // x0: esr
1311     + // x1: vcpu
1312     + // x2-x29,lr: vcpu regs
1313     + // vcpu x0-x1 on the stack
1314     stp x2, x3, [sp, #-16]!
1315     stp x4, lr, [sp, #-16]!
1316    
1317     @@ -173,7 +173,7 @@ alternative_else
1318     alternative_endif
1319     isb
1320    
1321     - mrs x3, tpidr_el2
1322     + mov x3, x1
1323    
1324     ldr x0, [x3, #VCPU_HOST_CONTEXT]
1325     kern_hyp_va x0
1326     diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
1327     index 4e9d50c3e658..bf4988f9dae8 100644
1328     --- a/arch/arm64/kvm/hyp/hyp-entry.S
1329     +++ b/arch/arm64/kvm/hyp/hyp-entry.S
1330     @@ -72,13 +72,8 @@ ENDPROC(__kvm_hyp_teardown)
1331     el1_sync: // Guest trapped into EL2
1332     stp x0, x1, [sp, #-16]!
1333    
1334     -alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
1335     - mrs x1, esr_el2
1336     -alternative_else
1337     - mrs x1, esr_el1
1338     -alternative_endif
1339     - lsr x0, x1, #ESR_ELx_EC_SHIFT
1340     -
1341     + mrs x0, esr_el2
1342     + lsr x0, x0, #ESR_ELx_EC_SHIFT
1343     cmp x0, #ESR_ELx_EC_HVC64
1344     ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
1345     b.ne el1_trap
1346     @@ -112,33 +107,73 @@ el1_hvc_guest:
1347     */
1348     ldr x1, [sp] // Guest's x0
1349     eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
1350     + cbz w1, wa_epilogue
1351     +
1352     + /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
1353     + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
1354     + ARM_SMCCC_ARCH_WORKAROUND_2)
1355     cbnz w1, el1_trap
1356     - mov x0, x1
1357     +
1358     +#ifdef CONFIG_ARM64_SSBD
1359     +alternative_cb arm64_enable_wa2_handling
1360     + b wa2_end
1361     +alternative_cb_end
1362     + get_vcpu_ptr x2, x0
1363     + ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
1364     +
1365     + // Sanitize the argument and update the guest flags
1366     + ldr x1, [sp, #8] // Guest's x1
1367     + clz w1, w1 // Murphy's device:
1368     + lsr w1, w1, #5 // w1 = !!w1 without using
1369     + eor w1, w1, #1 // the flags...
1370     + bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
1371     + str x0, [x2, #VCPU_WORKAROUND_FLAGS]
1372     +
1373     + /* Check that we actually need to perform the call */
1374     + hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
1375     + cbz x0, wa2_end
1376     +
1377     + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
1378     + smc #0
1379     +
1380     + /* Don't leak data from the SMC call */
1381     + mov x3, xzr
1382     +wa2_end:
1383     + mov x2, xzr
1384     + mov x1, xzr
1385     +#endif
1386     +
1387     +wa_epilogue:
1388     + mov x0, xzr
1389     add sp, sp, #16
1390     eret
1391    
1392     el1_trap:
1393     + get_vcpu_ptr x1, x0
1394     +
1395     + mrs x0, esr_el2
1396     + lsr x0, x0, #ESR_ELx_EC_SHIFT
1397     /*
1398     * x0: ESR_EC
1399     + * x1: vcpu pointer
1400     */
1401    
1402     /* Guest accessed VFP/SIMD registers, save host, restore Guest */
1403     cmp x0, #ESR_ELx_EC_FP_ASIMD
1404     b.eq __fpsimd_guest_restore
1405    
1406     - mrs x1, tpidr_el2
1407     mov x0, #ARM_EXCEPTION_TRAP
1408     b __guest_exit
1409    
1410     el1_irq:
1411     stp x0, x1, [sp, #-16]!
1412     - mrs x1, tpidr_el2
1413     + get_vcpu_ptr x1, x0
1414     mov x0, #ARM_EXCEPTION_IRQ
1415     b __guest_exit
1416    
1417     el1_error:
1418     stp x0, x1, [sp, #-16]!
1419     - mrs x1, tpidr_el2
1420     + get_vcpu_ptr x1, x0
1421     mov x0, #ARM_EXCEPTION_EL1_SERROR
1422     b __guest_exit
1423    
1424     @@ -173,6 +208,11 @@ ENTRY(__hyp_do_panic)
1425     eret
1426     ENDPROC(__hyp_do_panic)
1427    
1428     +ENTRY(__hyp_panic)
1429     + get_host_ctxt x0, x1
1430     + b hyp_panic
1431     +ENDPROC(__hyp_panic)
1432     +
1433     .macro invalid_vector label, target = __hyp_panic
1434     .align 2
1435     \label:
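The "Murphy's device" sequence above normalizes the guest's x1 argument to a strict 0/1 without touching the condition flags: CLZ of a 32-bit value is 32 only for zero, so clz >> 5 is 1 exactly when the input was 0, and the final EOR inverts that into !!x1. The same computation in C, assuming a clz helper that (like the instruction) is defined for zero:

    static inline unsigned int clz32(unsigned int x)
    {
        return x ? (unsigned int)__builtin_clz(x) : 32; /* CLZ yields 32 for zero */
    }

    static inline unsigned int normalize_bool(unsigned int x)
    {
        return (clz32(x) >> 5) ^ 1; /* 0 -> 0, nonzero -> 1, i.e. !!x */
    }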
1436     diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
1437     index c49d09387192..12f9d1ecdf4c 100644
1438     --- a/arch/arm64/kvm/hyp/switch.c
1439     +++ b/arch/arm64/kvm/hyp/switch.c
1440     @@ -15,6 +15,7 @@
1441     * along with this program. If not, see <http://www.gnu.org/licenses/>.
1442     */
1443    
1444     +#include <linux/arm-smccc.h>
1445     #include <linux/types.h>
1446     #include <linux/jump_label.h>
1447     #include <uapi/linux/psci.h>
1448     @@ -267,6 +268,39 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
1449     write_sysreg_el2(*vcpu_pc(vcpu), elr);
1450     }
1451    
1452     +static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
1453     +{
1454     + if (!cpus_have_cap(ARM64_SSBD))
1455     + return false;
1456     +
1457     + return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
1458     +}
1459     +
1460     +static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
1461     +{
1462     +#ifdef CONFIG_ARM64_SSBD
1463     + /*
1464     + * The host runs with the workaround always present. If the
1465     + * guest wants it disabled, so be it...
1466     + */
1467     + if (__needs_ssbd_off(vcpu) &&
1468     + __hyp_this_cpu_read(arm64_ssbd_callback_required))
1469     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
1470     +#endif
1471     +}
1472     +
1473     +static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
1474     +{
1475     +#ifdef CONFIG_ARM64_SSBD
1476     + /*
1477     + * If the guest has disabled the workaround, bring it back on.
1478     + */
1479     + if (__needs_ssbd_off(vcpu) &&
1480     + __hyp_this_cpu_read(arm64_ssbd_callback_required))
1481     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
1482     +#endif
1483     +}
1484     +
1485     int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
1486     {
1487     struct kvm_cpu_context *host_ctxt;
1488     @@ -275,9 +309,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
1489     u64 exit_code;
1490    
1491     vcpu = kern_hyp_va(vcpu);
1492     - write_sysreg(vcpu, tpidr_el2);
1493    
1494     host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
1495     + host_ctxt->__hyp_running_vcpu = vcpu;
1496     guest_ctxt = &vcpu->arch.ctxt;
1497    
1498     __sysreg_save_host_state(host_ctxt);
1499     @@ -297,6 +331,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
1500     __sysreg_restore_guest_state(guest_ctxt);
1501     __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
1502    
1503     + __set_guest_arch_workaround_state(vcpu);
1504     +
1505     /* Jump in the fire! */
1506     again:
1507     exit_code = __guest_enter(vcpu, host_ctxt);
1508     @@ -339,6 +375,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
1509     }
1510     }
1511    
1512     + __set_host_arch_workaround_state(vcpu);
1513     +
1514     fp_enabled = __fpsimd_enabled();
1515    
1516     __sysreg_save_guest_state(guest_ctxt);
1517     @@ -364,7 +402,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
1518    
1519     static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
1520    
1521     -static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
1522     +static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
1523     + struct kvm_vcpu *vcpu)
1524     {
1525     unsigned long str_va;
1526    
1527     @@ -378,35 +417,32 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
1528     __hyp_do_panic(str_va,
1529     spsr, elr,
1530     read_sysreg(esr_el2), read_sysreg_el2(far),
1531     - read_sysreg(hpfar_el2), par,
1532     - (void *)read_sysreg(tpidr_el2));
1533     + read_sysreg(hpfar_el2), par, vcpu);
1534     }
1535    
1536     -static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
1537     +static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
1538     + struct kvm_vcpu *vcpu)
1539     {
1540     panic(__hyp_panic_string,
1541     spsr, elr,
1542     read_sysreg_el2(esr), read_sysreg_el2(far),
1543     - read_sysreg(hpfar_el2), par,
1544     - (void *)read_sysreg(tpidr_el2));
1545     + read_sysreg(hpfar_el2), par, vcpu);
1546     }
1547    
1548     static hyp_alternate_select(__hyp_call_panic,
1549     __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
1550     ARM64_HAS_VIRT_HOST_EXTN);
1551    
1552     -void __hyp_text __noreturn __hyp_panic(void)
1553     +void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
1554     {
1555     + struct kvm_vcpu *vcpu = NULL;
1556     +
1557     u64 spsr = read_sysreg_el2(spsr);
1558     u64 elr = read_sysreg_el2(elr);
1559     u64 par = read_sysreg(par_el1);
1560    
1561     if (read_sysreg(vttbr_el2)) {
1562     - struct kvm_vcpu *vcpu;
1563     - struct kvm_cpu_context *host_ctxt;
1564     -
1565     - vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
1566     - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
1567     + vcpu = host_ctxt->__hyp_running_vcpu;
1568     __timer_save_state(vcpu);
1569     __deactivate_traps(vcpu);
1570     __deactivate_vm(vcpu);
1571     @@ -414,7 +450,7 @@ void __hyp_text __noreturn __hyp_panic(void)
1572     }
1573    
1574     /* Call panic for real */
1575     - __hyp_call_panic()(spsr, elr, par);
1576     + __hyp_call_panic()(spsr, elr, par, vcpu);
1577    
1578     unreachable();
1579     }
1580     diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
1581     index 934137647837..e19d89cabf2a 100644
1582     --- a/arch/arm64/kvm/hyp/sysreg-sr.c
1583     +++ b/arch/arm64/kvm/hyp/sysreg-sr.c
1584     @@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
1585     /*
1586     * Non-VHE: Both host and guest must save everything.
1587     *
1588     - * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
1589     - * pstate, and guest must save everything.
1590     + * VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0,
1591     + * and guest must save everything.
1592     */
1593    
1594     static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
1595     @@ -36,11 +36,8 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
1596     ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
1597     ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
1598     ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
1599     - ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
1600     ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
1601     ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
1602     - ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
1603     - ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
1604     }
1605    
1606     static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
1607     @@ -62,10 +59,13 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
1608     ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
1609     ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
1610     ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
1611     + ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
1612    
1613     ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
1614     ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
1615     ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
1616     + ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
1617     + ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
1618     }
1619    
1620     static hyp_alternate_select(__sysreg_call_save_host_state,
1621     @@ -89,11 +89,8 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
1622     write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
1623     write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
1624     write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
1625     - write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
1626     write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
1627     write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
1628     - write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
1629     - write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
1630     }
1631    
1632     static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
1633     @@ -115,10 +112,13 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
1634     write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
1635     write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
1636     write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
1637     + write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
1638    
1639     write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
1640     write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
1641     write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
1642     + write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
1643     + write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
1644     }
1645    
1646     static hyp_alternate_select(__sysreg_call_restore_host_state,
1647     @@ -183,3 +183,8 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
1648     if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
1649     write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
1650     }
1651     +
1652     +void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
1653     +{
1654     + asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
1655     +}
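
The sysreg save/restore split above exists because this series stops stashing the running vcpu in TPIDR_EL2 and instead keeps a pointer to the host's kvm_cpu_context there, with __kvm_set_tpidr_el2() programming the register once per CPU. A minimal sketch of the read side, assuming the same convention (the helper names are illustrative, not from the patch, and the mrs is only meaningful in code running at EL2):

    struct kvm_cpu_context;

    /* sketch: with TPIDR_EL2 holding a per-CPU pointer, hyp code can
     * recover its context with a single register read */
    static inline unsigned long read_tpidr_el2(void)
    {
        unsigned long val;

        asm("mrs %0, tpidr_el2" : "=r" (val));
        return val;
    }

    static inline struct kvm_cpu_context *this_cpu_host_ctxt(void)
    {
        return (struct kvm_cpu_context *)read_tpidr_el2();
    }
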
1656     diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
1657     index 5bc460884639..29a27a09f21f 100644
1658     --- a/arch/arm64/kvm/reset.c
1659     +++ b/arch/arm64/kvm/reset.c
1660     @@ -135,6 +135,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
1661     /* Reset PMU */
1662     kvm_pmu_vcpu_reset(vcpu);
1663    
1664     + /* Default workaround setup is enabled (if supported) */
1665     + if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
1666     + vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
1667     +
1668     /* Reset timer */
1669     return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
1670     }
1671     diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1672     index cb1e9c184b5a..513a63b9b991 100644
1673     --- a/arch/mips/kernel/process.c
1674     +++ b/arch/mips/kernel/process.c
1675     @@ -26,6 +26,7 @@
1676     #include <linux/kallsyms.h>
1677     #include <linux/random.h>
1678     #include <linux/prctl.h>
1679     +#include <linux/nmi.h>
1680    
1681     #include <asm/asm.h>
1682     #include <asm/bootinfo.h>
1683     @@ -633,28 +634,42 @@ unsigned long arch_align_stack(unsigned long sp)
1684     return sp & ALMASK;
1685     }
1686    
1687     -static void arch_dump_stack(void *info)
1688     -{
1689     - struct pt_regs *regs;
1690     +static DEFINE_PER_CPU(struct call_single_data, backtrace_csd);
1691     +static struct cpumask backtrace_csd_busy;
1692    
1693     - regs = get_irq_regs();
1694     -
1695     - if (regs)
1696     - show_regs(regs);
1697     - else
1698     - dump_stack();
1699     +static void handle_backtrace(void *info)
1700     +{
1701     + nmi_cpu_backtrace(get_irq_regs());
1702     + cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
1703     }
1704    
1705     -void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
1706     +static void raise_backtrace(cpumask_t *mask)
1707     {
1708     - long this_cpu = get_cpu();
1709     + struct call_single_data *csd;
1710     + int cpu;
1711    
1712     - if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
1713     - dump_stack();
1714     + for_each_cpu(cpu, mask) {
1715     + /*
1716     + * If we previously sent an IPI to the target CPU & it hasn't
1717     + * cleared its bit in the busy cpumask then it didn't handle
1718     + * our previous IPI & it's not safe for us to reuse the
1719     + * call_single_data_t.
1720     + */
1721     + if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
1722     + pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
1723     + cpu);
1724     + continue;
1725     + }
1726    
1727     - smp_call_function_many(mask, arch_dump_stack, NULL, 1);
1728     + csd = &per_cpu(backtrace_csd, cpu);
1729     + csd->func = handle_backtrace;
1730     + smp_call_function_single_async(cpu, csd);
1731     + }
1732     +}
1733    
1734     - put_cpu();
1735     +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
1736     +{
1737     + nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
1738     }
1739    
1740     int mips_get_process_fp_mode(struct task_struct *task)
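
The rewritten MIPS backtrace path refuses to resend through a call_single_data that a previous target CPU never consumed, tracking in-flight requests in backtrace_csd_busy. The same guard can be expressed generically with a C11 atomic flag; this is an illustrative sketch, not kernel code, and it assumes the delivery mechanism (the IPI, in the patch) orders the payload stores against the consumer's reads:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct one_shot_req {
        atomic_flag busy;       /* set from submit until the target consumes it;
                                 * a static instance starts in the clear state */
        void (*func)(void *);
        void *arg;
    };

    /* producer side: mirrors the cpumask_test_and_set_cpu() guard above */
    static bool one_shot_submit(struct one_shot_req *r,
                                void (*fn)(void *), void *arg)
    {
        if (atomic_flag_test_and_set(&r->busy))
            return false;       /* previous request still in flight */
        r->func = fn;
        r->arg = arg;
        return true;            /* caller now delivers the request */
    }

    /* consumer side: run the request, then mark the slot reusable */
    static void one_shot_complete(struct one_shot_req *r)
    {
        r->func(r->arg);
        atomic_flag_clear(&r->busy);
    }
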
1741     diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
1742     index 8d8c24f3a963..742712b4bdc3 100644
1743     --- a/arch/x86/include/asm/asm.h
1744     +++ b/arch/x86/include/asm/asm.h
1745     @@ -45,6 +45,65 @@
1746     #define _ASM_SI __ASM_REG(si)
1747     #define _ASM_DI __ASM_REG(di)
1748    
1749     +#ifndef __x86_64__
1750     +/* 32 bit */
1751     +
1752     +#define _ASM_ARG1 _ASM_AX
1753     +#define _ASM_ARG2 _ASM_DX
1754     +#define _ASM_ARG3 _ASM_CX
1755     +
1756     +#define _ASM_ARG1L eax
1757     +#define _ASM_ARG2L edx
1758     +#define _ASM_ARG3L ecx
1759     +
1760     +#define _ASM_ARG1W ax
1761     +#define _ASM_ARG2W dx
1762     +#define _ASM_ARG3W cx
1763     +
1764     +#define _ASM_ARG1B al
1765     +#define _ASM_ARG2B dl
1766     +#define _ASM_ARG3B cl
1767     +
1768     +#else
1769     +/* 64 bit */
1770     +
1771     +#define _ASM_ARG1 _ASM_DI
1772     +#define _ASM_ARG2 _ASM_SI
1773     +#define _ASM_ARG3 _ASM_DX
1774     +#define _ASM_ARG4 _ASM_CX
1775     +#define _ASM_ARG5 r8
1776     +#define _ASM_ARG6 r9
1777     +
1778     +#define _ASM_ARG1Q rdi
1779     +#define _ASM_ARG2Q rsi
1780     +#define _ASM_ARG3Q rdx
1781     +#define _ASM_ARG4Q rcx
1782     +#define _ASM_ARG5Q r8
1783     +#define _ASM_ARG6Q r9
1784     +
1785     +#define _ASM_ARG1L edi
1786     +#define _ASM_ARG2L esi
1787     +#define _ASM_ARG3L edx
1788     +#define _ASM_ARG4L ecx
1789     +#define _ASM_ARG5L r8d
1790     +#define _ASM_ARG6L r9d
1791     +
1792     +#define _ASM_ARG1W di
1793     +#define _ASM_ARG2W si
1794     +#define _ASM_ARG3W dx
1795     +#define _ASM_ARG4W cx
1796     +#define _ASM_ARG5W r8w
1797     +#define _ASM_ARG6W r9w
1798     +
1799     +#define _ASM_ARG1B dil
1800     +#define _ASM_ARG2B sil
1801     +#define _ASM_ARG3B dl
1802     +#define _ASM_ARG4B cl
1803     +#define _ASM_ARG5B r8b
1804     +#define _ASM_ARG6B r9b
1805     +
1806     +#endif
1807     +
1808     /*
1809     * Macros to generate condition code outputs from inline assembly,
1810     * The output operand must be type "bool".
1811     diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
1812     index ac7692dcfa2e..8a8a6c66be9a 100644
1813     --- a/arch/x86/include/asm/irqflags.h
1814     +++ b/arch/x86/include/asm/irqflags.h
1815     @@ -12,7 +12,7 @@
1816     * Interrupt control:
1817     */
1818    
1819     -static inline unsigned long native_save_fl(void)
1820     +extern inline unsigned long native_save_fl(void)
1821     {
1822     unsigned long flags;
1823    
1824     diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
1825     index 4c9c61517613..a9ba968621cb 100644
1826     --- a/arch/x86/kernel/Makefile
1827     +++ b/arch/x86/kernel/Makefile
1828     @@ -56,6 +56,7 @@ obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
1829     obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
1830     obj-y += pci-iommu_table.o
1831     obj-y += resource.o
1832     +obj-y += irqflags.o
1833    
1834     obj-y += process.o
1835     obj-y += fpu/
1836     diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
1837     new file mode 100644
1838     index 000000000000..ddeeaac8adda
1839     --- /dev/null
1840     +++ b/arch/x86/kernel/irqflags.S
1841     @@ -0,0 +1,26 @@
1842     +/* SPDX-License-Identifier: GPL-2.0 */
1843     +
1844     +#include <asm/asm.h>
1845     +#include <asm/export.h>
1846     +#include <linux/linkage.h>
1847     +
1848     +/*
1849     + * unsigned long native_save_fl(void)
1850     + */
1851     +ENTRY(native_save_fl)
1852     + pushf
1853     + pop %_ASM_AX
1854     + ret
1855     +ENDPROC(native_save_fl)
1856     +EXPORT_SYMBOL(native_save_fl)
1857     +
1858     +/*
1859     + * void native_restore_fl(unsigned long flags)
1860     + * %eax/%rdi: flags
1861     + */
1862     +ENTRY(native_restore_fl)
1863     + push %_ASM_ARG1
1864     + popf
1865     + ret
1866     +ENDPROC(native_restore_fl)
1867     +EXPORT_SYMBOL(native_restore_fl)
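
This new file pairs with the native_save_fl() change above: once extern inline gains gnu89 semantics (see the compiler-gcc.h hunk later in this patch), no out-of-line body is emitted, so any call site the compiler declines to inline needs these stubs. The _ASM_ARG1 macro from the asm.h hunk lets one source serve both ABIs: 32-bit kernels build with regparm(3), so the first argument arrives in %eax, while the 64-bit SysV ABI passes it in %rdi. For reference, a hedged C sketch of the same two operations as inline assembly, mirroring the kernel's C-level versions:

    /* sketch only: save and restore EFLAGS/RFLAGS from C */
    static inline unsigned long save_flags(void)
    {
        unsigned long flags;

        /* pushf pushes the flags register; pop retrieves it */
        asm volatile("pushf ; pop %0" : "=rm" (flags) : : "memory");
        return flags;
    }

    static inline void restore_flags(unsigned long flags)
    {
        asm volatile("push %0 ; popf" : : "g" (flags) : "memory", "cc");
    }
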
1868     diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
1869     index d0fac641e717..a0b88f148990 100644
1870     --- a/drivers/atm/zatm.c
1871     +++ b/drivers/atm/zatm.c
1872     @@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
1873     return -EFAULT;
1874     if (pool < 0 || pool > ZATM_LAST_POOL)
1875     return -EINVAL;
1876     + pool = array_index_nospec(pool,
1877     + ZATM_LAST_POOL + 1);
1878     if (copy_from_user(&info,
1879     &((struct zatm_pool_req __user *) arg)->info,
1880     sizeof(info))) return -EFAULT;
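
array_index_nospec(), used here on the ioctl-supplied pool index, clamps an already-bounds-checked index so it cannot be used speculatively out of range. Below is a userspace sketch of the kernel's generic masking idea; it is valid only for sizes up to LONG_MAX and relies on arithmetic right shift of negative values, as the generic kernel helper does (architectures may substitute their own branch-free sequences):

    #include <limits.h>

    /* all-ones when idx < size, all-zeroes otherwise,
     * with no branch for the CPU to mispredict */
    static inline unsigned long index_mask(unsigned long idx,
                                           unsigned long size)
    {
        return ~(long)(idx | (size - 1UL - idx)) >>
               (sizeof(long) * CHAR_BIT - 1);
    }

    /* usage: after the bounds check, clamp before any dependent load,
     * e.g.  pool &= index_mask(pool, ZATM_LAST_POOL + 1); */
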
1881     diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
1882     index dae1e39139e9..c7524bbbaf98 100644
1883     --- a/drivers/crypto/amcc/crypto4xx_core.c
1884     +++ b/drivers/crypto/amcc/crypto4xx_core.c
1885     @@ -208,7 +208,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
1886     dev->pdr_pa);
1887     return -ENOMEM;
1888     }
1889     - memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
1890     + memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
1891     dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
1892     256 * PPC4XX_NUM_PD,
1893     &dev->shadow_sa_pool_pa,
1894     @@ -241,13 +241,15 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
1895    
1896     static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
1897     {
1898     - if (dev->pdr != NULL)
1899     + if (dev->pdr)
1900     dma_free_coherent(dev->core_dev->device,
1901     sizeof(struct ce_pd) * PPC4XX_NUM_PD,
1902     dev->pdr, dev->pdr_pa);
1903     +
1904     if (dev->shadow_sa_pool)
1905     dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
1906     dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
1907     +
1908     if (dev->shadow_sr_pool)
1909     dma_free_coherent(dev->core_dev->device,
1910     sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
1911     @@ -417,12 +419,12 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
1912    
1913     static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
1914     {
1915     - if (dev->sdr != NULL)
1916     + if (dev->sdr)
1917     dma_free_coherent(dev->core_dev->device,
1918     sizeof(struct ce_sd) * PPC4XX_NUM_SD,
1919     dev->sdr, dev->sdr_pa);
1920    
1921     - if (dev->scatter_buffer_va != NULL)
1922     + if (dev->scatter_buffer_va)
1923     dma_free_coherent(dev->core_dev->device,
1924     dev->scatter_buffer_size * PPC4XX_NUM_SD,
1925     dev->scatter_buffer_va,
1926     @@ -1034,12 +1036,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1927     break;
1928     }
1929    
1930     - if (rc) {
1931     - list_del(&alg->entry);
1932     + if (rc)
1933     kfree(alg);
1934     - } else {
1935     + else
1936     list_add_tail(&alg->entry, &sec_dev->alg_list);
1937     - }
1938     }
1939    
1940     return 0;
1941     @@ -1193,7 +1193,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
1942    
1943     rc = crypto4xx_build_gdr(core_dev->dev);
1944     if (rc)
1945     - goto err_build_gdr;
1946     + goto err_build_pdr;
1947    
1948     rc = crypto4xx_build_sdr(core_dev->dev);
1949     if (rc)
1950     @@ -1236,12 +1236,11 @@ static int crypto4xx_probe(struct platform_device *ofdev)
1951     err_request_irq:
1952     irq_dispose_mapping(core_dev->irq);
1953     tasklet_kill(&core_dev->tasklet);
1954     - crypto4xx_destroy_sdr(core_dev->dev);
1955     err_build_sdr:
1956     + crypto4xx_destroy_sdr(core_dev->dev);
1957     crypto4xx_destroy_gdr(core_dev->dev);
1958     -err_build_gdr:
1959     - crypto4xx_destroy_pdr(core_dev->dev);
1960     err_build_pdr:
1961     + crypto4xx_destroy_pdr(core_dev->dev);
1962     kfree(core_dev->dev);
1963     err_alloc_dev:
1964     kfree(core_dev);
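
The probe() relabeling above fixes a classic goto-unwind bug: the old err_build_gdr label destroyed the PDR twice on one path and skipped the SDR on another. The corrected shape, with hypothetical names, looks like this; each label undoes exactly the steps that succeeded before the jump:

    /* hypothetical helpers standing in for the ring builders */
    int build_pdr(void), build_gdr(void), build_sdr(void);
    void destroy_pdr(void), destroy_gdr(void);

    static int probe(void)
    {
        int rc;

        rc = build_pdr();
        if (rc)
            return rc;              /* nothing to undo yet */

        rc = build_gdr();
        if (rc)
            goto err_destroy_pdr;   /* pdr exists, gdr does not */

        rc = build_sdr();
        if (rc)
            goto err_destroy_gdr;

        return 0;

    err_destroy_gdr:
        destroy_gdr();
    err_destroy_pdr:
        destroy_pdr();
        return rc;
    }
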
1965     diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
1966     index 9cf7fcd28034..16a7df2a0246 100644
1967     --- a/drivers/mtd/devices/m25p80.c
1968     +++ b/drivers/mtd/devices/m25p80.c
1969     @@ -172,7 +172,8 @@ static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
1970    
1971     t[1].rx_buf = buf;
1972     t[1].rx_nbits = m25p80_rx_nbits(nor);
1973     - t[1].len = min(len, spi_max_transfer_size(spi));
1974     + t[1].len = min3(len, spi_max_transfer_size(spi),
1975     + spi_max_message_size(spi) - t[0].len);
1976     spi_message_add_tail(&t[1], &m);
1977    
1978     ret = spi_sync(spi, &m);
1979     diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
1980     index 08d91efceed0..c4078401b7de 100644
1981     --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
1982     +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
1983     @@ -1063,7 +1063,8 @@ static int bcm_enet_open(struct net_device *dev)
1984     val = enet_readl(priv, ENET_CTL_REG);
1985     val |= ENET_CTL_ENABLE_MASK;
1986     enet_writel(priv, val, ENET_CTL_REG);
1987     - enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1988     + if (priv->dma_has_sram)
1989     + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1990     enet_dmac_writel(priv, priv->dma_chan_en_mask,
1991     ENETDMAC_CHANCFG, priv->rx_chan);
1992    
1993     @@ -1790,7 +1791,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
1994     ret = PTR_ERR(priv->mac_clk);
1995     goto out;
1996     }
1997     - clk_prepare_enable(priv->mac_clk);
1998     + ret = clk_prepare_enable(priv->mac_clk);
1999     + if (ret)
2000     + goto out_put_clk_mac;
2001    
2002     /* initialize default and fetch platform data */
2003     priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2004     @@ -1822,9 +1825,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
2005     if (IS_ERR(priv->phy_clk)) {
2006     ret = PTR_ERR(priv->phy_clk);
2007     priv->phy_clk = NULL;
2008     - goto out_put_clk_mac;
2009     + goto out_disable_clk_mac;
2010     }
2011     - clk_prepare_enable(priv->phy_clk);
2012     + ret = clk_prepare_enable(priv->phy_clk);
2013     + if (ret)
2014     + goto out_put_clk_phy;
2015     }
2016    
2017     /* do minimal hardware init to be able to probe mii bus */
2018     @@ -1915,13 +1920,16 @@ static int bcm_enet_probe(struct platform_device *pdev)
2019     out_uninit_hw:
2020     /* turn off mdc clock */
2021     enet_writel(priv, 0, ENET_MIISC_REG);
2022     - if (priv->phy_clk) {
2023     + if (priv->phy_clk)
2024     clk_disable_unprepare(priv->phy_clk);
2025     +
2026     +out_put_clk_phy:
2027     + if (priv->phy_clk)
2028     clk_put(priv->phy_clk);
2029     - }
2030    
2031     -out_put_clk_mac:
2032     +out_disable_clk_mac:
2033     clk_disable_unprepare(priv->mac_clk);
2034     +out_put_clk_mac:
2035     clk_put(priv->mac_clk);
2036     out:
2037     free_netdev(dev);
2038     @@ -2766,7 +2774,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2039     ret = PTR_ERR(priv->mac_clk);
2040     goto out_unmap;
2041     }
2042     - clk_enable(priv->mac_clk);
2043     + ret = clk_prepare_enable(priv->mac_clk);
2044     + if (ret)
2045     + goto out_put_clk;
2046    
2047     priv->rx_chan = 0;
2048     priv->tx_chan = 1;
2049     @@ -2787,7 +2797,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2050    
2051     ret = register_netdev(dev);
2052     if (ret)
2053     - goto out_put_clk;
2054     + goto out_disable_clk;
2055    
2056     netif_carrier_off(dev);
2057     platform_set_drvdata(pdev, dev);
2058     @@ -2796,6 +2806,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2059    
2060     return 0;
2061    
2062     +out_disable_clk:
2063     + clk_disable_unprepare(priv->mac_clk);
2064     +
2065     out_put_clk:
2066     clk_put(priv->mac_clk);
2067    
2068     @@ -2827,6 +2840,9 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
2069     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2070     release_mem_region(res->start, resource_size(res));
2071    
2072     + clk_disable_unprepare(priv->mac_clk);
2073     + clk_put(priv->mac_clk);
2074     +
2075     free_netdev(dev);
2076     return 0;
2077     }
2078     diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2079     index 43da891fab97..dc0efbd91c32 100644
2080     --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2081     +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2082     @@ -50,6 +50,7 @@
2083     #include <linux/stringify.h>
2084     #include <linux/sched.h>
2085     #include <linux/slab.h>
2086     +#include <linux/nospec.h>
2087     #include <asm/uaccess.h>
2088    
2089     #include "common.h"
2090     @@ -2259,6 +2260,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2091    
2092     if (t.qset_idx >= nqsets)
2093     return -EINVAL;
2094     + t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2095    
2096     q = &adapter->params.sge.qset[q1 + t.qset_idx];
2097     t.rspq_size = q->rspq_size;
2098     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2099     index 6631fb0782d7..9680c8805178 100644
2100     --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2101     +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2102     @@ -784,6 +784,7 @@ static void cmd_work_handler(struct work_struct *work)
2103     struct semaphore *sem;
2104     unsigned long flags;
2105     int alloc_ret;
2106     + int cmd_mode;
2107    
2108     sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
2109     down(sem);
2110     @@ -830,6 +831,7 @@ static void cmd_work_handler(struct work_struct *work)
2111     set_signature(ent, !cmd->checksum_disabled);
2112     dump_command(dev, ent, 1);
2113     ent->ts1 = ktime_get_ns();
2114     + cmd_mode = cmd->mode;
2115    
2116     if (ent->callback)
2117     schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
2118     @@ -854,7 +856,7 @@ static void cmd_work_handler(struct work_struct *work)
2119     iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
2120     mmiowb();
2121     /* if not in polling don't use ent after this point */
2122     - if (cmd->mode == CMD_MODE_POLLING) {
2123     + if (cmd_mode == CMD_MODE_POLLING) {
2124     poll_timeout(ent);
2125     /* make sure we read the descriptor after ownership is SW */
2126     rmb();
2127     @@ -1256,7 +1258,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
2128     {
2129     struct mlx5_core_dev *dev = filp->private_data;
2130     struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
2131     - char outlen_str[8];
2132     + char outlen_str[8] = {0};
2133     int outlen;
2134     void *ptr;
2135     int err;
2136     @@ -1271,8 +1273,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
2137     if (copy_from_user(outlen_str, buf, count))
2138     return -EFAULT;
2139    
2140     - outlen_str[7] = 0;
2141     -
2142     err = sscanf(outlen_str, "%d", &outlen);
2143     if (err < 0)
2144     return err;
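
The cmd_mode local above latches the command mode in force when the doorbell is rung; re-reading cmd->mode afterwards can observe a concurrent polling/events switch and leave the entry used after the event handler has freed it. The shape of the fix, as an illustrative sketch with hypothetical types and helpers:

    enum { CMD_MODE_POLLING, CMD_MODE_EVENTS };   /* invented for the sketch */
    struct cmd { int mode; };
    struct cmd_entry;
    void ring_doorbell(struct cmd_entry *ent);
    void poll_for_completion(struct cmd_entry *ent);

    static void post_command(struct cmd *cmd, struct cmd_entry *ent)
    {
        int cmd_mode = cmd->mode;       /* latch before publishing */

        ring_doorbell(ent);             /* completion may run from here on */

        if (cmd_mode == CMD_MODE_POLLING)
            poll_for_completion(ent);   /* polling mode: we still own ent */
        /* events mode: the IRQ path owns 'ent' now; do not touch it */
    }
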
2145     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
2146     index 34e7184e23c9..43d7c8378fb4 100644
2147     --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
2148     +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
2149     @@ -575,7 +575,7 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
2150     static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
2151     int inlen)
2152     {
2153     - u32 out[MLX5_ST_SZ_DW(qtct_reg)];
2154     + u32 out[MLX5_ST_SZ_DW(qetc_reg)];
2155    
2156     if (!MLX5_CAP_GEN(mdev, ets))
2157     return -ENOTSUPP;
2158     @@ -587,7 +587,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
2159     static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
2160     int outlen)
2161     {
2162     - u32 in[MLX5_ST_SZ_DW(qtct_reg)];
2163     + u32 in[MLX5_ST_SZ_DW(qetc_reg)];
2164    
2165     if (!MLX5_CAP_GEN(mdev, ets))
2166     return -ENOTSUPP;
2167     diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
2168     index 9d59cb85c012..7b6824e560d2 100644
2169     --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
2170     +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
2171     @@ -677,9 +677,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
2172     p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
2173    
2174     memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
2175     - ARRAY_SIZE(p_local->local_chassis_id));
2176     + sizeof(p_local->local_chassis_id));
2177     memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
2178     - ARRAY_SIZE(p_local->local_port_id));
2179     + sizeof(p_local->local_port_id));
2180     }
2181    
2182     static void
2183     @@ -692,9 +692,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
2184     p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
2185    
2186     memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
2187     - ARRAY_SIZE(p_remote->peer_chassis_id));
2188     + sizeof(p_remote->peer_chassis_id));
2189     memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
2190     - ARRAY_SIZE(p_remote->peer_port_id));
2191     + sizeof(p_remote->peer_port_id));
2192     }
2193    
2194     static int
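
The qed_dcbx hunks swap ARRAY_SIZE() for sizeof() as the memcpy length: for an array of u32, ARRAY_SIZE() counts elements while memcpy wants bytes, so the old code copied only a quarter of each ID. A self-contained illustration (struct and field names invented for the example):

    #include <stdint.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct lldp_data { uint32_t chassis_id[4]; };   /* 4 elements, 16 bytes */

    void copy_chassis_id(uint32_t dst[4], const struct lldp_data *src)
    {
        /* ARRAY_SIZE() here is 4, sizeof() is 16: using the former as a
         * byte count, as the old code did, silently truncates the copy */
        memcpy(dst, src->chassis_id, sizeof(src->chassis_id));
    }
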
2195     diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
2196     index 0b949c6d83fc..f36bd0bd37da 100644
2197     --- a/drivers/net/ethernet/qlogic/qed/qed_main.c
2198     +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
2199     @@ -23,6 +23,7 @@
2200     #include <linux/vmalloc.h>
2201     #include <linux/qed/qed_if.h>
2202     #include <linux/qed/qed_ll2_if.h>
2203     +#include <linux/crash_dump.h>
2204    
2205     #include "qed.h"
2206     #include "qed_sriov.h"
2207     @@ -701,6 +702,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
2208     /* We want a minimum of one slowpath and one fastpath vector per hwfn */
2209     cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
2210    
2211     + if (is_kdump_kernel()) {
2212     + DP_INFO(cdev,
2213     + "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
2214     + cdev->int_params.in.min_msix_cnt);
2215     + cdev->int_params.in.num_vectors =
2216     + cdev->int_params.in.min_msix_cnt;
2217     + }
2218     +
2219     rc = qed_set_int_mode(cdev, false);
2220     if (rc) {
2221     DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
2222     diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
2223     index d6ad0fbd054e..920321bf4bb6 100644
2224     --- a/drivers/net/ethernet/sun/sungem.c
2225     +++ b/drivers/net/ethernet/sun/sungem.c
2226     @@ -59,8 +59,7 @@
2227     #include <linux/sungem_phy.h>
2228     #include "sungem.h"
2229    
2230     -/* Stripping FCS is causing problems, disabled for now */
2231     -#undef STRIP_FCS
2232     +#define STRIP_FCS
2233    
2234     #define DEFAULT_MSG (NETIF_MSG_DRV | \
2235     NETIF_MSG_PROBE | \
2236     @@ -434,7 +433,7 @@ static int gem_rxmac_reset(struct gem *gp)
2237     writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
2238     writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
2239     val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
2240     - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
2241     + (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
2242     writel(val, gp->regs + RXDMA_CFG);
2243     if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
2244     writel(((5 & RXDMA_BLANK_IPKTS) |
2245     @@ -759,7 +758,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
2246     struct net_device *dev = gp->dev;
2247     int entry, drops, work_done = 0;
2248     u32 done;
2249     - __sum16 csum;
2250    
2251     if (netif_msg_rx_status(gp))
2252     printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
2253     @@ -854,9 +852,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
2254     skb = copy_skb;
2255     }
2256    
2257     - csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
2258     - skb->csum = csum_unfold(csum);
2259     - skb->ip_summed = CHECKSUM_COMPLETE;
2260     + if (likely(dev->features & NETIF_F_RXCSUM)) {
2261     + __sum16 csum;
2262     +
2263     + csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
2264     + skb->csum = csum_unfold(csum);
2265     + skb->ip_summed = CHECKSUM_COMPLETE;
2266     + }
2267     skb->protocol = eth_type_trans(skb, gp->dev);
2268    
2269     napi_gro_receive(&gp->napi, skb);
2270     @@ -1754,7 +1756,7 @@ static void gem_init_dma(struct gem *gp)
2271     writel(0, gp->regs + TXDMA_KICK);
2272    
2273     val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
2274     - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
2275     + (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
2276     writel(val, gp->regs + RXDMA_CFG);
2277    
2278     writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
2279     @@ -2972,8 +2974,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2280     pci_set_drvdata(pdev, dev);
2281    
2282     /* We can do scatter/gather and HW checksum */
2283     - dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2284     - dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2285     + dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2286     + dev->features = dev->hw_features;
2287     if (pci_using_dac)
2288     dev->features |= NETIF_F_HIGHDMA;
2289    
2290     diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
2291     index dfbc4ef6d507..24eb5755604f 100644
2292     --- a/drivers/net/ipvlan/ipvlan_main.c
2293     +++ b/drivers/net/ipvlan/ipvlan_main.c
2294     @@ -525,7 +525,8 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
2295     ipvlan->dev = dev;
2296     ipvlan->port = port;
2297     ipvlan->sfeatures = IPVLAN_FEATURES;
2298     - ipvlan_adjust_mtu(ipvlan, phy_dev);
2299     + if (!tb[IFLA_MTU])
2300     + ipvlan_adjust_mtu(ipvlan, phy_dev);
2301     INIT_LIST_HEAD(&ipvlan->addrs);
2302    
2303     /* TODO Probably put random address here to be presented to the
2304     diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
2305     index f5a96678494b..5e0626c80b81 100644
2306     --- a/drivers/net/usb/lan78xx.c
2307     +++ b/drivers/net/usb/lan78xx.c
2308     @@ -2964,6 +2964,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2309     pkt_cnt = 0;
2310     count = 0;
2311     length = 0;
2312     + spin_lock_irqsave(&tqp->lock, flags);
2313     for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2314     if (skb_is_gso(skb)) {
2315     if (pkt_cnt) {
2316     @@ -2972,7 +2973,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2317     }
2318     count = 1;
2319     length = skb->len - TX_OVERHEAD;
2320     - skb2 = skb_dequeue(tqp);
2321     + __skb_unlink(skb, tqp);
2322     + spin_unlock_irqrestore(&tqp->lock, flags);
2323     goto gso_skb;
2324     }
2325    
2326     @@ -2981,6 +2983,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2327     skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2328     pkt_cnt++;
2329     }
2330     + spin_unlock_irqrestore(&tqp->lock, flags);
2331    
2332     /* copy to a single skb */
2333     skb = alloc_skb(skb_totallen, GFP_ATOMIC);
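
The lan78xx change takes the queue lock around the whole walk and switches from skb_dequeue(), which acquires that same lock, to __skb_unlink(), the caller-holds-the-lock variant, curing both an unlocked traversal and a would-be self-deadlock. The same pattern on a generic doubly linked queue, sketched with pthreads:

    #include <pthread.h>
    #include <stddef.h>

    struct node { struct node *next, *prev; };

    struct queue {
        pthread_mutex_t lock;
        struct node head;       /* circular list; head.next is the first entry */
    };

    /* __unlink(): caller must already hold q->lock — the analogue of
     * __skb_unlink() versus skb_dequeue() in the hunk above */
    static void __unlink(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    static struct node *pick_first_match(struct queue *q,
                                         int (*match)(struct node *))
    {
        struct node *n, *found = NULL;

        pthread_mutex_lock(&q->lock);
        for (n = q->head.next; n != &q->head; n = n->next) {
            if (match(n)) {
                __unlink(n);    /* safe: lock already held */
                found = n;
                break;
            }
        }
        pthread_mutex_unlock(&q->lock);
        return found;
    }
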
2334     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2335     index 85bc0ca61389..6d654d65f8a0 100644
2336     --- a/drivers/net/usb/qmi_wwan.c
2337     +++ b/drivers/net/usb/qmi_wwan.c
2338     @@ -946,6 +946,7 @@ static const struct usb_device_id products[] = {
2339     {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
2340     {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
2341     {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
2342     + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */
2343     {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
2344     {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
2345     {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
2346     diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
2347     index d3d89b05f66e..5988674818ed 100644
2348     --- a/drivers/net/usb/r8152.c
2349     +++ b/drivers/net/usb/r8152.c
2350     @@ -3327,7 +3327,8 @@ static int rtl8152_close(struct net_device *netdev)
2351     #ifdef CONFIG_PM_SLEEP
2352     unregister_pm_notifier(&tp->pm_notifier);
2353     #endif
2354     - napi_disable(&tp->napi);
2355     + if (!test_bit(RTL8152_UNPLUG, &tp->flags))
2356     + napi_disable(&tp->napi);
2357     clear_bit(WORK_ENABLE, &tp->flags);
2358     usb_kill_urb(tp->intr_urb);
2359     cancel_delayed_work_sync(&tp->schedule);
2360     diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
2361     index 4da4e458142c..9526643312d9 100644
2362     --- a/drivers/net/wireless/realtek/rtlwifi/core.c
2363     +++ b/drivers/net/wireless/realtek/rtlwifi/core.c
2364     @@ -131,7 +131,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
2365     firmware->size);
2366     rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
2367     }
2368     - rtlpriv->rtlhal.fwsize = firmware->size;
2369     release_firmware(firmware);
2370     }
2371    
2372     diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
2373     index 487586e2d8b9..353c93bc459b 100644
2374     --- a/drivers/vhost/net.c
2375     +++ b/drivers/vhost/net.c
2376     @@ -1052,7 +1052,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
2377     if (ubufs)
2378     vhost_net_ubuf_put_wait_and_free(ubufs);
2379     err_ubufs:
2380     - sockfd_put(sock);
2381     + if (sock)
2382     + sockfd_put(sock);
2383     err_vq:
2384     mutex_unlock(&vq->mutex);
2385     err:
2386     diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
2387     index f2961b13e8c5..c26d046adaaa 100644
2388     --- a/fs/ocfs2/aops.c
2389     +++ b/fs/ocfs2/aops.c
2390     @@ -134,6 +134,19 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
2391     return err;
2392     }
2393    
2394     +static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
2395     + struct buffer_head *bh_result, int create)
2396     +{
2397     + int ret = 0;
2398     + struct ocfs2_inode_info *oi = OCFS2_I(inode);
2399     +
2400     + down_read(&oi->ip_alloc_sem);
2401     + ret = ocfs2_get_block(inode, iblock, bh_result, create);
2402     + up_read(&oi->ip_alloc_sem);
2403     +
2404     + return ret;
2405     +}
2406     +
2407     int ocfs2_get_block(struct inode *inode, sector_t iblock,
2408     struct buffer_head *bh_result, int create)
2409     {
2410     @@ -2120,7 +2133,7 @@ static void ocfs2_dio_free_write_ctx(struct inode *inode,
2411     * called like this: dio->get_blocks(dio->inode, fs_startblk,
2412     * fs_count, map_bh, dio->rw == WRITE);
2413     */
2414     -static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
2415     +static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
2416     struct buffer_head *bh_result, int create)
2417     {
2418     struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2419     @@ -2146,12 +2159,9 @@ static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
2420     * while file size will be changed.
2421     */
2422     if (pos + total_len <= i_size_read(inode)) {
2423     - down_read(&oi->ip_alloc_sem);
2424     - /* This is the fast path for re-write. */
2425     - ret = ocfs2_get_block(inode, iblock, bh_result, create);
2426     -
2427     - up_read(&oi->ip_alloc_sem);
2428    
2429     + /* This is the fast path for re-write. */
2430     + ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
2431     if (buffer_mapped(bh_result) &&
2432     !buffer_new(bh_result) &&
2433     ret == 0)
2434     @@ -2416,9 +2426,9 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2435     return 0;
2436    
2437     if (iov_iter_rw(iter) == READ)
2438     - get_block = ocfs2_get_block;
2439     + get_block = ocfs2_lock_get_block;
2440     else
2441     - get_block = ocfs2_dio_get_block;
2442     + get_block = ocfs2_dio_wr_get_block;
2443    
2444     return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
2445     iter, get_block,
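
The new ocfs2_lock_get_block() wrapper routes direct-I/O reads through the same ip_alloc_sem protection the write path already had, so the block mapping cannot change mid-lookup. The general shape, sketched with a pthread read-write lock and a hypothetical raw lookup:

    #include <pthread.h>

    static pthread_rwlock_t alloc_sem = PTHREAD_RWLOCK_INITIALIZER;

    /* assumed to exist: the raw lookup that must not race with allocation */
    int get_block_raw(unsigned long block);

    /* same shape as ocfs2_lock_get_block(): lookups take the lock shared,
     * while code that changes the mapping takes it exclusively */
    int get_block_locked(unsigned long block)
    {
        int ret;

        pthread_rwlock_rdlock(&alloc_sem);
        ret = get_block_raw(block);
        pthread_rwlock_unlock(&alloc_sem);
        return ret;
    }
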
2446     diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
2447     index b17d180bdc16..c204ac9b49e5 100644
2448     --- a/fs/ocfs2/cluster/nodemanager.c
2449     +++ b/fs/ocfs2/cluster/nodemanager.c
2450     @@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
2451     "panic", /* O2NM_FENCE_PANIC */
2452     };
2453    
2454     +static inline void o2nm_lock_subsystem(void);
2455     +static inline void o2nm_unlock_subsystem(void);
2456     +
2457     struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
2458     {
2459     struct o2nm_node *node = NULL;
2460     @@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
2461     {
2462     /* through the first node_set .parent
2463     * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
2464     - return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
2465     + if (node->nd_item.ci_parent)
2466     + return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
2467     + else
2468     + return NULL;
2469     }
2470    
2471     enum {
2472     @@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
2473     size_t count)
2474     {
2475     struct o2nm_node *node = to_o2nm_node(item);
2476     - struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
2477     + struct o2nm_cluster *cluster;
2478     unsigned long tmp;
2479     char *p = (char *)page;
2480     int ret = 0;
2481     @@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
2482     !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
2483     return -EINVAL; /* XXX */
2484    
2485     + o2nm_lock_subsystem();
2486     + cluster = to_o2nm_cluster_from_node(node);
2487     + if (!cluster) {
2488     + o2nm_unlock_subsystem();
2489     + return -EINVAL;
2490     + }
2491     +
2492     write_lock(&cluster->cl_nodes_lock);
2493     if (cluster->cl_nodes[tmp])
2494     ret = -EEXIST;
2495     @@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
2496     set_bit(tmp, cluster->cl_nodes_bitmap);
2497     }
2498     write_unlock(&cluster->cl_nodes_lock);
2499     + o2nm_unlock_subsystem();
2500     +
2501     if (ret)
2502     return ret;
2503    
2504     @@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
2505     size_t count)
2506     {
2507     struct o2nm_node *node = to_o2nm_node(item);
2508     - struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
2509     + struct o2nm_cluster *cluster;
2510     int ret, i;
2511     struct rb_node **p, *parent;
2512     unsigned int octets[4];
2513     @@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
2514     be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
2515     }
2516    
2517     + o2nm_lock_subsystem();
2518     + cluster = to_o2nm_cluster_from_node(node);
2519     + if (!cluster) {
2520     + o2nm_unlock_subsystem();
2521     + return -EINVAL;
2522     + }
2523     +
2524     ret = 0;
2525     write_lock(&cluster->cl_nodes_lock);
2526     if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
2527     @@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
2528     rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
2529     }
2530     write_unlock(&cluster->cl_nodes_lock);
2531     + o2nm_unlock_subsystem();
2532     +
2533     if (ret)
2534     return ret;
2535    
2536     @@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
2537     size_t count)
2538     {
2539     struct o2nm_node *node = to_o2nm_node(item);
2540     - struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
2541     + struct o2nm_cluster *cluster;
2542     unsigned long tmp;
2543     char *p = (char *)page;
2544     ssize_t ret;
2545     @@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
2546     !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
2547     return -EINVAL; /* XXX */
2548    
2549     + o2nm_lock_subsystem();
2550     + cluster = to_o2nm_cluster_from_node(node);
2551     + if (!cluster) {
2552     + ret = -EINVAL;
2553     + goto out;
2554     + }
2555     +
2556     /* the only failure case is trying to set a new local node
2557     * when a different one is already set */
2558     if (tmp && tmp == cluster->cl_has_local &&
2559     - cluster->cl_local_node != node->nd_num)
2560     - return -EBUSY;
2561     + cluster->cl_local_node != node->nd_num) {
2562     + ret = -EBUSY;
2563     + goto out;
2564     + }
2565    
2566     /* bring up the rx thread if we're setting the new local node. */
2567     if (tmp && !cluster->cl_has_local) {
2568     ret = o2net_start_listening(node);
2569     if (ret)
2570     - return ret;
2571     + goto out;
2572     }
2573    
2574     if (!tmp && cluster->cl_has_local &&
2575     @@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
2576     cluster->cl_local_node = node->nd_num;
2577     }
2578    
2579     - return count;
2580     + ret = count;
2581     +
2582     +out:
2583     + o2nm_unlock_subsystem();
2584     + return ret;
2585     }
2586    
2587     CONFIGFS_ATTR(o2nm_node_, num);
2588     @@ -738,6 +775,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = {
2589     },
2590     };
2591    
2592     +static inline void o2nm_lock_subsystem(void)
2593     +{
2594     + mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
2595     +}
2596     +
2597     +static inline void o2nm_unlock_subsystem(void)
2598     +{
2599     + mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
2600     +}
2601     +
2602     int o2nm_depend_item(struct config_item *item)
2603     {
2604     return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
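
Each store handler used to cache the configfs parent before taking any lock; the fix takes the subsystem mutex first, re-derives the parent, and bails out with -EINVAL when the node has already been unlinked (ci_parent is NULL). Reduced to its essentials, with invented types standing in for the configfs machinery:

    #include <errno.h>
    #include <pthread.h>

    struct cluster;                             /* hypothetical stand-ins */
    struct node { struct cluster *parent; };

    static pthread_mutex_t subsys_lock = PTHREAD_MUTEX_INITIALIZER;

    static int node_store(struct node *node)
    {
        struct cluster *cluster;
        int ret = 0;

        pthread_mutex_lock(&subsys_lock);
        cluster = node->parent;     /* NULL once the node is unlinked */
        if (!cluster) {
            ret = -EINVAL;
            goto out;
        }
        /* ... safe to touch *cluster: unlink is serialized by the lock ... */
    out:
        pthread_mutex_unlock(&subsys_lock);
        return ret;
    }
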
2605     diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
2606     index 4f3f928076f3..92470e5973f8 100644
2607     --- a/fs/reiserfs/prints.c
2608     +++ b/fs/reiserfs/prints.c
2609     @@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key)
2610     }
2611    
2612     /* %k */
2613     -static void sprintf_le_key(char *buf, struct reiserfs_key *key)
2614     +static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key)
2615     {
2616     if (key)
2617     - sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
2618     - le32_to_cpu(key->k_objectid), le_offset(key),
2619     - le_type(key));
2620     + return scnprintf(buf, size, "[%d %d %s %s]",
2621     + le32_to_cpu(key->k_dir_id),
2622     + le32_to_cpu(key->k_objectid), le_offset(key),
2623     + le_type(key));
2624     else
2625     - sprintf(buf, "[NULL]");
2626     + return scnprintf(buf, size, "[NULL]");
2627     }
2628    
2629     /* %K */
2630     -static void sprintf_cpu_key(char *buf, struct cpu_key *key)
2631     +static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key)
2632     {
2633     if (key)
2634     - sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
2635     - key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
2636     - cpu_type(key));
2637     + return scnprintf(buf, size, "[%d %d %s %s]",
2638     + key->on_disk_key.k_dir_id,
2639     + key->on_disk_key.k_objectid,
2640     + reiserfs_cpu_offset(key), cpu_type(key));
2641     else
2642     - sprintf(buf, "[NULL]");
2643     + return scnprintf(buf, size, "[NULL]");
2644     }
2645    
2646     -static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
2647     +static int scnprintf_de_head(char *buf, size_t size,
2648     + struct reiserfs_de_head *deh)
2649     {
2650     if (deh)
2651     - sprintf(buf,
2652     - "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
2653     - deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
2654     - deh_location(deh), deh_state(deh));
2655     + return scnprintf(buf, size,
2656     + "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
2657     + deh_offset(deh), deh_dir_id(deh),
2658     + deh_objectid(deh), deh_location(deh),
2659     + deh_state(deh));
2660     else
2661     - sprintf(buf, "[NULL]");
2662     + return scnprintf(buf, size, "[NULL]");
2663    
2664     }
2665    
2666     -static void sprintf_item_head(char *buf, struct item_head *ih)
2667     +static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih)
2668     {
2669     if (ih) {
2670     - strcpy(buf,
2671     - (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
2672     - sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
2673     - sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
2674     - "free_space(entry_count) %d",
2675     - ih_item_len(ih), ih_location(ih), ih_free_space(ih));
2676     + char *p = buf;
2677     + char * const end = buf + size;
2678     +
2679     + p += scnprintf(p, end - p, "%s",
2680     + (ih_version(ih) == KEY_FORMAT_3_6) ?
2681     + "*3.6* " : "*3.5*");
2682     +
2683     + p += scnprintf_le_key(p, end - p, &ih->ih_key);
2684     +
2685     + p += scnprintf(p, end - p,
2686     + ", item_len %d, item_location %d, free_space(entry_count) %d",
2687     + ih_item_len(ih), ih_location(ih),
2688     + ih_free_space(ih));
2689     + return p - buf;
2690     } else
2691     - sprintf(buf, "[NULL]");
2692     + return scnprintf(buf, size, "[NULL]");
2693     }
2694    
2695     -static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
2696     +static int scnprintf_direntry(char *buf, size_t size,
2697     + struct reiserfs_dir_entry *de)
2698     {
2699     char name[20];
2700    
2701     memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
2702     name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
2703     - sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
2704     + return scnprintf(buf, size, "\"%s\"==>[%d %d]",
2705     + name, de->de_dir_id, de->de_objectid);
2706     }
2707    
2708     -static void sprintf_block_head(char *buf, struct buffer_head *bh)
2709     +static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh)
2710     {
2711     - sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
2712     - B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
2713     + return scnprintf(buf, size,
2714     + "level=%d, nr_items=%d, free_space=%d rdkey ",
2715     + B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
2716     }
2717    
2718     -static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
2719     +static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
2720     {
2721     - sprintf(buf,
2722     - "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
2723     - bh->b_bdev, bh->b_size,
2724     - (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
2725     - bh->b_state, bh->b_page,
2726     - buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
2727     - buffer_dirty(bh) ? "DIRTY" : "CLEAN",
2728     - buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
2729     + return scnprintf(buf, size,
2730     + "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
2731     + bh->b_bdev, bh->b_size,
2732     + (unsigned long long)bh->b_blocknr,
2733     + atomic_read(&(bh->b_count)),
2734     + bh->b_state, bh->b_page,
2735     + buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
2736     + buffer_dirty(bh) ? "DIRTY" : "CLEAN",
2737     + buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
2738     }
2739    
2740     -static void sprintf_disk_child(char *buf, struct disk_child *dc)
2741     +static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc)
2742     {
2743     - sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
2744     - dc_size(dc));
2745     + return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]",
2746     + dc_block_number(dc), dc_size(dc));
2747     }
2748    
2749     static char *is_there_reiserfs_struct(char *fmt, int *what)
2750     @@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args)
2751     char *fmt1 = fmt_buf;
2752     char *k;
2753     char *p = error_buf;
2754     + char * const end = &error_buf[sizeof(error_buf)];
2755     int what;
2756    
2757     spin_lock(&error_lock);
2758    
2759     - strcpy(fmt1, fmt);
2760     + if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) {
2761     + strscpy(error_buf, "format string too long", end - error_buf);
2762     + goto out_unlock;
2763     + }
2764    
2765     while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
2766     *k = 0;
2767    
2768     - p += vsprintf(p, fmt1, args);
2769     + p += vscnprintf(p, end - p, fmt1, args);
2770    
2771     switch (what) {
2772     case 'k':
2773     - sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
2774     + p += scnprintf_le_key(p, end - p,
2775     + va_arg(args, struct reiserfs_key *));
2776     break;
2777     case 'K':
2778     - sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
2779     + p += scnprintf_cpu_key(p, end - p,
2780     + va_arg(args, struct cpu_key *));
2781     break;
2782     case 'h':
2783     - sprintf_item_head(p, va_arg(args, struct item_head *));
2784     + p += scnprintf_item_head(p, end - p,
2785     + va_arg(args, struct item_head *));
2786     break;
2787     case 't':
2788     - sprintf_direntry(p,
2789     - va_arg(args,
2790     - struct reiserfs_dir_entry *));
2791     + p += scnprintf_direntry(p, end - p,
2792     + va_arg(args, struct reiserfs_dir_entry *));
2793     break;
2794     case 'y':
2795     - sprintf_disk_child(p,
2796     - va_arg(args, struct disk_child *));
2797     + p += scnprintf_disk_child(p, end - p,
2798     + va_arg(args, struct disk_child *));
2799     break;
2800     case 'z':
2801     - sprintf_block_head(p,
2802     - va_arg(args, struct buffer_head *));
2803     + p += scnprintf_block_head(p, end - p,
2804     + va_arg(args, struct buffer_head *));
2805     break;
2806     case 'b':
2807     - sprintf_buffer_head(p,
2808     - va_arg(args, struct buffer_head *));
2809     + p += scnprintf_buffer_head(p, end - p,
2810     + va_arg(args, struct buffer_head *));
2811     break;
2812     case 'a':
2813     - sprintf_de_head(p,
2814     - va_arg(args,
2815     - struct reiserfs_de_head *));
2816     + p += scnprintf_de_head(p, end - p,
2817     + va_arg(args, struct reiserfs_de_head *));
2818     break;
2819     }
2820    
2821     - p += strlen(p);
2822     fmt1 = k + 2;
2823     }
2824     - vsprintf(p, fmt1, args);
2825     + p += vscnprintf(p, end - p, fmt1, args);
2826     +out_unlock:
2827     spin_unlock(&error_lock);
2828    
2829     }
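
Every sprintf() into the fixed error_buf above becomes a bounded scnprintf() whose return value is the number of bytes actually written, so the cursor arithmetic p += scnprintf(p, end - p, ...) can never advance past end, even on truncation; snprintf()'s return value (the would-be length) would overshoot. A userspace stand-in for scnprintf(), assuming only standard vsnprintf():

    #include <stdarg.h>
    #include <stdio.h>

    /* returns what was actually written (excluding the NUL),
     * never more than size - 1, and 0 for a zero-sized buffer */
    static int scnprintf_demo(char *buf, size_t size, const char *fmt, ...)
    {
        va_list ap;
        int n;

        if (size == 0)
            return 0;
        va_start(ap, fmt);
        n = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        if (n < 0)
            return 0;
        return (size_t)n < size ? n : (int)(size - 1);
    }
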
2830     diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
2831     index a031897fca76..ca1d2cc2cdfa 100644
2832     --- a/include/linux/arm-smccc.h
2833     +++ b/include/linux/arm-smccc.h
2834     @@ -80,6 +80,11 @@
2835     ARM_SMCCC_SMC_32, \
2836     0, 0x8000)
2837    
2838     +#define ARM_SMCCC_ARCH_WORKAROUND_2 \
2839     + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
2840     + ARM_SMCCC_SMC_32, \
2841     + 0, 0x7fff)
2842     +
2843     #ifndef __ASSEMBLY__
2844    
2845     #include <linux/linkage.h>
2846     @@ -291,5 +296,10 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
2847     */
2848     #define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
2849    
2850     +/* Return codes defined in ARM DEN 0070A */
2851     +#define SMCCC_RET_SUCCESS 0
2852     +#define SMCCC_RET_NOT_SUPPORTED -1
2853     +#define SMCCC_RET_NOT_REQUIRED -2
2854     +
2855     #endif /*__ASSEMBLY__*/
2856     #endif /*__LINUX_ARM_SMCCC_H*/
2857     diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
2858     index ad793c69cc46..8e82e3373eaf 100644
2859     --- a/include/linux/compiler-gcc.h
2860     +++ b/include/linux/compiler-gcc.h
2861     @@ -64,22 +64,41 @@
2862     #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
2863     #endif
2864    
2865     +/*
2866     + * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
2867     + * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
2868     + * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
2869     + * defined so the gnu89 semantics are the default.
2870     + */
2871     +#ifdef __GNUC_STDC_INLINE__
2872     +# define __gnu_inline __attribute__((gnu_inline))
2873     +#else
2874     +# define __gnu_inline
2875     +#endif
2876     +
2877     /*
2878     * Force always-inline if the user requests it so via the .config,
2879     - * or if gcc is too old:
2880     + * or if gcc is too old.
2881     + * GCC does not warn about unused static inline functions for
2882     + * -Wunused-function. This turns out to avoid the need for complex #ifdef
2883     + * directives. Suppress the warning in clang as well by using "unused"
2884     + * function attribute, which is redundant but not harmful for gcc.
2885     + * Prefer gnu_inline, so that extern inline functions do not emit an
2886     + * externally visible function. This makes extern inline behave as per gnu89
2887     + * semantics rather than c99. This prevents multiple symbol definition errors
2888     + * of extern inline functions at link time.
2889     + * A lot of inline functions can cause havoc with function tracing.
2890     */
2891     #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
2892     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
2893     -#define inline inline __attribute__((always_inline)) notrace
2894     -#define __inline__ __inline__ __attribute__((always_inline)) notrace
2895     -#define __inline __inline __attribute__((always_inline)) notrace
2896     +#define inline \
2897     + inline __attribute__((always_inline, unused)) notrace __gnu_inline
2898     #else
2899     -/* A lot of inline functions can cause havoc with function tracing */
2900     -#define inline inline notrace
2901     -#define __inline__ __inline__ notrace
2902     -#define __inline __inline notrace
2903     +#define inline inline __attribute__((unused)) notrace __gnu_inline
2904     #endif
2905    
2906     +#define __inline__ inline
2907     +#define __inline inline
2908     #define __always_inline inline __attribute__((always_inline))
2909     #define noinline __attribute__((noinline))
2910    
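
The comment block above describes gnu89 extern inline semantics; a minimal two-file demonstration of what the gnu_inline attribute buys (file names hypothetical): the header definition emits no symbol, and exactly one translation unit supplies the out-of-line body — precisely the native_save_fl()/irqflags.S pairing earlier in this patch.

    /* flags.h (hypothetical) — inline-only definition; with gnu_inline
     * this never emits an out-of-line twice(), whatever -std= says */
    extern inline __attribute__((gnu_inline)) int twice(int x)
    {
        return 2 * x;
    }

    /* flags.c (hypothetical) — the single out-of-line definition that
     * satisfies any call the compiler decides not to inline */
    int twice(int x)
    {
        return 2 * x;
    }
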
2911     diff --git a/include/linux/string.h b/include/linux/string.h
2912     index 0c88c0a1a72b..60042e5e88ff 100644
2913     --- a/include/linux/string.h
2914     +++ b/include/linux/string.h
2915     @@ -27,7 +27,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
2916     size_t strlcpy(char *, const char *, size_t);
2917     #endif
2918     #ifndef __HAVE_ARCH_STRSCPY
2919     -ssize_t __must_check strscpy(char *, const char *, size_t);
2920     +ssize_t strscpy(char *, const char *, size_t);
2921     #endif
2922     #ifndef __HAVE_ARCH_STRCAT
2923     extern char * strcat(char *, const char *);
2924     diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2925     index cb6fbb525ba6..18c1f07e4f3b 100644
2926     --- a/net/bridge/netfilter/ebtables.c
2927     +++ b/net/bridge/netfilter/ebtables.c
2928     @@ -406,6 +406,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
2929     watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
2930     if (IS_ERR(watcher))
2931     return PTR_ERR(watcher);
2932     +
2933     + if (watcher->family != NFPROTO_BRIDGE) {
2934     + module_put(watcher->me);
2935     + return -ENOENT;
2936     + }
2937     +
2938     w->u.watcher = watcher;
2939    
2940     par->target = watcher;
2941     @@ -727,6 +733,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
2942     goto cleanup_watchers;
2943     }
2944    
2945     + /* Reject UNSPEC, xtables verdicts/return values are incompatible */
2946     + if (target->family != NFPROTO_BRIDGE) {
2947     + module_put(target->me);
2948     + ret = -ENOENT;
2949     + goto cleanup_watchers;
2950     + }
2951     +
2952     t->u.target = target;
2953     if (t->u.target == &ebt_standard_target) {
2954     if (gap < sizeof(struct ebt_standard_target)) {
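
xt_request_find_target() returns with a module reference held, so the new NFPROTO_BRIDGE family checks must drop that reference on every rejection path before erroring out. The lookup-validate-release shape, with hypothetical helpers standing in for the xtables API:

    #include <errno.h>
    #include <stddef.h>

    struct target { int family; };                      /* invented */
    struct target *lookup_target(const char *name);     /* takes a reference */
    void put_target(struct target *t);                  /* drops it */

    static int check_target(const char *name, int want_family)
    {
        struct target *t = lookup_target(name);

        if (!t)
            return -ENOENT;
        if (t->family != want_family) {
            put_target(t);      /* every rejection drops the ref */
            return -ENOENT;
        }
        /* keep the reference for as long as the rule lives */
        return 0;
    }
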
2955     diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
2956     index 119c04317d48..03fcf3ee1534 100644
2957     --- a/net/dccp/ccids/ccid3.c
2958     +++ b/net/dccp/ccids/ccid3.c
2959     @@ -599,7 +599,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
2960     {
2961     struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
2962     struct dccp_sock *dp = dccp_sk(sk);
2963     - ktime_t now = ktime_get_real();
2964     + ktime_t now = ktime_get();
2965     s64 delta = 0;
2966    
2967     switch (fbtype) {
2968     @@ -624,15 +624,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
2969     case CCID3_FBACK_PERIODIC:
2970     delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
2971     if (delta <= 0)
2972     - DCCP_BUG("delta (%ld) <= 0", (long)delta);
2973     - else
2974     - hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
2975     + delta = 1;
2976     + hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
2977     break;
2978     default:
2979     return;
2980     }
2981    
2982     - ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
2983     + ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
2984     hc->rx_x_recv, hc->rx_pinv);
2985    
2986     hc->rx_tstamp_last_feedback = now;
2987     @@ -679,7 +678,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
2988     static u32 ccid3_first_li(struct sock *sk)
2989     {
2990     struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
2991     - u32 x_recv, p, delta;
2992     + u32 x_recv, p;
2993     + s64 delta;
2994     u64 fval;
2995    
2996     if (hc->rx_rtt == 0) {
2997     @@ -687,7 +687,9 @@ static u32 ccid3_first_li(struct sock *sk)
2998     hc->rx_rtt = DCCP_FALLBACK_RTT;
2999     }
3000    
3001     - delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
3002     + delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
3003     + if (delta <= 0)
3004     + delta = 1;
3005     x_recv = scaled_div32(hc->rx_bytes_recv, delta);
3006     if (x_recv == 0) { /* would also trigger divide-by-zero */
3007     DCCP_WARN("X_recv==0\n");
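
The CCID-3 hunks replace ktime_get_real() with ktime_get() so feedback intervals are measured on the monotonic clock, which cannot jump backwards when the wall clock is stepped, and they clamp a non-positive delta to 1 microsecond instead of reporting a bug, so scaled_div32() can never see a zero divisor. A userspace analogue of the same measurement, using clock_gettime(CLOCK_MONOTONIC) in place of ktime_get():

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	/* Monotonic microsecond timestamp, analogous to ktime_get() +
	 * ktime_us_delta() in the patch.
	 */
	static int64_t monotonic_us(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
	}

	int main(void)
	{
		int64_t last = monotonic_us();
		int64_t delta = monotonic_us() - last;
		uint32_t bytes_recv = 4096;

		if (delta <= 0)
			delta = 1;	/* mirrors the patch: never divide by zero */
		printf("x_recv = %llu\n",
		       (unsigned long long)(bytes_recv / (uint64_t)delta));
		return 0;
	}
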
3008     diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
3009     index f0252768ecf4..5f5d9eafccf5 100644
3010     --- a/net/dns_resolver/dns_key.c
3011     +++ b/net/dns_resolver/dns_key.c
3012     @@ -87,35 +87,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
3013     opt++;
3014     kdebug("options: '%s'", opt);
3015     do {
3016     + int opt_len, opt_nlen;
3017     const char *eq;
3018     - int opt_len, opt_nlen, opt_vlen, tmp;
3019     + char optval[128];
3020    
3021     next_opt = memchr(opt, '#', end - opt) ?: end;
3022     opt_len = next_opt - opt;
3023     - if (opt_len <= 0 || opt_len > 128) {
3024     + if (opt_len <= 0 || opt_len > sizeof(optval)) {
3025     pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
3026     opt_len);
3027     return -EINVAL;
3028     }
3029    
3030     - eq = memchr(opt, '=', opt_len) ?: end;
3031     - opt_nlen = eq - opt;
3032     - eq++;
3033     - opt_vlen = next_opt - eq; /* will be -1 if no value */
3034     + eq = memchr(opt, '=', opt_len);
3035     + if (eq) {
3036     + opt_nlen = eq - opt;
3037     + eq++;
3038     + memcpy(optval, eq, next_opt - eq);
3039     + optval[next_opt - eq] = '\0';
3040     + } else {
3041     + opt_nlen = opt_len;
3042     + optval[0] = '\0';
3043     + }
3044    
3045     - tmp = opt_vlen >= 0 ? opt_vlen : 0;
3046     - kdebug("option '%*.*s' val '%*.*s'",
3047     - opt_nlen, opt_nlen, opt, tmp, tmp, eq);
3048     + kdebug("option '%*.*s' val '%s'",
3049     + opt_nlen, opt_nlen, opt, optval);
3050    
3051     /* see if it's an error number representing a DNS error
3052     * that's to be recorded as the result in this key */
3053     if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
3054     memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
3055     kdebug("dns error number option");
3056     - if (opt_vlen <= 0)
3057     - goto bad_option_value;
3058    
3059     - ret = kstrtoul(eq, 10, &derrno);
3060     + ret = kstrtoul(optval, 10, &derrno);
3061     if (ret < 0)
3062     goto bad_option_value;
3063    
3064     diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
3065     index 566cfc50f7cf..51a0039cb318 100644
3066     --- a/net/ipv4/sysctl_net_ipv4.c
3067     +++ b/net/ipv4/sysctl_net_ipv4.c
3068     @@ -212,8 +212,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
3069     {
3070     struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
3071     struct tcp_fastopen_context *ctxt;
3072     - int ret;
3073     u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
3074     + __le32 key[4];
3075     + int ret, i;
3076    
3077     tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
3078     if (!tbl.data)
3079     @@ -222,11 +223,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
3080     rcu_read_lock();
3081     ctxt = rcu_dereference(tcp_fastopen_ctx);
3082     if (ctxt)
3083     - memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
3084     + memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
3085     else
3086     - memset(user_key, 0, sizeof(user_key));
3087     + memset(key, 0, sizeof(key));
3088     rcu_read_unlock();
3089    
3090     + for (i = 0; i < ARRAY_SIZE(key); i++)
3091     + user_key[i] = le32_to_cpu(key[i]);
3092     +
3093     snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
3094     user_key[0], user_key[1], user_key[2], user_key[3]);
3095     ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
3096     @@ -242,12 +246,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
3097     * first invocation of tcp_fastopen_cookie_gen
3098     */
3099     tcp_fastopen_init_key_once(false);
3100     - tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
3101     +
3102     + for (i = 0; i < ARRAY_SIZE(user_key); i++)
3103     + key[i] = cpu_to_le32(user_key[i]);
3104     +
3105     + tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH);
3106     }
3107    
3108     bad_key:
3109     pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
3110     - user_key[0], user_key[1], user_key[2], user_key[3],
3111     + user_key[0], user_key[1], user_key[2], user_key[3],
3112     (char *)tbl.data, ret);
3113     kfree(tbl.data);
3114     return ret;
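
The sysctl hunk treats the stored Fast Open key as four little-endian 32-bit words: le32_to_cpu() converts them for the hex printout and cpu_to_le32() converts user input back before tcp_fastopen_reset_cipher(), so the proc representation of a given key is byte-for-byte identical on little- and big-endian hosts. A userspace round trip using the glibc analogues of those helpers (endian.h and le32toh()/htole32() are glibc/BSD-specific):

	#include <endian.h>	/* le32toh()/htole32() */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* 16 key bytes as stored, viewed as four little-endian words */
		uint8_t raw[16] = {
			0x01, 0x02, 0x03, 0x04, 0x11, 0x12, 0x13, 0x14,
			0x21, 0x22, 0x23, 0x24, 0x31, 0x32, 0x33, 0x34,
		};
		uint32_t le_words[4], host[4];
		int i;

		memcpy(le_words, raw, sizeof(raw));
		for (i = 0; i < 4; i++)
			host[i] = le32toh(le_words[i]);	/* kernel: le32_to_cpu() */

		/* Prints 04030201-14131211-... on any host endianness */
		printf("%08x-%08x-%08x-%08x\n", host[0], host[1], host[2], host[3]);

		for (i = 0; i < 4; i++)
			le_words[i] = htole32(host[i]);	/* kernel: cpu_to_le32() */
		return 0;
	}
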
3115     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3116     index 8999e25fd0e1..be453aa8fce8 100644
3117     --- a/net/ipv4/tcp_input.c
3118     +++ b/net/ipv4/tcp_input.c
3119     @@ -3236,6 +3236,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3120    
3121     if (tcp_is_reno(tp)) {
3122     tcp_remove_reno_sacks(sk, pkts_acked);
3123     +
3124     + /* If any of the cumulatively ACKed segments was
3125     + * retransmitted, the non-SACK case cannot confirm
3126     + * that progress came from the original transmission:
3127     + * lacking TCPCB_SACKED_ACKED bits, we cannot tell,
3128     + * even if some packets were never retransmitted.
3129     + */
3130     + if (flag & FLAG_RETRANS_DATA_ACKED)
3131     + flag &= ~FLAG_ORIG_SACK_ACKED;
3132     } else {
3133     int delta;
3134    
3135     diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
3136     index 64ec23388450..722a9db8c6a7 100644
3137     --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
3138     +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
3139     @@ -618,6 +618,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
3140     fq->q.meat == fq->q.len &&
3141     nf_ct_frag6_reasm(fq, skb, dev))
3142     ret = 0;
3143     + else
3144     + skb_dst_drop(skb);
3145    
3146     out_unlock:
3147     spin_unlock_bh(&fq->q.lock);
3148     diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
3149     index 3f266115294f..04759a0c3273 100644
3150     --- a/net/nfc/llcp_commands.c
3151     +++ b/net/nfc/llcp_commands.c
3152     @@ -753,11 +753,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
3153     pr_debug("Fragment %zd bytes remaining %zd",
3154     frag_len, remaining_len);
3155    
3156     - pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
3157     + pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
3158     frag_len + LLCP_HEADER_SIZE, &err);
3159     if (pdu == NULL) {
3160     - pr_err("Could not allocate PDU\n");
3161     - continue;
3162     + pr_err("Could not allocate PDU (error=%d)\n", err);
3163     + len -= remaining_len;
3164     + if (len == 0)
3165     + len = err;
3166     + break;
3167     }
3168    
3169     pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
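
Instead of retrying forever when PDU allocation fails (the old continue with MSG_DONTWAIT could busy-loop indefinitely), the fragmentation loop now gives up: it returns the number of bytes already queued, or the allocation error if nothing was sent at all. A small sketch of that partial-write convention (send_chunk() is a made-up stand-in for the allocate-and-queue step):

	#include <errno.h>
	#include <stdio.h>

	/* Fails on the third chunk, standing in for a failed skb alloc. */
	static int send_chunk(int n)
	{
		return n < 2 ? 0 : -ENOMEM;
	}

	/* On failure, report bytes already sent if any, else the error. */
	static int send_all(int len, int chunk)
	{
		int remaining = len, i = 0, err;

		while (remaining > 0) {
			err = send_chunk(i++);
			if (err < 0) {
				len -= remaining;	/* bytes actually sent */
				if (len == 0)
					len = err;	/* nothing sent: report error */
				break;
			}
			remaining -= chunk;
		}
		return len;
	}

	int main(void)
	{
		printf("sent: %d\n", send_all(10, 3));	/* 6 bytes, then -ENOMEM */
		return 0;
	}
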
3170     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3171     index 2c4a47f29f36..ea601f7ca2f8 100644
3172     --- a/net/packet/af_packet.c
3173     +++ b/net/packet/af_packet.c
3174     @@ -2265,6 +2265,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
3175     if (po->stats.stats1.tp_drops)
3176     status |= TP_STATUS_LOSING;
3177     }
3178     +
3179     + if (do_vnet &&
3180     + __packet_rcv_vnet(skb, h.raw + macoff -
3181     + sizeof(struct virtio_net_hdr)))
3182     + goto drop_n_account;
3183     +
3184     po->stats.stats1.tp_packets++;
3185     if (copy_skb) {
3186     status |= TP_STATUS_COPY;
3187     @@ -2272,14 +2278,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
3188     }
3189     spin_unlock(&sk->sk_receive_queue.lock);
3190    
3191     - if (do_vnet) {
3192     - if (__packet_rcv_vnet(skb, h.raw + macoff -
3193     - sizeof(struct virtio_net_hdr))) {
3194     - spin_lock(&sk->sk_receive_queue.lock);
3195     - goto drop_n_account;
3196     - }
3197     - }
3198     -
3199     skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
3200    
3201     if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
3202     diff --git a/net/rds/loop.c b/net/rds/loop.c
3203     index f2bf78de5688..dac6218a460e 100644
3204     --- a/net/rds/loop.c
3205     +++ b/net/rds/loop.c
3206     @@ -193,4 +193,5 @@ struct rds_transport rds_loop_transport = {
3207     .inc_copy_to_user = rds_message_inc_copy_to_user,
3208     .inc_free = rds_loop_inc_free,
3209     .t_name = "loopback",
3210     + .t_type = RDS_TRANS_LOOP,
3211     };
3212     diff --git a/net/rds/rds.h b/net/rds/rds.h
3213     index 30a51fec0f63..edfc3397aa24 100644
3214     --- a/net/rds/rds.h
3215     +++ b/net/rds/rds.h
3216     @@ -440,6 +440,11 @@ struct rds_notifier {
3217     int n_status;
3218     };
3219    
3220     +/* Available as part of the RDS core, so it doesn't need to
3221     + * participate in transport selection (get_preferred) and the like.
3222     + */
3223     +#define RDS_TRANS_LOOP 3
3224     +
3225     /**
3226     * struct rds_transport - transport specific behavioural hooks
3227     *
3228     diff --git a/net/rds/recv.c b/net/rds/recv.c
3229     index cbfabdf3ff48..f16ee1b13b8d 100644
3230     --- a/net/rds/recv.c
3231     +++ b/net/rds/recv.c
3232     @@ -94,6 +94,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
3233     return;
3234    
3235     rs->rs_rcv_bytes += delta;
3236     +
3237     + /* loop transport doesn't send/recv congestion updates */
3238     + if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
3239     + return;
3240     +
3241     now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
3242    
3243     rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
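
The three RDS hunks work together: rds.h defines RDS_TRANS_LOOP, loop.c tags the loopback transport with it, and recv.c then skips congestion-map accounting for that transport, since loopback neither sends nor receives congestion updates. A compressed sketch of the dispatch (the struct is abbreviated to the fields that matter; values mirror the patch):

	#include <stdio.h>

	#define RDS_TRANS_LOOP 3

	struct rds_transport {
		const char *t_name;
		unsigned int t_type;
	};

	static void rcvbuf_delta(const struct rds_transport *trans, int delta)
	{
		static int rcv_bytes;

		rcv_bytes += delta;
		/* Loopback never exchanges congestion updates: bail out
		 * before the congestion check, as the recv.c hunk does.
		 */
		if (trans->t_type == RDS_TRANS_LOOP)
			return;
		printf("would check congestion at %d bytes\n", rcv_bytes);
	}

	int main(void)
	{
		struct rds_transport loop = { "loopback", RDS_TRANS_LOOP };

		rcvbuf_delta(&loop, 512);
		return 0;
	}
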
3244     diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
3245     index c98a61e980ba..9c4c2bb547d7 100644
3246     --- a/net/sched/sch_blackhole.c
3247     +++ b/net/sched/sch_blackhole.c
3248     @@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
3249     struct sk_buff **to_free)
3250     {
3251     qdisc_drop(skb, sch, to_free);
3252     - return NET_XMIT_SUCCESS;
3253     + return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
3254     }
3255    
3256     static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
3257     diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
3258     index 95021246ee26..3d6dbdf850aa 100644
3259     --- a/virt/kvm/arm/hyp/vgic-v2-sr.c
3260     +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
3261     @@ -203,7 +203,7 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
3262     return -1;
3263    
3264     rd = kvm_vcpu_dabt_get_rd(vcpu);
3265     - addr = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
3266     + addr = kern_hyp_va(hyp_symbol_addr(kvm_vgic_global_state)->vcpu_base_va);
3267     addr += fault_ipa - vgic->vgic_cpu_base;
3268    
3269     if (kvm_vcpu_dabt_iswrite(vcpu)) {