Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0156-4.14.57-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 157316 bytes
-added up to patches-4.14.79
1 niro 3238 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2     index 0380a45ecf4b..d6d7669e667f 100644
3     --- a/Documentation/admin-guide/kernel-parameters.txt
4     +++ b/Documentation/admin-guide/kernel-parameters.txt
5     @@ -3997,6 +3997,23 @@
6     expediting. Set to zero to disable automatic
7     expediting.
8    
9     + ssbd= [ARM64,HW]
10     + Speculative Store Bypass Disable control
11     +
12     + On CPUs that are vulnerable to the Speculative
13     + Store Bypass vulnerability and offer a
14     + firmware based mitigation, this parameter
15     + indicates how the mitigation should be used:
16     +
17     + force-on: Unconditionally enable mitigation for
18     + both kernel and userspace
19     + force-off: Unconditionally disable mitigation for
20     + both kernel and userspace
21     + kernel: Always enable mitigation in the
22     + kernel, and offer a prctl interface
23     + to allow userspace to register its
24     + interest in being mitigated too.
25     +
26     stack_guard_gap= [MM]
27     override the default stack gap protection. The value
28     is in page units and it defines how many pages prior
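
(As a usage illustration, not part of the patch: the new option is passed on the kernel command line at boot. The kernel image and root device in this GRUB-style entry are hypothetical.)

    linux /boot/vmlinuz-4.14.57 root=/dev/sda2 ro ssbd=kernel
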
29     diff --git a/Makefile b/Makefile
30     index acbb0e3d29c9..a44d6b2adb76 100644
31     --- a/Makefile
32     +++ b/Makefile
33     @@ -1,7 +1,7 @@
34     # SPDX-License-Identifier: GPL-2.0
35     VERSION = 4
36     PATCHLEVEL = 14
37     -SUBLEVEL = 56
38     +SUBLEVEL = 57
39     EXTRAVERSION =
40     NAME = Petit Gorille
41    
42     diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
43     index 8f973e3b7348..65572e14306c 100644
44     --- a/arch/arm/include/asm/kvm_host.h
45     +++ b/arch/arm/include/asm/kvm_host.h
46     @@ -302,4 +302,16 @@ static inline bool kvm_arm_harden_branch_predictor(void)
47     return false;
48     }
49    
50     +#define KVM_SSBD_UNKNOWN -1
51     +#define KVM_SSBD_FORCE_DISABLE 0
52     +#define KVM_SSBD_KERNEL 1
53     +#define KVM_SSBD_FORCE_ENABLE 2
54     +#define KVM_SSBD_MITIGATED 3
55     +
56     +static inline int kvm_arm_have_ssbd(void)
57     +{
58     + /* No way to detect it yet, pretend it is not there. */
59     + return KVM_SSBD_UNKNOWN;
60     +}
61     +
62     #endif /* __ARM_KVM_HOST_H__ */
63     diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
64     index 08cd720eae01..8a098e65f5f8 100644
65     --- a/arch/arm/include/asm/kvm_mmu.h
66     +++ b/arch/arm/include/asm/kvm_mmu.h
67     @@ -28,6 +28,13 @@
68     */
69     #define kern_hyp_va(kva) (kva)
70    
71     +/* Contrary to arm64, there is no need to generate a PC-relative address */
72     +#define hyp_symbol_addr(s) \
73     + ({ \
74     + typeof(s) *addr = &(s); \
75     + addr; \
76     + })
77     +
78     /*
79     * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
80     */
81     @@ -247,6 +254,11 @@ static inline int kvm_map_vectors(void)
82     return 0;
83     }
84    
85     +static inline int hyp_map_aux_data(void)
86     +{
87     + return 0;
88     +}
89     +
90     #endif /* !__ASSEMBLY__ */
91    
92     #endif /* __ARM_KVM_MMU_H__ */
93     diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
94     index 2d5f7aca156d..1bbb89d37f57 100644
95     --- a/arch/arm64/Kconfig
96     +++ b/arch/arm64/Kconfig
97     @@ -849,6 +849,15 @@ config HARDEN_BRANCH_PREDICTOR
98    
99     If unsure, say Y.
100    
101     +config ARM64_SSBD
102     + bool "Speculative Store Bypass Disable" if EXPERT
103     + default y
104     + help
105     + This enables mitigation of the bypassing of previous stores
106     + by speculative loads.
107     +
108     + If unsure, say Y.
109     +
110     menuconfig ARMV8_DEPRECATED
111     bool "Emulate deprecated/obsolete ARMv8 instructions"
112     depends on COMPAT
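
(Illustration, not part of the patch: since the option defaults to y and is only user-selectable under EXPERT, a .config fragment opting out would look like this.)

    CONFIG_EXPERT=y
    # CONFIG_ARM64_SSBD is not set
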
113     diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
114     index 4a85c6952a22..a91933b1e2e6 100644
115     --- a/arch/arm64/include/asm/alternative.h
116     +++ b/arch/arm64/include/asm/alternative.h
117     @@ -5,6 +5,8 @@
118     #include <asm/cpucaps.h>
119     #include <asm/insn.h>
120    
121     +#define ARM64_CB_PATCH ARM64_NCAPS
122     +
123     #ifndef __ASSEMBLY__
124    
125     #include <linux/init.h>
126     @@ -12,6 +14,8 @@
127     #include <linux/stddef.h>
128     #include <linux/stringify.h>
129    
130     +extern int alternatives_applied;
131     +
132     struct alt_instr {
133     s32 orig_offset; /* offset to original instruction */
134     s32 alt_offset; /* offset to replacement instruction */
135     @@ -20,12 +24,19 @@ struct alt_instr {
136     u8 alt_len; /* size of new instruction(s), <= orig_len */
137     };
138    
139     +typedef void (*alternative_cb_t)(struct alt_instr *alt,
140     + __le32 *origptr, __le32 *updptr, int nr_inst);
141     +
142     void __init apply_alternatives_all(void);
143     void apply_alternatives(void *start, size_t length);
144    
145     -#define ALTINSTR_ENTRY(feature) \
146     +#define ALTINSTR_ENTRY(feature,cb) \
147     " .word 661b - .\n" /* label */ \
148     + " .if " __stringify(cb) " == 0\n" \
149     " .word 663f - .\n" /* new instruction */ \
150     + " .else\n" \
151     + " .word " __stringify(cb) "- .\n" /* callback */ \
152     + " .endif\n" \
153     " .hword " __stringify(feature) "\n" /* feature bit */ \
154     " .byte 662b-661b\n" /* source len */ \
155     " .byte 664f-663f\n" /* replacement len */
156     @@ -43,15 +54,18 @@ void apply_alternatives(void *start, size_t length);
157     * but most assemblers die if insn1 or insn2 have a .inst. This should
158     * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
159     * containing commit 4e4d08cf7399b606 or c1baaddf8861).
160     + *
161     + * Alternatives with callbacks do not generate replacement instructions.
162     */
163     -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
164     +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \
165     ".if "__stringify(cfg_enabled)" == 1\n" \
166     "661:\n\t" \
167     oldinstr "\n" \
168     "662:\n" \
169     ".pushsection .altinstructions,\"a\"\n" \
170     - ALTINSTR_ENTRY(feature) \
171     + ALTINSTR_ENTRY(feature,cb) \
172     ".popsection\n" \
173     + " .if " __stringify(cb) " == 0\n" \
174     ".pushsection .altinstr_replacement, \"a\"\n" \
175     "663:\n\t" \
176     newinstr "\n" \
177     @@ -59,11 +73,17 @@ void apply_alternatives(void *start, size_t length);
178     ".popsection\n\t" \
179     ".org . - (664b-663b) + (662b-661b)\n\t" \
180     ".org . - (662b-661b) + (664b-663b)\n" \
181     + ".else\n\t" \
182     + "663:\n\t" \
183     + "664:\n\t" \
184     + ".endif\n" \
185     ".endif\n"
186    
187     #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
188     - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
189     + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
190    
191     +#define ALTERNATIVE_CB(oldinstr, cb) \
192     + __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
193     #else
194    
195     #include <asm/assembler.h>
196     @@ -130,6 +150,14 @@ void apply_alternatives(void *start, size_t length);
197     661:
198     .endm
199    
200     +.macro alternative_cb cb
201     + .set .Lasm_alt_mode, 0
202     + .pushsection .altinstructions, "a"
203     + altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
204     + .popsection
205     +661:
206     +.endm
207     +
208     /*
209     * Provide the other half of the alternative code sequence.
210     */
211     @@ -155,6 +183,13 @@ void apply_alternatives(void *start, size_t length);
212     .org . - (662b-661b) + (664b-663b)
213     .endm
214    
215     +/*
216     + * Callback-based alternative epilogue
217     + */
218     +.macro alternative_cb_end
219     +662:
220     +.endm
221     +
222     /*
223     * Provides a trivial alternative or default sequence consisting solely
224     * of NOPs. The number of NOPs is chosen automatically to match the
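
(Illustration, not part of the patch: the callback form above hands instruction generation to a C function at patch time instead of copying a static replacement. Below is a minimal sketch of such a callback; the name patch_with_nop is hypothetical, while aarch64_insn_gen_nop() and cpu_to_le32() are existing kernel helpers. It mirrors arm64_enable_wa2_handling later in this patch.)

    static void __init patch_with_nop(struct alt_instr *alt,
                                      __le32 *origptr, __le32 *updptr,
                                      int nr_inst)
    {
            BUG_ON(nr_inst != 1);   /* this callback handles one insn */
            /* Overwrite the default instruction with a NOP. */
            *updptr = cpu_to_le32(aarch64_insn_gen_nop());
    }

    // Assembly side: the branch stays in place unless the callback
    // rewrites it during boot-time patching.
    alternative_cb patch_with_nop
            b       1f              // default until patched
    alternative_cb_end
    1:
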
225     diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
226     index 25b2a4161c7a..66aea4aa455d 100644
227     --- a/arch/arm64/include/asm/assembler.h
228     +++ b/arch/arm64/include/asm/assembler.h
229     @@ -260,7 +260,11 @@ lr .req x30 // link register
230     #else
231     adr_l \dst, \sym
232     #endif
233     +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
234     mrs \tmp, tpidr_el1
235     +alternative_else
236     + mrs \tmp, tpidr_el2
237     +alternative_endif
238     add \dst, \dst, \tmp
239     .endm
240    
241     @@ -271,7 +275,11 @@ lr .req x30 // link register
242     */
243     .macro ldr_this_cpu dst, sym, tmp
244     adr_l \dst, \sym
245     +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
246     mrs \tmp, tpidr_el1
247     +alternative_else
248     + mrs \tmp, tpidr_el2
249     +alternative_endif
250     ldr \dst, [\dst, \tmp]
251     .endm
252    
253     diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
254     index 2e7b236bc596..76c0d23ca161 100644
255     --- a/arch/arm64/include/asm/cpucaps.h
256     +++ b/arch/arm64/include/asm/cpucaps.h
257     @@ -43,7 +43,8 @@
258     #define ARM64_UNMAP_KERNEL_AT_EL0 23
259     #define ARM64_HARDEN_BRANCH_PREDICTOR 24
260     #define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
261     +#define ARM64_SSBD 26
262    
263     -#define ARM64_NCAPS 26
264     +#define ARM64_NCAPS 27
265    
266     #endif /* __ASM_CPUCAPS_H */
267     diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
268     index 428ee1f2468c..c5bc80a03515 100644
269     --- a/arch/arm64/include/asm/cpufeature.h
270     +++ b/arch/arm64/include/asm/cpufeature.h
271     @@ -262,6 +262,28 @@ static inline bool system_uses_ttbr0_pan(void)
272     !cpus_have_const_cap(ARM64_HAS_PAN);
273     }
274    
275     +#define ARM64_SSBD_UNKNOWN -1
276     +#define ARM64_SSBD_FORCE_DISABLE 0
277     +#define ARM64_SSBD_KERNEL 1
278     +#define ARM64_SSBD_FORCE_ENABLE 2
279     +#define ARM64_SSBD_MITIGATED 3
280     +
281     +static inline int arm64_get_ssbd_state(void)
282     +{
283     +#ifdef CONFIG_ARM64_SSBD
284     + extern int ssbd_state;
285     + return ssbd_state;
286     +#else
287     + return ARM64_SSBD_UNKNOWN;
288     +#endif
289     +}
290     +
291     +#ifdef CONFIG_ARM64_SSBD
292     +void arm64_set_ssbd_mitigation(bool state);
293     +#else
294     +static inline void arm64_set_ssbd_mitigation(bool state) {}
295     +#endif
296     +
297     #endif /* __ASSEMBLY__ */
298    
299     #endif
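
(Illustration, not part of the patch: callers consult the cached state through the accessor above. The hibernate and suspend paths later in this patch follow this pattern.)

    if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL ||
        arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE)
            arm64_set_ssbd_mitigation(true);    /* SMCCC firmware call */
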
300     diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
301     index a7ef5a051911..1a6d02350fc6 100644
302     --- a/arch/arm64/include/asm/kvm_asm.h
303     +++ b/arch/arm64/include/asm/kvm_asm.h
304     @@ -33,6 +33,10 @@
305     #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
306     #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
307    
308     +#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
309     +#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
310     +
311     +/* Translate a kernel address of @sym into its equivalent linear mapping */
312     #define kvm_ksym_ref(sym) \
313     ({ \
314     void *val = &sym; \
315     @@ -68,6 +72,43 @@ extern u32 __init_stage2_translation(void);
316    
317     extern void __qcom_hyp_sanitize_btac_predictors(void);
318    
319     +/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
320     +#define __hyp_this_cpu_ptr(sym) \
321     + ({ \
322     + void *__ptr = hyp_symbol_addr(sym); \
323     + __ptr += read_sysreg(tpidr_el2); \
324     + (typeof(&sym))__ptr; \
325     + })
326     +
327     +#define __hyp_this_cpu_read(sym) \
328     + ({ \
329     + *__hyp_this_cpu_ptr(sym); \
330     + })
331     +
332     +#else /* __ASSEMBLY__ */
333     +
334     +.macro hyp_adr_this_cpu reg, sym, tmp
335     + adr_l \reg, \sym
336     + mrs \tmp, tpidr_el2
337     + add \reg, \reg, \tmp
338     +.endm
339     +
340     +.macro hyp_ldr_this_cpu reg, sym, tmp
341     + adr_l \reg, \sym
342     + mrs \tmp, tpidr_el2
343     + ldr \reg, [\reg, \tmp]
344     +.endm
345     +
346     +.macro get_host_ctxt reg, tmp
347     + hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
348     +.endm
349     +
350     +.macro get_vcpu_ptr vcpu, ctxt
351     + get_host_ctxt \ctxt, \vcpu
352     + ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
353     + kern_hyp_va \vcpu
354     +.endm
355     +
356     #endif
357    
358     #endif /* __ARM_KVM_ASM_H__ */
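
(Illustration, not part of the patch: __hyp_this_cpu_read() exists because the regular this_cpu_read() resolves through kernel virtual addresses that are not mapped at EL2. Code in the __hyp_text section uses it as in the switch.c hunk later in this patch.)

    if (__hyp_this_cpu_read(arm64_ssbd_callback_required))
            arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
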
359     diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
360     index 8abec9f7f430..b01ad3489bd8 100644
361     --- a/arch/arm64/include/asm/kvm_host.h
362     +++ b/arch/arm64/include/asm/kvm_host.h
363     @@ -194,6 +194,8 @@ struct kvm_cpu_context {
364     u64 sys_regs[NR_SYS_REGS];
365     u32 copro[NR_COPRO_REGS];
366     };
367     +
368     + struct kvm_vcpu *__hyp_running_vcpu;
369     };
370    
371     typedef struct kvm_cpu_context kvm_cpu_context_t;
372     @@ -208,6 +210,9 @@ struct kvm_vcpu_arch {
373     /* Exception Information */
374     struct kvm_vcpu_fault_info fault;
375    
376     + /* State of various workarounds, see kvm_asm.h for bit assignment */
377     + u64 workaround_flags;
378     +
379     /* Guest debug state */
380     u64 debug_flags;
381    
382     @@ -348,10 +353,15 @@ int kvm_perf_teardown(void);
383    
384     struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
385    
386     +void __kvm_set_tpidr_el2(u64 tpidr_el2);
387     +DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
388     +
389     static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
390     unsigned long hyp_stack_ptr,
391     unsigned long vector_ptr)
392     {
393     + u64 tpidr_el2;
394     +
395     /*
396     * Call initialization code, and switch to the full blown HYP code.
397     * If the cpucaps haven't been finalized yet, something has gone very
398     @@ -360,6 +370,16 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
399     */
400     BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
401     __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
402     +
403     + /*
404     + * Calculate the raw per-cpu offset without a translation from the
405     + * kernel's mapping to the linear mapping, and store it in tpidr_el2
406     + * so that we can use adr_l to access per-cpu variables in EL2.
407     + */
408     + tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
409     + - (u64)kvm_ksym_ref(kvm_host_cpu_state);
410     +
411     + kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
412     }
413    
414     static inline void kvm_arch_hardware_unsetup(void) {}
415     @@ -392,4 +412,27 @@ static inline bool kvm_arm_harden_branch_predictor(void)
416     return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
417     }
418    
419     +#define KVM_SSBD_UNKNOWN -1
420     +#define KVM_SSBD_FORCE_DISABLE 0
421     +#define KVM_SSBD_KERNEL 1
422     +#define KVM_SSBD_FORCE_ENABLE 2
423     +#define KVM_SSBD_MITIGATED 3
424     +
425     +static inline int kvm_arm_have_ssbd(void)
426     +{
427     + switch (arm64_get_ssbd_state()) {
428     + case ARM64_SSBD_FORCE_DISABLE:
429     + return KVM_SSBD_FORCE_DISABLE;
430     + case ARM64_SSBD_KERNEL:
431     + return KVM_SSBD_KERNEL;
432     + case ARM64_SSBD_FORCE_ENABLE:
433     + return KVM_SSBD_FORCE_ENABLE;
434     + case ARM64_SSBD_MITIGATED:
435     + return KVM_SSBD_MITIGATED;
436     + case ARM64_SSBD_UNKNOWN:
437     + default:
438     + return KVM_SSBD_UNKNOWN;
439     + }
440     +}
441     +
442     #endif /* __ARM64_KVM_HOST_H__ */
443     diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
444     index fe55b516f018..e42c1f0ae6cf 100644
445     --- a/arch/arm64/include/asm/kvm_mmu.h
446     +++ b/arch/arm64/include/asm/kvm_mmu.h
447     @@ -130,6 +130,26 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
448    
449     #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
450    
451     +/*
452     + * Obtain the PC-relative address of a kernel symbol
453     + * s: symbol
454     + *
455     + * The goal of this macro is to return a symbol's address based on a
456     + * PC-relative computation, as opposed to loading the VA from a
457     + * constant pool or something similar. This works well for HYP, as an
458     + * absolute VA is guaranteed to be wrong. Only use this if trying to
459     + * obtain the address of a symbol (i.e. not something you obtained by
460     + * following a pointer).
461     + */
462     +#define hyp_symbol_addr(s) \
463     + ({ \
464     + typeof(s) *addr; \
465     + asm("adrp %0, %1\n" \
466     + "add %0, %0, :lo12:%1\n" \
467     + : "=r" (addr) : "S" (&s)); \
468     + addr; \
469     + })
470     +
471     /*
472     * We currently only support a 40bit IPA.
473     */
474     @@ -363,5 +383,29 @@ static inline int kvm_map_vectors(void)
475     }
476     #endif
477    
478     +#ifdef CONFIG_ARM64_SSBD
479     +DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
480     +
481     +static inline int hyp_map_aux_data(void)
482     +{
483     + int cpu, err;
484     +
485     + for_each_possible_cpu(cpu) {
486     + u64 *ptr;
487     +
488     + ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
489     + err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
490     + if (err)
491     + return err;
492     + }
493     + return 0;
494     +}
495     +#else
496     +static inline int hyp_map_aux_data(void)
497     +{
498     + return 0;
499     +}
500     +#endif
501     +
502     #endif /* __ASSEMBLY__ */
503     #endif /* __ARM64_KVM_MMU_H__ */
504     diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
505     index 3bd498e4de4c..43393208229e 100644
506     --- a/arch/arm64/include/asm/percpu.h
507     +++ b/arch/arm64/include/asm/percpu.h
508     @@ -16,11 +16,15 @@
509     #ifndef __ASM_PERCPU_H
510     #define __ASM_PERCPU_H
511    
512     +#include <asm/alternative.h>
513     #include <asm/stack_pointer.h>
514    
515     static inline void set_my_cpu_offset(unsigned long off)
516     {
517     - asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
518     + asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
519     + "msr tpidr_el2, %0",
520     + ARM64_HAS_VIRT_HOST_EXTN)
521     + :: "r" (off) : "memory");
522     }
523    
524     static inline unsigned long __my_cpu_offset(void)
525     @@ -31,7 +35,10 @@ static inline unsigned long __my_cpu_offset(void)
526     * We want to allow caching the value, so avoid using volatile and
527     * instead use a fake stack read to hazard against barrier().
528     */
529     - asm("mrs %0, tpidr_el1" : "=r" (off) :
530     + asm(ALTERNATIVE("mrs %0, tpidr_el1",
531     + "mrs %0, tpidr_el2",
532     + ARM64_HAS_VIRT_HOST_EXTN)
533     + : "=r" (off) :
534     "Q" (*(const unsigned long *)current_stack_pointer));
535    
536     return off;
537     diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
538     index ddded6497a8a..fc786d344e46 100644
539     --- a/arch/arm64/include/asm/thread_info.h
540     +++ b/arch/arm64/include/asm/thread_info.h
541     @@ -92,6 +92,7 @@ void arch_setup_new_exec(void);
542     #define TIF_RESTORE_SIGMASK 20
543     #define TIF_SINGLESTEP 21
544     #define TIF_32BIT 22 /* 32bit process */
545     +#define TIF_SSBD 23 /* Wants SSB mitigation */
546    
547     #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
548     #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
549     diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
550     index def8d5623fd1..714fe90dbf66 100644
551     --- a/arch/arm64/kernel/Makefile
552     +++ b/arch/arm64/kernel/Makefile
553     @@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
554     arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
555     arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
556     arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
557     +arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
558    
559     ifeq ($(CONFIG_KVM),y)
560     arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
561     diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
562     index 6dd0a3a3e5c9..5c4bce4ac381 100644
563     --- a/arch/arm64/kernel/alternative.c
564     +++ b/arch/arm64/kernel/alternative.c
565     @@ -32,6 +32,8 @@
566     #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
567     #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
568    
569     +int alternatives_applied;
570     +
571     struct alt_region {
572     struct alt_instr *begin;
573     struct alt_instr *end;
574     @@ -105,32 +107,53 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
575     return insn;
576     }
577    
578     +static void patch_alternative(struct alt_instr *alt,
579     + __le32 *origptr, __le32 *updptr, int nr_inst)
580     +{
581     + __le32 *replptr;
582     + int i;
583     +
584     + replptr = ALT_REPL_PTR(alt);
585     + for (i = 0; i < nr_inst; i++) {
586     + u32 insn;
587     +
588     + insn = get_alt_insn(alt, origptr + i, replptr + i);
589     + updptr[i] = cpu_to_le32(insn);
590     + }
591     +}
592     +
593     static void __apply_alternatives(void *alt_region, bool use_linear_alias)
594     {
595     struct alt_instr *alt;
596     struct alt_region *region = alt_region;
597     - __le32 *origptr, *replptr, *updptr;
598     + __le32 *origptr, *updptr;
599     + alternative_cb_t alt_cb;
600    
601     for (alt = region->begin; alt < region->end; alt++) {
602     - u32 insn;
603     - int i, nr_inst;
604     + int nr_inst;
605    
606     - if (!cpus_have_cap(alt->cpufeature))
607     + /* Use ARM64_CB_PATCH as an unconditional patch */
608     + if (alt->cpufeature < ARM64_CB_PATCH &&
609     + !cpus_have_cap(alt->cpufeature))
610     continue;
611    
612     - BUG_ON(alt->alt_len != alt->orig_len);
613     + if (alt->cpufeature == ARM64_CB_PATCH)
614     + BUG_ON(alt->alt_len != 0);
615     + else
616     + BUG_ON(alt->alt_len != alt->orig_len);
617    
618     pr_info_once("patching kernel code\n");
619    
620     origptr = ALT_ORIG_PTR(alt);
621     - replptr = ALT_REPL_PTR(alt);
622     updptr = use_linear_alias ? lm_alias(origptr) : origptr;
623     - nr_inst = alt->alt_len / sizeof(insn);
624     + nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
625    
626     - for (i = 0; i < nr_inst; i++) {
627     - insn = get_alt_insn(alt, origptr + i, replptr + i);
628     - updptr[i] = cpu_to_le32(insn);
629     - }
630     + if (alt->cpufeature < ARM64_CB_PATCH)
631     + alt_cb = patch_alternative;
632     + else
633     + alt_cb = ALT_REPL_PTR(alt);
634     +
635     + alt_cb(alt, origptr, updptr, nr_inst);
636    
637     flush_icache_range((uintptr_t)origptr,
638     (uintptr_t)(origptr + nr_inst));
639     @@ -143,7 +166,6 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
640     */
641     static int __apply_alternatives_multi_stop(void *unused)
642     {
643     - static int patched = 0;
644     struct alt_region region = {
645     .begin = (struct alt_instr *)__alt_instructions,
646     .end = (struct alt_instr *)__alt_instructions_end,
647     @@ -151,14 +173,14 @@ static int __apply_alternatives_multi_stop(void *unused)
648    
649     /* We always have a CPU 0 at this point (__init) */
650     if (smp_processor_id()) {
651     - while (!READ_ONCE(patched))
652     + while (!READ_ONCE(alternatives_applied))
653     cpu_relax();
654     isb();
655     } else {
656     - BUG_ON(patched);
657     + BUG_ON(alternatives_applied);
658     __apply_alternatives(&region, true);
659     /* Barriers provided by the cache flushing */
660     - WRITE_ONCE(patched, 1);
661     + WRITE_ONCE(alternatives_applied, 1);
662     }
663    
664     return 0;
665     diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
666     index af247d10252f..b5e43b01b396 100644
667     --- a/arch/arm64/kernel/asm-offsets.c
668     +++ b/arch/arm64/kernel/asm-offsets.c
669     @@ -131,11 +131,13 @@ int main(void)
670     BLANK();
671     #ifdef CONFIG_KVM_ARM_HOST
672     DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
673     + DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
674     DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
675     DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
676     DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
677     DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
678     DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
679     + DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
680     #endif
681     #ifdef CONFIG_CPU_PM
682     DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
683     diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
684     index b5a28336c077..eccdb28b4a39 100644
685     --- a/arch/arm64/kernel/cpu_errata.c
686     +++ b/arch/arm64/kernel/cpu_errata.c
687     @@ -228,6 +228,178 @@ static int qcom_enable_link_stack_sanitization(void *data)
688     }
689     #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
690    
691     +#ifdef CONFIG_ARM64_SSBD
692     +DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
693     +
694     +int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
695     +
696     +static const struct ssbd_options {
697     + const char *str;
698     + int state;
699     +} ssbd_options[] = {
700     + { "force-on", ARM64_SSBD_FORCE_ENABLE, },
701     + { "force-off", ARM64_SSBD_FORCE_DISABLE, },
702     + { "kernel", ARM64_SSBD_KERNEL, },
703     +};
704     +
705     +static int __init ssbd_cfg(char *buf)
706     +{
707     + int i;
708     +
709     + if (!buf || !buf[0])
710     + return -EINVAL;
711     +
712     + for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
713     + int len = strlen(ssbd_options[i].str);
714     +
715     + if (strncmp(buf, ssbd_options[i].str, len))
716     + continue;
717     +
718     + ssbd_state = ssbd_options[i].state;
719     + return 0;
720     + }
721     +
722     + return -EINVAL;
723     +}
724     +early_param("ssbd", ssbd_cfg);
725     +
726     +void __init arm64_update_smccc_conduit(struct alt_instr *alt,
727     + __le32 *origptr, __le32 *updptr,
728     + int nr_inst)
729     +{
730     + u32 insn;
731     +
732     + BUG_ON(nr_inst != 1);
733     +
734     + switch (psci_ops.conduit) {
735     + case PSCI_CONDUIT_HVC:
736     + insn = aarch64_insn_get_hvc_value();
737     + break;
738     + case PSCI_CONDUIT_SMC:
739     + insn = aarch64_insn_get_smc_value();
740     + break;
741     + default:
742     + return;
743     + }
744     +
745     + *updptr = cpu_to_le32(insn);
746     +}
747     +
748     +void __init arm64_enable_wa2_handling(struct alt_instr *alt,
749     + __le32 *origptr, __le32 *updptr,
750     + int nr_inst)
751     +{
752     + BUG_ON(nr_inst != 1);
753     + /*
754     + * Only allow mitigation on EL1 entry/exit and guest
755     + * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
756     + * be flipped.
757     + */
758     + if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
759     + *updptr = cpu_to_le32(aarch64_insn_gen_nop());
760     +}
761     +
762     +void arm64_set_ssbd_mitigation(bool state)
763     +{
764     + switch (psci_ops.conduit) {
765     + case PSCI_CONDUIT_HVC:
766     + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
767     + break;
768     +
769     + case PSCI_CONDUIT_SMC:
770     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
771     + break;
772     +
773     + default:
774     + WARN_ON_ONCE(1);
775     + break;
776     + }
777     +}
778     +
779     +static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
780     + int scope)
781     +{
782     + struct arm_smccc_res res;
783     + bool required = true;
784     + s32 val;
785     +
786     + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
787     +
788     + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
789     + ssbd_state = ARM64_SSBD_UNKNOWN;
790     + return false;
791     + }
792     +
793     + switch (psci_ops.conduit) {
794     + case PSCI_CONDUIT_HVC:
795     + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
796     + ARM_SMCCC_ARCH_WORKAROUND_2, &res);
797     + break;
798     +
799     + case PSCI_CONDUIT_SMC:
800     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
801     + ARM_SMCCC_ARCH_WORKAROUND_2, &res);
802     + break;
803     +
804     + default:
805     + ssbd_state = ARM64_SSBD_UNKNOWN;
806     + return false;
807     + }
808     +
809     + val = (s32)res.a0;
810     +
811     + switch (val) {
812     + case SMCCC_RET_NOT_SUPPORTED:
813     + ssbd_state = ARM64_SSBD_UNKNOWN;
814     + return false;
815     +
816     + case SMCCC_RET_NOT_REQUIRED:
817     + pr_info_once("%s mitigation not required\n", entry->desc);
818     + ssbd_state = ARM64_SSBD_MITIGATED;
819     + return false;
820     +
821     + case SMCCC_RET_SUCCESS:
822     + required = true;
823     + break;
824     +
825     + case 1: /* Mitigation not required on this CPU */
826     + required = false;
827     + break;
828     +
829     + default:
830     + WARN_ON(1);
831     + return false;
832     + }
833     +
834     + switch (ssbd_state) {
835     + case ARM64_SSBD_FORCE_DISABLE:
836     + pr_info_once("%s disabled from command-line\n", entry->desc);
837     + arm64_set_ssbd_mitigation(false);
838     + required = false;
839     + break;
840     +
841     + case ARM64_SSBD_KERNEL:
842     + if (required) {
843     + __this_cpu_write(arm64_ssbd_callback_required, 1);
844     + arm64_set_ssbd_mitigation(true);
845     + }
846     + break;
847     +
848     + case ARM64_SSBD_FORCE_ENABLE:
849     + pr_info_once("%s forced from command-line\n", entry->desc);
850     + arm64_set_ssbd_mitigation(true);
851     + required = true;
852     + break;
853     +
854     + default:
855     + WARN_ON(1);
856     + break;
857     + }
858     +
859     + return required;
860     +}
861     +#endif /* CONFIG_ARM64_SSBD */
862     +
863     #define MIDR_RANGE(model, min, max) \
864     .def_scope = SCOPE_LOCAL_CPU, \
865     .matches = is_affected_midr_range, \
866     @@ -425,6 +597,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
867     MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
868     .enable = enable_smccc_arch_workaround_1,
869     },
870     +#endif
871     +#ifdef CONFIG_ARM64_SSBD
872     + {
873     + .desc = "Speculative Store Bypass Disable",
874     + .def_scope = SCOPE_LOCAL_CPU,
875     + .capability = ARM64_SSBD,
876     + .matches = has_ssbd_mitigation,
877     + },
878     #endif
879     {
880     }
881     diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
882     index 718822ab6e4b..376cf12edf0c 100644
883     --- a/arch/arm64/kernel/cpufeature.c
884     +++ b/arch/arm64/kernel/cpufeature.c
885     @@ -880,6 +880,22 @@ static int __init parse_kpti(char *str)
886     early_param("kpti", parse_kpti);
887     #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
888    
889     +static int cpu_copy_el2regs(void *__unused)
890     +{
891     + /*
892     + * Copy register values that aren't redirected by hardware.
893     + *
894     + * Before code patching, we only set tpidr_el1; all CPUs need to copy
895     + * this value to tpidr_el2 before we patch the code. Once we've done
896     + * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
897     + * do anything here.
898     + */
899     + if (!alternatives_applied)
900     + write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
901     +
902     + return 0;
903     +}
904     +
905     static const struct arm64_cpu_capabilities arm64_features[] = {
906     {
907     .desc = "GIC system register CPU interface",
908     @@ -949,6 +965,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
909     .capability = ARM64_HAS_VIRT_HOST_EXTN,
910     .def_scope = SCOPE_SYSTEM,
911     .matches = runs_at_el2,
912     + .enable = cpu_copy_el2regs,
913     },
914     {
915     .desc = "32-bit EL0 Support",
916     diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
917     index 93958d1341bb..c1ffa95c0ad2 100644
918     --- a/arch/arm64/kernel/entry.S
919     +++ b/arch/arm64/kernel/entry.S
920     @@ -18,6 +18,7 @@
921     * along with this program. If not, see <http://www.gnu.org/licenses/>.
922     */
923    
924     +#include <linux/arm-smccc.h>
925     #include <linux/init.h>
926     #include <linux/linkage.h>
927    
928     @@ -137,6 +138,25 @@ alternative_else_nop_endif
929     add \dst, \dst, #(\sym - .entry.tramp.text)
930     .endm
931    
932     + // This macro corrupts x0-x3. It is the caller's duty
933     + // to save/restore them if required.
934     + .macro apply_ssbd, state, targ, tmp1, tmp2
935     +#ifdef CONFIG_ARM64_SSBD
936     +alternative_cb arm64_enable_wa2_handling
937     + b \targ
938     +alternative_cb_end
939     + ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
940     + cbz \tmp2, \targ
941     + ldr \tmp2, [tsk, #TSK_TI_FLAGS]
942     + tbnz \tmp2, #TIF_SSBD, \targ
943     + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
944     + mov w1, #\state
945     +alternative_cb arm64_update_smccc_conduit
946     + nop // Patched to SMC/HVC #0
947     +alternative_cb_end
948     +#endif
949     + .endm
950     +
951     .macro kernel_entry, el, regsize = 64
952     .if \regsize == 32
953     mov w0, w0 // zero upper 32 bits of x0
954     @@ -163,6 +183,14 @@ alternative_else_nop_endif
955     ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
956     disable_step_tsk x19, x20 // exceptions when scheduling.
957    
958     + apply_ssbd 1, 1f, x22, x23
959     +
960     +#ifdef CONFIG_ARM64_SSBD
961     + ldp x0, x1, [sp, #16 * 0]
962     + ldp x2, x3, [sp, #16 * 1]
963     +#endif
964     +1:
965     +
966     mov x29, xzr // fp pointed to user-space
967     .else
968     add x21, sp, #S_FRAME_SIZE
969     @@ -301,6 +329,8 @@ alternative_if ARM64_WORKAROUND_845719
970     alternative_else_nop_endif
971     #endif
972     3:
973     + apply_ssbd 0, 5f, x0, x1
974     +5:
975     .endif
976    
977     msr elr_el1, x21 // set up the return data
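
(Illustration, not part of the patch: in C-like pseudocode, apply_ssbd boils down to the check below, run with state = 1 on kernel entry and state = 0 on return to userspace. firmware_call() is a hypothetical stand-in for the HVC/SMC instruction patched in by arm64_update_smccc_conduit, and the whole sequence is branched over unless ssbd_state is ARM64_SSBD_KERNEL.)

    if (this_cpu_read(arm64_ssbd_callback_required) &&
        !test_thread_flag(TIF_SSBD))    /* TIF_SSBD tasks stay mitigated */
            firmware_call(ARM_SMCCC_ARCH_WORKAROUND_2, state);
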
978     diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
979     index 095d3c170f5d..a028cc95afe1 100644
980     --- a/arch/arm64/kernel/hibernate.c
981     +++ b/arch/arm64/kernel/hibernate.c
982     @@ -313,6 +313,17 @@ int swsusp_arch_suspend(void)
983    
984     sleep_cpu = -EINVAL;
985     __cpu_suspend_exit();
986     +
987     + /*
988     + * Just in case the boot kernel did turn the SSBD
989     + * mitigation off behind our back, let's set the state
990     + * to what we expect it to be.
991     + */
992     + switch (arm64_get_ssbd_state()) {
993     + case ARM64_SSBD_FORCE_ENABLE:
994     + case ARM64_SSBD_KERNEL:
995     + arm64_set_ssbd_mitigation(true);
996     + }
997     }
998    
999     local_dbg_restore(flags);
1000     diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
1001     new file mode 100644
1002     index 000000000000..0560738c1d5c
1003     --- /dev/null
1004     +++ b/arch/arm64/kernel/ssbd.c
1005     @@ -0,0 +1,108 @@
1006     +// SPDX-License-Identifier: GPL-2.0
1007     +/*
1008     + * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
1009     + */
1010     +
1011     +#include <linux/errno.h>
1012     +#include <linux/prctl.h>
1013     +#include <linux/sched.h>
1014     +#include <linux/thread_info.h>
1015     +
1016     +#include <asm/cpufeature.h>
1017     +
1018     +/*
1019     + * prctl interface for SSBD
1020     + */
1021     +static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
1022     +{
1023     + int state = arm64_get_ssbd_state();
1024     +
1025     + /* Unsupported */
1026     + if (state == ARM64_SSBD_UNKNOWN)
1027     + return -EINVAL;
1028     +
1029     + /* Treat the unaffected/mitigated state separately */
1030     + if (state == ARM64_SSBD_MITIGATED) {
1031     + switch (ctrl) {
1032     + case PR_SPEC_ENABLE:
1033     + return -EPERM;
1034     + case PR_SPEC_DISABLE:
1035     + case PR_SPEC_FORCE_DISABLE:
1036     + return 0;
1037     + }
1038     + }
1039     +
1040     + /*
1041     + * Things are a bit backward here: the arm64 internal API
1042     + * *enables the mitigation* when the userspace API *disables
1043     + * speculation*. So much fun.
1044     + */
1045     + switch (ctrl) {
1046     + case PR_SPEC_ENABLE:
1047     + /* If speculation is force disabled, enable is not allowed */
1048     + if (state == ARM64_SSBD_FORCE_ENABLE ||
1049     + task_spec_ssb_force_disable(task))
1050     + return -EPERM;
1051     + task_clear_spec_ssb_disable(task);
1052     + clear_tsk_thread_flag(task, TIF_SSBD);
1053     + break;
1054     + case PR_SPEC_DISABLE:
1055     + if (state == ARM64_SSBD_FORCE_DISABLE)
1056     + return -EPERM;
1057     + task_set_spec_ssb_disable(task);
1058     + set_tsk_thread_flag(task, TIF_SSBD);
1059     + break;
1060     + case PR_SPEC_FORCE_DISABLE:
1061     + if (state == ARM64_SSBD_FORCE_DISABLE)
1062     + return -EPERM;
1063     + task_set_spec_ssb_disable(task);
1064     + task_set_spec_ssb_force_disable(task);
1065     + set_tsk_thread_flag(task, TIF_SSBD);
1066     + break;
1067     + default:
1068     + return -ERANGE;
1069     + }
1070     +
1071     + return 0;
1072     +}
1073     +
1074     +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1075     + unsigned long ctrl)
1076     +{
1077     + switch (which) {
1078     + case PR_SPEC_STORE_BYPASS:
1079     + return ssbd_prctl_set(task, ctrl);
1080     + default:
1081     + return -ENODEV;
1082     + }
1083     +}
1084     +
1085     +static int ssbd_prctl_get(struct task_struct *task)
1086     +{
1087     + switch (arm64_get_ssbd_state()) {
1088     + case ARM64_SSBD_UNKNOWN:
1089     + return -EINVAL;
1090     + case ARM64_SSBD_FORCE_ENABLE:
1091     + return PR_SPEC_DISABLE;
1092     + case ARM64_SSBD_KERNEL:
1093     + if (task_spec_ssb_force_disable(task))
1094     + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1095     + if (task_spec_ssb_disable(task))
1096     + return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1097     + return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1098     + case ARM64_SSBD_FORCE_DISABLE:
1099     + return PR_SPEC_ENABLE;
1100     + default:
1101     + return PR_SPEC_NOT_AFFECTED;
1102     + }
1103     +}
1104     +
1105     +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
1106     +{
1107     + switch (which) {
1108     + case PR_SPEC_STORE_BYPASS:
1109     + return ssbd_prctl_get(task);
1110     + default:
1111     + return -ENODEV;
1112     + }
1113     +}
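
(Illustration, not part of the patch: from userspace, the interface above is driven with prctl(2). Note the inversion the comment in ssbd_prctl_set() points out: disabling "speculation" enables the mitigation. A minimal sketch:)

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
            /* Request the SSBD mitigation for this task. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                      PR_SPEC_DISABLE, 0, 0))
                    perror("PR_SET_SPECULATION_CTRL");

            /* In the ARM64_SSBD_KERNEL case this should report
             * PR_SPEC_PRCTL | PR_SPEC_DISABLE. */
            printf("state: %d\n",
                   (int)prctl(PR_GET_SPECULATION_CTRL,
                              PR_SPEC_STORE_BYPASS, 0, 0, 0));
            return 0;
    }
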
1114     diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
1115     index 77cd655e6eb7..7a655e60cf4b 100644
1116     --- a/arch/arm64/kernel/suspend.c
1117     +++ b/arch/arm64/kernel/suspend.c
1118     @@ -62,6 +62,14 @@ void notrace __cpu_suspend_exit(void)
1119     */
1120     if (hw_breakpoint_restore)
1121     hw_breakpoint_restore(cpu);
1122     +
1123     + /*
1124     + * On resume, firmware implementing dynamic mitigation will
1125     + * have turned the mitigation on. If the user has forcefully
1126     + * disabled it, make sure their wishes are obeyed.
1127     + */
1128     + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
1129     + arm64_set_ssbd_mitigation(false);
1130     }
1131    
1132     /*
1133     diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
1134     index 870828c364c5..dea20651a5f1 100644
1135     --- a/arch/arm64/kvm/hyp-init.S
1136     +++ b/arch/arm64/kvm/hyp-init.S
1137     @@ -122,6 +122,10 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
1138     kern_hyp_va x2
1139     msr vbar_el2, x2
1140    
1141     + /* copy tpidr_el1 into tpidr_el2 for use by HYP */
1142     + mrs x1, tpidr_el1
1143     + msr tpidr_el2, x1
1144     +
1145     /* Hello, World! */
1146     eret
1147     ENDPROC(__kvm_hyp_init)
1148     diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
1149     index 9c45c6af1f58..a7b3c198d4de 100644
1150     --- a/arch/arm64/kvm/hyp/entry.S
1151     +++ b/arch/arm64/kvm/hyp/entry.S
1152     @@ -62,9 +62,6 @@ ENTRY(__guest_enter)
1153     // Store the host regs
1154     save_callee_saved_regs x1
1155    
1156     - // Store the host_ctxt for use at exit time
1157     - str x1, [sp, #-16]!
1158     -
1159     add x18, x0, #VCPU_CONTEXT
1160    
1161     // Restore guest regs x0-x17
1162     @@ -118,8 +115,7 @@ ENTRY(__guest_exit)
1163     // Store the guest regs x19-x29, lr
1164     save_callee_saved_regs x1
1165    
1166     - // Restore the host_ctxt from the stack
1167     - ldr x2, [sp], #16
1168     + get_host_ctxt x2, x3
1169    
1170     // Now restore the host regs
1171     restore_callee_saved_regs x2
1172     @@ -159,6 +155,10 @@ abort_guest_exit_end:
1173     ENDPROC(__guest_exit)
1174    
1175     ENTRY(__fpsimd_guest_restore)
1176     + // x0: esr
1177     + // x1: vcpu
1178     + // x2-x29,lr: vcpu regs
1179     + // vcpu x0-x1 on the stack
1180     stp x2, x3, [sp, #-16]!
1181     stp x4, lr, [sp, #-16]!
1182    
1183     @@ -173,7 +173,7 @@ alternative_else
1184     alternative_endif
1185     isb
1186    
1187     - mrs x3, tpidr_el2
1188     + mov x3, x1
1189    
1190     ldr x0, [x3, #VCPU_HOST_CONTEXT]
1191     kern_hyp_va x0
1192     diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
1193     index f49b53331d28..3c283fd8c8f5 100644
1194     --- a/arch/arm64/kvm/hyp/hyp-entry.S
1195     +++ b/arch/arm64/kvm/hyp/hyp-entry.S
1196     @@ -57,13 +57,8 @@ ENDPROC(__vhe_hyp_call)
1197     el1_sync: // Guest trapped into EL2
1198     stp x0, x1, [sp, #-16]!
1199    
1200     -alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
1201     - mrs x1, esr_el2
1202     -alternative_else
1203     - mrs x1, esr_el1
1204     -alternative_endif
1205     - lsr x0, x1, #ESR_ELx_EC_SHIFT
1206     -
1207     + mrs x0, esr_el2
1208     + lsr x0, x0, #ESR_ELx_EC_SHIFT
1209     cmp x0, #ESR_ELx_EC_HVC64
1210     ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
1211     b.ne el1_trap
1212     @@ -111,14 +106,55 @@ el1_hvc_guest:
1213     */
1214     ldr x1, [sp] // Guest's x0
1215     eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
1216     + cbz w1, wa_epilogue
1217     +
1218     + /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
1219     + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
1220     + ARM_SMCCC_ARCH_WORKAROUND_2)
1221     cbnz w1, el1_trap
1222     - mov x0, x1
1223     +
1224     +#ifdef CONFIG_ARM64_SSBD
1225     +alternative_cb arm64_enable_wa2_handling
1226     + b wa2_end
1227     +alternative_cb_end
1228     + get_vcpu_ptr x2, x0
1229     + ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
1230     +
1231     + // Sanitize the argument and update the guest flags
1232     + ldr x1, [sp, #8] // Guest's x1
1233     + clz w1, w1 // Murphy's device:
1234     + lsr w1, w1, #5 // w1 = !!w1 without using
1235     + eor w1, w1, #1 // the flags...
1236     + bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
1237     + str x0, [x2, #VCPU_WORKAROUND_FLAGS]
1238     +
1239     + /* Check that we actually need to perform the call */
1240     + hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
1241     + cbz x0, wa2_end
1242     +
1243     + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
1244     + smc #0
1245     +
1246     + /* Don't leak data from the SMC call */
1247     + mov x3, xzr
1248     +wa2_end:
1249     + mov x2, xzr
1250     + mov x1, xzr
1251     +#endif
1252     +
1253     +wa_epilogue:
1254     + mov x0, xzr
1255     add sp, sp, #16
1256     eret
1257    
1258     el1_trap:
1259     + get_vcpu_ptr x1, x0
1260     +
1261     + mrs x0, esr_el2
1262     + lsr x0, x0, #ESR_ELx_EC_SHIFT
1263     /*
1264     * x0: ESR_EC
1265     + * x1: vcpu pointer
1266     */
1267    
1268     /*
1269     @@ -132,19 +168,18 @@ alternative_if_not ARM64_HAS_NO_FPSIMD
1270     b.eq __fpsimd_guest_restore
1271     alternative_else_nop_endif
1272    
1273     - mrs x1, tpidr_el2
1274     mov x0, #ARM_EXCEPTION_TRAP
1275     b __guest_exit
1276    
1277     el1_irq:
1278     stp x0, x1, [sp, #-16]!
1279     - mrs x1, tpidr_el2
1280     + get_vcpu_ptr x1, x0
1281     mov x0, #ARM_EXCEPTION_IRQ
1282     b __guest_exit
1283    
1284     el1_error:
1285     stp x0, x1, [sp, #-16]!
1286     - mrs x1, tpidr_el2
1287     + get_vcpu_ptr x1, x0
1288     mov x0, #ARM_EXCEPTION_EL1_SERROR
1289     b __guest_exit
1290    
1291     @@ -179,6 +214,11 @@ ENTRY(__hyp_do_panic)
1292     eret
1293     ENDPROC(__hyp_do_panic)
1294    
1295     +ENTRY(__hyp_panic)
1296     + get_host_ctxt x0, x1
1297     + b hyp_panic
1298     +ENDPROC(__hyp_panic)
1299     +
1300     .macro invalid_vector label, target = __hyp_panic
1301     .align 2
1302     \label:
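
(Illustration, not part of the patch: the clz/lsr/eor sequence in el1_hvc_guest normalizes the guest's x1 to 0 or 1 without touching the condition flags. With clz32() as a hypothetical stand-in for the A64 clz instruction, which defines clz32(0) == 32:)

    static inline unsigned int to_bool(unsigned int x)
    {
            /* x == 0: clz32 = 32, >>5 gives 1, ^1 gives 0.
             * x != 0: clz32 <= 31, >>5 gives 0, ^1 gives 1.
             * Result: !!x, computed without the flags. */
            return (clz32(x) >> 5) ^ 1;
    }
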
1303     diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
1304     index e08ae6b6b63e..b2f1992c6234 100644
1305     --- a/arch/arm64/kvm/hyp/switch.c
1306     +++ b/arch/arm64/kvm/hyp/switch.c
1307     @@ -15,6 +15,7 @@
1308     * along with this program. If not, see <http://www.gnu.org/licenses/>.
1309     */
1310    
1311     +#include <linux/arm-smccc.h>
1312     #include <linux/types.h>
1313     #include <linux/jump_label.h>
1314     #include <uapi/linux/psci.h>
1315     @@ -281,6 +282,39 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
1316     write_sysreg_el2(*vcpu_pc(vcpu), elr);
1317     }
1318    
1319     +static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
1320     +{
1321     + if (!cpus_have_const_cap(ARM64_SSBD))
1322     + return false;
1323     +
1324     + return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
1325     +}
1326     +
1327     +static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
1328     +{
1329     +#ifdef CONFIG_ARM64_SSBD
1330     + /*
1331     + * The host runs with the workaround always present. If the
1332     + * guest wants it disabled, so be it...
1333     + */
1334     + if (__needs_ssbd_off(vcpu) &&
1335     + __hyp_this_cpu_read(arm64_ssbd_callback_required))
1336     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
1337     +#endif
1338     +}
1339     +
1340     +static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
1341     +{
1342     +#ifdef CONFIG_ARM64_SSBD
1343     + /*
1344     + * If the guest has disabled the workaround, bring it back on.
1345     + */
1346     + if (__needs_ssbd_off(vcpu) &&
1347     + __hyp_this_cpu_read(arm64_ssbd_callback_required))
1348     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
1349     +#endif
1350     +}
1351     +
1352     int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
1353     {
1354     struct kvm_cpu_context *host_ctxt;
1355     @@ -289,9 +323,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
1356     u64 exit_code;
1357    
1358     vcpu = kern_hyp_va(vcpu);
1359     - write_sysreg(vcpu, tpidr_el2);
1360    
1361     host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
1362     + host_ctxt->__hyp_running_vcpu = vcpu;
1363     guest_ctxt = &vcpu->arch.ctxt;
1364    
1365     __sysreg_save_host_state(host_ctxt);
1366     @@ -311,6 +345,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
1367     __sysreg_restore_guest_state(guest_ctxt);
1368     __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
1369    
1370     + __set_guest_arch_workaround_state(vcpu);
1371     +
1372     /* Jump in the fire! */
1373     again:
1374     exit_code = __guest_enter(vcpu, host_ctxt);
1375     @@ -367,6 +403,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
1376     /* 0 falls through to be handled out of EL2 */
1377     }
1378    
1379     + __set_host_arch_workaround_state(vcpu);
1380     +
1381     if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
1382     u32 midr = read_cpuid_id();
1383    
1384     @@ -406,7 +444,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
1385    
1386     static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
1387    
1388     -static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
1389     +static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
1390     + struct kvm_vcpu *vcpu)
1391     {
1392     unsigned long str_va;
1393    
1394     @@ -420,35 +459,32 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
1395     __hyp_do_panic(str_va,
1396     spsr, elr,
1397     read_sysreg(esr_el2), read_sysreg_el2(far),
1398     - read_sysreg(hpfar_el2), par,
1399     - (void *)read_sysreg(tpidr_el2));
1400     + read_sysreg(hpfar_el2), par, vcpu);
1401     }
1402    
1403     -static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
1404     +static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
1405     + struct kvm_vcpu *vcpu)
1406     {
1407     panic(__hyp_panic_string,
1408     spsr, elr,
1409     read_sysreg_el2(esr), read_sysreg_el2(far),
1410     - read_sysreg(hpfar_el2), par,
1411     - (void *)read_sysreg(tpidr_el2));
1412     + read_sysreg(hpfar_el2), par, vcpu);
1413     }
1414    
1415     static hyp_alternate_select(__hyp_call_panic,
1416     __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
1417     ARM64_HAS_VIRT_HOST_EXTN);
1418    
1419     -void __hyp_text __noreturn __hyp_panic(void)
1420     +void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
1421     {
1422     + struct kvm_vcpu *vcpu = NULL;
1423     +
1424     u64 spsr = read_sysreg_el2(spsr);
1425     u64 elr = read_sysreg_el2(elr);
1426     u64 par = read_sysreg(par_el1);
1427    
1428     if (read_sysreg(vttbr_el2)) {
1429     - struct kvm_vcpu *vcpu;
1430     - struct kvm_cpu_context *host_ctxt;
1431     -
1432     - vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
1433     - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
1434     + vcpu = host_ctxt->__hyp_running_vcpu;
1435     __timer_save_state(vcpu);
1436     __deactivate_traps(vcpu);
1437     __deactivate_vm(vcpu);
1438     @@ -456,7 +492,7 @@ void __hyp_text __noreturn __hyp_panic(void)
1439     }
1440    
1441     /* Call panic for real */
1442     - __hyp_call_panic()(spsr, elr, par);
1443     + __hyp_call_panic()(spsr, elr, par, vcpu);
1444    
1445     unreachable();
1446     }
1447     diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
1448     index 934137647837..e19d89cabf2a 100644
1449     --- a/arch/arm64/kvm/hyp/sysreg-sr.c
1450     +++ b/arch/arm64/kvm/hyp/sysreg-sr.c
1451     @@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
1452     /*
1453     * Non-VHE: Both host and guest must save everything.
1454     *
1455     - * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
1456     - * pstate, and guest must save everything.
1457     + * VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0,
1458     + * and guest must save everything.
1459     */
1460    
1461     static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
1462     @@ -36,11 +36,8 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
1463     ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
1464     ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
1465     ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
1466     - ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
1467     ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
1468     ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
1469     - ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
1470     - ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
1471     }
1472    
1473     static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
1474     @@ -62,10 +59,13 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
1475     ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
1476     ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
1477     ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
1478     + ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
1479    
1480     ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
1481     ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
1482     ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
1483     + ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
1484     + ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
1485     }
1486    
1487     static hyp_alternate_select(__sysreg_call_save_host_state,
1488     @@ -89,11 +89,8 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
1489     write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
1490     write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
1491     write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
1492     - write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
1493     write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
1494     write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
1495     - write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
1496     - write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
1497     }
1498    
1499     static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
1500     @@ -115,10 +112,13 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
1501     write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
1502     write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
1503     write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
1504     + write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
1505    
1506     write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
1507     write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
1508     write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
1509     + write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
1510     + write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
1511     }
1512    
1513     static hyp_alternate_select(__sysreg_call_restore_host_state,
1514     @@ -183,3 +183,8 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
1515     if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
1516     write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
1517     }
1518     +
1519     +void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
1520     +{
1521     + asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
1522     +}
1523     diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
1524     index 3256b9228e75..a74311beda35 100644
1525     --- a/arch/arm64/kvm/reset.c
1526     +++ b/arch/arm64/kvm/reset.c
1527     @@ -122,6 +122,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
1528     /* Reset PMU */
1529     kvm_pmu_vcpu_reset(vcpu);
1530    
1531     + /* Default workaround setup is enabled (if supported) */
1532     + if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
1533     + vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
1534     +
1535     /* Reset timer */
1536     return kvm_timer_vcpu_reset(vcpu);
1537     }
1538     diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
1539     index bf0821b7b1ab..10c835f13f62 100644
1540     --- a/arch/arm64/mm/proc.S
1541     +++ b/arch/arm64/mm/proc.S
1542     @@ -70,7 +70,11 @@ ENTRY(cpu_do_suspend)
1543     mrs x8, mdscr_el1
1544     mrs x9, oslsr_el1
1545     mrs x10, sctlr_el1
1546     +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
1547     mrs x11, tpidr_el1
1548     +alternative_else
1549     + mrs x11, tpidr_el2
1550     +alternative_endif
1551     mrs x12, sp_el0
1552     stp x2, x3, [x0]
1553     stp x4, xzr, [x0, #16]
1554     @@ -116,7 +120,11 @@ ENTRY(cpu_do_resume)
1555     msr mdscr_el1, x10
1556    
1557     msr sctlr_el1, x12
1558     +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
1559     msr tpidr_el1, x13
1560     +alternative_else
1561     + msr tpidr_el2, x13
1562     +alternative_endif
1563     msr sp_el0, x14
1564     /*
1565     * Restore oslsr_el1 by writing oslar_el1
1566     diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
1567     index 386a6900e206..3bf87f92b932 100644
1568     --- a/arch/x86/include/asm/asm.h
1569     +++ b/arch/x86/include/asm/asm.h
1570     @@ -46,6 +46,65 @@
1571     #define _ASM_SI __ASM_REG(si)
1572     #define _ASM_DI __ASM_REG(di)
1573    
1574     +#ifndef __x86_64__
1575     +/* 32 bit */
1576     +
1577     +#define _ASM_ARG1 _ASM_AX
1578     +#define _ASM_ARG2 _ASM_DX
1579     +#define _ASM_ARG3 _ASM_CX
1580     +
1581     +#define _ASM_ARG1L eax
1582     +#define _ASM_ARG2L edx
1583     +#define _ASM_ARG3L ecx
1584     +
1585     +#define _ASM_ARG1W ax
1586     +#define _ASM_ARG2W dx
1587     +#define _ASM_ARG3W cx
1588     +
1589     +#define _ASM_ARG1B al
1590     +#define _ASM_ARG2B dl
1591     +#define _ASM_ARG3B cl
1592     +
1593     +#else
1594     +/* 64 bit */
1595     +
1596     +#define _ASM_ARG1 _ASM_DI
1597     +#define _ASM_ARG2 _ASM_SI
1598     +#define _ASM_ARG3 _ASM_DX
1599     +#define _ASM_ARG4 _ASM_CX
1600     +#define _ASM_ARG5 r8
1601     +#define _ASM_ARG6 r9
1602     +
1603     +#define _ASM_ARG1Q rdi
1604     +#define _ASM_ARG2Q rsi
1605     +#define _ASM_ARG3Q rdx
1606     +#define _ASM_ARG4Q rcx
1607     +#define _ASM_ARG5Q r8
1608     +#define _ASM_ARG6Q r9
1609     +
1610     +#define _ASM_ARG1L edi
1611     +#define _ASM_ARG2L esi
1612     +#define _ASM_ARG3L edx
1613     +#define _ASM_ARG4L ecx
1614     +#define _ASM_ARG5L r8d
1615     +#define _ASM_ARG6L r9d
1616     +
1617     +#define _ASM_ARG1W di
1618     +#define _ASM_ARG2W si
1619     +#define _ASM_ARG3W dx
1620     +#define _ASM_ARG4W cx
1621     +#define _ASM_ARG5W r8w
1622     +#define _ASM_ARG6W r9w
1623     +
1624     +#define _ASM_ARG1B dil
1625     +#define _ASM_ARG2B sil
1626     +#define _ASM_ARG3B dl
1627     +#define _ASM_ARG4B cl
1628     +#define _ASM_ARG5B r8b
1629     +#define _ASM_ARG6B r9b
1630     +
1631     +#endif
1632     +
1633     /*
1634     * Macros to generate condition code outputs from inline assembly,
1635     * The output operand must be type "bool".
1636     diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
1637     index 89f08955fff7..c4fc17220df9 100644
1638     --- a/arch/x86/include/asm/irqflags.h
1639     +++ b/arch/x86/include/asm/irqflags.h
1640     @@ -13,7 +13,7 @@
1641     * Interrupt control:
1642     */
1643    
1644     -static inline unsigned long native_save_fl(void)
1645     +extern inline unsigned long native_save_fl(void)
1646     {
1647     unsigned long flags;
1648    
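Turning native_save_fl() from static inline into extern inline (the kernel builds with gnu_inline semantics) keeps the body available for inlining at every call site while emitting no out-of-line copy from the header; the one real symbol now comes from the new arch/x86/kernel/irqflags.S below, so a compiler that declines to inline still finds something to call. A minimal userspace sketch of the pattern, with hypothetical names:

    /* twice.h -- body visible for inlining; no symbol emitted from here */
    extern inline __attribute__((gnu_inline)) int twice(int x)
    {
            return 2 * x;
    }

    /* twice.c -- does NOT include twice.h; provides the one out-of-line
     * definition (the kernel supplies this copy in assembly instead) */
    int twice(int x)
    {
            return 2 * x;
    }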
1649     diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
1650     index 295abaa58add..4137f7ba0f88 100644
1651     --- a/arch/x86/kernel/Makefile
1652     +++ b/arch/x86/kernel/Makefile
1653     @@ -58,6 +58,7 @@ obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
1654     obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
1655     obj-y += pci-iommu_table.o
1656     obj-y += resource.o
1657     +obj-y += irqflags.o
1658    
1659     obj-y += process.o
1660     obj-y += fpu/
1661     diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
1662     new file mode 100644
1663     index 000000000000..ddeeaac8adda
1664     --- /dev/null
1665     +++ b/arch/x86/kernel/irqflags.S
1666     @@ -0,0 +1,26 @@
1667     +/* SPDX-License-Identifier: GPL-2.0 */
1668     +
1669     +#include <asm/asm.h>
1670     +#include <asm/export.h>
1671     +#include <linux/linkage.h>
1672     +
1673     +/*
1674     + * unsigned long native_save_fl(void)
1675     + */
1676     +ENTRY(native_save_fl)
1677     + pushf
1678     + pop %_ASM_AX
1679     + ret
1680     +ENDPROC(native_save_fl)
1681     +EXPORT_SYMBOL(native_save_fl)
1682     +
1683     +/*
1684     + * void native_restore_fl(unsigned long flags)
1685     + * %eax/%rdi: flags
1686     + */
1687     +ENTRY(native_restore_fl)
1688     + push %_ASM_ARG1
1689     + popf
1690     + ret
1691     +ENDPROC(native_restore_fl)
1692     +EXPORT_SYMBOL(native_restore_fl)
1693     diff --git a/block/blk-core.c b/block/blk-core.c
1694     index 6f6e21821d2d..68bae6338ad4 100644
1695     --- a/block/blk-core.c
1696     +++ b/block/blk-core.c
1697     @@ -779,7 +779,6 @@ EXPORT_SYMBOL(blk_alloc_queue);
1698     int blk_queue_enter(struct request_queue *q, bool nowait)
1699     {
1700     while (true) {
1701     - int ret;
1702    
1703     if (percpu_ref_tryget_live(&q->q_usage_counter))
1704     return 0;
1705     @@ -796,13 +795,11 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
1706     */
1707     smp_rmb();
1708    
1709     - ret = wait_event_interruptible(q->mq_freeze_wq,
1710     - !atomic_read(&q->mq_freeze_depth) ||
1711     - blk_queue_dying(q));
1712     + wait_event(q->mq_freeze_wq,
1713     + !atomic_read(&q->mq_freeze_depth) ||
1714     + blk_queue_dying(q));
1715     if (blk_queue_dying(q))
1716     return -ENODEV;
1717     - if (ret)
1718     - return ret;
1719     }
1720     }
1721    
1722     diff --git a/crypto/af_alg.c b/crypto/af_alg.c
1723     index 815ee1075574..42dfdd1fd6d8 100644
1724     --- a/crypto/af_alg.c
1725     +++ b/crypto/af_alg.c
1726     @@ -1183,8 +1183,10 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
1727    
1728     /* make one iovec available as scatterlist */
1729     err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
1730     - if (err < 0)
1731     + if (err < 0) {
1732     + rsgl->sg_num_bytes = 0;
1733     return err;
1734     + }
1735    
1736     /* chain the new scatterlist with previous one */
1737     if (areq->last_rsgl)
1738     diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
1739     index a8d2eb0ceb8d..2c288d1f42bb 100644
1740     --- a/drivers/atm/zatm.c
1741     +++ b/drivers/atm/zatm.c
1742     @@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
1743     return -EFAULT;
1744     if (pool < 0 || pool > ZATM_LAST_POOL)
1745     return -EINVAL;
1746     + pool = array_index_nospec(pool,
1747     + ZATM_LAST_POOL + 1);
1748     if (copy_from_user(&info,
1749     &((struct zatm_pool_req __user *) arg)->info,
1750     sizeof(info))) return -EFAULT;
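array_index_nospec() here (and in the cxgb3 hunk further down) is the standard Spectre-v1 hardening: after the ordinary bounds check, the index is clamped so that even a mispredicted branch cannot feed an out-of-bounds value into the dependent load. The general shape, as a sketch with a hypothetical table:

    #include <linux/errno.h>
    #include <linux/nospec.h>
    #include <linux/types.h>

    static int demo_lookup(int idx, int nr, const u32 *table, u32 *out)
    {
            if (idx < 0 || idx >= nr)
                    return -EINVAL;
            /* under misspeculation the clamped index becomes 0, never OOB */
            idx = array_index_nospec(idx, nr);
            *out = table[idx];
            return 0;
    }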
1751     diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
1752     index 8b432d6e846d..c9ce716247c1 100644
1753     --- a/drivers/cpufreq/cppc_cpufreq.c
1754     +++ b/drivers/cpufreq/cppc_cpufreq.c
1755     @@ -126,6 +126,49 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
1756     cpu->perf_caps.lowest_perf, cpu_num, ret);
1757     }
1758    
1759     +/*
1760     + * The PCC subspace describes the rate at which platform can accept commands
1761     + * on the shared PCC channel (including READs which do not count towards freq
1762     + * transition requests), so ideally we need to use the PCC values as a fallback
1763     + * if we don't have a platform specific transition_delay_us
1764     + */
1765     +#ifdef CONFIG_ARM64
1766     +#include <asm/cputype.h>
1767     +
1768     +static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
1769     +{
1770     + unsigned long implementor = read_cpuid_implementor();
1771     + unsigned long part_num = read_cpuid_part_number();
1772     + unsigned int delay_us = 0;
1773     +
1774     + switch (implementor) {
1775     + case ARM_CPU_IMP_QCOM:
1776     + switch (part_num) {
1777     + case QCOM_CPU_PART_FALKOR_V1:
1778     + case QCOM_CPU_PART_FALKOR:
1779     + delay_us = 10000;
1780     + break;
1781     + default:
1782     + delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
1783     + break;
1784     + }
1785     + break;
1786     + default:
1787     + delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
1788     + break;
1789     + }
1790     +
1791     + return delay_us;
1792     +}
1793     +
1794     +#else
1795     +
1796     +static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
1797     +{
1798     + return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
1799     +}
1800     +#endif
1801     +
1802     static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
1803     {
1804     struct cppc_cpudata *cpu;
1805     @@ -163,8 +206,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
1806     policy->cpuinfo.max_freq = cppc_dmi_max_khz;
1807    
1808     policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
1809     - policy->transition_delay_us = cppc_get_transition_latency(cpu_num) /
1810     - NSEC_PER_USEC;
1811     + policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
1812     policy->shared_type = cpu->shared_type;
1813    
1814     if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
1815     diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
1816     index 65dc78b91dea..3f9eee7e555f 100644
1817     --- a/drivers/crypto/amcc/crypto4xx_core.c
1818     +++ b/drivers/crypto/amcc/crypto4xx_core.c
1819     @@ -207,7 +207,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
1820     dev->pdr_pa);
1821     return -ENOMEM;
1822     }
1823     - memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
1824     + memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
1825     dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
1826     256 * PPC4XX_NUM_PD,
1827     &dev->shadow_sa_pool_pa,
1828     @@ -240,13 +240,15 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
1829    
1830     static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
1831     {
1832     - if (dev->pdr != NULL)
1833     + if (dev->pdr)
1834     dma_free_coherent(dev->core_dev->device,
1835     sizeof(struct ce_pd) * PPC4XX_NUM_PD,
1836     dev->pdr, dev->pdr_pa);
1837     +
1838     if (dev->shadow_sa_pool)
1839     dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
1840     dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
1841     +
1842     if (dev->shadow_sr_pool)
1843     dma_free_coherent(dev->core_dev->device,
1844     sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
1845     @@ -416,12 +418,12 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
1846    
1847     static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
1848     {
1849     - if (dev->sdr != NULL)
1850     + if (dev->sdr)
1851     dma_free_coherent(dev->core_dev->device,
1852     sizeof(struct ce_sd) * PPC4XX_NUM_SD,
1853     dev->sdr, dev->sdr_pa);
1854    
1855     - if (dev->scatter_buffer_va != NULL)
1856     + if (dev->scatter_buffer_va)
1857     dma_free_coherent(dev->core_dev->device,
1858     dev->scatter_buffer_size * PPC4XX_NUM_SD,
1859     dev->scatter_buffer_va,
1860     @@ -1033,12 +1035,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1861     break;
1862     }
1863    
1864     - if (rc) {
1865     - list_del(&alg->entry);
1866     + if (rc)
1867     kfree(alg);
1868     - } else {
1869     + else
1870     list_add_tail(&alg->entry, &sec_dev->alg_list);
1871     - }
1872     }
1873    
1874     return 0;
1875     @@ -1193,7 +1193,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
1876    
1877     rc = crypto4xx_build_gdr(core_dev->dev);
1878     if (rc)
1879     - goto err_build_gdr;
1880     + goto err_build_pdr;
1881    
1882     rc = crypto4xx_build_sdr(core_dev->dev);
1883     if (rc)
1884     @@ -1236,12 +1236,11 @@ static int crypto4xx_probe(struct platform_device *ofdev)
1885     err_request_irq:
1886     irq_dispose_mapping(core_dev->irq);
1887     tasklet_kill(&core_dev->tasklet);
1888     - crypto4xx_destroy_sdr(core_dev->dev);
1889     err_build_sdr:
1890     + crypto4xx_destroy_sdr(core_dev->dev);
1891     crypto4xx_destroy_gdr(core_dev->dev);
1892     -err_build_gdr:
1893     - crypto4xx_destroy_pdr(core_dev->dev);
1894     err_build_pdr:
1895     + crypto4xx_destroy_pdr(core_dev->dev);
1896     kfree(core_dev->dev);
1897     err_alloc_dev:
1898     kfree(core_dev);
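The probe-path churn above restores the usual goto-unwind contract: each error label undoes exactly the steps that completed before the failure, in reverse order, so a failed crypto4xx_build_gdr() tears down the PDR it built earlier rather than the GDR it never built. The idiom in isolation (sketch; all build_*/destroy_* names are hypothetical):

    static int build_a(void);  static void destroy_a(void);
    static int build_b(void);  static void destroy_b(void);
    static int build_c(void);

    static int demo_probe(void)
    {
            int rc;

            rc = build_a();                 /* e.g. the PDR */
            if (rc)
                    return rc;              /* nothing to unwind yet */
            rc = build_b();                 /* e.g. the GDR */
            if (rc)
                    goto err_a;             /* only a exists */
            rc = build_c();                 /* e.g. the SDR */
            if (rc)
                    goto err_b;             /* unwind b, then a */
            return 0;

    err_b:
            destroy_b();
    err_a:
            destroy_a();
            return rc;
    }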
1899     diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
1900     index 72f381522cb2..a22828713c1c 100644
1901     --- a/drivers/media/rc/rc-main.c
1902     +++ b/drivers/media/rc/rc-main.c
1903     @@ -1824,11 +1824,11 @@ void rc_unregister_device(struct rc_dev *dev)
1904     if (!dev)
1905     return;
1906    
1907     - del_timer_sync(&dev->timer_keyup);
1908     -
1909     if (dev->driver_type == RC_DRIVER_IR_RAW)
1910     ir_raw_event_unregister(dev);
1911    
1912     + del_timer_sync(&dev->timer_keyup);
1913     +
1914     rc_free_rx_device(dev);
1915    
1916     device_del(&dev->dev);
1917     diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
1918     index 56e2e177644d..3f4f4aea0e8b 100644
1919     --- a/drivers/mtd/nand/denali_dt.c
1920     +++ b/drivers/mtd/nand/denali_dt.c
1921     @@ -122,7 +122,11 @@ static int denali_dt_probe(struct platform_device *pdev)
1922     if (ret)
1923     return ret;
1924    
1925     - denali->clk_x_rate = clk_get_rate(dt->clk);
1926     + /*
1927     + * Hardcode the clock rate for backward compatibility.
1928     + * This works for both SOCFPGA and UniPhier.
1929     + */
1930     + denali->clk_x_rate = 200000000;
1931    
1932     ret = denali_init(denali);
1933     if (ret)
1934     diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
1935     index 567ee54504bc..5e5022fa1d04 100644
1936     --- a/drivers/net/ethernet/atheros/alx/main.c
1937     +++ b/drivers/net/ethernet/atheros/alx/main.c
1938     @@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
1939     struct pci_dev *pdev = to_pci_dev(dev);
1940     struct alx_priv *alx = pci_get_drvdata(pdev);
1941     struct alx_hw *hw = &alx->hw;
1942     + int err;
1943    
1944     alx_reset_phy(hw);
1945    
1946     if (!netif_running(alx->dev))
1947     return 0;
1948     netif_device_attach(alx->dev);
1949     - return __alx_open(alx, true);
1950     +
1951     + rtnl_lock();
1952     + err = __alx_open(alx, true);
1953     + rtnl_unlock();
1954     +
1955     + return err;
1956     }
1957    
1958     static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
1959     diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
1960     index 4f3845a58126..68470c7c630a 100644
1961     --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
1962     +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
1963     @@ -1062,7 +1062,8 @@ static int bcm_enet_open(struct net_device *dev)
1964     val = enet_readl(priv, ENET_CTL_REG);
1965     val |= ENET_CTL_ENABLE_MASK;
1966     enet_writel(priv, val, ENET_CTL_REG);
1967     - enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1968     + if (priv->dma_has_sram)
1969     + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1970     enet_dmac_writel(priv, priv->dma_chan_en_mask,
1971     ENETDMAC_CHANCFG, priv->rx_chan);
1972    
1973     @@ -1773,7 +1774,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
1974     ret = PTR_ERR(priv->mac_clk);
1975     goto out;
1976     }
1977     - clk_prepare_enable(priv->mac_clk);
1978     + ret = clk_prepare_enable(priv->mac_clk);
1979     + if (ret)
1980     + goto out_put_clk_mac;
1981    
1982     /* initialize default and fetch platform data */
1983     priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1984     @@ -1805,9 +1808,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
1985     if (IS_ERR(priv->phy_clk)) {
1986     ret = PTR_ERR(priv->phy_clk);
1987     priv->phy_clk = NULL;
1988     - goto out_put_clk_mac;
1989     + goto out_disable_clk_mac;
1990     }
1991     - clk_prepare_enable(priv->phy_clk);
1992     + ret = clk_prepare_enable(priv->phy_clk);
1993     + if (ret)
1994     + goto out_put_clk_phy;
1995     }
1996    
1997     /* do minimal hardware init to be able to probe mii bus */
1998     @@ -1901,13 +1906,16 @@ static int bcm_enet_probe(struct platform_device *pdev)
1999     out_uninit_hw:
2000     /* turn off mdc clock */
2001     enet_writel(priv, 0, ENET_MIISC_REG);
2002     - if (priv->phy_clk) {
2003     + if (priv->phy_clk)
2004     clk_disable_unprepare(priv->phy_clk);
2005     +
2006     +out_put_clk_phy:
2007     + if (priv->phy_clk)
2008     clk_put(priv->phy_clk);
2009     - }
2010    
2011     -out_put_clk_mac:
2012     +out_disable_clk_mac:
2013     clk_disable_unprepare(priv->mac_clk);
2014     +out_put_clk_mac:
2015     clk_put(priv->mac_clk);
2016     out:
2017     free_netdev(dev);
2018     @@ -2752,7 +2760,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2019     ret = PTR_ERR(priv->mac_clk);
2020     goto out_unmap;
2021     }
2022     - clk_enable(priv->mac_clk);
2023     + ret = clk_prepare_enable(priv->mac_clk);
2024     + if (ret)
2025     + goto out_put_clk;
2026    
2027     priv->rx_chan = 0;
2028     priv->tx_chan = 1;
2029     @@ -2773,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2030    
2031     ret = register_netdev(dev);
2032     if (ret)
2033     - goto out_put_clk;
2034     + goto out_disable_clk;
2035    
2036     netif_carrier_off(dev);
2037     platform_set_drvdata(pdev, dev);
2038     @@ -2782,6 +2792,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2039    
2040     return 0;
2041    
2042     +out_disable_clk:
2043     + clk_disable_unprepare(priv->mac_clk);
2044     +
2045     out_put_clk:
2046     clk_put(priv->mac_clk);
2047    
2048     @@ -2813,6 +2826,9 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
2049     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2050     release_mem_region(res->start, resource_size(res));
2051    
2052     + clk_disable_unprepare(priv->mac_clk);
2053     + clk_put(priv->mac_clk);
2054     +
2055     free_netdev(dev);
2056     return 0;
2057     }
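The bcm63xx_enet changes enforce the usual clk API contract: clk_prepare_enable() can fail and must be checked, and every exit path unwinds in exact reverse order -- disable before put, newest acquisition first. The contract in isolation (sketch; the "mac" clock id is hypothetical):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int demo_clk_setup(struct device *dev, struct clk **out)
    {
            struct clk *clk = clk_get(dev, "mac");  /* hypothetical id */
            int ret;

            if (IS_ERR(clk))
                    return PTR_ERR(clk);
            ret = clk_prepare_enable(clk);          /* may fail: check it */
            if (ret) {
                    clk_put(clk);                   /* undo only clk_get() */
                    return ret;
            }
            *out = clk;
            return 0;
    }

    static void demo_clk_teardown(struct clk *clk)
    {
            clk_disable_unprepare(clk);             /* mirror of setup... */
            clk_put(clk);                           /* ...in reverse order */
    }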
2058     diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
2059     index 2220c771092b..678835136bf8 100755
2060     --- a/drivers/net/ethernet/cadence/macb_ptp.c
2061     +++ b/drivers/net/ethernet/cadence/macb_ptp.c
2062     @@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
2063    
2064     if (delta > TSU_NSEC_MAX_VAL) {
2065     gem_tsu_get_time(&bp->ptp_clock_info, &now);
2066     - if (sign)
2067     - now = timespec64_sub(now, then);
2068     - else
2069     - now = timespec64_add(now, then);
2070     + now = timespec64_add(now, then);
2071    
2072     gem_tsu_set_time(&bp->ptp_clock_info,
2073     (const struct timespec64 *)&now);
2074     diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2075     index 6a015362c340..bf291e90cdb0 100644
2076     --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2077     +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2078     @@ -51,6 +51,7 @@
2079     #include <linux/sched.h>
2080     #include <linux/slab.h>
2081     #include <linux/uaccess.h>
2082     +#include <linux/nospec.h>
2083    
2084     #include "common.h"
2085     #include "cxgb3_ioctl.h"
2086     @@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2087    
2088     if (t.qset_idx >= nqsets)
2089     return -EINVAL;
2090     + t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2091    
2092     q = &adapter->params.sge.qset[q1 + t.qset_idx];
2093     t.rspq_size = q->rspq_size;
2094     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2095     index 20a8018d41ef..b68d94b49a8a 100644
2096     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2097     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2098     @@ -2211,9 +2211,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2099     return skb;
2100     }
2101    
2102     -#define IXGBE_XDP_PASS 0
2103     -#define IXGBE_XDP_CONSUMED 1
2104     -#define IXGBE_XDP_TX 2
2105     +#define IXGBE_XDP_PASS 0
2106     +#define IXGBE_XDP_CONSUMED BIT(0)
2107     +#define IXGBE_XDP_TX BIT(1)
2108     +#define IXGBE_XDP_REDIR BIT(2)
2109    
2110     static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
2111     struct xdp_buff *xdp);
2112     @@ -2242,7 +2243,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
2113     case XDP_REDIRECT:
2114     err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2115     if (!err)
2116     - result = IXGBE_XDP_TX;
2117     + result = IXGBE_XDP_REDIR;
2118     else
2119     result = IXGBE_XDP_CONSUMED;
2120     break;
2121     @@ -2302,7 +2303,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2122     unsigned int mss = 0;
2123     #endif /* IXGBE_FCOE */
2124     u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2125     - bool xdp_xmit = false;
2126     + unsigned int xdp_xmit = 0;
2127    
2128     while (likely(total_rx_packets < budget)) {
2129     union ixgbe_adv_rx_desc *rx_desc;
2130     @@ -2342,8 +2343,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2131     }
2132    
2133     if (IS_ERR(skb)) {
2134     - if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
2135     - xdp_xmit = true;
2136     + unsigned int xdp_res = -PTR_ERR(skb);
2137     +
2138     + if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
2139     + xdp_xmit |= xdp_res;
2140     ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2141     } else {
2142     rx_buffer->pagecnt_bias++;
2143     @@ -2415,7 +2418,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2144     total_rx_packets++;
2145     }
2146    
2147     - if (xdp_xmit) {
2148     + if (xdp_xmit & IXGBE_XDP_REDIR)
2149     + xdp_do_flush_map();
2150     +
2151     + if (xdp_xmit & IXGBE_XDP_TX) {
2152     struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
2153    
2154     /* Force memory writes to complete before letting h/w
2155     @@ -2423,8 +2429,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2156     */
2157     wmb();
2158     writel(ring->next_to_use, ring->tail);
2159     -
2160     - xdp_do_flush_map();
2161     }
2162    
2163     u64_stats_update_begin(&rx_ring->syncp);
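The ixgbe rework turns the old boolean into a verdict bitmask: with IXGBE_XDP_TX and IXGBE_XDP_REDIR as distinct BIT()s, one RX poll can record both kinds of work and flush each exactly once at the end -- and the redirect flush now happens even when no TX descriptor was queued, which was the bug. The accumulation pattern in isolation, as a sketch with hypothetical demo_* helpers:

    #include <linux/bitops.h>

    #define DEMO_XDP_TX     BIT(1)          /* mirrors IXGBE_XDP_TX */
    #define DEMO_XDP_REDIR  BIT(2)          /* mirrors IXGBE_XDP_REDIR */

    static unsigned int demo_rx_verdict(int i);     /* hypothetical */
    static void demo_flush_redirects(void);         /* hypothetical */
    static void demo_kick_tx_ring(void);            /* hypothetical */

    static void demo_rx_poll(int budget)
    {
            unsigned int xdp_xmit = 0;
            int i;

            for (i = 0; i < budget; i++)
                    xdp_xmit |= demo_rx_verdict(i); /* collect distinct bits */

            if (xdp_xmit & DEMO_XDP_REDIR)
                    demo_flush_redirects();         /* once per poll */
            if (xdp_xmit & DEMO_XDP_TX)
                    demo_kick_tx_ring();            /* once per poll */
    }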
2164     diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
2165     index d28f873169a9..3deaa3413313 100644
2166     --- a/drivers/net/ethernet/marvell/mvneta.c
2167     +++ b/drivers/net/ethernet/marvell/mvneta.c
2168     @@ -1959,7 +1959,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
2169     rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2170     index = rx_desc - rxq->descs;
2171     data = rxq->buf_virt_addr[index];
2172     - phys_addr = rx_desc->buf_phys_addr;
2173     + phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
2174    
2175     if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2176     (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2177     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2178     index 3efe45bc2471..cf94fdf25155 100644
2179     --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2180     +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2181     @@ -801,6 +801,7 @@ static void cmd_work_handler(struct work_struct *work)
2182     unsigned long flags;
2183     bool poll_cmd = ent->polling;
2184     int alloc_ret;
2185     + int cmd_mode;
2186    
2187     sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
2188     down(sem);
2189     @@ -847,6 +848,7 @@ static void cmd_work_handler(struct work_struct *work)
2190     set_signature(ent, !cmd->checksum_disabled);
2191     dump_command(dev, ent, 1);
2192     ent->ts1 = ktime_get_ns();
2193     + cmd_mode = cmd->mode;
2194    
2195     if (ent->callback)
2196     schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
2197     @@ -871,7 +873,7 @@ static void cmd_work_handler(struct work_struct *work)
2198     iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
2199     mmiowb();
2200     /* if not in polling don't use ent after this point */
2201     - if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
2202     + if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
2203     poll_timeout(ent);
2204     /* make sure we read the descriptor after ownership is SW */
2205     rmb();
2206     @@ -1272,7 +1274,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
2207     {
2208     struct mlx5_core_dev *dev = filp->private_data;
2209     struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
2210     - char outlen_str[8];
2211     + char outlen_str[8] = {0};
2212     int outlen;
2213     void *ptr;
2214     int err;
2215     @@ -1287,8 +1289,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
2216     if (copy_from_user(outlen_str, buf, count))
2217     return -EFAULT;
2218    
2219     - outlen_str[7] = 0;
2220     -
2221     err = sscanf(outlen_str, "%d", &outlen);
2222     if (err < 0)
2223     return err;
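Two independent fixes sit in this cmd.c hunk: cmd_mode snapshots the command mode before the doorbell is rung, because in events mode the completion can free ent the moment the doorbell hits; and outlen_str[8] = {0} zero-initializes the buffer so sscanf() always sees a NUL-terminated string rather than leftover stack bytes past the user's data. The string half of the pattern as a sketch (with a slightly stricter bound than the driver's own check):

    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    static ssize_t demo_write(const char __user *ubuf, size_t count)
    {
            char buf[8] = {0};      /* every byte starts out NUL */
            int val;

            if (count > sizeof(buf) - 1)    /* keep room for a terminator */
                    return -ENOSPC;
            if (copy_from_user(buf, ubuf, count))
                    return -EFAULT;
            /* terminated for any accepted count; no manual buf[7] = 0 */
            if (sscanf(buf, "%d", &val) != 1)
                    return -EINVAL;
            return count;
    }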
2224     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2225     index 337ce9423794..bf34264c734b 100644
2226     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2227     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2228     @@ -2626,7 +2626,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2229     mlx5e_activate_channels(&priv->channels);
2230     netif_tx_start_all_queues(priv->netdev);
2231    
2232     - if (MLX5_VPORT_MANAGER(priv->mdev))
2233     + if (MLX5_ESWITCH_MANAGER(priv->mdev))
2234     mlx5e_add_sqs_fwd_rules(priv);
2235    
2236     mlx5e_wait_channels_min_rx_wqes(&priv->channels);
2237     @@ -2637,7 +2637,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2238     {
2239     mlx5e_redirect_rqts_to_drop(priv);
2240    
2241     - if (MLX5_VPORT_MANAGER(priv->mdev))
2242     + if (MLX5_ESWITCH_MANAGER(priv->mdev))
2243     mlx5e_remove_sqs_fwd_rules(priv);
2244    
2245     /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
2246     @@ -4127,7 +4127,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
2247     mlx5e_set_netdev_dev_addr(netdev);
2248    
2249     #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
2250     - if (MLX5_VPORT_MANAGER(mdev))
2251     + if (MLX5_ESWITCH_MANAGER(mdev))
2252     netdev->switchdev_ops = &mlx5e_switchdev_ops;
2253     #endif
2254    
2255     @@ -4273,7 +4273,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
2256    
2257     mlx5e_enable_async_events(priv);
2258    
2259     - if (MLX5_VPORT_MANAGER(priv->mdev))
2260     + if (MLX5_ESWITCH_MANAGER(priv->mdev))
2261     mlx5e_register_vport_reps(priv);
2262    
2263     if (netdev->reg_state != NETREG_REGISTERED)
2264     @@ -4300,7 +4300,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
2265    
2266     queue_work(priv->wq, &priv->set_rx_mode_work);
2267    
2268     - if (MLX5_VPORT_MANAGER(priv->mdev))
2269     + if (MLX5_ESWITCH_MANAGER(priv->mdev))
2270     mlx5e_unregister_vport_reps(priv);
2271    
2272     mlx5e_disable_async_events(priv);
2273     @@ -4483,7 +4483,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
2274     return NULL;
2275    
2276     #ifdef CONFIG_MLX5_ESWITCH
2277     - if (MLX5_VPORT_MANAGER(mdev)) {
2278     + if (MLX5_ESWITCH_MANAGER(mdev)) {
2279     rpriv = mlx5e_alloc_nic_rep_priv(mdev);
2280     if (!rpriv) {
2281     mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
2282     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
2283     index 4727e7390834..281911698f72 100644
2284     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
2285     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
2286     @@ -710,7 +710,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
2287     struct mlx5e_rep_priv *rpriv = priv->ppriv;
2288     struct mlx5_eswitch_rep *rep;
2289    
2290     - if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
2291     + if (!MLX5_ESWITCH_MANAGER(priv->mdev))
2292     return false;
2293    
2294     rep = rpriv->rep;
2295     @@ -724,8 +724,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
2296     static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
2297     {
2298     struct mlx5e_rep_priv *rpriv = priv->ppriv;
2299     - struct mlx5_eswitch_rep *rep = rpriv->rep;
2300     + struct mlx5_eswitch_rep *rep;
2301    
2302     + if (!MLX5_ESWITCH_MANAGER(priv->mdev))
2303     + return false;
2304     +
2305     + rep = rpriv->rep;
2306     if (rep && rep->vport != FDB_UPLINK_VPORT)
2307     return true;
2308    
2309     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2310     index 82e37250ed01..667415301066 100644
2311     --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2312     +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2313     @@ -1535,7 +1535,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
2314     if (!ESW_ALLOWED(esw))
2315     return 0;
2316    
2317     - if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
2318     + if (!MLX5_ESWITCH_MANAGER(esw->dev) ||
2319     !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
2320     esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
2321     return -EOPNOTSUPP;
2322     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
2323     index 565c8b7a399a..10bf770675f3 100644
2324     --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
2325     +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
2326     @@ -39,6 +39,8 @@
2327     #include <linux/mlx5/device.h>
2328     #include "lib/mpfs.h"
2329    
2330     +#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
2331     +
2332     enum {
2333     SRIOV_NONE,
2334     SRIOV_LEGACY,
2335     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2336     index d9fd8570b07c..c699055c0ffd 100644
2337     --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2338     +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2339     @@ -912,8 +912,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
2340     if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2341     return -EOPNOTSUPP;
2342    
2343     - if (!MLX5_CAP_GEN(dev, vport_group_manager))
2344     - return -EOPNOTSUPP;
2345     + if (!MLX5_ESWITCH_MANAGER(dev))
2346     + return -EPERM;
2347    
2348     if (dev->priv.eswitch->mode == SRIOV_NONE)
2349     return -EOPNOTSUPP;
2350     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2351     index 33e5ff081e36..dd05cf148845 100644
2352     --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2353     +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2354     @@ -36,6 +36,7 @@
2355     #include "mlx5_core.h"
2356     #include "fs_core.h"
2357     #include "fs_cmd.h"
2358     +#include "eswitch.h"
2359     #include "diag/fs_tracepoint.h"
2360    
2361     #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
2362     @@ -2211,7 +2212,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
2363     goto err;
2364     }
2365    
2366     - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
2367     + if (MLX5_ESWITCH_MANAGER(dev)) {
2368     if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
2369     err = init_fdb_root_ns(steering);
2370     if (err)
2371     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
2372     index 2c71557d1cee..d69897a1e2ce 100644
2373     --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
2374     +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
2375     @@ -34,6 +34,7 @@
2376     #include <linux/mlx5/cmd.h>
2377     #include <linux/module.h>
2378     #include "mlx5_core.h"
2379     +#include "eswitch.h"
2380     #include "../../mlxfw/mlxfw.h"
2381    
2382     static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
2383     @@ -152,13 +153,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
2384     }
2385    
2386     if (MLX5_CAP_GEN(dev, vport_group_manager) &&
2387     - MLX5_CAP_GEN(dev, eswitch_flow_table)) {
2388     + MLX5_ESWITCH_MANAGER(dev)) {
2389     err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
2390     if (err)
2391     return err;
2392     }
2393    
2394     - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
2395     + if (MLX5_ESWITCH_MANAGER(dev)) {
2396     err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
2397     if (err)
2398     return err;
2399     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
2400     index 7cb67122e8b5..22811ecd8fcd 100644
2401     --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
2402     +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
2403     @@ -34,6 +34,7 @@
2404     #include <linux/mlx5/driver.h>
2405     #include <linux/mlx5/mlx5_ifc.h>
2406     #include "mlx5_core.h"
2407     +#include "eswitch.h"
2408     #include "lib/mpfs.h"
2409    
2410     /* HW L2 Table (MPFS) management */
2411     @@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
2412     int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
2413     struct mlx5_mpfs *mpfs;
2414    
2415     - if (!MLX5_VPORT_MANAGER(dev))
2416     + if (!MLX5_ESWITCH_MANAGER(dev))
2417     return 0;
2418    
2419     mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
2420     @@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
2421     {
2422     struct mlx5_mpfs *mpfs = dev->priv.mpfs;
2423    
2424     - if (!MLX5_VPORT_MANAGER(dev))
2425     + if (!MLX5_ESWITCH_MANAGER(dev))
2426     return;
2427    
2428     WARN_ON(!hlist_empty(mpfs->hash));
2429     @@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
2430     u32 index;
2431     int err;
2432    
2433     - if (!MLX5_VPORT_MANAGER(dev))
2434     + if (!MLX5_ESWITCH_MANAGER(dev))
2435     return 0;
2436    
2437     mutex_lock(&mpfs->lock);
2438     @@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
2439     int err = 0;
2440     u32 index;
2441    
2442     - if (!MLX5_VPORT_MANAGER(dev))
2443     + if (!MLX5_ESWITCH_MANAGER(dev))
2444     return 0;
2445    
2446     mutex_lock(&mpfs->lock);
2447     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
2448     index e07061f565d6..ccb6287aeeb7 100644
2449     --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
2450     +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
2451     @@ -641,7 +641,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
2452     static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
2453     int inlen)
2454     {
2455     - u32 out[MLX5_ST_SZ_DW(qtct_reg)];
2456     + u32 out[MLX5_ST_SZ_DW(qetc_reg)];
2457    
2458     if (!MLX5_CAP_GEN(mdev, ets))
2459     return -EOPNOTSUPP;
2460     @@ -653,7 +653,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
2461     static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
2462     int outlen)
2463     {
2464     - u32 in[MLX5_ST_SZ_DW(qtct_reg)];
2465     + u32 in[MLX5_ST_SZ_DW(qetc_reg)];
2466    
2467     if (!MLX5_CAP_GEN(mdev, ets))
2468     return -EOPNOTSUPP;
2469     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
2470     index 2a8b529ce6dd..a0674962f02c 100644
2471     --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
2472     +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
2473     @@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
2474     return -EBUSY;
2475     }
2476    
2477     + if (!MLX5_ESWITCH_MANAGER(dev))
2478     + goto enable_vfs_hca;
2479     +
2480     err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
2481     if (err) {
2482     mlx5_core_warn(dev,
2483     @@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
2484     return err;
2485     }
2486    
2487     +enable_vfs_hca:
2488     for (vf = 0; vf < num_vfs; vf++) {
2489     err = mlx5_core_enable_hca(dev, vf + 1);
2490     if (err) {
2491     @@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
2492     }
2493    
2494     out:
2495     - mlx5_eswitch_disable_sriov(dev->priv.eswitch);
2496     + if (MLX5_ESWITCH_MANAGER(dev))
2497     + mlx5_eswitch_disable_sriov(dev->priv.eswitch);
2498    
2499     if (mlx5_wait_for_vf_pages(dev))
2500     mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
2501     diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
2502     index 8f6ccc0c39e5..b306961b02fd 100644
2503     --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
2504     +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
2505     @@ -700,9 +700,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
2506     p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
2507    
2508     memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
2509     - ARRAY_SIZE(p_local->local_chassis_id));
2510     + sizeof(p_local->local_chassis_id));
2511     memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
2512     - ARRAY_SIZE(p_local->local_port_id));
2513     + sizeof(p_local->local_port_id));
2514     }
2515    
2516     static void
2517     @@ -714,9 +714,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
2518     p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
2519    
2520     memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
2521     - ARRAY_SIZE(p_remote->peer_chassis_id));
2522     + sizeof(p_remote->peer_chassis_id));
2523     memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
2524     - ARRAY_SIZE(p_remote->peer_port_id));
2525     + sizeof(p_remote->peer_port_id));
2526     }
2527    
2528     static int
2529     diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
2530     index 58a689fb04db..ef2374699726 100644
2531     --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
2532     +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
2533     @@ -1782,7 +1782,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
2534     DP_INFO(p_hwfn, "Failed to update driver state\n");
2535    
2536     rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
2537     - QED_OV_ESWITCH_VEB);
2538     + QED_OV_ESWITCH_NONE);
2539     if (rc)
2540     DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
2541     }
2542     diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
2543     index 27832885a87f..2c958921dfb3 100644
2544     --- a/drivers/net/ethernet/qlogic/qed/qed_main.c
2545     +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
2546     @@ -779,6 +779,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
2547     /* We want a minimum of one slowpath and one fastpath vector per hwfn */
2548     cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
2549    
2550     + if (is_kdump_kernel()) {
2551     + DP_INFO(cdev,
2552     + "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
2553     + cdev->int_params.in.min_msix_cnt);
2554     + cdev->int_params.in.num_vectors =
2555     + cdev->int_params.in.min_msix_cnt;
2556     + }
2557     +
2558     rc = qed_set_int_mode(cdev, false);
2559     if (rc) {
2560     DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
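The qed hunk applies a common kdump policy: a crash kernel boots with very little memory, so interrupt-vector requests are capped at the proven minimum instead of scaling with CPU count. In isolation (sketch, hypothetical helper):

    #include <linux/crash_dump.h>

    static int demo_pick_msix(int min_cnt, int want_cnt)
    {
            if (is_kdump_kernel())          /* booted as the crash kernel? */
                    return min_cnt;         /* smallest known-working set */
            return want_cnt;
    }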
2561     diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2562     index 3f40b1de7957..d08fe350ab6c 100644
2563     --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2564     +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2565     @@ -4396,6 +4396,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
2566     static int qed_sriov_enable(struct qed_dev *cdev, int num)
2567     {
2568     struct qed_iov_vf_init_params params;
2569     + struct qed_hwfn *hwfn;
2570     + struct qed_ptt *ptt;
2571     int i, j, rc;
2572    
2573     if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
2574     @@ -4408,8 +4410,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
2575    
2576     /* Initialize HW for VF access */
2577     for_each_hwfn(cdev, j) {
2578     - struct qed_hwfn *hwfn = &cdev->hwfns[j];
2579     - struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
2580     + hwfn = &cdev->hwfns[j];
2581     + ptt = qed_ptt_acquire(hwfn);
2582    
2583     /* Make sure not to use more than 16 queues per VF */
2584     params.num_queues = min_t(int,
2585     @@ -4445,6 +4447,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
2586     goto err;
2587     }
2588    
2589     + hwfn = QED_LEADING_HWFN(cdev);
2590     + ptt = qed_ptt_acquire(hwfn);
2591     + if (!ptt) {
2592     + DP_ERR(hwfn, "Failed to acquire ptt\n");
2593     + rc = -EBUSY;
2594     + goto err;
2595     + }
2596     +
2597     + rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
2598     + if (rc)
2599     + DP_INFO(cdev, "Failed to update eswitch mode\n");
2600     + qed_ptt_release(hwfn, ptt);
2601     +
2602     return num;
2603    
2604     err:
2605     diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
2606     index 9b2280badaf7..475f6ae5d4b3 100644
2607     --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
2608     +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
2609     @@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
2610     {
2611     struct qede_ptp *ptp = edev->ptp;
2612    
2613     - if (!ptp)
2614     - return -EIO;
2615     + if (!ptp) {
2616     + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2617     + SOF_TIMESTAMPING_RX_SOFTWARE |
2618     + SOF_TIMESTAMPING_SOFTWARE;
2619     + info->phc_index = -1;
2620     +
2621     + return 0;
2622     + }
2623    
2624     info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2625     SOF_TIMESTAMPING_RX_SOFTWARE |
2626     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2627     index 9866d2e34cdd..27f2e650e27b 100644
2628     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2629     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2630     @@ -914,6 +914,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
2631     static int stmmac_init_phy(struct net_device *dev)
2632     {
2633     struct stmmac_priv *priv = netdev_priv(dev);
2634     + u32 tx_cnt = priv->plat->tx_queues_to_use;
2635     struct phy_device *phydev;
2636     char phy_id_fmt[MII_BUS_ID_SIZE + 3];
2637     char bus_id[MII_BUS_ID_SIZE];
2638     @@ -954,6 +955,15 @@ static int stmmac_init_phy(struct net_device *dev)
2639     phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
2640     SUPPORTED_1000baseT_Full);
2641    
2642     + /*
2643     + * Half-duplex mode is not supported with multiqueue;
2644     + * half-duplex can only work with a single queue
2645     + */
2646     + if (tx_cnt > 1)
2647     + phydev->supported &= ~(SUPPORTED_1000baseT_Half |
2648     + SUPPORTED_100baseT_Half |
2649     + SUPPORTED_10baseT_Half);
2650     +
2651     /*
2652     * Broken HW is sometimes missing the pull-up resistor on the
2653     * MDIO line, which results in reads to non-existent devices returning
2654     diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
2655     index fa607d062cb3..15cd086e3f47 100644
2656     --- a/drivers/net/ethernet/sun/sungem.c
2657     +++ b/drivers/net/ethernet/sun/sungem.c
2658     @@ -59,8 +59,7 @@
2659     #include <linux/sungem_phy.h>
2660     #include "sungem.h"
2661    
2662     -/* Stripping FCS is causing problems, disabled for now */
2663     -#undef STRIP_FCS
2664     +#define STRIP_FCS
2665    
2666     #define DEFAULT_MSG (NETIF_MSG_DRV | \
2667     NETIF_MSG_PROBE | \
2668     @@ -434,7 +433,7 @@ static int gem_rxmac_reset(struct gem *gp)
2669     writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
2670     writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
2671     val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
2672     - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
2673     + (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
2674     writel(val, gp->regs + RXDMA_CFG);
2675     if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
2676     writel(((5 & RXDMA_BLANK_IPKTS) |
2677     @@ -759,7 +758,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
2678     struct net_device *dev = gp->dev;
2679     int entry, drops, work_done = 0;
2680     u32 done;
2681     - __sum16 csum;
2682    
2683     if (netif_msg_rx_status(gp))
2684     printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
2685     @@ -854,9 +852,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
2686     skb = copy_skb;
2687     }
2688    
2689     - csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
2690     - skb->csum = csum_unfold(csum);
2691     - skb->ip_summed = CHECKSUM_COMPLETE;
2692     + if (likely(dev->features & NETIF_F_RXCSUM)) {
2693     + __sum16 csum;
2694     +
2695     + csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
2696     + skb->csum = csum_unfold(csum);
2697     + skb->ip_summed = CHECKSUM_COMPLETE;
2698     + }
2699     skb->protocol = eth_type_trans(skb, gp->dev);
2700    
2701     napi_gro_receive(&gp->napi, skb);
2702     @@ -1760,7 +1762,7 @@ static void gem_init_dma(struct gem *gp)
2703     writel(0, gp->regs + TXDMA_KICK);
2704    
2705     val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
2706     - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
2707     + (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
2708     writel(val, gp->regs + RXDMA_CFG);
2709    
2710     writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
2711     @@ -2986,8 +2988,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2712     pci_set_drvdata(pdev, dev);
2713    
2714     /* We can do scatter/gather and HW checksum */
2715     - dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2716     - dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2717     + dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2718     + dev->features = dev->hw_features;
2719     if (pci_using_dac)
2720     dev->features |= NETIF_F_HIGHDMA;
2721    
2722     diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
2723     index fbc825ac97ab..cb51448389a1 100644
2724     --- a/drivers/net/geneve.c
2725     +++ b/drivers/net/geneve.c
2726     @@ -474,7 +474,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
2727     out_unlock:
2728     rcu_read_unlock();
2729     out:
2730     - NAPI_GRO_CB(skb)->flush |= flush;
2731     + skb_gro_flush_final(skb, pp, flush);
2732    
2733     return pp;
2734     }
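skb_gro_flush_final() (and the _remcsum variant used in the vxlan hunk below) funnels the end-of-handler flush update through one helper instead of open-coding it in every GRO receive function. At this point upstream the helper is, to the best of my reading, essentially the following -- an assumed shape, worth checking against include/linux/netdevice.h in this tree:

    static inline void skb_gro_flush_final(struct sk_buff *skb,
                                           struct sk_buff **pp, int flush)
    {
            NAPI_GRO_CB(skb)->flush |= flush;
    }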
2735     diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
2736     index 01017dd88802..cb250cacf721 100644
2737     --- a/drivers/net/hyperv/hyperv_net.h
2738     +++ b/drivers/net/hyperv/hyperv_net.h
2739     @@ -207,7 +207,7 @@ int netvsc_recv_callback(struct net_device *net,
2740     void netvsc_channel_cb(void *context);
2741     int netvsc_poll(struct napi_struct *napi, int budget);
2742    
2743     -void rndis_set_subchannel(struct work_struct *w);
2744     +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
2745     int rndis_filter_open(struct netvsc_device *nvdev);
2746     int rndis_filter_close(struct netvsc_device *nvdev);
2747     struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
2748     diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
2749     index 4647ecbe6f36..701be5d81062 100644
2750     --- a/drivers/net/hyperv/netvsc.c
2751     +++ b/drivers/net/hyperv/netvsc.c
2752     @@ -62,6 +62,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
2753     VM_PKT_DATA_INBAND, 0);
2754     }
2755    
2756     +/* Worker to set up sub channels on initial setup
2757     + * Initial hotplug event occurs in softirq context
2758     + * and can't wait for channels.
2759     + */
2760     +static void netvsc_subchan_work(struct work_struct *w)
2761     +{
2762     + struct netvsc_device *nvdev =
2763     + container_of(w, struct netvsc_device, subchan_work);
2764     + struct rndis_device *rdev;
2765     + int i, ret;
2766     +
2767     + /* Avoid deadlock with device removal already under RTNL */
2768     + if (!rtnl_trylock()) {
2769     + schedule_work(w);
2770     + return;
2771     + }
2772     +
2773     + rdev = nvdev->extension;
2774     + if (rdev) {
2775     + ret = rndis_set_subchannel(rdev->ndev, nvdev);
2776     + if (ret == 0) {
2777     + netif_device_attach(rdev->ndev);
2778     + } else {
2779     + /* fallback to only primary channel */
2780     + for (i = 1; i < nvdev->num_chn; i++)
2781     + netif_napi_del(&nvdev->chan_table[i].napi);
2782     +
2783     + nvdev->max_chn = 1;
2784     + nvdev->num_chn = 1;
2785     + }
2786     + }
2787     +
2788     + rtnl_unlock();
2789     +}
2790     +
2791     static struct netvsc_device *alloc_net_device(void)
2792     {
2793     struct netvsc_device *net_device;
2794     @@ -78,7 +113,7 @@ static struct netvsc_device *alloc_net_device(void)
2795    
2796     init_completion(&net_device->channel_init_wait);
2797     init_waitqueue_head(&net_device->subchan_open);
2798     - INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
2799     + INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
2800    
2801     return net_device;
2802     }
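netvsc_subchan_work() above shows the standard escape from a work-vs-removal deadlock: device removal holds the RTNL and flushes this work item, so the work must rtnl_trylock() and requeue itself rather than block. The idiom in isolation (sketch):

    #include <linux/rtnetlink.h>
    #include <linux/workqueue.h>

    static void demo_work_fn(struct work_struct *w)
    {
            if (!rtnl_trylock()) {
                    schedule_work(w);       /* retry later; blocking here
                                             * could deadlock against an
                                             * RTNL-holding flush */
                    return;
            }

            /* ... RTNL-protected setup goes here ... */

            rtnl_unlock();
    }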
2803     diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
2804     index 6890478a0851..aeabeb107fed 100644
2805     --- a/drivers/net/hyperv/netvsc_drv.c
2806     +++ b/drivers/net/hyperv/netvsc_drv.c
2807     @@ -911,8 +911,20 @@ static int netvsc_attach(struct net_device *ndev,
2808     if (IS_ERR(nvdev))
2809     return PTR_ERR(nvdev);
2810    
2811     - /* Note: enable and attach happen when sub-channels setup */
2812     + if (nvdev->num_chn > 1) {
2813     + ret = rndis_set_subchannel(ndev, nvdev);
2814     +
2815     + /* if unavailable, just proceed with one queue */
2816     + if (ret) {
2817     + nvdev->max_chn = 1;
2818     + nvdev->num_chn = 1;
2819     + }
2820     + }
2821     +
2822     + /* In any case device is now ready */
2823     + netif_device_attach(ndev);
2824    
2825     + /* Note: enable and attach happen when sub-channels setup */
2826     netif_carrier_off(ndev);
2827    
2828     if (netif_running(ndev)) {
2829     @@ -2035,6 +2047,9 @@ static int netvsc_probe(struct hv_device *dev,
2830    
2831     memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
2832    
2833     + if (nvdev->num_chn > 1)
2834     + schedule_work(&nvdev->subchan_work);
2835     +
2836     /* hw_features computed in rndis_netdev_set_hwcaps() */
2837     net->features = net->hw_features |
2838     NETIF_F_HIGHDMA | NETIF_F_SG |
2839     diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
2840     index d1ae184008b4..cb03a6ea076a 100644
2841     --- a/drivers/net/hyperv/rndis_filter.c
2842     +++ b/drivers/net/hyperv/rndis_filter.c
2843     @@ -1055,29 +1055,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
2844     * This breaks overlap of processing the host message for the
2845     * new primary channel with the initialization of sub-channels.
2846     */
2847     -void rndis_set_subchannel(struct work_struct *w)
2848     +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
2849     {
2850     - struct netvsc_device *nvdev
2851     - = container_of(w, struct netvsc_device, subchan_work);
2852     struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
2853     - struct net_device_context *ndev_ctx;
2854     - struct rndis_device *rdev;
2855     - struct net_device *ndev;
2856     - struct hv_device *hv_dev;
2857     + struct net_device_context *ndev_ctx = netdev_priv(ndev);
2858     + struct hv_device *hv_dev = ndev_ctx->device_ctx;
2859     + struct rndis_device *rdev = nvdev->extension;
2860     int i, ret;
2861    
2862     - if (!rtnl_trylock()) {
2863     - schedule_work(w);
2864     - return;
2865     - }
2866     -
2867     - rdev = nvdev->extension;
2868     - if (!rdev)
2869     - goto unlock; /* device was removed */
2870     -
2871     - ndev = rdev->ndev;
2872     - ndev_ctx = netdev_priv(ndev);
2873     - hv_dev = ndev_ctx->device_ctx;
2874     + ASSERT_RTNL();
2875    
2876     memset(init_packet, 0, sizeof(struct nvsp_message));
2877     init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
2878     @@ -1091,13 +1077,13 @@ void rndis_set_subchannel(struct work_struct *w)
2879     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2880     if (ret) {
2881     netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
2882     - goto failed;
2883     + return ret;
2884     }
2885    
2886     wait_for_completion(&nvdev->channel_init_wait);
2887     if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
2888     netdev_err(ndev, "sub channel request failed\n");
2889     - goto failed;
2890     + return -EIO;
2891     }
2892    
2893     nvdev->num_chn = 1 +
2894     @@ -1116,21 +1102,7 @@ void rndis_set_subchannel(struct work_struct *w)
2895     for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
2896     ndev_ctx->tx_table[i] = i % nvdev->num_chn;
2897    
2898     - netif_device_attach(ndev);
2899     - rtnl_unlock();
2900     - return;
2901     -
2902     -failed:
2903     - /* fallback to only primary channel */
2904     - for (i = 1; i < nvdev->num_chn; i++)
2905     - netif_napi_del(&nvdev->chan_table[i].napi);
2906     -
2907     - nvdev->max_chn = 1;
2908     - nvdev->num_chn = 1;
2909     -
2910     - netif_device_attach(ndev);
2911     -unlock:
2912     - rtnl_unlock();
2913     + return 0;
2914     }
2915    
2916     static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
2917     @@ -1321,21 +1293,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
2918     netif_napi_add(net, &net_device->chan_table[i].napi,
2919     netvsc_poll, NAPI_POLL_WEIGHT);
2920    
2921     - if (net_device->num_chn > 1)
2922     - schedule_work(&net_device->subchan_work);
2923     + return net_device;
2924    
2925     out:
2926     - /* if unavailable, just proceed with one queue */
2927     - if (ret) {
2928     - net_device->max_chn = 1;
2929     - net_device->num_chn = 1;
2930     - }
2931     -
2932     - /* No sub channels, device is ready */
2933     - if (net_device->num_chn == 1)
2934     - netif_device_attach(net);
2935     -
2936     - return net_device;
2937     + /* setting up multiple channels failed */
2938     + net_device->max_chn = 1;
2939     + net_device->num_chn = 1;
2940    
2941     err_dev_remv:
2942     rndis_filter_device_remove(dev, net_device);
2943     diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
2944     index c74893c1e620..e7f7a1a002ee 100644
2945     --- a/drivers/net/ipvlan/ipvlan_main.c
2946     +++ b/drivers/net/ipvlan/ipvlan_main.c
2947     @@ -546,7 +546,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
2948     ipvlan->dev = dev;
2949     ipvlan->port = port;
2950     ipvlan->sfeatures = IPVLAN_FEATURES;
2951     - ipvlan_adjust_mtu(ipvlan, phy_dev);
2952     + if (!tb[IFLA_MTU])
2953     + ipvlan_adjust_mtu(ipvlan, phy_dev);
2954     INIT_LIST_HEAD(&ipvlan->addrs);
2955    
2956     /* If the port-id base is at the MAX value, then wrap it around and
2957     diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
2958     index 9881edc568ba..0aa91ab9a0fb 100644
2959     --- a/drivers/net/usb/lan78xx.c
2960     +++ b/drivers/net/usb/lan78xx.c
2961     @@ -3197,6 +3197,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2962     pkt_cnt = 0;
2963     count = 0;
2964     length = 0;
2965     + spin_lock_irqsave(&tqp->lock, flags);
2966     for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2967     if (skb_is_gso(skb)) {
2968     if (pkt_cnt) {
2969     @@ -3205,7 +3206,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2970     }
2971     count = 1;
2972     length = skb->len - TX_OVERHEAD;
2973     - skb2 = skb_dequeue(tqp);
2974     + __skb_unlink(skb, tqp);
2975     + spin_unlock_irqrestore(&tqp->lock, flags);
2976     goto gso_skb;
2977     }
2978    
2979     @@ -3214,6 +3216,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2980     skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2981     pkt_cnt++;
2982     }
2983     + spin_unlock_irqrestore(&tqp->lock, flags);
2984    
2985     /* copy to a single skb */
2986     skb = alloc_skb(skb_totallen, GFP_ATOMIC);
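
The lan78xx change closes a race in lan78xx_tx_bh(): the transmit queue was walked and entries unlinked without holding the queue's lock, while concurrent paths use the locked skb_dequeue(). The fix holds tqp->lock across the whole traversal and switches to the unlocked __skb_unlink(), which is meant to be called with that lock already held. A minimal sketch of the pattern, with hypothetical names:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Sketch: walk an skb queue and detach one element atomically.
 * Holding q->lock for the whole walk keeps another CPU from
 * dequeueing an skb we are still inspecting.
 */
static struct sk_buff *pick_first_gso(struct sk_buff_head *q)
{
	struct sk_buff *skb, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	skb_queue_walk(q, skb) {
		if (skb_is_gso(skb)) {
			__skb_unlink(skb, q);	/* caller holds q->lock */
			found = skb;
			break;
		}
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return found;
}
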
2987     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2988     index b23ee948e7c9..0db500bf86d9 100644
2989     --- a/drivers/net/usb/qmi_wwan.c
2990     +++ b/drivers/net/usb/qmi_wwan.c
2991     @@ -1245,6 +1245,7 @@ static const struct usb_device_id products[] = {
2992     {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
2993     {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
2994     {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
2995     + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */
2996     {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
2997     {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
2998     {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
2999     diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
3000     index aa88b640cb6c..0fa64cc1a011 100644
3001     --- a/drivers/net/usb/r8152.c
3002     +++ b/drivers/net/usb/r8152.c
3003     @@ -3959,7 +3959,8 @@ static int rtl8152_close(struct net_device *netdev)
3004     #ifdef CONFIG_PM_SLEEP
3005     unregister_pm_notifier(&tp->pm_notifier);
3006     #endif
3007     - napi_disable(&tp->napi);
3008     + if (!test_bit(RTL8152_UNPLUG, &tp->flags))
3009     + napi_disable(&tp->napi);
3010     clear_bit(WORK_ENABLE, &tp->flags);
3011     usb_kill_urb(tp->intr_urb);
3012     cancel_delayed_work_sync(&tp->schedule);
3013     diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
3014     index 3d9c5b35a4a7..bbdb46916dc3 100644
3015     --- a/drivers/net/vxlan.c
3016     +++ b/drivers/net/vxlan.c
3017     @@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
3018     flush = 0;
3019    
3020     out:
3021     - skb_gro_remcsum_cleanup(skb, &grc);
3022     - skb->remcsum_offload = 0;
3023     - NAPI_GRO_CB(skb)->flush |= flush;
3024     + skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
3025    
3026     return pp;
3027     }
3028     diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
3029     index 93256f8bc0b5..ec82c1c3f12e 100644
3030     --- a/drivers/net/wireless/realtek/rtlwifi/base.c
3031     +++ b/drivers/net/wireless/realtek/rtlwifi/base.c
3032     @@ -483,18 +483,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
3033    
3034     }
3035    
3036     -void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
3037     +void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
3038     {
3039     struct rtl_priv *rtlpriv = rtl_priv(hw);
3040    
3041     del_timer_sync(&rtlpriv->works.watchdog_timer);
3042    
3043     - cancel_delayed_work(&rtlpriv->works.watchdog_wq);
3044     - cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
3045     - cancel_delayed_work(&rtlpriv->works.ps_work);
3046     - cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
3047     - cancel_delayed_work(&rtlpriv->works.fwevt_wq);
3048     - cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
3049     + cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq);
3050     + if (ips_wq)
3051     + cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
3052     + else
3053     + cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
3054     + cancel_delayed_work_sync(&rtlpriv->works.ps_work);
3055     + cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq);
3056     + cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq);
3057     + cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq);
3058     }
3059     EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
3060    
3061     diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
3062     index b56d1b7f5567..cbbb5be36a09 100644
3063     --- a/drivers/net/wireless/realtek/rtlwifi/base.h
3064     +++ b/drivers/net/wireless/realtek/rtlwifi/base.h
3065     @@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
3066     void rtl_deinit_rfkill(struct ieee80211_hw *hw);
3067    
3068     void rtl_watch_dog_timer_callback(unsigned long data);
3069     -void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
3070     +void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq);
3071    
3072     bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
3073     int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
3074     diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
3075     index c53cbf3d52bd..b01123138797 100644
3076     --- a/drivers/net/wireless/realtek/rtlwifi/core.c
3077     +++ b/drivers/net/wireless/realtek/rtlwifi/core.c
3078     @@ -130,7 +130,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
3079     firmware->size);
3080     rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
3081     }
3082     - rtlpriv->rtlhal.fwsize = firmware->size;
3083     release_firmware(firmware);
3084     }
3085    
3086     @@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
3087     /* reset sec info */
3088     rtl_cam_reset_sec_info(hw);
3089    
3090     - rtl_deinit_deferred_work(hw);
3091     + rtl_deinit_deferred_work(hw, false);
3092     }
3093     rtlpriv->intf_ops->adapter_stop(hw);
3094    
3095     diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
3096     index d7331225c5f3..457a0f725c8a 100644
3097     --- a/drivers/net/wireless/realtek/rtlwifi/pci.c
3098     +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
3099     @@ -2359,7 +2359,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
3100     ieee80211_unregister_hw(hw);
3101     rtlmac->mac80211_registered = 0;
3102     } else {
3103     - rtl_deinit_deferred_work(hw);
3104     + rtl_deinit_deferred_work(hw, false);
3105     rtlpriv->intf_ops->adapter_stop(hw);
3106     }
3107     rtlpriv->cfg->ops->disable_interrupt(hw);
3108     diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
3109     index 07ee3096f50e..f6d00613c53d 100644
3110     --- a/drivers/net/wireless/realtek/rtlwifi/ps.c
3111     +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
3112     @@ -66,7 +66,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
3113     struct rtl_priv *rtlpriv = rtl_priv(hw);
3114    
3115     /*<1> Stop all timer */
3116     - rtl_deinit_deferred_work(hw);
3117     + rtl_deinit_deferred_work(hw, true);
3118    
3119     /*<2> Disable Interrupt */
3120     rtlpriv->cfg->ops->disable_interrupt(hw);
3121     @@ -287,7 +287,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
3122     struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
3123     enum rf_pwrstate rtstate;
3124    
3125     - cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
3126     + cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
3127    
3128     spin_lock(&rtlpriv->locks.ips_lock);
3129     if (ppsc->inactiveps) {
3130     diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
3131     index 5590d07d0918..820c42ff5384 100644
3132     --- a/drivers/net/wireless/realtek/rtlwifi/usb.c
3133     +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
3134     @@ -1150,7 +1150,7 @@ void rtl_usb_disconnect(struct usb_interface *intf)
3135     ieee80211_unregister_hw(hw);
3136     rtlmac->mac80211_registered = 0;
3137     } else {
3138     - rtl_deinit_deferred_work(hw);
3139     + rtl_deinit_deferred_work(hw, false);
3140     rtlpriv->intf_ops->adapter_stop(hw);
3141     }
3142     /*deinit rfkill */
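
Two things change across the rtlwifi hunks above: the cancel_delayed_work() calls become cancel_delayed_work_sync(), so each work item is guaranteed to have finished before teardown continues, and the new ips_wq flag lets the one caller that runs inside the IPS work item itself (rtl_ps_disable_nic(), reached from ips_nic_off_wq) skip the synchronous cancel of that same work. A work item must never wait synchronously on itself, or it deadlocks. A sketch of the rule, with hypothetical names:

#include <linux/workqueue.h>

/* Sketch: self-cancellation deadlock. If do_teardown() is reached
 * from within my_work's own handler, cancel_delayed_work_sync()
 * would wait forever for the handler it is running in to return,
 * so that one path must use the asynchronous cancel.
 */
static void do_teardown(struct delayed_work *my_work, bool from_my_work)
{
	if (from_my_work)
		cancel_delayed_work(my_work);	   /* no wait: we ARE the work */
	else
		cancel_delayed_work_sync(my_work); /* wait until it is idle */
}
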
3143     diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
3144     index 5596fdedbb94..ea03f1ec12a4 100644
3145     --- a/drivers/pci/dwc/pci-exynos.c
3146     +++ b/drivers/pci/dwc/pci-exynos.c
3147     @@ -695,7 +695,8 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
3148     return ret;
3149     }
3150    
3151     - if (ep->ops && ep->ops->get_clk_resources) {
3152     + if (ep->ops && ep->ops->get_clk_resources &&
3153     + ep->ops->init_clk_resources) {
3154     ret = ep->ops->get_clk_resources(ep);
3155     if (ret)
3156     return ret;
3157     diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
3158     index caea7c618207..4523d7e1bcb9 100644
3159     --- a/drivers/pci/host/pci-hyperv.c
3160     +++ b/drivers/pci/host/pci-hyperv.c
3161     @@ -1091,6 +1091,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
3162     struct pci_bus *pbus;
3163     struct pci_dev *pdev;
3164     struct cpumask *dest;
3165     + unsigned long flags;
3166     struct compose_comp_ctxt comp;
3167     struct tran_int_desc *int_desc;
3168     struct {
3169     @@ -1182,14 +1183,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
3170     * the channel callback directly when channel->target_cpu is
3171     * the current CPU. When the higher level interrupt code
3172     * calls us with interrupt enabled, let's add the
3173     - * local_bh_disable()/enable() to avoid race.
3174     + * local_irq_save()/restore() to avoid race:
3175     + * hv_pci_onchannelcallback() can also run in tasklet.
3176     */
3177     - local_bh_disable();
3178     + local_irq_save(flags);
3179    
3180     if (hbus->hdev->channel->target_cpu == smp_processor_id())
3181     hv_pci_onchannelcallback(hbus);
3182    
3183     - local_bh_enable();
3184     + local_irq_restore(flags);
3185    
3186     if (hpdev->state == hv_pcichild_ejecting) {
3187     dev_err_once(&hbus->hdev->device,
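
In the pci-hyperv hunk above, hv_compose_msi_msg() may invoke hv_pci_onchannelcallback() inline, and the same callback can also run as a tasklet on this CPU. Disabling only bottom halves would fence off the tasklet, but local_bh_enable() is not safe when the caller already runs with interrupts disabled; local_irq_save()/restore() covers both situations. A small sketch of the guard, with hypothetical names:

#include <linux/irqflags.h>

/* Sketch: guarding an inline call that a tasklet can also make.
 * local_irq_save() works whether the caller has IRQs on or off,
 * and it keeps the softirq (tasklet) path off this CPU while the
 * handler runs inline.
 */
static void poll_channel_inline(void (*handler)(void *), void *arg)
{
	unsigned long flags;

	local_irq_save(flags);
	handler(arg);		/* cannot race with the tasklet here */
	local_irq_restore(flags);
}
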
3188     diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
3189     index 00b8d4cdcac3..c01d1f3a1c7d 100644
3190     --- a/drivers/usb/host/xhci-hub.c
3191     +++ b/drivers/usb/host/xhci-hub.c
3192     @@ -366,7 +366,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
3193    
3194     slot_id = 0;
3195     for (i = 0; i < MAX_HC_SLOTS; i++) {
3196     - if (!xhci->devs[i])
3197     + if (!xhci->devs[i] || !xhci->devs[i]->udev)
3198     continue;
3199     speed = xhci->devs[i]->udev->speed;
3200     if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
3201     diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
3202     index b0d606b2d06c..6123b4dd8638 100644
3203     --- a/drivers/vhost/net.c
3204     +++ b/drivers/vhost/net.c
3205     @@ -1186,7 +1186,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
3206     if (ubufs)
3207     vhost_net_ubuf_put_wait_and_free(ubufs);
3208     err_ubufs:
3209     - sockfd_put(sock);
3210     + if (sock)
3211     + sockfd_put(sock);
3212     err_vq:
3213     mutex_unlock(&vq->mutex);
3214     err:
3215     diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
3216     index b7c816f39404..6dd63981787a 100644
3217     --- a/fs/autofs4/dev-ioctl.c
3218     +++ b/fs/autofs4/dev-ioctl.c
3219     @@ -148,6 +148,15 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param)
3220     cmd);
3221     goto out;
3222     }
3223     + } else {
3224     + unsigned int inr = _IOC_NR(cmd);
3225     +
3226     + if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD ||
3227     + inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD ||
3228     + inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) {
3229     + err = -EINVAL;
3230     + goto out;
3231     + }
3232     }
3233    
3234     err = 0;
3235     @@ -284,7 +293,8 @@ static int autofs_dev_ioctl_openmount(struct file *fp,
3236     dev_t devid;
3237     int err, fd;
3238    
3239     - /* param->path has already been checked */
3240     + /* param->path has been checked in validate_dev_ioctl() */
3241     +
3242     if (!param->openmount.devid)
3243     return -EINVAL;
3244    
3245     @@ -446,10 +456,7 @@ static int autofs_dev_ioctl_requester(struct file *fp,
3246     dev_t devid;
3247     int err = -ENOENT;
3248    
3249     - if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
3250     - err = -EINVAL;
3251     - goto out;
3252     - }
3253     + /* param->path has been checked in validate_dev_ioctl() */
3254    
3255     devid = sbi->sb->s_dev;
3256    
3257     @@ -534,10 +541,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
3258     unsigned int devid, magic;
3259     int err = -ENOENT;
3260    
3261     - if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
3262     - err = -EINVAL;
3263     - goto out;
3264     - }
3265     + /* param->path has been checked in validate_dev_ioctl() */
3266    
3267     name = param->path;
3268     type = param->ismountpoint.in.type;
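
The autofs4 hunks centralise a check: validate_dev_ioctl() now rejects, up front, the three ioctls that dereference param->path (OPENMOUNT, REQUESTER, ISMOUNTPOINT) whenever the payload carries no path, which is why the per-handler size checks above shrink to comments. A compact sketch of the idea, reusing the command macros from the hunk:

/* Sketch: centralised validation. Any command that dereferences
 * param->path must be refused before its handler ever runs if the
 * ioctl payload carried no path (size == header size).
 */
static bool cmd_needs_path(unsigned int cmd)
{
	unsigned int inr = _IOC_NR(cmd);

	return inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD ||
	       inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD ||
	       inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD;
}
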
3269     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3270     index fc4c14a72366..bf4e22df7c97 100644
3271     --- a/fs/btrfs/tree-log.c
3272     +++ b/fs/btrfs/tree-log.c
3273     @@ -4214,6 +4214,110 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
3274     return ret;
3275     }
3276    
3277     +/*
3278     + * Log all prealloc extents beyond the inode's i_size to make sure we do not
3279     + * lose them after doing a fast fsync and replaying the log. We scan the
3280     + * subvolume's root instead of iterating the inode's extent map tree because
3281     + * otherwise we can log incorrect extent items based on extent map conversion.
3282     + * That can happen due to the fact that extent maps are merged when they
3283     + * are not in the extent map tree's list of modified extents.
3284     + */
3285     +static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
3286     + struct btrfs_inode *inode,
3287     + struct btrfs_path *path)
3288     +{
3289     + struct btrfs_root *root = inode->root;
3290     + struct btrfs_key key;
3291     + const u64 i_size = i_size_read(&inode->vfs_inode);
3292     + const u64 ino = btrfs_ino(inode);
3293     + struct btrfs_path *dst_path = NULL;
3294     + u64 last_extent = (u64)-1;
3295     + int ins_nr = 0;
3296     + int start_slot;
3297     + int ret;
3298     +
3299     + if (!(inode->flags & BTRFS_INODE_PREALLOC))
3300     + return 0;
3301     +
3302     + key.objectid = ino;
3303     + key.type = BTRFS_EXTENT_DATA_KEY;
3304     + key.offset = i_size;
3305     + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3306     + if (ret < 0)
3307     + goto out;
3308     +
3309     + while (true) {
3310     + struct extent_buffer *leaf = path->nodes[0];
3311     + int slot = path->slots[0];
3312     +
3313     + if (slot >= btrfs_header_nritems(leaf)) {
3314     + if (ins_nr > 0) {
3315     + ret = copy_items(trans, inode, dst_path, path,
3316     + &last_extent, start_slot,
3317     + ins_nr, 1, 0);
3318     + if (ret < 0)
3319     + goto out;
3320     + ins_nr = 0;
3321     + }
3322     + ret = btrfs_next_leaf(root, path);
3323     + if (ret < 0)
3324     + goto out;
3325     + if (ret > 0) {
3326     + ret = 0;
3327     + break;
3328     + }
3329     + continue;
3330     + }
3331     +
3332     + btrfs_item_key_to_cpu(leaf, &key, slot);
3333     + if (key.objectid > ino)
3334     + break;
3335     + if (WARN_ON_ONCE(key.objectid < ino) ||
3336     + key.type < BTRFS_EXTENT_DATA_KEY ||
3337     + key.offset < i_size) {
3338     + path->slots[0]++;
3339     + continue;
3340     + }
3341     + if (last_extent == (u64)-1) {
3342     + last_extent = key.offset;
3343     + /*
3344     + * Avoid logging extent items logged in past fsync calls
3345     + * and leading to duplicate keys in the log tree.
3346     + */
3347     + do {
3348     + ret = btrfs_truncate_inode_items(trans,
3349     + root->log_root,
3350     + &inode->vfs_inode,
3351     + i_size,
3352     + BTRFS_EXTENT_DATA_KEY);
3353     + } while (ret == -EAGAIN);
3354     + if (ret)
3355     + goto out;
3356     + }
3357     + if (ins_nr == 0)
3358     + start_slot = slot;
3359     + ins_nr++;
3360     + path->slots[0]++;
3361     + if (!dst_path) {
3362     + dst_path = btrfs_alloc_path();
3363     + if (!dst_path) {
3364     + ret = -ENOMEM;
3365     + goto out;
3366     + }
3367     + }
3368     + }
3369     + if (ins_nr > 0) {
3370     + ret = copy_items(trans, inode, dst_path, path, &last_extent,
3371     + start_slot, ins_nr, 1, 0);
3372     + if (ret > 0)
3373     + ret = 0;
3374     + }
3375     +out:
3376     + btrfs_release_path(path);
3377     + btrfs_free_path(dst_path);
3378     + return ret;
3379     +}
3380     +
3381     static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3382     struct btrfs_root *root,
3383     struct btrfs_inode *inode,
3384     @@ -4256,6 +4360,11 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3385     if (em->generation <= test_gen)
3386     continue;
3387    
3388     + /* We log prealloc extents beyond eof later. */
3389     + if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
3390     + em->start >= i_size_read(&inode->vfs_inode))
3391     + continue;
3392     +
3393     if (em->start < logged_start)
3394     logged_start = em->start;
3395     if ((em->start + em->len - 1) > logged_end)
3396     @@ -4268,31 +4377,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3397     num++;
3398     }
3399    
3400     - /*
3401     - * Add all prealloc extents beyond the inode's i_size to make sure we
3402     - * don't lose them after doing a fast fsync and replaying the log.
3403     - */
3404     - if (inode->flags & BTRFS_INODE_PREALLOC) {
3405     - struct rb_node *node;
3406     -
3407     - for (node = rb_last(&tree->map); node; node = rb_prev(node)) {
3408     - em = rb_entry(node, struct extent_map, rb_node);
3409     - if (em->start < i_size_read(&inode->vfs_inode))
3410     - break;
3411     - if (!list_empty(&em->list))
3412     - continue;
3413     - /* Same as above loop. */
3414     - if (++num > 32768) {
3415     - list_del_init(&tree->modified_extents);
3416     - ret = -EFBIG;
3417     - goto process;
3418     - }
3419     - refcount_inc(&em->refs);
3420     - set_bit(EXTENT_FLAG_LOGGING, &em->flags);
3421     - list_add_tail(&em->list, &extents);
3422     - }
3423     - }
3424     -
3425     list_sort(NULL, &extents, extent_cmp);
3426     btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
3427     /*
3428     @@ -4337,6 +4421,9 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3429     up_write(&inode->dio_sem);
3430    
3431     btrfs_release_path(path);
3432     + if (!ret)
3433     + ret = btrfs_log_prealloc_extents(trans, inode, path);
3434     +
3435     return ret;
3436     }
3437    
3438     diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
3439     index 88a31e9340a0..d1516327b787 100644
3440     --- a/fs/ocfs2/aops.c
3441     +++ b/fs/ocfs2/aops.c
3442     @@ -134,6 +134,19 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
3443     return err;
3444     }
3445    
3446     +static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
3447     + struct buffer_head *bh_result, int create)
3448     +{
3449     + int ret = 0;
3450     + struct ocfs2_inode_info *oi = OCFS2_I(inode);
3451     +
3452     + down_read(&oi->ip_alloc_sem);
3453     + ret = ocfs2_get_block(inode, iblock, bh_result, create);
3454     + up_read(&oi->ip_alloc_sem);
3455     +
3456     + return ret;
3457     +}
3458     +
3459     int ocfs2_get_block(struct inode *inode, sector_t iblock,
3460     struct buffer_head *bh_result, int create)
3461     {
3462     @@ -2128,7 +2141,7 @@ static void ocfs2_dio_free_write_ctx(struct inode *inode,
3463     * called like this: dio->get_blocks(dio->inode, fs_startblk,
3464     * fs_count, map_bh, dio->rw == WRITE);
3465     */
3466     -static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
3467     +static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
3468     struct buffer_head *bh_result, int create)
3469     {
3470     struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3471     @@ -2154,12 +2167,9 @@ static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
3472     * while file size will be changed.
3473     */
3474     if (pos + total_len <= i_size_read(inode)) {
3475     - down_read(&oi->ip_alloc_sem);
3476     - /* This is the fast path for re-write. */
3477     - ret = ocfs2_get_block(inode, iblock, bh_result, create);
3478     -
3479     - up_read(&oi->ip_alloc_sem);
3480    
3481     + /* This is the fast path for re-write. */
3482     + ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
3483     if (buffer_mapped(bh_result) &&
3484     !buffer_new(bh_result) &&
3485     ret == 0)
3486     @@ -2424,9 +2434,9 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3487     return 0;
3488    
3489     if (iov_iter_rw(iter) == READ)
3490     - get_block = ocfs2_get_block;
3491     + get_block = ocfs2_lock_get_block;
3492     else
3493     - get_block = ocfs2_dio_get_block;
3494     + get_block = ocfs2_dio_wr_get_block;
3495    
3496     return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3497     iter, get_block,
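
The ocfs2 change introduces ocfs2_lock_get_block() so that every direct-I/O lookup, not just the rewrite fast path, calls ocfs2_get_block() with ip_alloc_sem held for read; the write-side helper is renamed to ocfs2_dio_wr_get_block to make its remaining role explicit. The shape is the usual "locked wrapper around an unlocked worker" pattern, sketched here with hypothetical names:

/* Sketch: encode the locking rule in exactly one place. Callers
 * that already hold the semaphore use the worker directly; all
 * others go through the wrapper.
 */
static int do_lookup(struct inode *inode, sector_t blk);	/* lock held */

static int do_lookup_locked(struct inode *inode, sector_t blk)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	int ret;

	down_read(&oi->ip_alloc_sem);
	ret = do_lookup(inode, blk);
	up_read(&oi->ip_alloc_sem);
	return ret;
}
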
3498     diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
3499     index b17d180bdc16..c204ac9b49e5 100644
3500     --- a/fs/ocfs2/cluster/nodemanager.c
3501     +++ b/fs/ocfs2/cluster/nodemanager.c
3502     @@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
3503     "panic", /* O2NM_FENCE_PANIC */
3504     };
3505    
3506     +static inline void o2nm_lock_subsystem(void);
3507     +static inline void o2nm_unlock_subsystem(void);
3508     +
3509     struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
3510     {
3511     struct o2nm_node *node = NULL;
3512     @@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
3513     {
3514     /* through the first node_set .parent
3515     * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
3516     - return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
3517     + if (node->nd_item.ci_parent)
3518     + return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
3519     + else
3520     + return NULL;
3521     }
3522    
3523     enum {
3524     @@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
3525     size_t count)
3526     {
3527     struct o2nm_node *node = to_o2nm_node(item);
3528     - struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
3529     + struct o2nm_cluster *cluster;
3530     unsigned long tmp;
3531     char *p = (char *)page;
3532     int ret = 0;
3533     @@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
3534     !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
3535     return -EINVAL; /* XXX */
3536    
3537     + o2nm_lock_subsystem();
3538     + cluster = to_o2nm_cluster_from_node(node);
3539     + if (!cluster) {
3540     + o2nm_unlock_subsystem();
3541     + return -EINVAL;
3542     + }
3543     +
3544     write_lock(&cluster->cl_nodes_lock);
3545     if (cluster->cl_nodes[tmp])
3546     ret = -EEXIST;
3547     @@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
3548     set_bit(tmp, cluster->cl_nodes_bitmap);
3549     }
3550     write_unlock(&cluster->cl_nodes_lock);
3551     + o2nm_unlock_subsystem();
3552     +
3553     if (ret)
3554     return ret;
3555    
3556     @@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
3557     size_t count)
3558     {
3559     struct o2nm_node *node = to_o2nm_node(item);
3560     - struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
3561     + struct o2nm_cluster *cluster;
3562     int ret, i;
3563     struct rb_node **p, *parent;
3564     unsigned int octets[4];
3565     @@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
3566     be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
3567     }
3568    
3569     + o2nm_lock_subsystem();
3570     + cluster = to_o2nm_cluster_from_node(node);
3571     + if (!cluster) {
3572     + o2nm_unlock_subsystem();
3573     + return -EINVAL;
3574     + }
3575     +
3576     ret = 0;
3577     write_lock(&cluster->cl_nodes_lock);
3578     if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
3579     @@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
3580     rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
3581     }
3582     write_unlock(&cluster->cl_nodes_lock);
3583     + o2nm_unlock_subsystem();
3584     +
3585     if (ret)
3586     return ret;
3587    
3588     @@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
3589     size_t count)
3590     {
3591     struct o2nm_node *node = to_o2nm_node(item);
3592     - struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
3593     + struct o2nm_cluster *cluster;
3594     unsigned long tmp;
3595     char *p = (char *)page;
3596     ssize_t ret;
3597     @@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
3598     !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
3599     return -EINVAL; /* XXX */
3600    
3601     + o2nm_lock_subsystem();
3602     + cluster = to_o2nm_cluster_from_node(node);
3603     + if (!cluster) {
3604     + ret = -EINVAL;
3605     + goto out;
3606     + }
3607     +
3608     /* the only failure case is trying to set a new local node
3609     * when a different one is already set */
3610     if (tmp && tmp == cluster->cl_has_local &&
3611     - cluster->cl_local_node != node->nd_num)
3612     - return -EBUSY;
3613     + cluster->cl_local_node != node->nd_num) {
3614     + ret = -EBUSY;
3615     + goto out;
3616     + }
3617    
3618     /* bring up the rx thread if we're setting the new local node. */
3619     if (tmp && !cluster->cl_has_local) {
3620     ret = o2net_start_listening(node);
3621     if (ret)
3622     - return ret;
3623     + goto out;
3624     }
3625    
3626     if (!tmp && cluster->cl_has_local &&
3627     @@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
3628     cluster->cl_local_node = node->nd_num;
3629     }
3630    
3631     - return count;
3632     + ret = count;
3633     +
3634     +out:
3635     + o2nm_unlock_subsystem();
3636     + return ret;
3637     }
3638    
3639     CONFIGFS_ATTR(o2nm_node_, num);
3640     @@ -738,6 +775,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = {
3641     },
3642     };
3643    
3644     +static inline void o2nm_lock_subsystem(void)
3645     +{
3646     + mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
3647     +}
3648     +
3649     +static inline void o2nm_unlock_subsystem(void)
3650     +{
3651     + mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
3652     +}
3653     +
3654     int o2nm_depend_item(struct config_item *item)
3655     {
3656     return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
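
In the nodemanager hunks, to_o2nm_cluster_from_node() walks node->nd_item.ci_parent, which configfs can tear down concurrently: an rmdir can leave ci_parent NULL in the middle of a store. The fix holds the configfs subsystem mutex (su_mutex), the same lock configfs takes for mkdir/rmdir, across the parent walk and the update, and bails out with -EINVAL if the node has already been unparented. A sketch of the pattern:

/* Sketch: pin a configfs item's parentage while using it. While
 * su_mutex is held, mkdir/rmdir cannot change ci_parent under us.
 */
static ssize_t guarded_store(struct config_item *item)
{
	ssize_t ret = -EINVAL;

	mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
	if (item->ci_parent) {
		/* parent chain is stable here; perform the update */
		ret = 0;
	}
	mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
	return ret;
}
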
3657     diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
3658     index 64f49cafbc5b..cfb0c9ac2de4 100644
3659     --- a/fs/reiserfs/prints.c
3660     +++ b/fs/reiserfs/prints.c
3661     @@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key)
3662     }
3663    
3664     /* %k */
3665     -static void sprintf_le_key(char *buf, struct reiserfs_key *key)
3666     +static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key)
3667     {
3668     if (key)
3669     - sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
3670     - le32_to_cpu(key->k_objectid), le_offset(key),
3671     - le_type(key));
3672     + return scnprintf(buf, size, "[%d %d %s %s]",
3673     + le32_to_cpu(key->k_dir_id),
3674     + le32_to_cpu(key->k_objectid), le_offset(key),
3675     + le_type(key));
3676     else
3677     - sprintf(buf, "[NULL]");
3678     + return scnprintf(buf, size, "[NULL]");
3679     }
3680    
3681     /* %K */
3682     -static void sprintf_cpu_key(char *buf, struct cpu_key *key)
3683     +static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key)
3684     {
3685     if (key)
3686     - sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
3687     - key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
3688     - cpu_type(key));
3689     + return scnprintf(buf, size, "[%d %d %s %s]",
3690     + key->on_disk_key.k_dir_id,
3691     + key->on_disk_key.k_objectid,
3692     + reiserfs_cpu_offset(key), cpu_type(key));
3693     else
3694     - sprintf(buf, "[NULL]");
3695     + return scnprintf(buf, size, "[NULL]");
3696     }
3697    
3698     -static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
3699     +static int scnprintf_de_head(char *buf, size_t size,
3700     + struct reiserfs_de_head *deh)
3701     {
3702     if (deh)
3703     - sprintf(buf,
3704     - "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
3705     - deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
3706     - deh_location(deh), deh_state(deh));
3707     + return scnprintf(buf, size,
3708     + "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
3709     + deh_offset(deh), deh_dir_id(deh),
3710     + deh_objectid(deh), deh_location(deh),
3711     + deh_state(deh));
3712     else
3713     - sprintf(buf, "[NULL]");
3714     + return scnprintf(buf, size, "[NULL]");
3715    
3716     }
3717    
3718     -static void sprintf_item_head(char *buf, struct item_head *ih)
3719     +static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih)
3720     {
3721     if (ih) {
3722     - strcpy(buf,
3723     - (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
3724     - sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
3725     - sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
3726     - "free_space(entry_count) %d",
3727     - ih_item_len(ih), ih_location(ih), ih_free_space(ih));
3728     + char *p = buf;
3729     + char * const end = buf + size;
3730     +
3731     + p += scnprintf(p, end - p, "%s",
3732     + (ih_version(ih) == KEY_FORMAT_3_6) ?
3733     + "*3.6* " : "*3.5*");
3734     +
3735     + p += scnprintf_le_key(p, end - p, &ih->ih_key);
3736     +
3737     + p += scnprintf(p, end - p,
3738     + ", item_len %d, item_location %d, free_space(entry_count) %d",
3739     + ih_item_len(ih), ih_location(ih),
3740     + ih_free_space(ih));
3741     + return p - buf;
3742     } else
3743     - sprintf(buf, "[NULL]");
3744     + return scnprintf(buf, size, "[NULL]");
3745     }
3746    
3747     -static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
3748     +static int scnprintf_direntry(char *buf, size_t size,
3749     + struct reiserfs_dir_entry *de)
3750     {
3751     char name[20];
3752    
3753     memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
3754     name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
3755     - sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
3756     + return scnprintf(buf, size, "\"%s\"==>[%d %d]",
3757     + name, de->de_dir_id, de->de_objectid);
3758     }
3759    
3760     -static void sprintf_block_head(char *buf, struct buffer_head *bh)
3761     +static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh)
3762     {
3763     - sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
3764     - B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
3765     + return scnprintf(buf, size,
3766     + "level=%d, nr_items=%d, free_space=%d rdkey ",
3767     + B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
3768     }
3769    
3770     -static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
3771     +static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
3772     {
3773     - sprintf(buf,
3774     - "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
3775     - bh->b_bdev, bh->b_size,
3776     - (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
3777     - bh->b_state, bh->b_page,
3778     - buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
3779     - buffer_dirty(bh) ? "DIRTY" : "CLEAN",
3780     - buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
3781     + return scnprintf(buf, size,
3782     + "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
3783     + bh->b_bdev, bh->b_size,
3784     + (unsigned long long)bh->b_blocknr,
3785     + atomic_read(&(bh->b_count)),
3786     + bh->b_state, bh->b_page,
3787     + buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
3788     + buffer_dirty(bh) ? "DIRTY" : "CLEAN",
3789     + buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
3790     }
3791    
3792     -static void sprintf_disk_child(char *buf, struct disk_child *dc)
3793     +static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc)
3794     {
3795     - sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
3796     - dc_size(dc));
3797     + return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]",
3798     + dc_block_number(dc), dc_size(dc));
3799     }
3800    
3801     static char *is_there_reiserfs_struct(char *fmt, int *what)
3802     @@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args)
3803     char *fmt1 = fmt_buf;
3804     char *k;
3805     char *p = error_buf;
3806     + char * const end = &error_buf[sizeof(error_buf)];
3807     int what;
3808    
3809     spin_lock(&error_lock);
3810    
3811     - strcpy(fmt1, fmt);
3812     + if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) {
3813     + strscpy(error_buf, "format string too long", end - error_buf);
3814     + goto out_unlock;
3815     + }
3816    
3817     while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
3818     *k = 0;
3819    
3820     - p += vsprintf(p, fmt1, args);
3821     + p += vscnprintf(p, end - p, fmt1, args);
3822    
3823     switch (what) {
3824     case 'k':
3825     - sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
3826     + p += scnprintf_le_key(p, end - p,
3827     + va_arg(args, struct reiserfs_key *));
3828     break;
3829     case 'K':
3830     - sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
3831     + p += scnprintf_cpu_key(p, end - p,
3832     + va_arg(args, struct cpu_key *));
3833     break;
3834     case 'h':
3835     - sprintf_item_head(p, va_arg(args, struct item_head *));
3836     + p += scnprintf_item_head(p, end - p,
3837     + va_arg(args, struct item_head *));
3838     break;
3839     case 't':
3840     - sprintf_direntry(p,
3841     - va_arg(args,
3842     - struct reiserfs_dir_entry *));
3843     + p += scnprintf_direntry(p, end - p,
3844     + va_arg(args, struct reiserfs_dir_entry *));
3845     break;
3846     case 'y':
3847     - sprintf_disk_child(p,
3848     - va_arg(args, struct disk_child *));
3849     + p += scnprintf_disk_child(p, end - p,
3850     + va_arg(args, struct disk_child *));
3851     break;
3852     case 'z':
3853     - sprintf_block_head(p,
3854     - va_arg(args, struct buffer_head *));
3855     + p += scnprintf_block_head(p, end - p,
3856     + va_arg(args, struct buffer_head *));
3857     break;
3858     case 'b':
3859     - sprintf_buffer_head(p,
3860     - va_arg(args, struct buffer_head *));
3861     + p += scnprintf_buffer_head(p, end - p,
3862     + va_arg(args, struct buffer_head *));
3863     break;
3864     case 'a':
3865     - sprintf_de_head(p,
3866     - va_arg(args,
3867     - struct reiserfs_de_head *));
3868     + p += scnprintf_de_head(p, end - p,
3869     + va_arg(args, struct reiserfs_de_head *));
3870     break;
3871     }
3872    
3873     - p += strlen(p);
3874     fmt1 = k + 2;
3875     }
3876     - vsprintf(p, fmt1, args);
3877     + p += vscnprintf(p, end - p, fmt1, args);
3878     +out_unlock:
3879     spin_unlock(&error_lock);
3880    
3881     }
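
The reiserfs conversion above replaces every sprintf()/vsprintf() into the fixed-size error_buf with scnprintf()/vscnprintf(). scnprintf() returns the number of characters actually written (excluding the trailing NUL) and never writes more than size bytes, so the classic "advance a cursor through a buffer" idiom stays in bounds even when the formatted output would overflow: end - p simply reaches 0 and further calls write nothing. A minimal sketch of the idiom:

#include <linux/kernel.h>

/* Sketch: bounded append into a fixed buffer. Unlike snprintf(),
 * which returns the length that WOULD have been written, the
 * scnprintf() return value can never advance p past end.
 */
static void build_message(char *buf, size_t size, int a, int b)
{
	char *p = buf;
	char * const end = buf + size;

	p += scnprintf(p, end - p, "a=%d", a);
	p += scnprintf(p, end - p, ", b=%d", b);
	p += scnprintf(p, end - p, ", done");
	/* buf is always NUL-terminated and p <= end - 1 */
}
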
3882     diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
3883     index a031897fca76..ca1d2cc2cdfa 100644
3884     --- a/include/linux/arm-smccc.h
3885     +++ b/include/linux/arm-smccc.h
3886     @@ -80,6 +80,11 @@
3887     ARM_SMCCC_SMC_32, \
3888     0, 0x8000)
3889    
3890     +#define ARM_SMCCC_ARCH_WORKAROUND_2 \
3891     + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
3892     + ARM_SMCCC_SMC_32, \
3893     + 0, 0x7fff)
3894     +
3895     #ifndef __ASSEMBLY__
3896    
3897     #include <linux/linkage.h>
3898     @@ -291,5 +296,10 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
3899     */
3900     #define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
3901    
3902     +/* Return codes defined in ARM DEN 0070A */
3903     +#define SMCCC_RET_SUCCESS 0
3904     +#define SMCCC_RET_NOT_SUPPORTED -1
3905     +#define SMCCC_RET_NOT_REQUIRED -2
3906     +
3907     #endif /*__ASSEMBLY__*/
3908     #endif /*__LINUX_ARM_SMCCC_H*/
3909     diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
3910     index 0c27515d2cf6..8124815eb121 100644
3911     --- a/include/linux/atmdev.h
3912     +++ b/include/linux/atmdev.h
3913     @@ -214,6 +214,7 @@ struct atmphy_ops {
3914     struct atm_skb_data {
3915     struct atm_vcc *vcc; /* ATM VCC */
3916     unsigned long atm_options; /* ATM layer options */
3917     + unsigned int acct_truesize; /* truesize accounted to vcc */
3918     };
3919    
3920     #define VCC_HTABLE_SIZE 32
3921     @@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
3922    
3923     void atm_dev_release_vccs(struct atm_dev *dev);
3924    
3925     +static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
3926     +{
3927     + /*
3928     + * Because ATM skbs may not belong to a sock (and we don't
3929     + * necessarily want to), skb->truesize may be adjusted,
3930     + * escaping the hack in pskb_expand_head() which avoids
3931     + * doing so for some cases. So stash the value of truesize
3932     + * at the time we accounted it, and atm_pop_raw() can use
3933     + * that value later, in case it changes.
3934     + */
3935     + refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
3936     + ATM_SKB(skb)->acct_truesize = skb->truesize;
3937     + ATM_SKB(skb)->atm_options = vcc->atm_options;
3938     +}
3939    
3940     static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
3941     {
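
atm_account_tx() above centralises the two-step bookkeeping every ATM transmit path used to repeat by hand (charge skb->truesize to the vcc's socket, copy the vcc options) and additionally stashes the charged amount in the skb's control block. The pop side, in the net/atm/raw.c hunk later in this patch, then subtracts acct_truesize rather than the current skb->truesize, so accounting stays balanced even if truesize changes in between. A sketch of the balanced pair, with hypothetical names:

/* Sketch: charge and uncharge must use the same number. Recording
 * the charged amount at charge time makes the pair immune to later
 * adjustments of skb->truesize (e.g. by pskb_expand_head()).
 */
static void charge(struct atm_vcc *vcc, struct sk_buff *skb)
{
	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	ATM_SKB(skb)->acct_truesize = skb->truesize;	/* remember it */
}

static void uncharge(struct atm_vcc *vcc, struct sk_buff *skb)
{
	/* NOT skb->truesize: it may have changed since charge() */
	WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize,
				      &sk_atm(vcc)->sk_wmem_alloc));
}
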
3942     diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
3943     index eac387a3bfef..3c1beffc861a 100644
3944     --- a/include/linux/backing-dev-defs.h
3945     +++ b/include/linux/backing-dev-defs.h
3946     @@ -22,7 +22,6 @@ struct dentry;
3947     */
3948     enum wb_state {
3949     WB_registered, /* bdi_register() was done */
3950     - WB_shutting_down, /* wb_shutdown() in progress */
3951     WB_writeback_running, /* Writeback is in progress */
3952     WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
3953     };
3954     @@ -165,6 +164,7 @@ struct backing_dev_info {
3955     #ifdef CONFIG_CGROUP_WRITEBACK
3956     struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
3957     struct rb_root cgwb_congested_tree; /* their congested states */
3958     + struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
3959     #else
3960     struct bdi_writeback_congested *wb_congested;
3961     #endif
3962     diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
3963     index f43113b8890b..c11032b06d68 100644
3964     --- a/include/linux/compiler-gcc.h
3965     +++ b/include/linux/compiler-gcc.h
3966     @@ -65,6 +65,18 @@
3967     #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
3968     #endif
3969    
3970     +/*
3971     + * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
3972     + * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
3973     + * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
3974     + * defined so the gnu89 semantics are the default.
3975     + */
3976     +#ifdef __GNUC_STDC_INLINE__
3977     +# define __gnu_inline __attribute__((gnu_inline))
3978     +#else
3979     +# define __gnu_inline
3980     +#endif
3981     +
3982     /*
3983     * Force always-inline if the user requests it so via the .config,
3984     * or if gcc is too old.
3985     @@ -72,19 +84,22 @@
3986     * -Wunused-function. This turns out to avoid the need for complex #ifdef
3987     * directives. Suppress the warning in clang as well by using "unused"
3988     * function attribute, which is redundant but not harmful for gcc.
3989     + * Prefer gnu_inline, so that extern inline functions do not emit an
3990     + * externally visible function. This makes extern inline behave as per gnu89
3991     + * semantics rather than c99. This prevents multiple symbol definition errors
3992     + * of extern inline functions at link time.
3993     + * A lot of inline functions can cause havoc with function tracing.
3994     */
3995     #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
3996     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
3997     -#define inline inline __attribute__((always_inline,unused)) notrace
3998     -#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
3999     -#define __inline __inline __attribute__((always_inline,unused)) notrace
4000     +#define inline \
4001     + inline __attribute__((always_inline, unused)) notrace __gnu_inline
4002     #else
4003     -/* A lot of inline functions can cause havoc with function tracing */
4004     -#define inline inline __attribute__((unused)) notrace
4005     -#define __inline__ __inline__ __attribute__((unused)) notrace
4006     -#define __inline __inline __attribute__((unused)) notrace
4007     +#define inline inline __attribute__((unused)) notrace __gnu_inline
4008     #endif
4009    
4010     +#define __inline__ inline
4011     +#define __inline inline
4012     #define __always_inline inline __attribute__((always_inline))
4013     #define noinline __attribute__((noinline))
4014    
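
Under C99 semantics (what gcc uses when __GNUC_STDC_INLINE__ is defined), extern inline emits an externally visible definition in every translation unit containing it, which links fine once but fails with "multiple definition" errors as soon as two objects carry the same extern inline function. The gnu_inline attribute restores the gnu89 meaning, where extern inline provides an inline-only definition and never emits a standalone symbol. A two-file sketch of the failure mode this avoids; this is illustration, not kernel code:

/* shared.h -- sketch. Without __attribute__((gnu_inline)),
 * including this in two C99 translation units produces two
 * external definitions of twice() and the link fails; with it,
 * neither TU emits a symbol for twice().
 */
extern inline __attribute__((gnu_inline)) int twice(int x)
{
	return 2 * x;
}
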
4015     diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
4016     index f3765155fa4d..1d793d86d55f 100644
4017     --- a/include/linux/mlx5/mlx5_ifc.h
4018     +++ b/include/linux/mlx5/mlx5_ifc.h
4019     @@ -857,7 +857,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
4020     u8 reserved_at_1a4[0x1];
4021     u8 ets[0x1];
4022     u8 nic_flow_table[0x1];
4023     - u8 eswitch_flow_table[0x1];
4024     + u8 eswitch_manager[0x1];
4025     u8 early_vf_enable[0x1];
4026     u8 mcam_reg[0x1];
4027     u8 pcam_reg[0x1];
4028     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
4029     index 46bf7cc7d5d5..2ea7ee1fb495 100644
4030     --- a/include/linux/netdevice.h
4031     +++ b/include/linux/netdevice.h
4032     @@ -2668,11 +2668,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp,
4033     if (PTR_ERR(pp) != -EINPROGRESS)
4034     NAPI_GRO_CB(skb)->flush |= flush;
4035     }
4036     +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
4037     + struct sk_buff **pp,
4038     + int flush,
4039     + struct gro_remcsum *grc)
4040     +{
4041     + if (PTR_ERR(pp) != -EINPROGRESS) {
4042     + NAPI_GRO_CB(skb)->flush |= flush;
4043     + skb_gro_remcsum_cleanup(skb, grc);
4044     + skb->remcsum_offload = 0;
4045     + }
4046     +}
4047     #else
4048     static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
4049     {
4050     NAPI_GRO_CB(skb)->flush |= flush;
4051     }
4052     +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
4053     + struct sk_buff **pp,
4054     + int flush,
4055     + struct gro_remcsum *grc)
4056     +{
4057     + NAPI_GRO_CB(skb)->flush |= flush;
4058     + skb_gro_remcsum_cleanup(skb, grc);
4059     + skb->remcsum_offload = 0;
4060     +}
4061     #endif
4062    
4063     static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
4064     diff --git a/include/linux/string.h b/include/linux/string.h
4065     index cfd83eb2f926..96115bf561b4 100644
4066     --- a/include/linux/string.h
4067     +++ b/include/linux/string.h
4068     @@ -28,7 +28,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
4069     size_t strlcpy(char *, const char *, size_t);
4070     #endif
4071     #ifndef __HAVE_ARCH_STRSCPY
4072     -ssize_t __must_check strscpy(char *, const char *, size_t);
4073     +ssize_t strscpy(char *, const char *, size_t);
4074     #endif
4075     #ifndef __HAVE_ARCH_STRCAT
4076     extern char * strcat(char *, const char *);
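
strscpy() copies like strlcpy() but always NUL-terminates the destination and returns either the number of characters copied or -E2BIG on truncation. Dropping __must_check lets callers that genuinely do not care about truncation ignore the result without a warning, while callers such as prepare_error_buf() in the reiserfs hunk above can still test it explicitly. A sketch of the two call styles this change permits:

/* Sketch: both styles are legitimate after this change. */
static void copy_checked(const char *src)
{
	char name[16];

	if (strscpy(name, src, sizeof(name)) < 0)	/* -E2BIG */
		pr_warn("truncated, but name is still NUL-terminated\n");
}

static void copy_unchecked(const char *src)
{
	char name[16];

	strscpy(name, src, sizeof(name));	/* no __must_check warning */
}
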
4077     diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
4078     index 03918a19cf2d..3b71d859ee38 100644
4079     --- a/kernel/time/clocksource.c
4080     +++ b/kernel/time/clocksource.c
4081     @@ -322,6 +322,8 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
4082     {
4083     unsigned long flags;
4084    
4085     + INIT_LIST_HEAD(&cs->wd_list);
4086     +
4087     spin_lock_irqsave(&watchdog_lock, flags);
4088     if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
4089     /* cs is a clocksource to be watched. */
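
Initialising cs->wd_list before taking watchdog_lock makes the list head valid even when the CLOCK_SOURCE_MUST_VERIFY branch never enqueues the clocksource, so the dequeue path can operate on it unconditionally. An uninitialised list_head holds garbage pointers, and even list_del_init() on it is undefined; on an initialised but never-enqueued head it is a harmless self-unlink. A small sketch:

#include <linux/list.h>

/* Sketch: always initialise, enqueue conditionally. */
struct item {
	struct list_head node;
};

static void register_item(struct item *it, struct list_head *lst, bool watch)
{
	INIT_LIST_HEAD(&it->node);	/* valid even if never enqueued */
	if (watch)
		list_add(&it->node, lst);
}

static void unregister_item(struct item *it)
{
	list_del_init(&it->node);	/* safe in both cases now */
}
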
4090     diff --git a/mm/backing-dev.c b/mm/backing-dev.c
4091     index 6774e0369ebe..9386c98dac12 100644
4092     --- a/mm/backing-dev.c
4093     +++ b/mm/backing-dev.c
4094     @@ -356,15 +356,8 @@ static void wb_shutdown(struct bdi_writeback *wb)
4095     spin_lock_bh(&wb->work_lock);
4096     if (!test_and_clear_bit(WB_registered, &wb->state)) {
4097     spin_unlock_bh(&wb->work_lock);
4098     - /*
4099     - * Wait for wb shutdown to finish if someone else is just
4100     - * running wb_shutdown(). Otherwise we could proceed to wb /
4101     - * bdi destruction before wb_shutdown() is finished.
4102     - */
4103     - wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
4104     return;
4105     }
4106     - set_bit(WB_shutting_down, &wb->state);
4107     spin_unlock_bh(&wb->work_lock);
4108    
4109     cgwb_remove_from_bdi_list(wb);
4110     @@ -376,12 +369,6 @@ static void wb_shutdown(struct bdi_writeback *wb)
4111     mod_delayed_work(bdi_wq, &wb->dwork, 0);
4112     flush_delayed_work(&wb->dwork);
4113     WARN_ON(!list_empty(&wb->work_list));
4114     - /*
4115     - * Make sure bit gets cleared after shutdown is finished. Matches with
4116     - * the barrier provided by test_and_clear_bit() above.
4117     - */
4118     - smp_wmb();
4119     - clear_and_wake_up_bit(WB_shutting_down, &wb->state);
4120     }
4121    
4122     static void wb_exit(struct bdi_writeback *wb)
4123     @@ -505,10 +492,12 @@ static void cgwb_release_workfn(struct work_struct *work)
4124     struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
4125     release_work);
4126    
4127     + mutex_lock(&wb->bdi->cgwb_release_mutex);
4128     wb_shutdown(wb);
4129    
4130     css_put(wb->memcg_css);
4131     css_put(wb->blkcg_css);
4132     + mutex_unlock(&wb->bdi->cgwb_release_mutex);
4133    
4134     fprop_local_destroy_percpu(&wb->memcg_completions);
4135     percpu_ref_exit(&wb->refcnt);
4136     @@ -694,6 +683,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
4137    
4138     INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
4139     bdi->cgwb_congested_tree = RB_ROOT;
4140     + mutex_init(&bdi->cgwb_release_mutex);
4141    
4142     ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
4143     if (!ret) {
4144     @@ -714,7 +704,10 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
4145     spin_lock_irq(&cgwb_lock);
4146     radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
4147     cgwb_kill(*slot);
4148     + spin_unlock_irq(&cgwb_lock);
4149    
4150     + mutex_lock(&bdi->cgwb_release_mutex);
4151     + spin_lock_irq(&cgwb_lock);
4152     while (!list_empty(&bdi->wb_list)) {
4153     wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
4154     bdi_node);
4155     @@ -723,6 +716,7 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
4156     spin_lock_irq(&cgwb_lock);
4157     }
4158     spin_unlock_irq(&cgwb_lock);
4159     + mutex_unlock(&bdi->cgwb_release_mutex);
4160     }
4161    
4162     /**
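
The mm/backing-dev.c hunks replace the WB_shutting_down bit and its wait_on_bit() handshake with a per-bdi mutex: cgwb_release_workfn() holds cgwb_release_mutex across wb_shutdown() and the css puts, and cgwb_bdi_unregister() takes the same mutex after killing all cgwbs, so unregistration cannot race past a release worker that is still mid-shutdown. A skeletal sketch of the ordering the mutex enforces, with simplified hypothetical wrappers:

/* Sketch: serialise the two shutdown paths on one mutex. */
static void release_workfn_sketch(struct backing_dev_info *bdi,
				  struct bdi_writeback *wb)
{
	mutex_lock(&bdi->cgwb_release_mutex);
	wb_shutdown(wb);	/* fully finished before unlock */
	mutex_unlock(&bdi->cgwb_release_mutex);
}

static void unregister_sketch(struct backing_dev_info *bdi)
{
	/* ... kill all cgwbs under cgwb_lock ... */
	mutex_lock(&bdi->cgwb_release_mutex);	/* waits for workers */
	/* ... shut down whatever is still on bdi->wb_list ... */
	mutex_unlock(&bdi->cgwb_release_mutex);
}
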
4163     diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
4164     index cf2e70003a53..cf82d970b0e4 100644
4165     --- a/net/8021q/vlan.c
4166     +++ b/net/8021q/vlan.c
4167     @@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
4168     out_unlock:
4169     rcu_read_unlock();
4170     out:
4171     - NAPI_GRO_CB(skb)->flush |= flush;
4172     + skb_gro_flush_final(skb, pp, flush);
4173    
4174     return pp;
4175     }
4176     diff --git a/net/atm/br2684.c b/net/atm/br2684.c
4177     index 4e111196f902..bc21f8e8daf2 100644
4178     --- a/net/atm/br2684.c
4179     +++ b/net/atm/br2684.c
4180     @@ -252,8 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
4181    
4182     ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
4183     pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
4184     - refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
4185     - ATM_SKB(skb)->atm_options = atmvcc->atm_options;
4186     + atm_account_tx(atmvcc, skb);
4187     dev->stats.tx_packets++;
4188     dev->stats.tx_bytes += skb->len;
4189    
4190     diff --git a/net/atm/clip.c b/net/atm/clip.c
4191     index 65f706e4344c..60920a42f640 100644
4192     --- a/net/atm/clip.c
4193     +++ b/net/atm/clip.c
4194     @@ -381,8 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
4195     memcpy(here, llc_oui, sizeof(llc_oui));
4196     ((__be16 *) here)[3] = skb->protocol;
4197     }
4198     - refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
4199     - ATM_SKB(skb)->atm_options = vcc->atm_options;
4200     + atm_account_tx(vcc, skb);
4201     entry->vccs->last_use = jiffies;
4202     pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
4203     old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
4204     diff --git a/net/atm/common.c b/net/atm/common.c
4205     index 8a4f99114cd2..9e812c782a37 100644
4206     --- a/net/atm/common.c
4207     +++ b/net/atm/common.c
4208     @@ -630,10 +630,9 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
4209     goto out;
4210     }
4211     pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
4212     - refcount_add(skb->truesize, &sk->sk_wmem_alloc);
4213     + atm_account_tx(vcc, skb);
4214    
4215     skb->dev = NULL; /* for paths shared with net_device interfaces */
4216     - ATM_SKB(skb)->atm_options = vcc->atm_options;
4217     if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
4218     kfree_skb(skb);
4219     error = -EFAULT;
4220     diff --git a/net/atm/lec.c b/net/atm/lec.c
4221     index 5741b6474dd9..9f2365694ad4 100644
4222     --- a/net/atm/lec.c
4223     +++ b/net/atm/lec.c
4224     @@ -182,9 +182,8 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
4225     struct net_device *dev = skb->dev;
4226    
4227     ATM_SKB(skb)->vcc = vcc;
4228     - ATM_SKB(skb)->atm_options = vcc->atm_options;
4229     + atm_account_tx(vcc, skb);
4230    
4231     - refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
4232     if (vcc->send(vcc, skb) < 0) {
4233     dev->stats.tx_dropped++;
4234     return;
4235     diff --git a/net/atm/mpc.c b/net/atm/mpc.c
4236     index 5677147209e8..db9a1838687c 100644
4237     --- a/net/atm/mpc.c
4238     +++ b/net/atm/mpc.c
4239     @@ -555,8 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
4240     sizeof(struct llc_snap_hdr));
4241     }
4242    
4243     - refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
4244     - ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
4245     + atm_account_tx(entry->shortcut, skb);
4246     entry->shortcut->send(entry->shortcut, skb);
4247     entry->packets_fwded++;
4248     mpc->in_ops->put(entry);
4249     diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
4250     index 21d9d341a619..af8c4b38b746 100644
4251     --- a/net/atm/pppoatm.c
4252     +++ b/net/atm/pppoatm.c
4253     @@ -350,8 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
4254     return 1;
4255     }
4256    
4257     - refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
4258     - ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
4259     + atm_account_tx(vcc, skb);
4260     pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
4261     skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
4262     ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
4263     diff --git a/net/atm/raw.c b/net/atm/raw.c
4264     index ee10e8d46185..b3ba44aab0ee 100644
4265     --- a/net/atm/raw.c
4266     +++ b/net/atm/raw.c
4267     @@ -35,8 +35,8 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
4268     struct sock *sk = sk_atm(vcc);
4269    
4270     pr_debug("(%d) %d -= %d\n",
4271     - vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
4272     - WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
4273     + vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
4274     + WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
4275     dev_kfree_skb_any(skb);
4276     sk->sk_write_space(sk);
4277     }
4278     diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
4279     index 25738b20676d..54c7fe68040f 100644
4280     --- a/net/bridge/netfilter/ebtables.c
4281     +++ b/net/bridge/netfilter/ebtables.c
4282     @@ -398,6 +398,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
4283     watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
4284     if (IS_ERR(watcher))
4285     return PTR_ERR(watcher);
4286     +
4287     + if (watcher->family != NFPROTO_BRIDGE) {
4288     + module_put(watcher->me);
4289     + return -ENOENT;
4290     + }
4291     +
4292     w->u.watcher = watcher;
4293    
4294     par->target = watcher;
4295     @@ -719,6 +725,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
4296     goto cleanup_watchers;
4297     }
4298    
4299     + /* Reject UNSPEC, xtables verdicts/return values are incompatible */
4300     + if (target->family != NFPROTO_BRIDGE) {
4301     + module_put(target->me);
4302     + ret = -ENOENT;
4303     + goto cleanup_watchers;
4304     + }
4305     +
4306     t->u.target = target;
4307     if (t->u.target == &ebt_standard_target) {
4308     if (gap < sizeof(struct ebt_standard_target)) {
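
In the ebtables hunks, xt_request_find_target() can hand back an xtables target registered for a different protocol family, whose verdict conventions are incompatible with ebtables', so accepting one would misinterpret return values. The new checks reject anything that is not NFPROTO_BRIDGE, and they must also drop the module reference the lookup took. A sketch of that reference discipline:

/* Sketch: a failed post-lookup check must undo the lookup's
 * module reference, or the target's module can never be unloaded.
 */
static struct xt_target *find_bridge_target(const char *name)
{
	struct xt_target *target;

	target = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
	if (IS_ERR(target))
		return target;

	if (target->family != NFPROTO_BRIDGE) {
		module_put(target->me);	/* release the lookup's ref */
		return ERR_PTR(-ENOENT);
	}
	return target;
}
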
4309     diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
4310     index 119c04317d48..03fcf3ee1534 100644
4311     --- a/net/dccp/ccids/ccid3.c
4312     +++ b/net/dccp/ccids/ccid3.c
4313     @@ -599,7 +599,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
4314     {
4315     struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
4316     struct dccp_sock *dp = dccp_sk(sk);
4317     - ktime_t now = ktime_get_real();
4318     + ktime_t now = ktime_get();
4319     s64 delta = 0;
4320    
4321     switch (fbtype) {
4322     @@ -624,15 +624,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
4323     case CCID3_FBACK_PERIODIC:
4324     delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
4325     if (delta <= 0)
4326     - DCCP_BUG("delta (%ld) <= 0", (long)delta);
4327     - else
4328     - hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
4329     + delta = 1;
4330     + hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
4331     break;
4332     default:
4333     return;
4334     }
4335    
4336     - ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
4337     + ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
4338     hc->rx_x_recv, hc->rx_pinv);
4339    
4340     hc->rx_tstamp_last_feedback = now;
4341     @@ -679,7 +678,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
4342     static u32 ccid3_first_li(struct sock *sk)
4343     {
4344     struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
4345     - u32 x_recv, p, delta;
4346     + u32 x_recv, p;
4347     + s64 delta;
4348     u64 fval;
4349    
4350     if (hc->rx_rtt == 0) {
4351     @@ -687,7 +687,9 @@ static u32 ccid3_first_li(struct sock *sk)
4352     hc->rx_rtt = DCCP_FALLBACK_RTT;
4353     }
4354    
4355     - delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
4356     + delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
4357     + if (delta <= 0)
4358     + delta = 1;
4359     x_recv = scaled_div32(hc->rx_bytes_recv, delta);
4360     if (x_recv == 0) { /* would also trigger divide-by-zero */
4361     DCCP_WARN("X_recv==0\n");
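
The ccid3 changes above make two related fixes: feedback timestamps move from the settable wall clock (ktime_get_real) to the monotonic clock (ktime_get), so a clock step can no longer produce a negative interval, and an interval of zero or less is clamped to 1 microsecond rather than logged as a bug, which also keeps the following division safe. The same defensive pattern as a plain-C analogy, with CLOCK_MONOTONIC as the userspace counterpart:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static int64_t now_us(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts); /* immune to clock steps */
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    int main(void)
    {
        int64_t last_feedback = now_us();
        uint32_t bytes_recv = 40960;

        int64_t delta = now_us() - last_feedback;
        if (delta <= 0)
            delta = 1;      /* clamp: never divide by zero or negative */

        uint32_t x_recv = (uint32_t)(bytes_recv / (uint64_t)delta);
        printf("interval %lld us, X_recv=%u\n", (long long)delta, x_recv);
        return 0;
    }
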
4362     diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
4363     index f0252768ecf4..5f5d9eafccf5 100644
4364     --- a/net/dns_resolver/dns_key.c
4365     +++ b/net/dns_resolver/dns_key.c
4366     @@ -87,35 +87,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
4367     opt++;
4368     kdebug("options: '%s'", opt);
4369     do {
4370     + int opt_len, opt_nlen;
4371     const char *eq;
4372     - int opt_len, opt_nlen, opt_vlen, tmp;
4373     + char optval[128];
4374    
4375     next_opt = memchr(opt, '#', end - opt) ?: end;
4376     opt_len = next_opt - opt;
4377     - if (opt_len <= 0 || opt_len > 128) {
4378     + if (opt_len <= 0 || opt_len > sizeof(optval)) {
4379     pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
4380     opt_len);
4381     return -EINVAL;
4382     }
4383    
4384     - eq = memchr(opt, '=', opt_len) ?: end;
4385     - opt_nlen = eq - opt;
4386     - eq++;
4387     - opt_vlen = next_opt - eq; /* will be -1 if no value */
4388     + eq = memchr(opt, '=', opt_len);
4389     + if (eq) {
4390     + opt_nlen = eq - opt;
4391     + eq++;
4392     + memcpy(optval, eq, next_opt - eq);
4393     + optval[next_opt - eq] = '\0';
4394     + } else {
4395     + opt_nlen = opt_len;
4396     + optval[0] = '\0';
4397     + }
4398    
4399     - tmp = opt_vlen >= 0 ? opt_vlen : 0;
4400     - kdebug("option '%*.*s' val '%*.*s'",
4401     - opt_nlen, opt_nlen, opt, tmp, tmp, eq);
4402     + kdebug("option '%*.*s' val '%s'",
4403     + opt_nlen, opt_nlen, opt, optval);
4404    
4405     /* see if it's an error number representing a DNS error
4406     * that's to be recorded as the result in this key */
4407     if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
4408     memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
4409     kdebug("dns error number option");
4410     - if (opt_vlen <= 0)
4411     - goto bad_option_value;
4412    
4413     - ret = kstrtoul(eq, 10, &derrno);
4414     + ret = kstrtoul(optval, 10, &derrno);
4415     if (ret < 0)
4416     goto bad_option_value;
4417    
4418     diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
4419     index 1540db65241a..c9ec1603666b 100644
4420     --- a/net/ipv4/fou.c
4421     +++ b/net/ipv4/fou.c
4422     @@ -448,9 +448,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
4423     out_unlock:
4424     rcu_read_unlock();
4425     out:
4426     - NAPI_GRO_CB(skb)->flush |= flush;
4427     - skb_gro_remcsum_cleanup(skb, &grc);
4428     - skb->remcsum_offload = 0;
4429     + skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
4430    
4431     return pp;
4432     }
4433     diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
4434     index 1859c473b21a..6a7d980105f6 100644
4435     --- a/net/ipv4/gre_offload.c
4436     +++ b/net/ipv4/gre_offload.c
4437     @@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
4438     out_unlock:
4439     rcu_read_unlock();
4440     out:
4441     - NAPI_GRO_CB(skb)->flush |= flush;
4442     + skb_gro_flush_final(skb, pp, flush);
4443    
4444     return pp;
4445     }
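
This hunk, the fou.c one above, and the udp_offload.c hunk further down all replace an open-coded write to NAPI_GRO_CB(skb)->flush with the skb_gro_flush_final helpers. The point, as this sketch models it, is that an inner gro_receive handler can return an ERR_PTR-encoded -EINPROGRESS to say it already took ownership of the skb (the ESP GRO path does), and the helper checks for that sentinel before touching the skb at all, whereas the open-coded exit wrote to it unconditionally. A user-space model of the sentinel check (ERR_PTR/PTR_ERR are re-created here in miniature):

    #include <errno.h>
    #include <stdio.h>

    #define ERR_PTR(e)  ((void *)(long)(e))
    #define PTR_ERR(p)  ((long)(p))

    struct pkt { int flush; };

    /* don't touch the packet if the callee reported it already consumed it */
    static void flush_final(struct pkt *p, void *pp, int flush)
    {
        if (PTR_ERR(pp) != -EINPROGRESS)
            p->flush |= flush;   /* safe: p is still ours */
        /* else: p may already be freed elsewhere; leave it alone */
    }

    int main(void)
    {
        struct pkt p = { 0 };
        flush_final(&p, NULL, 1);                  /* normal path: flag set */
        flush_final(&p, ERR_PTR(-EINPROGRESS), 1); /* consumed: no touch */
        printf("flush=%d\n", p.flush);
        return 0;
    }
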
4446     diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
4447     index e7d15fb0d94d..24b066c32e06 100644
4448     --- a/net/ipv4/inet_hashtables.c
4449     +++ b/net/ipv4/inet_hashtables.c
4450     @@ -188,9 +188,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
4451     bool dev_match = (sk->sk_bound_dev_if == dif ||
4452     sk->sk_bound_dev_if == sdif);
4453    
4454     - if (exact_dif && !dev_match)
4455     + if (!dev_match)
4456     return -1;
4457     - if (sk->sk_bound_dev_if && dev_match)
4458     + if (sk->sk_bound_dev_if)
4459     score += 4;
4460     }
4461     if (sk->sk_incoming_cpu == raw_smp_processor_id())
4462     diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
4463     index 0989e739d098..5a29dc5083a3 100644
4464     --- a/net/ipv4/sysctl_net_ipv4.c
4465     +++ b/net/ipv4/sysctl_net_ipv4.c
4466     @@ -258,8 +258,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
4467     {
4468     struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
4469     struct tcp_fastopen_context *ctxt;
4470     - int ret;
4471     u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
4472     + __le32 key[4];
4473     + int ret, i;
4474    
4475     tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
4476     if (!tbl.data)
4477     @@ -268,11 +269,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
4478     rcu_read_lock();
4479     ctxt = rcu_dereference(tcp_fastopen_ctx);
4480     if (ctxt)
4481     - memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
4482     + memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
4483     else
4484     - memset(user_key, 0, sizeof(user_key));
4485     + memset(key, 0, sizeof(key));
4486     rcu_read_unlock();
4487    
4488     + for (i = 0; i < ARRAY_SIZE(key); i++)
4489     + user_key[i] = le32_to_cpu(key[i]);
4490     +
4491     snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
4492     user_key[0], user_key[1], user_key[2], user_key[3]);
4493     ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
4494     @@ -288,12 +292,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
4495     * first invocation of tcp_fastopen_cookie_gen
4496     */
4497     tcp_fastopen_init_key_once(false);
4498     - tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
4499     +
4500     + for (i = 0; i < ARRAY_SIZE(user_key); i++)
4501     + key[i] = cpu_to_le32(user_key[i]);
4502     +
4503     + tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH);
4504     }
4505    
4506     bad_key:
4507     pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
4508     - user_key[0], user_key[1], user_key[2], user_key[3],
4509     + user_key[0], user_key[1], user_key[2], user_key[3],
4510     (char *)tbl.data, ret);
4511     kfree(tbl.data);
4512     return ret;
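
The sysctl handler used to copy the stored TCP fastopen key straight into a u32 array, so the hex string shown in /proc differed between little- and big-endian hosts given the same key. The fix keeps the in-memory copy as __le32 and converts with le32_to_cpu/cpu_to_le32 exactly at the user-visible boundary, making the proc representation byte-order independent. A small round-trip demonstration (htole32/le32toh from endian.h are the glibc/BSD equivalents, not the kernel macros):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t user_key[4] = { 0x00010203, 0x04050607,
                                 0x08090a0b, 0x0c0d0e0f };
        uint32_t key_le[4];   /* storage format: always little-endian */

        /* user -> storage */
        for (int i = 0; i < 4; i++)
            key_le[i] = htole32(user_key[i]);

        /* storage -> user: identical output on any host byte order */
        printf("%08x-%08x-%08x-%08x\n",
               le32toh(key_le[0]), le32toh(key_le[1]),
               le32toh(key_le[2]), le32toh(key_le[3]));
        return 0;
    }
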
4513     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
4514     index f0caff3139ed..5711b1b12d28 100644
4515     --- a/net/ipv4/tcp_input.c
4516     +++ b/net/ipv4/tcp_input.c
4517     @@ -3194,6 +3194,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
4518    
4519     if (tcp_is_reno(tp)) {
4520     tcp_remove_reno_sacks(sk, pkts_acked);
4521     +
4522     + /* If any of the cumulatively ACKed segments were
4523     + * retransmitted, the non-SACK case cannot confirm
4524     + * that progress was due to original transmissions,
4525     + * for lack of TCPCB_SACKED_ACKED bits, even though
4526     + * some packets may never have been retransmitted.
4527     + */
4528     + if (flag & FLAG_RETRANS_DATA_ACKED)
4529     + flag &= ~FLAG_ORIG_SACK_ACKED;
4530     } else {
4531     int delta;
4532    
4533     diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
4534     index ea6e6e7df0ee..cde2719fcb89 100644
4535     --- a/net/ipv4/udp_offload.c
4536     +++ b/net/ipv4/udp_offload.c
4537     @@ -295,7 +295,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
4538     out_unlock:
4539     rcu_read_unlock();
4540     out:
4541     - NAPI_GRO_CB(skb)->flush |= flush;
4542     + skb_gro_flush_final(skb, pp, flush);
4543     return pp;
4544     }
4545     EXPORT_SYMBOL(udp_gro_receive);
4546     diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
4547     index b01858f5deb1..6dc93ac28261 100644
4548     --- a/net/ipv6/inet6_hashtables.c
4549     +++ b/net/ipv6/inet6_hashtables.c
4550     @@ -113,9 +113,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
4551     bool dev_match = (sk->sk_bound_dev_if == dif ||
4552     sk->sk_bound_dev_if == sdif);
4553    
4554     - if (exact_dif && !dev_match)
4555     + if (!dev_match)
4556     return -1;
4557     - if (sk->sk_bound_dev_if && dev_match)
4558     + if (sk->sk_bound_dev_if)
4559     score++;
4560     }
4561     if (sk->sk_incoming_cpu == raw_smp_processor_id())
4562     diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
4563     index 64ec23388450..722a9db8c6a7 100644
4564     --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
4565     +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
4566     @@ -618,6 +618,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
4567     fq->q.meat == fq->q.len &&
4568     nf_ct_frag6_reasm(fq, skb, dev))
4569     ret = 0;
4570     + else
4571     + skb_dst_drop(skb);
4572    
4573     out_unlock:
4574     spin_unlock_bh(&fq->q.lock);
4575     diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
4576     index 33fb35cbfac1..558fe8cc6d43 100644
4577     --- a/net/ipv6/seg6_hmac.c
4578     +++ b/net/ipv6/seg6_hmac.c
4579     @@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void)
4580     return -ENOMEM;
4581    
4582     for_each_possible_cpu(cpu) {
4583     - tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL);
4584     + tfm = crypto_alloc_shash(algo->name, 0, 0);
4585     if (IS_ERR(tfm))
4586     return PTR_ERR(tfm);
4587     p_tfm = per_cpu_ptr(algo->tfms, cpu);
4588     diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
4589     index 2ceefa183cee..6a196e438b6c 100644
4590     --- a/net/nfc/llcp_commands.c
4591     +++ b/net/nfc/llcp_commands.c
4592     @@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
4593     pr_debug("Fragment %zd bytes remaining %zd",
4594     frag_len, remaining_len);
4595    
4596     - pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
4597     + pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
4598     frag_len + LLCP_HEADER_SIZE, &err);
4599     if (pdu == NULL) {
4600     - pr_err("Could not allocate PDU\n");
4601     - continue;
4602     + pr_err("Could not allocate PDU (error=%d)\n", err);
4603     + len -= remaining_len;
4604     + if (len == 0)
4605     + len = err;
4606     + break;
4607     }
4608    
4609     pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
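
Rather than skipping a UI fragment it could not allocate, which would leave a silent hole in the transmitted stream, the loop now stops, and nfc_llcp_send_ui_frame reports how many bytes actually went out, falling back to the allocation error only when nothing was sent: the conventional partial-write contract. A user-space sketch of that contract (alloc_frag simulates the allocation failing partway through):

    #include <errno.h>
    #include <stdio.h>

    /* pretend allocation fails on the third fragment */
    static int alloc_frag(int n) { return n < 2 ? 0 : -ENOMEM; }

    /* send len bytes in frag_sz pieces; return bytes sent or -errno */
    static long send_all(long len, long frag_sz)
    {
        long remaining = len;
        int n = 0, err = 0;

        while (remaining > 0) {
            long frag = remaining < frag_sz ? remaining : frag_sz;
            err = alloc_frag(n++);
            if (err) {
                len -= remaining;     /* bytes handed off so far */
                if (len == 0)
                    len = err;        /* nothing sent: surface the error */
                break;
            }
            remaining -= frag;
        }
        return len;
    }

    int main(void)
    {
        printf("sent: %ld\n", send_all(1000, 300)); /* 600: two fragments */
        return 0;
    }
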
4610     diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
4611     index 6df6f58a8103..5647905c88d6 100644
4612     --- a/net/nsh/nsh.c
4613     +++ b/net/nsh/nsh.c
4614     @@ -42,7 +42,7 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
4615     __skb_pull(skb, nsh_len);
4616    
4617     skb_reset_mac_header(skb);
4618     - skb_reset_mac_len(skb);
4619     + skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0;
4620     skb->protocol = proto;
4621    
4622     features &= NETIF_F_SG;
4623     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
4624     index 4fe2e34522d6..27dafe36f29c 100644
4625     --- a/net/packet/af_packet.c
4626     +++ b/net/packet/af_packet.c
4627     @@ -2303,6 +2303,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
4628     if (po->stats.stats1.tp_drops)
4629     status |= TP_STATUS_LOSING;
4630     }
4631     +
4632     + if (do_vnet &&
4633     + virtio_net_hdr_from_skb(skb, h.raw + macoff -
4634     + sizeof(struct virtio_net_hdr),
4635     + vio_le(), true, 0))
4636     + goto drop_n_account;
4637     +
4638     po->stats.stats1.tp_packets++;
4639     if (copy_skb) {
4640     status |= TP_STATUS_COPY;
4641     @@ -2310,15 +2317,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
4642     }
4643     spin_unlock(&sk->sk_receive_queue.lock);
4644    
4645     - if (do_vnet) {
4646     - if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
4647     - sizeof(struct virtio_net_hdr),
4648     - vio_le(), true, 0)) {
4649     - spin_lock(&sk->sk_receive_queue.lock);
4650     - goto drop_n_account;
4651     - }
4652     - }
4653     -
4654     skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
4655    
4656     if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
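
The tpacket_rcv reordering moves the fallible virtio_net_hdr_from_skb conversion to before the statistics are committed, while the receive-queue lock is still held; previously the code bumped tp_packets, dropped the lock, and then had to re-take it just to undo the accounting when the conversion failed. A sketch of the validate-then-commit ordering under a lock (the counters and the fallible step are stand-ins):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static long packets, drops;

    static int build_header(int ok) { return ok ? 0 : -1; } /* fallible step */

    static void rcv(int hdr_ok)
    {
        pthread_mutex_lock(&lock);
        if (build_header(hdr_ok)) {   /* fail *before* committing any state */
            drops++;
            pthread_mutex_unlock(&lock);
            return;
        }
        packets++;                    /* commit: nothing below can fail */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        rcv(1);
        rcv(0);
        printf("packets=%ld drops=%ld\n", packets, drops);
        return 0;
    }
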
4657     diff --git a/net/rds/loop.c b/net/rds/loop.c
4658     index f2bf78de5688..dac6218a460e 100644
4659     --- a/net/rds/loop.c
4660     +++ b/net/rds/loop.c
4661     @@ -193,4 +193,5 @@ struct rds_transport rds_loop_transport = {
4662     .inc_copy_to_user = rds_message_inc_copy_to_user,
4663     .inc_free = rds_loop_inc_free,
4664     .t_name = "loopback",
4665     + .t_type = RDS_TRANS_LOOP,
4666     };
4667     diff --git a/net/rds/rds.h b/net/rds/rds.h
4668     index d09f6c1facb4..f685d8b514e5 100644
4669     --- a/net/rds/rds.h
4670     +++ b/net/rds/rds.h
4671     @@ -454,6 +454,11 @@ struct rds_notifier {
4672     int n_status;
4673     };
4674    
4675     +/* Available as part of RDS core, so doesn't need to participate
4676     + * in get_preferred transport etc
4677     + */
4678     +#define RDS_TRANS_LOOP 3
4679     +
4680     /**
4681     * struct rds_transport - transport specific behavioural hooks
4682     *
4683     diff --git a/net/rds/recv.c b/net/rds/recv.c
4684     index 555f07ccf0dc..c27cceae52e1 100644
4685     --- a/net/rds/recv.c
4686     +++ b/net/rds/recv.c
4687     @@ -103,6 +103,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
4688     rds_stats_add(s_recv_bytes_added_to_socket, delta);
4689     else
4690     rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
4691     +
4692     + /* loop transport doesn't send/recv congestion updates */
4693     + if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
4694     + return;
4695     +
4696     now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
4697    
4698     rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
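
The three RDS hunks cooperate: the loopback transport is tagged RDS_TRANS_LOOP, and rds_recv_rcvbuf_delta returns before the congestion computation for it, since a transport that never sends or receives congestion updates must not be allowed to mark a port congested, a state nothing would ever clear. A sketch of gating a side effect on a transport-type tag (the enum values are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum trans_type { TRANS_TCP, TRANS_IB, TRANS_LOOP };

    struct transport { enum trans_type t_type; };

    static bool congested;

    static void rcvbuf_delta(const struct transport *t,
                             long rcv_bytes, long rcvbuf)
    {
        /* loop transport doesn't send/recv congestion updates: skip */
        if (t->t_type == TRANS_LOOP)
            return;
        congested = rcv_bytes > rcvbuf;  /* would be broadcast to peers */
    }

    int main(void)
    {
        struct transport loop = { TRANS_LOOP };
        rcvbuf_delta(&loop, 100, 10);    /* over budget, but never flagged */
        printf("congested=%d\n", congested);
        return 0;
    }
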
4699     diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
4700     index c98a61e980ba..9c4c2bb547d7 100644
4701     --- a/net/sched/sch_blackhole.c
4702     +++ b/net/sched/sch_blackhole.c
4703     @@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
4704     struct sk_buff **to_free)
4705     {
4706     qdisc_drop(skb, sch, to_free);
4707     - return NET_XMIT_SUCCESS;
4708     + return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
4709     }
4710    
4711     static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
4712     diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
4713     index c741365f77da..a68c754e84ea 100644
4714     --- a/net/strparser/strparser.c
4715     +++ b/net/strparser/strparser.c
4716     @@ -35,7 +35,6 @@ struct _strp_msg {
4717     */
4718     struct strp_msg strp;
4719     int accum_len;
4720     - int early_eaten;
4721     };
4722    
4723     static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
4724     @@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
4725     head = strp->skb_head;
4726     if (head) {
4727     /* Message already in progress */
4728     -
4729     - stm = _strp_msg(head);
4730     - if (unlikely(stm->early_eaten)) {
4731     - /* Already some number of bytes on the receive sock
4732     - * data saved in skb_head, just indicate they
4733     - * are consumed.
4734     - */
4735     - eaten = orig_len <= stm->early_eaten ?
4736     - orig_len : stm->early_eaten;
4737     - stm->early_eaten -= eaten;
4738     -
4739     - return eaten;
4740     - }
4741     -
4742     if (unlikely(orig_offset)) {
4743     /* Getting data with a non-zero offset when a message is
4744     * in progress is not expected. If it does happen, we
4745     @@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
4746     }
4747    
4748     stm->accum_len += cand_len;
4749     + eaten += cand_len;
4750     strp->need_bytes = stm->strp.full_len -
4751     stm->accum_len;
4752     - stm->early_eaten = cand_len;
4753     STRP_STATS_ADD(strp->stats.bytes, cand_len);
4754     desc->count = 0; /* Stop reading socket */
4755     break;
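
The strparser fix retires the early_eaten mechanism, which stashed "bytes already consumed" in the in-progress message and replayed it on the next receive callback, in favour of simply adding cand_len to eaten at the moment the bytes are accounted, so the callback's return value is correct immediately and the deferred state (and the stale-state window it created) disappears. A toy version of accounting consumption at the point it happens:

    #include <stdio.h>

    /* consume up to full_len bytes of a record from this chunk;
     * return how many input bytes were eaten *now* (no deferred state) */
    static int recv_chunk(int chunk_len, int *accum, int full_len)
    {
        int eaten = 0;
        int need = full_len - *accum;
        int take = chunk_len < need ? chunk_len : need;

        *accum += take;
        eaten += take;    /* account immediately, not on the next call */
        return eaten;
    }

    int main(void)
    {
        int accum = 0;
        printf("%d ", recv_chunk(100, &accum, 150));  /* eats 100 */
        printf("%d\n", recv_chunk(100, &accum, 150)); /* eats 50 */
        return 0;
    }
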
4756     diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
4757     index 97b9d4f671ac..2aaf46599126 100644
4758     --- a/net/sunrpc/xprtrdma/verbs.c
4759     +++ b/net/sunrpc/xprtrdma/verbs.c
4760     @@ -270,7 +270,6 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
4761     wait_for_completion(&ia->ri_remove_done);
4762    
4763     ia->ri_id = NULL;
4764     - ia->ri_pd = NULL;
4765     ia->ri_device = NULL;
4766     /* Return 1 to ensure the core destroys the id. */
4767     return 1;
4768     @@ -464,7 +463,9 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
4769     ia->ri_id->qp = NULL;
4770     }
4771     ib_free_cq(ep->rep_attr.recv_cq);
4772     + ep->rep_attr.recv_cq = NULL;
4773     ib_free_cq(ep->rep_attr.send_cq);
4774     + ep->rep_attr.send_cq = NULL;
4775    
4776     /* The ULP is responsible for ensuring all DMA
4777     * mappings and MRs are gone.
4778     @@ -477,6 +478,8 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
4779     rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
4780     }
4781     rpcrdma_destroy_mrs(buf);
4782     + ib_dealloc_pd(ia->ri_pd);
4783     + ia->ri_pd = NULL;
4784    
4785     /* Allow waiters to continue */
4786     complete(&ia->ri_remove_done);
4787     @@ -650,14 +653,16 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
4788    
4789     cancel_delayed_work_sync(&ep->rep_connect_worker);
4790    
4791     - if (ia->ri_id->qp) {
4792     + if (ia->ri_id && ia->ri_id->qp) {
4793     rpcrdma_ep_disconnect(ep, ia);
4794     rdma_destroy_qp(ia->ri_id);
4795     ia->ri_id->qp = NULL;
4796     }
4797    
4798     - ib_free_cq(ep->rep_attr.recv_cq);
4799     - ib_free_cq(ep->rep_attr.send_cq);
4800     + if (ep->rep_attr.recv_cq)
4801     + ib_free_cq(ep->rep_attr.recv_cq);
4802     + if (ep->rep_attr.send_cq)
4803     + ib_free_cq(ep->rep_attr.send_cq);
4804     }
4805    
4806     /* Re-establish a connection after a device removal event.
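
The verbs.c changes make device removal and normal endpoint teardown safe in either order: each completion queue pointer is cleared right after it is freed, the teardown path frees a CQ only if its pointer is still set, and the protection domain is now released at the end of rpcrdma_ia_remove, once nothing that references it remains. A sketch of the free-and-NULL plus guarded-free discipline (plain malloc/free stand in for the ib_ verbs):

    #include <stdio.h>
    #include <stdlib.h>

    struct endpoint {
        void *send_cq;
        void *recv_cq;
    };

    static void free_cq(void **cq)
    {
        if (*cq) {          /* guard: tolerate an earlier teardown path */
            free(*cq);
            *cq = NULL;     /* NULL after free so a second pass is a no-op */
        }
    }

    static void destroy(struct endpoint *ep)
    {
        free_cq(&ep->recv_cq);
        free_cq(&ep->send_cq);
    }

    int main(void)
    {
        struct endpoint ep = { malloc(8), malloc(8) };
        destroy(&ep);   /* e.g. the device-removal path ran first */
        destroy(&ep);   /* later teardown: safe, frees nothing twice */
        printf("ok\n");
        return 0;
    }
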
4807     diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
4808     index 3c86614462f6..8ee4e667a414 100644
4809     --- a/net/tls/tls_sw.c
4810     +++ b/net/tls/tls_sw.c
4811     @@ -449,7 +449,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
4812     ret = tls_push_record(sk, msg->msg_flags, record_type);
4813     if (!ret)
4814     continue;
4815     - if (ret == -EAGAIN)
4816     + if (ret < 0)
4817     goto send_end;
4818    
4819     copied -= try_to_copy;
4820     diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
4821     index 403d86e80162..fdb294441682 100644
4822     --- a/net/vmw_vsock/virtio_transport.c
4823     +++ b/net/vmw_vsock/virtio_transport.c
4824     @@ -201,7 +201,7 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
4825     return -ENODEV;
4826     }
4827    
4828     - if (le32_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
4829     + if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
4830     return virtio_transport_send_pkt_loopback(vsock, pkt);
4831    
4832     if (pkt->reply)
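
pkt->hdr.dst_cid is a 64-bit little-endian field, and reading it with le32_to_cpu truncates the raw value before byte-swapping: harmless on little-endian hosts, but on big-endian ones it yields the wrong half of the field, so loopback detection could silently never match. A demonstration of why the accessor width must match the field width (endian.h again supplies the userspace conversions):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t le_field = htole64(3);  /* a 64-bit LE field, cid = 3 */

        /* what a 32-bit accessor does: truncate the raw value first */
        uint32_t truncated = (uint32_t)le_field;

        /* correct on all hosts: prints 3 */
        printf("le64toh: %llu\n", (unsigned long long)le64toh(le_field));
        /* 3 on little-endian hosts, 0 on big-endian ones */
        printf("le32toh(truncated): %u\n", (unsigned)le32toh(truncated));
        return 0;
    }
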
4833     diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
4834     index 9bee849db682..d5f1d8364571 100644
4835     --- a/virt/kvm/arm/arm.c
4836     +++ b/virt/kvm/arm/arm.c
4837     @@ -51,8 +51,8 @@
4838     __asm__(".arch_extension virt");
4839     #endif
4840    
4841     +DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
4842     static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
4843     -static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
4844    
4845     /* Per-CPU variable containing the currently running vcpu. */
4846     static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
4847     @@ -351,7 +351,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
4848     }
4849    
4850     vcpu->cpu = cpu;
4851     - vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
4852     + vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
4853    
4854     kvm_arm_set_running_vcpu(vcpu);
4855    
4856     @@ -1259,19 +1259,8 @@ static inline void hyp_cpu_pm_exit(void)
4857     }
4858     #endif
4859    
4860     -static void teardown_common_resources(void)
4861     -{
4862     - free_percpu(kvm_host_cpu_state);
4863     -}
4864     -
4865     static int init_common_resources(void)
4866     {
4867     - kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
4868     - if (!kvm_host_cpu_state) {
4869     - kvm_err("Cannot allocate host CPU state\n");
4870     - return -ENOMEM;
4871     - }
4872     -
4873     /* set size of VMID supported by CPU */
4874     kvm_vmid_bits = kvm_get_vmid_bits();
4875     kvm_info("%d-bit VMID\n", kvm_vmid_bits);
4876     @@ -1413,7 +1402,7 @@ static int init_hyp_mode(void)
4877     for_each_possible_cpu(cpu) {
4878     kvm_cpu_context_t *cpu_ctxt;
4879    
4880     - cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
4881     + cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
4882     err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
4883    
4884     if (err) {
4885     @@ -1422,6 +1411,10 @@ static int init_hyp_mode(void)
4886     }
4887     }
4888    
4889     + err = hyp_map_aux_data();
4890     + if (err)
4891     + kvm_err("Cannot map host auxiliary data: %d\n", err);
4892     +
4893     return 0;
4894    
4895     out_err:
4896     @@ -1497,7 +1490,6 @@ int kvm_arch_init(void *opaque)
4897     if (!in_hyp_mode)
4898     teardown_hyp_mode();
4899     out_err:
4900     - teardown_common_resources();
4901     return err;
4902     }
4903    
4904     diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
4905     index d7fd46fe9efb..4b4221b0d4ba 100644
4906     --- a/virt/kvm/arm/hyp/vgic-v2-sr.c
4907     +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
4908     @@ -139,7 +139,7 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
4909     return -1;
4910    
4911     rd = kvm_vcpu_dabt_get_rd(vcpu);
4912     - addr = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
4913     + addr = kern_hyp_va(hyp_symbol_addr(kvm_vgic_global_state)->vcpu_base_va);
4914     addr += fault_ipa - vgic->vgic_cpu_base;
4915    
4916     if (kvm_vcpu_dabt_iswrite(vcpu)) {
4917     diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
4918     index c4762bef13c6..c95ab4c5a475 100644
4919     --- a/virt/kvm/arm/psci.c
4920     +++ b/virt/kvm/arm/psci.c
4921     @@ -405,7 +405,7 @@ static int kvm_psci_call(struct kvm_vcpu *vcpu)
4922     int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
4923     {
4924     u32 func_id = smccc_get_function(vcpu);
4925     - u32 val = PSCI_RET_NOT_SUPPORTED;
4926     + u32 val = SMCCC_RET_NOT_SUPPORTED;
4927     u32 feature;
4928    
4929     switch (func_id) {
4930     @@ -417,7 +417,21 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
4931     switch(feature) {
4932     case ARM_SMCCC_ARCH_WORKAROUND_1:
4933     if (kvm_arm_harden_branch_predictor())
4934     - val = 0;
4935     + val = SMCCC_RET_SUCCESS;
4936     + break;
4937     + case ARM_SMCCC_ARCH_WORKAROUND_2:
4938     + switch (kvm_arm_have_ssbd()) {
4939     + case KVM_SSBD_FORCE_DISABLE:
4940     + case KVM_SSBD_UNKNOWN:
4941     + break;
4942     + case KVM_SSBD_KERNEL:
4943     + val = SMCCC_RET_SUCCESS;
4944     + break;
4945     + case KVM_SSBD_FORCE_ENABLE:
4946     + case KVM_SSBD_MITIGATED:
4947     + val = SMCCC_RET_NOT_REQUIRED;
4948     + break;
4949     + }
4950     break;
4951     }
4952     break;
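
The handler above answers ARM_SMCCC_ARCH_WORKAROUND_2 feature probes by translating the host's SSBD state into the SMCCC return convention: NOT_SUPPORTED when the firmware mitigation is absent or forced off, SUCCESS when the guest can and should drive the mitigation dynamically, and NOT_REQUIRED when it is permanently on or the CPU is unaffected. The mapping in isolation (the enum mirrors the KVM_SSBD_* states; the return values follow the SMCCC 1.1 convention of 0, -1, -2):

    #include <stdio.h>

    enum ssbd_state {
        SSBD_UNKNOWN = -1, SSBD_FORCE_DISABLE, SSBD_KERNEL,
        SSBD_FORCE_ENABLE, SSBD_MITIGATED,
    };

    /* SMCCC 1.1 return convention */
    enum { RET_SUCCESS = 0, RET_NOT_SUPPORTED = -1, RET_NOT_REQUIRED = -2 };

    static int workaround_2_feature(enum ssbd_state s)
    {
        switch (s) {
        case SSBD_KERNEL:            /* guest may toggle it dynamically */
            return RET_SUCCESS;
        case SSBD_FORCE_ENABLE:      /* always on, or ... */
        case SSBD_MITIGATED:         /* ... CPU not affected */
            return RET_NOT_REQUIRED;
        case SSBD_FORCE_DISABLE:
        case SSBD_UNKNOWN:
        default:
            return RET_NOT_SUPPORTED;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n",
               workaround_2_feature(SSBD_KERNEL),
               workaround_2_feature(SSBD_MITIGATED),
               workaround_2_feature(SSBD_UNKNOWN));
        return 0;
    }
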