Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.17/0108-4.17.9-all-fixes.patch



Revision 3159
Tue Jul 31 06:32:20 2018 UTC by niro
File size: 146217 bytes
-linux-4.17.9
1 niro 3159 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2     index f2040d46f095..ff4ba249a26f 100644
3     --- a/Documentation/admin-guide/kernel-parameters.txt
4     +++ b/Documentation/admin-guide/kernel-parameters.txt
5     @@ -4092,6 +4092,23 @@
6     expediting. Set to zero to disable automatic
7     expediting.
8    
9     + ssbd= [ARM64,HW]
10     + Speculative Store Bypass Disable control
11     +
12     + On CPUs that are vulnerable to the Speculative
13     + Store Bypass vulnerability and offer a
14     + firmware based mitigation, this parameter
15     + indicates how the mitigation should be used:
16     +
17     + force-on: Unconditionally enable mitigation for
18     + for both kernel and userspace
19     + force-off: Unconditionally disable mitigation for
20     + for both kernel and userspace
21     + kernel: Always enable mitigation in the
22     + kernel, and offer a prctl interface
23     + to allow userspace to register its
24     + interest in being mitigated too.
25     +
26     stack_guard_gap= [MM]
27     override the default stack gap protection. The value
28     is in page units and it defines how many pages prior
29     diff --git a/Makefile b/Makefile
30     index 7cc36fe18dbb..693fde3aa317 100644
31     --- a/Makefile
32     +++ b/Makefile
33     @@ -1,7 +1,7 @@
34     # SPDX-License-Identifier: GPL-2.0
35     VERSION = 4
36     PATCHLEVEL = 17
37     -SUBLEVEL = 8
38     +SUBLEVEL = 9
39     EXTRAVERSION =
40     NAME = Merciless Moray
41    
42     diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
43     index c7c28c885a19..7001fb871429 100644
44     --- a/arch/arm/include/asm/kvm_host.h
45     +++ b/arch/arm/include/asm/kvm_host.h
46     @@ -315,6 +315,18 @@ static inline bool kvm_arm_harden_branch_predictor(void)
47     return false;
48     }
49    
50     +#define KVM_SSBD_UNKNOWN -1
51     +#define KVM_SSBD_FORCE_DISABLE 0
52     +#define KVM_SSBD_KERNEL 1
53     +#define KVM_SSBD_FORCE_ENABLE 2
54     +#define KVM_SSBD_MITIGATED 3
55     +
56     +static inline int kvm_arm_have_ssbd(void)
57     +{
58     + /* No way to detect it yet, pretend it is not there. */
59     + return KVM_SSBD_UNKNOWN;
60     +}
61     +
62     static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
63     static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
64    
65     diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
66     index f675162663f0..d2eb24eccf8f 100644
67     --- a/arch/arm/include/asm/kvm_mmu.h
68     +++ b/arch/arm/include/asm/kvm_mmu.h
69     @@ -335,6 +335,11 @@ static inline int kvm_map_vectors(void)
70     return 0;
71     }
72    
73     +static inline int hyp_map_aux_data(void)
74     +{
75     + return 0;
76     +}
77     +
78     #define kvm_phys_to_vttbr(addr) (addr)
79    
80     #endif /* !__ASSEMBLY__ */
81     diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
82     index b5030e1a41d8..5539fba892ce 100644
83     --- a/arch/arm/net/bpf_jit_32.c
84     +++ b/arch/arm/net/bpf_jit_32.c
85     @@ -1928,7 +1928,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
86     /* there are 2 passes here */
87     bpf_jit_dump(prog->len, image_size, 2, ctx.target);
88    
89     - set_memory_ro((unsigned long)header, header->pages);
90     + bpf_jit_binary_lock_ro(header);
91     prog->bpf_func = (void *)ctx.target;
92     prog->jited = 1;
93     prog->jited_len = image_size;
94     diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
95     index eb2cf4938f6d..b2103b4df467 100644
96     --- a/arch/arm64/Kconfig
97     +++ b/arch/arm64/Kconfig
98     @@ -938,6 +938,15 @@ config HARDEN_EL2_VECTORS
99    
100     If unsure, say Y.
101    
102     +config ARM64_SSBD
103     + bool "Speculative Store Bypass Disable" if EXPERT
104     + default y
105     + help
106     + This enables mitigation of the bypassing of previous stores
107     + by speculative loads.
108     +
109     + If unsure, say Y.
110     +
111     menuconfig ARMV8_DEPRECATED
112     bool "Emulate deprecated/obsolete ARMv8 instructions"
113     depends on COMPAT
114     diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
115     index bc51b72fafd4..8a699c708fc9 100644
116     --- a/arch/arm64/include/asm/cpucaps.h
117     +++ b/arch/arm64/include/asm/cpucaps.h
118     @@ -48,7 +48,8 @@
119     #define ARM64_HAS_CACHE_IDC 27
120     #define ARM64_HAS_CACHE_DIC 28
121     #define ARM64_HW_DBM 29
122     +#define ARM64_SSBD 30
123    
124     -#define ARM64_NCAPS 30
125     +#define ARM64_NCAPS 31
126    
127     #endif /* __ASM_CPUCAPS_H */
128     diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
129     index 09b0f2a80c8f..55bc1f073bfb 100644
130     --- a/arch/arm64/include/asm/cpufeature.h
131     +++ b/arch/arm64/include/asm/cpufeature.h
132     @@ -537,6 +537,28 @@ static inline u64 read_zcr_features(void)
133     return zcr;
134     }
135    
136     +#define ARM64_SSBD_UNKNOWN -1
137     +#define ARM64_SSBD_FORCE_DISABLE 0
138     +#define ARM64_SSBD_KERNEL 1
139     +#define ARM64_SSBD_FORCE_ENABLE 2
140     +#define ARM64_SSBD_MITIGATED 3
141     +
142     +static inline int arm64_get_ssbd_state(void)
143     +{
144     +#ifdef CONFIG_ARM64_SSBD
145     + extern int ssbd_state;
146     + return ssbd_state;
147     +#else
148     + return ARM64_SSBD_UNKNOWN;
149     +#endif
150     +}
151     +
152     +#ifdef CONFIG_ARM64_SSBD
153     +void arm64_set_ssbd_mitigation(bool state);
154     +#else
155     +static inline void arm64_set_ssbd_mitigation(bool state) {}
156     +#endif
157     +
158     #endif /* __ASSEMBLY__ */
159    
160     #endif
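
A typical consumer of this accessor pair, modelled on the suspend/hibernate hunks later in this patch, re-asserts the mitigation whenever the kernel owns its state (illustration only):

        switch (arm64_get_ssbd_state()) {
        case ARM64_SSBD_FORCE_ENABLE:
        case ARM64_SSBD_KERNEL:
                arm64_set_ssbd_mitigation(true);  /* firmware may have lost it */
                break;
        case ARM64_SSBD_FORCE_DISABLE:
                arm64_set_ssbd_mitigation(false); /* honour the user's choice */
                break;
        }
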
161     diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
162     index f6648a3e4152..d4fbb1356c4c 100644
163     --- a/arch/arm64/include/asm/kvm_asm.h
164     +++ b/arch/arm64/include/asm/kvm_asm.h
165     @@ -33,6 +33,9 @@
166     #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
167     #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
168    
169     +#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
170     +#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
171     +
172     /* Translate a kernel address of @sym into its equivalent linear mapping */
173     #define kvm_ksym_ref(sym) \
174     ({ \
175     @@ -71,14 +74,37 @@ extern u32 __kvm_get_mdcr_el2(void);
176    
177     extern u32 __init_stage2_translation(void);
178    
179     +/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
180     +#define __hyp_this_cpu_ptr(sym) \
181     + ({ \
182     + void *__ptr = hyp_symbol_addr(sym); \
183     + __ptr += read_sysreg(tpidr_el2); \
184     + (typeof(&sym))__ptr; \
185     + })
186     +
187     +#define __hyp_this_cpu_read(sym) \
188     + ({ \
189     + *__hyp_this_cpu_ptr(sym); \
190     + })
191     +
192     #else /* __ASSEMBLY__ */
193    
194     -.macro get_host_ctxt reg, tmp
195     - adr_l \reg, kvm_host_cpu_state
196     +.macro hyp_adr_this_cpu reg, sym, tmp
197     + adr_l \reg, \sym
198     mrs \tmp, tpidr_el2
199     add \reg, \reg, \tmp
200     .endm
201    
202     +.macro hyp_ldr_this_cpu reg, sym, tmp
203     + adr_l \reg, \sym
204     + mrs \tmp, tpidr_el2
205     + ldr \reg, [\reg, \tmp]
206     +.endm
207     +
208     +.macro get_host_ctxt reg, tmp
209     + hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
210     +.endm
211     +
212     .macro get_vcpu_ptr vcpu, ctxt
213     get_host_ctxt \ctxt, \vcpu
214     ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
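
These helpers exist because the generic this_cpu_*() accessors compute per-CPU offsets in a way that is only valid in the host kernel's context; at HYP the per-CPU offset lives in tpidr_el2 and symbol addresses must go through hyp_symbol_addr(). The world-switch code added below uses them like so:

        /* from the switch.c hunk later in this patch: */
        if (__hyp_this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
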
215     diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
216     index 469de8acd06f..95d8a0e15b5f 100644
217     --- a/arch/arm64/include/asm/kvm_host.h
218     +++ b/arch/arm64/include/asm/kvm_host.h
219     @@ -216,6 +216,9 @@ struct kvm_vcpu_arch {
220     /* Exception Information */
221     struct kvm_vcpu_fault_info fault;
222    
223     + /* State of various workarounds, see kvm_asm.h for bit assignment */
224     + u64 workaround_flags;
225     +
226     /* Guest debug state */
227     u64 debug_flags;
228    
229     @@ -452,6 +455,29 @@ static inline bool kvm_arm_harden_branch_predictor(void)
230     return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
231     }
232    
233     +#define KVM_SSBD_UNKNOWN -1
234     +#define KVM_SSBD_FORCE_DISABLE 0
235     +#define KVM_SSBD_KERNEL 1
236     +#define KVM_SSBD_FORCE_ENABLE 2
237     +#define KVM_SSBD_MITIGATED 3
238     +
239     +static inline int kvm_arm_have_ssbd(void)
240     +{
241     + switch (arm64_get_ssbd_state()) {
242     + case ARM64_SSBD_FORCE_DISABLE:
243     + return KVM_SSBD_FORCE_DISABLE;
244     + case ARM64_SSBD_KERNEL:
245     + return KVM_SSBD_KERNEL;
246     + case ARM64_SSBD_FORCE_ENABLE:
247     + return KVM_SSBD_FORCE_ENABLE;
248     + case ARM64_SSBD_MITIGATED:
249     + return KVM_SSBD_MITIGATED;
250     + case ARM64_SSBD_UNKNOWN:
251     + default:
252     + return KVM_SSBD_UNKNOWN;
253     + }
254     +}
255     +
256     void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
257     void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
258    
259     diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
260     index 6128992c2ded..e3b2ad7dd40a 100644
261     --- a/arch/arm64/include/asm/kvm_mmu.h
262     +++ b/arch/arm64/include/asm/kvm_mmu.h
263     @@ -473,6 +473,30 @@ static inline int kvm_map_vectors(void)
264     }
265     #endif
266    
267     +#ifdef CONFIG_ARM64_SSBD
268     +DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
269     +
270     +static inline int hyp_map_aux_data(void)
271     +{
272     + int cpu, err;
273     +
274     + for_each_possible_cpu(cpu) {
275     + u64 *ptr;
276     +
277     + ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
278     + err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
279     + if (err)
280     + return err;
281     + }
282     + return 0;
283     +}
284     +#else
285     +static inline int hyp_map_aux_data(void)
286     +{
287     + return 0;
288     +}
289     +#endif
290     +
291     #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
292    
293     #endif /* __ASSEMBLY__ */
294     diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
295     index 740aa03c5f0d..cbcf11b5e637 100644
296     --- a/arch/arm64/include/asm/thread_info.h
297     +++ b/arch/arm64/include/asm/thread_info.h
298     @@ -94,6 +94,7 @@ void arch_release_task_struct(struct task_struct *tsk);
299     #define TIF_32BIT 22 /* 32bit process */
300     #define TIF_SVE 23 /* Scalable Vector Extension in use */
301     #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
302     +#define TIF_SSBD 25 /* Wants SSB mitigation */
303    
304     #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
305     #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
306     diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
307     index bf825f38d206..0025f8691046 100644
308     --- a/arch/arm64/kernel/Makefile
309     +++ b/arch/arm64/kernel/Makefile
310     @@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
311     arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
312     arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
313     arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
314     +arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
315    
316     obj-y += $(arm64-obj-y) vdso/ probes/
317     obj-m += $(arm64-obj-m)
318     diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
319     index 5bdda651bd05..323aeb5f2fe6 100644
320     --- a/arch/arm64/kernel/asm-offsets.c
321     +++ b/arch/arm64/kernel/asm-offsets.c
322     @@ -136,6 +136,7 @@ int main(void)
323     #ifdef CONFIG_KVM_ARM_HOST
324     DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
325     DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
326     + DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
327     DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
328     DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
329     DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
330     diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
331     index e4a1182deff7..2b9a31a6a16a 100644
332     --- a/arch/arm64/kernel/cpu_errata.c
333     +++ b/arch/arm64/kernel/cpu_errata.c
334     @@ -232,6 +232,178 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
335     }
336     #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
337    
338     +#ifdef CONFIG_ARM64_SSBD
339     +DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
340     +
341     +int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
342     +
343     +static const struct ssbd_options {
344     + const char *str;
345     + int state;
346     +} ssbd_options[] = {
347     + { "force-on", ARM64_SSBD_FORCE_ENABLE, },
348     + { "force-off", ARM64_SSBD_FORCE_DISABLE, },
349     + { "kernel", ARM64_SSBD_KERNEL, },
350     +};
351     +
352     +static int __init ssbd_cfg(char *buf)
353     +{
354     + int i;
355     +
356     + if (!buf || !buf[0])
357     + return -EINVAL;
358     +
359     + for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
360     + int len = strlen(ssbd_options[i].str);
361     +
362     + if (strncmp(buf, ssbd_options[i].str, len))
363     + continue;
364     +
365     + ssbd_state = ssbd_options[i].state;
366     + return 0;
367     + }
368     +
369     + return -EINVAL;
370     +}
371     +early_param("ssbd", ssbd_cfg);
372     +
373     +void __init arm64_update_smccc_conduit(struct alt_instr *alt,
374     + __le32 *origptr, __le32 *updptr,
375     + int nr_inst)
376     +{
377     + u32 insn;
378     +
379     + BUG_ON(nr_inst != 1);
380     +
381     + switch (psci_ops.conduit) {
382     + case PSCI_CONDUIT_HVC:
383     + insn = aarch64_insn_get_hvc_value();
384     + break;
385     + case PSCI_CONDUIT_SMC:
386     + insn = aarch64_insn_get_smc_value();
387     + break;
388     + default:
389     + return;
390     + }
391     +
392     + *updptr = cpu_to_le32(insn);
393     +}
394     +
395     +void __init arm64_enable_wa2_handling(struct alt_instr *alt,
396     + __le32 *origptr, __le32 *updptr,
397     + int nr_inst)
398     +{
399     + BUG_ON(nr_inst != 1);
400     + /*
401     + * Only allow mitigation on EL1 entry/exit and guest
402     + * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
403     + * be flipped.
404     + */
405     + if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
406     + *updptr = cpu_to_le32(aarch64_insn_gen_nop());
407     +}
408     +
409     +void arm64_set_ssbd_mitigation(bool state)
410     +{
411     + switch (psci_ops.conduit) {
412     + case PSCI_CONDUIT_HVC:
413     + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
414     + break;
415     +
416     + case PSCI_CONDUIT_SMC:
417     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
418     + break;
419     +
420     + default:
421     + WARN_ON_ONCE(1);
422     + break;
423     + }
424     +}
425     +
426     +static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
427     + int scope)
428     +{
429     + struct arm_smccc_res res;
430     + bool required = true;
431     + s32 val;
432     +
433     + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
434     +
435     + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
436     + ssbd_state = ARM64_SSBD_UNKNOWN;
437     + return false;
438     + }
439     +
440     + switch (psci_ops.conduit) {
441     + case PSCI_CONDUIT_HVC:
442     + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
443     + ARM_SMCCC_ARCH_WORKAROUND_2, &res);
444     + break;
445     +
446     + case PSCI_CONDUIT_SMC:
447     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
448     + ARM_SMCCC_ARCH_WORKAROUND_2, &res);
449     + break;
450     +
451     + default:
452     + ssbd_state = ARM64_SSBD_UNKNOWN;
453     + return false;
454     + }
455     +
456     + val = (s32)res.a0;
457     +
458     + switch (val) {
459     + case SMCCC_RET_NOT_SUPPORTED:
460     + ssbd_state = ARM64_SSBD_UNKNOWN;
461     + return false;
462     +
463     + case SMCCC_RET_NOT_REQUIRED:
464     + pr_info_once("%s mitigation not required\n", entry->desc);
465     + ssbd_state = ARM64_SSBD_MITIGATED;
466     + return false;
467     +
468     + case SMCCC_RET_SUCCESS:
469     + required = true;
470     + break;
471     +
472     + case 1: /* Mitigation not required on this CPU */
473     + required = false;
474     + break;
475     +
476     + default:
477     + WARN_ON(1);
478     + return false;
479     + }
480     +
481     + switch (ssbd_state) {
482     + case ARM64_SSBD_FORCE_DISABLE:
483     + pr_info_once("%s disabled from command-line\n", entry->desc);
484     + arm64_set_ssbd_mitigation(false);
485     + required = false;
486     + break;
487     +
488     + case ARM64_SSBD_KERNEL:
489     + if (required) {
490     + __this_cpu_write(arm64_ssbd_callback_required, 1);
491     + arm64_set_ssbd_mitigation(true);
492     + }
493     + break;
494     +
495     + case ARM64_SSBD_FORCE_ENABLE:
496     + pr_info_once("%s forced from command-line\n", entry->desc);
497     + arm64_set_ssbd_mitigation(true);
498     + required = true;
499     + break;
500     +
501     + default:
502     + WARN_ON(1);
503     + break;
504     + }
505     +
506     + return required;
507     +}
508     +#endif /* CONFIG_ARM64_SSBD */
509     +
510     #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
511     .matches = is_affected_midr_range, \
512     .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
513     @@ -487,6 +659,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
514     .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
515     ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
516     },
517     +#endif
518     +#ifdef CONFIG_ARM64_SSBD
519     + {
520     + .desc = "Speculative Store Bypass Disable",
521     + .capability = ARM64_SSBD,
522     + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
523     + .matches = has_ssbd_mitigation,
524     + },
525     #endif
526     {
527     }
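
For reference, has_ssbd_mitigation() folds the ARCH_WORKAROUND_2 discovery result into ssbd_state as follows (return constants per include/linux/arm-smccc.h):

        /* val = (s32)res.a0 from the ARCH_FEATURES query:
         *   SMCCC_RET_NOT_SUPPORTED (-1) -> ARM64_SSBD_UNKNOWN: no firmware
         *                                   support, capability stays off
         *   SMCCC_RET_NOT_REQUIRED  (-2) -> ARM64_SSBD_MITIGATED: CPU is not
         *                                   affected, nothing to toggle
         *   SMCCC_RET_SUCCESS        (0) -> dynamic mitigation available and
         *                                   required on this CPU
         *   1                            -> call supported, but this CPU does
         *                                   not need it (required = false)
         */
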
528     diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
529     index ec2ee720e33e..28ad8799406f 100644
530     --- a/arch/arm64/kernel/entry.S
531     +++ b/arch/arm64/kernel/entry.S
532     @@ -18,6 +18,7 @@
533     * along with this program. If not, see <http://www.gnu.org/licenses/>.
534     */
535    
536     +#include <linux/arm-smccc.h>
537     #include <linux/init.h>
538     #include <linux/linkage.h>
539    
540     @@ -137,6 +138,25 @@ alternative_else_nop_endif
541     add \dst, \dst, #(\sym - .entry.tramp.text)
542     .endm
543    
544     + // This macro corrupts x0-x3. It is the caller's duty
545     + // to save/restore them if required.
546     + .macro apply_ssbd, state, targ, tmp1, tmp2
547     +#ifdef CONFIG_ARM64_SSBD
548     +alternative_cb arm64_enable_wa2_handling
549     + b \targ
550     +alternative_cb_end
551     + ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
552     + cbz \tmp2, \targ
553     + ldr \tmp2, [tsk, #TSK_TI_FLAGS]
554     + tbnz \tmp2, #TIF_SSBD, \targ
555     + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
556     + mov w1, #\state
557     +alternative_cb arm64_update_smccc_conduit
558     + nop // Patched to SMC/HVC #0
559     +alternative_cb_end
560     +#endif
561     + .endm
562     +
563     .macro kernel_entry, el, regsize = 64
564     .if \regsize == 32
565     mov w0, w0 // zero upper 32 bits of x0
566     @@ -163,6 +183,14 @@ alternative_else_nop_endif
567     ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
568     disable_step_tsk x19, x20 // exceptions when scheduling.
569    
570     + apply_ssbd 1, 1f, x22, x23
571     +
572     +#ifdef CONFIG_ARM64_SSBD
573     + ldp x0, x1, [sp, #16 * 0]
574     + ldp x2, x3, [sp, #16 * 1]
575     +#endif
576     +1:
577     +
578     mov x29, xzr // fp pointed to user-space
579     .else
580     add x21, sp, #S_FRAME_SIZE
581     @@ -303,6 +331,8 @@ alternative_if ARM64_WORKAROUND_845719
582     alternative_else_nop_endif
583     #endif
584     3:
585     + apply_ssbd 0, 5f, x0, x1
586     +5:
587     .endif
588    
589     msr elr_el1, x21 // set up the return data
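
In C terms, the apply_ssbd macro behaves roughly as follows on each entry from EL0 (state = 1) and return to EL0 (state = 0). The leading branch skips the whole sequence unless arm64_enable_wa2_handling() patches it to a nop, which it does only for ARM64_SSBD_KERNEL (dynamic mitigation), and the trailing nop is patched into an SMC or HVC by arm64_update_smccc_conduit(). A sketch, not kernel code:

        static void apply_ssbd(int state)
        {
                if (!this_cpu_read(arm64_ssbd_callback_required))
                        return;         /* this CPU never needs the callback */
                if (test_thread_flag(TIF_SSBD))
                        return;         /* task keeps the mitigation on anyway */
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
        }
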
590     diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
591     index 1ec5f28c39fc..6b2686d54411 100644
592     --- a/arch/arm64/kernel/hibernate.c
593     +++ b/arch/arm64/kernel/hibernate.c
594     @@ -313,6 +313,17 @@ int swsusp_arch_suspend(void)
595    
596     sleep_cpu = -EINVAL;
597     __cpu_suspend_exit();
598     +
599     + /*
600     + * Just in case the boot kernel did turn the SSBD
601     + * mitigation off behind our back, let's set the state
602     + * to what we expect it to be.
603     + */
604     + switch (arm64_get_ssbd_state()) {
605     + case ARM64_SSBD_FORCE_ENABLE:
606     + case ARM64_SSBD_KERNEL:
607     + arm64_set_ssbd_mitigation(true);
608     + }
609     }
610    
611     local_daif_restore(flags);
612     diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
613     new file mode 100644
614     index 000000000000..3432e5ef9f41
615     --- /dev/null
616     +++ b/arch/arm64/kernel/ssbd.c
617     @@ -0,0 +1,110 @@
618     +// SPDX-License-Identifier: GPL-2.0
619     +/*
620     + * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
621     + */
622     +
623     +#include <linux/errno.h>
624     +#include <linux/sched.h>
625     +#include <linux/thread_info.h>
626     +
627     +#include <asm/cpufeature.h>
628     +
629     +/*
630     + * prctl interface for SSBD
631     + * FIXME: Drop the below ifdefery once merged in 4.18.
632     + */
633     +#ifdef PR_SPEC_STORE_BYPASS
634     +static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
635     +{
636     + int state = arm64_get_ssbd_state();
637     +
638     + /* Unsupported */
639     + if (state == ARM64_SSBD_UNKNOWN)
640     + return -EINVAL;
641     +
642     + /* Treat the unaffected/mitigated state separately */
643     + if (state == ARM64_SSBD_MITIGATED) {
644     + switch (ctrl) {
645     + case PR_SPEC_ENABLE:
646     + return -EPERM;
647     + case PR_SPEC_DISABLE:
648     + case PR_SPEC_FORCE_DISABLE:
649     + return 0;
650     + }
651     + }
652     +
653     + /*
654     + * Things are a bit backward here: the arm64 internal API
655     + * *enables the mitigation* when the userspace API *disables
656     + * speculation*. So much fun.
657     + */
658     + switch (ctrl) {
659     + case PR_SPEC_ENABLE:
660     + /* If speculation is force disabled, enable is not allowed */
661     + if (state == ARM64_SSBD_FORCE_ENABLE ||
662     + task_spec_ssb_force_disable(task))
663     + return -EPERM;
664     + task_clear_spec_ssb_disable(task);
665     + clear_tsk_thread_flag(task, TIF_SSBD);
666     + break;
667     + case PR_SPEC_DISABLE:
668     + if (state == ARM64_SSBD_FORCE_DISABLE)
669     + return -EPERM;
670     + task_set_spec_ssb_disable(task);
671     + set_tsk_thread_flag(task, TIF_SSBD);
672     + break;
673     + case PR_SPEC_FORCE_DISABLE:
674     + if (state == ARM64_SSBD_FORCE_DISABLE)
675     + return -EPERM;
676     + task_set_spec_ssb_disable(task);
677     + task_set_spec_ssb_force_disable(task);
678     + set_tsk_thread_flag(task, TIF_SSBD);
679     + break;
680     + default:
681     + return -ERANGE;
682     + }
683     +
684     + return 0;
685     +}
686     +
687     +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
688     + unsigned long ctrl)
689     +{
690     + switch (which) {
691     + case PR_SPEC_STORE_BYPASS:
692     + return ssbd_prctl_set(task, ctrl);
693     + default:
694     + return -ENODEV;
695     + }
696     +}
697     +
698     +static int ssbd_prctl_get(struct task_struct *task)
699     +{
700     + switch (arm64_get_ssbd_state()) {
701     + case ARM64_SSBD_UNKNOWN:
702     + return -EINVAL;
703     + case ARM64_SSBD_FORCE_ENABLE:
704     + return PR_SPEC_DISABLE;
705     + case ARM64_SSBD_KERNEL:
706     + if (task_spec_ssb_force_disable(task))
707     + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
708     + if (task_spec_ssb_disable(task))
709     + return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
710     + return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
711     + case ARM64_SSBD_FORCE_DISABLE:
712     + return PR_SPEC_ENABLE;
713     + default:
714     + return PR_SPEC_NOT_AFFECTED;
715     + }
716     +}
717     +
718     +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
719     +{
720     + switch (which) {
721     + case PR_SPEC_STORE_BYPASS:
722     + return ssbd_prctl_get(task);
723     + default:
724     + return -ENODEV;
725     + }
726     +}
727     +#endif /* PR_SPEC_STORE_BYPASS */
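
The matching query from userspace (a sketch; on success the call returns a bitmask of PR_SPEC_* flags, on failure -1 with errno set):

        #include <stdio.h>
        #include <sys/prctl.h>
        #include <linux/prctl.h>

        int main(void)
        {
                int r = prctl(PR_GET_SPECULATION_CTRL,
                              PR_SPEC_STORE_BYPASS, 0, 0, 0);
                if (r < 0)
                        perror("PR_GET_SPECULATION_CTRL");
                else
                        printf("ssb policy: %#x (PR_SPEC_* bits)\n", r);
                return 0;
        }
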
728     diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
729     index a307b9e13392..70c283368b64 100644
730     --- a/arch/arm64/kernel/suspend.c
731     +++ b/arch/arm64/kernel/suspend.c
732     @@ -62,6 +62,14 @@ void notrace __cpu_suspend_exit(void)
733     */
734     if (hw_breakpoint_restore)
735     hw_breakpoint_restore(cpu);
736     +
737     + /*
738     + * On resume, firmware implementing dynamic mitigation will
739     + * have turned the mitigation on. If the user has forcefully
740     + * disabled it, make sure their wishes are obeyed.
741     + */
742     + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
743     + arm64_set_ssbd_mitigation(false);
744     }
745    
746     /*
747     diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
748     index bffece27b5c1..05d836979032 100644
749     --- a/arch/arm64/kvm/hyp/hyp-entry.S
750     +++ b/arch/arm64/kvm/hyp/hyp-entry.S
751     @@ -106,8 +106,44 @@ el1_hvc_guest:
752     */
753     ldr x1, [sp] // Guest's x0
754     eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
755     + cbz w1, wa_epilogue
756     +
757     + /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
758     + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
759     + ARM_SMCCC_ARCH_WORKAROUND_2)
760     cbnz w1, el1_trap
761     - mov x0, x1
762     +
763     +#ifdef CONFIG_ARM64_SSBD
764     +alternative_cb arm64_enable_wa2_handling
765     + b wa2_end
766     +alternative_cb_end
767     + get_vcpu_ptr x2, x0
768     + ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
769     +
770     + // Sanitize the argument and update the guest flags
771     + ldr x1, [sp, #8] // Guest's x1
772     + clz w1, w1 // Murphy's device:
773     + lsr w1, w1, #5 // w1 = !!w1 without using
774     + eor w1, w1, #1 // the flags...
775     + bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
776     + str x0, [x2, #VCPU_WORKAROUND_FLAGS]
777     +
778     + /* Check that we actually need to perform the call */
779     + hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
780     + cbz x0, wa2_end
781     +
782     + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
783     + smc #0
784     +
785     + /* Don't leak data from the SMC call */
786     + mov x3, xzr
787     +wa2_end:
788     + mov x2, xzr
789     + mov x1, xzr
790     +#endif
791     +
792     +wa_epilogue:
793     + mov x0, xzr
794     add sp, sp, #16
795     eret
796    
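
The el1_hvc_guest hunk implements the guest-facing half of ARCH_WORKAROUND_2: the clz/lsr/eor sequence computes w1 = !!w1 (32 leading zeros only for zero, shifted down and inverted), so an arbitrary guest argument collapses to a single flag bit, and firmware is called only when this CPU actually requires the callback. In rough C (illustration only):

        void handle_guest_wa2(struct kvm_vcpu *vcpu, u64 guest_x1)
        {
                if (guest_x1)   /* any non-zero argument means "enable" */
                        vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
                else
                        vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;

                if (__hyp_this_cpu_read(arm64_ssbd_callback_required))
                        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2,
                                          guest_x1, NULL);
        }
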
797     diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
798     index d9645236e474..c50cedc447f1 100644
799     --- a/arch/arm64/kvm/hyp/switch.c
800     +++ b/arch/arm64/kvm/hyp/switch.c
801     @@ -15,6 +15,7 @@
802     * along with this program. If not, see <http://www.gnu.org/licenses/>.
803     */
804    
805     +#include <linux/arm-smccc.h>
806     #include <linux/types.h>
807     #include <linux/jump_label.h>
808     #include <uapi/linux/psci.h>
809     @@ -389,6 +390,39 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
810     return false;
811     }
812    
813     +static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
814     +{
815     + if (!cpus_have_const_cap(ARM64_SSBD))
816     + return false;
817     +
818     + return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
819     +}
820     +
821     +static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
822     +{
823     +#ifdef CONFIG_ARM64_SSBD
824     + /*
825     + * The host runs with the workaround always present. If the
826     + * guest wants it disabled, so be it...
827     + */
828     + if (__needs_ssbd_off(vcpu) &&
829     + __hyp_this_cpu_read(arm64_ssbd_callback_required))
830     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
831     +#endif
832     +}
833     +
834     +static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
835     +{
836     +#ifdef CONFIG_ARM64_SSBD
837     + /*
838     + * If the guest has disabled the workaround, bring it back on.
839     + */
840     + if (__needs_ssbd_off(vcpu) &&
841     + __hyp_this_cpu_read(arm64_ssbd_callback_required))
842     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
843     +#endif
844     +}
845     +
846     /* Switch to the guest for VHE systems running in EL2 */
847     int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
848     {
849     @@ -409,6 +443,8 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
850     sysreg_restore_guest_state_vhe(guest_ctxt);
851     __debug_switch_to_guest(vcpu);
852    
853     + __set_guest_arch_workaround_state(vcpu);
854     +
855     do {
856     /* Jump in the fire! */
857     exit_code = __guest_enter(vcpu, host_ctxt);
858     @@ -416,6 +452,8 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
859     /* And we're baaack! */
860     } while (fixup_guest_exit(vcpu, &exit_code));
861    
862     + __set_host_arch_workaround_state(vcpu);
863     +
864     fp_enabled = fpsimd_enabled_vhe();
865    
866     sysreg_save_guest_state_vhe(guest_ctxt);
867     @@ -465,6 +503,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
868     __sysreg_restore_state_nvhe(guest_ctxt);
869     __debug_switch_to_guest(vcpu);
870    
871     + __set_guest_arch_workaround_state(vcpu);
872     +
873     do {
874     /* Jump in the fire! */
875     exit_code = __guest_enter(vcpu, host_ctxt);
876     @@ -472,6 +512,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
877     /* And we're baaack! */
878     } while (fixup_guest_exit(vcpu, &exit_code));
879    
880     + __set_host_arch_workaround_state(vcpu);
881     +
882     fp_enabled = __fpsimd_enabled_nvhe();
883    
884     __sysreg_save_state_nvhe(guest_ctxt);
885     diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
886     index 3256b9228e75..a74311beda35 100644
887     --- a/arch/arm64/kvm/reset.c
888     +++ b/arch/arm64/kvm/reset.c
889     @@ -122,6 +122,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
890     /* Reset PMU */
891     kvm_pmu_vcpu_reset(vcpu);
892    
893     + /* Default workaround setup is enabled (if supported) */
894     + if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
895     + vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
896     +
897     /* Reset timer */
898     return kvm_timer_vcpu_reset(vcpu);
899     }
900     diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
901     index 219faaec51df..990770f9e76b 100644
902     --- a/arch/x86/include/asm/asm.h
903     +++ b/arch/x86/include/asm/asm.h
904     @@ -46,6 +46,65 @@
905     #define _ASM_SI __ASM_REG(si)
906     #define _ASM_DI __ASM_REG(di)
907    
908     +#ifndef __x86_64__
909     +/* 32 bit */
910     +
911     +#define _ASM_ARG1 _ASM_AX
912     +#define _ASM_ARG2 _ASM_DX
913     +#define _ASM_ARG3 _ASM_CX
914     +
915     +#define _ASM_ARG1L eax
916     +#define _ASM_ARG2L edx
917     +#define _ASM_ARG3L ecx
918     +
919     +#define _ASM_ARG1W ax
920     +#define _ASM_ARG2W dx
921     +#define _ASM_ARG3W cx
922     +
923     +#define _ASM_ARG1B al
924     +#define _ASM_ARG2B dl
925     +#define _ASM_ARG3B cl
926     +
927     +#else
928     +/* 64 bit */
929     +
930     +#define _ASM_ARG1 _ASM_DI
931     +#define _ASM_ARG2 _ASM_SI
932     +#define _ASM_ARG3 _ASM_DX
933     +#define _ASM_ARG4 _ASM_CX
934     +#define _ASM_ARG5 r8
935     +#define _ASM_ARG6 r9
936     +
937     +#define _ASM_ARG1Q rdi
938     +#define _ASM_ARG2Q rsi
939     +#define _ASM_ARG3Q rdx
940     +#define _ASM_ARG4Q rcx
941     +#define _ASM_ARG5Q r8
942     +#define _ASM_ARG6Q r9
943     +
944     +#define _ASM_ARG1L edi
945     +#define _ASM_ARG2L esi
946     +#define _ASM_ARG3L edx
947     +#define _ASM_ARG4L ecx
948     +#define _ASM_ARG5L r8d
949     +#define _ASM_ARG6L r9d
950     +
951     +#define _ASM_ARG1W di
952     +#define _ASM_ARG2W si
953     +#define _ASM_ARG3W dx
954     +#define _ASM_ARG4W cx
955     +#define _ASM_ARG5W r8w
956     +#define _ASM_ARG6W r9w
957     +
958     +#define _ASM_ARG1B dil
959     +#define _ASM_ARG2B sil
960     +#define _ASM_ARG3B dl
961     +#define _ASM_ARG4B cl
962     +#define _ASM_ARG5B r8b
963     +#define _ASM_ARG6B r9b
964     +
965     +#endif
966     +
967     /*
968     * Macros to generate condition code outputs from inline assembly,
969     * The output operand must be type "bool".
970     diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
971     index 89f08955fff7..c4fc17220df9 100644
972     --- a/arch/x86/include/asm/irqflags.h
973     +++ b/arch/x86/include/asm/irqflags.h
974     @@ -13,7 +13,7 @@
975     * Interrupt control:
976     */
977    
978     -static inline unsigned long native_save_fl(void)
979     +extern inline unsigned long native_save_fl(void)
980     {
981     unsigned long flags;
982    
983     diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
984     index 02d6f5cf4e70..8824d01c0c35 100644
985     --- a/arch/x86/kernel/Makefile
986     +++ b/arch/x86/kernel/Makefile
987     @@ -61,6 +61,7 @@ obj-y += alternative.o i8253.o hw_breakpoint.o
988     obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
989     obj-y += pci-iommu_table.o
990     obj-y += resource.o
991     +obj-y += irqflags.o
992    
993     obj-y += process.o
994     obj-y += fpu/
995     diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
996     new file mode 100644
997     index 000000000000..ddeeaac8adda
998     --- /dev/null
999     +++ b/arch/x86/kernel/irqflags.S
1000     @@ -0,0 +1,26 @@
1001     +/* SPDX-License-Identifier: GPL-2.0 */
1002     +
1003     +#include <asm/asm.h>
1004     +#include <asm/export.h>
1005     +#include <linux/linkage.h>
1006     +
1007     +/*
1008     + * unsigned long native_save_fl(void)
1009     + */
1010     +ENTRY(native_save_fl)
1011     + pushf
1012     + pop %_ASM_AX
1013     + ret
1014     +ENDPROC(native_save_fl)
1015     +EXPORT_SYMBOL(native_save_fl)
1016     +
1017     +/*
1018     + * void native_restore_fl(unsigned long flags)
1019     + * %eax/%rdi: flags
1020     + */
1021     +ENTRY(native_restore_fl)
1022     + push %_ASM_ARG1
1023     + popf
1024     + ret
1025     +ENDPROC(native_restore_fl)
1026     +EXPORT_SYMBOL(native_restore_fl)
1027     diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
1028     index 92fd433c50b9..1bbec387d289 100644
1029     --- a/arch/x86/kvm/Kconfig
1030     +++ b/arch/x86/kvm/Kconfig
1031     @@ -85,7 +85,7 @@ config KVM_AMD_SEV
1032     def_bool y
1033     bool "AMD Secure Encrypted Virtualization (SEV) support"
1034     depends on KVM_AMD && X86_64
1035     - depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP
1036     + depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
1037     ---help---
1038     Provides support for launching Encrypted VMs on AMD processors.
1039    
1040     diff --git a/block/blk-core.c b/block/blk-core.c
1041     index b559b9d4f1a2..47ab2d9d02d9 100644
1042     --- a/block/blk-core.c
1043     +++ b/block/blk-core.c
1044     @@ -2392,7 +2392,9 @@ blk_qc_t generic_make_request(struct bio *bio)
1045    
1046     if (bio->bi_opf & REQ_NOWAIT)
1047     flags = BLK_MQ_REQ_NOWAIT;
1048     - if (blk_queue_enter(q, flags) < 0) {
1049     + if (bio_flagged(bio, BIO_QUEUE_ENTERED))
1050     + blk_queue_enter_live(q);
1051     + else if (blk_queue_enter(q, flags) < 0) {
1052     if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
1053     bio_wouldblock_error(bio);
1054     else
1055     diff --git a/block/blk-merge.c b/block/blk-merge.c
1056     index 782940c65d8a..481dc02668f9 100644
1057     --- a/block/blk-merge.c
1058     +++ b/block/blk-merge.c
1059     @@ -210,6 +210,16 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
1060     /* there isn't chance to merge the splitted bio */
1061     split->bi_opf |= REQ_NOMERGE;
1062    
1063     + /*
1064     + * Since we're recursing into make_request here, ensure
1065     + * that we mark this bio as already having entered the queue.
1066     + * If not, and the queue is going away, we can get stuck
1067     + * forever on waiting for the queue reference to drop. But
1068     + * that will never happen, as we're already holding a
1069     + * reference to it.
1070     + */
1071     + bio_set_flag(*bio, BIO_QUEUE_ENTERED);
1072     +
1073     bio_chain(split, *bio);
1074     trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
1075     generic_make_request(*bio);
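
Together with the blk-core.c hunk above, this closes a shutdown hang: blk_queue_split() resubmits the remainder bio through generic_make_request(), and if that recursion had to wait in blk_queue_enter() on a dying queue it would wait forever, because the reference it waits on is the one its own caller already holds. The two halves of the pattern:

        /* submitter (blk_queue_split): caller already holds a queue ref */
        bio_set_flag(*bio, BIO_QUEUE_ENTERED);
        generic_make_request(*bio);

        /* generic_make_request(): take a live reference instead of waiting */
        if (bio_flagged(bio, BIO_QUEUE_ENTERED))
                blk_queue_enter_live(q);
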
1076     diff --git a/crypto/af_alg.c b/crypto/af_alg.c
1077     index 7846c0c20cfe..b52a14fc3bae 100644
1078     --- a/crypto/af_alg.c
1079     +++ b/crypto/af_alg.c
1080     @@ -1156,8 +1156,10 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
1081    
1082     /* make one iovec available as scatterlist */
1083     err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
1084     - if (err < 0)
1085     + if (err < 0) {
1086     + rsgl->sg_num_bytes = 0;
1087     return err;
1088     + }
1089    
1090     /* chain the new scatterlist with previous one */
1091     if (areq->last_rsgl)
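
The error path must leave sg_num_bytes in a sane state because the rsgl has already been linked into areq->rsgl_list by this point, and the common teardown walks that list and uses the field for accounting; a stack-garbage value corrupts the counters. Paraphrasing the teardown (af_alg_free_areq_sgls()):

        /* list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
         *      atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
         *      af_alg_free_sg(&rsgl->sgl);
         *      ...
         * }
         * hence sg_num_bytes must be valid (zero) even when
         * af_alg_make_sg() fails.
         */
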
1092     diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
1093     index a8d2eb0ceb8d..2c288d1f42bb 100644
1094     --- a/drivers/atm/zatm.c
1095     +++ b/drivers/atm/zatm.c
1096     @@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
1097     return -EFAULT;
1098     if (pool < 0 || pool > ZATM_LAST_POOL)
1099     return -EINVAL;
1100     + pool = array_index_nospec(pool,
1101     + ZATM_LAST_POOL + 1);
1102     if (copy_from_user(&info,
1103     &((struct zatm_pool_req __user *) arg)->info,
1104     sizeof(info))) return -EFAULT;
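
array_index_nospec() is the generic Spectre-v1 helper from <linux/nospec.h>: after a bounds check, it clamps the (possibly attacker-controlled) index so that even a mispredicted branch cannot index out of bounds. The canonical shape, with placeholder names (the cxgb3 hunk below applies the same pattern):

        if (idx < 0 || idx > LAST)
                return -EINVAL;
        idx = array_index_nospec(idx, LAST + 1);  /* second arg is size */
        val = table[idx];
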
1105     diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
1106     index 69716a7ea993..95a516ac6c39 100644
1107     --- a/drivers/infiniband/hw/mlx5/main.c
1108     +++ b/drivers/infiniband/hw/mlx5/main.c
1109     @@ -5736,7 +5736,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1110     dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
1111     MLX5_CAP_GEN(mdev, num_vhca_ports));
1112    
1113     - if (MLX5_VPORT_MANAGER(mdev) &&
1114     + if (MLX5_ESWITCH_MANAGER(mdev) &&
1115     mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
1116     dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
1117    
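
This hunk, and the mlx5 core/en ones below, replace vport_group_manager / eswitch_flow_table capability checks with MLX5_ESWITCH_MANAGER(): only a function that actually manages the e-switch should attempt representor and FDB setup, and on smart-NIC or multi-host configurations that is not necessarily the same function that manages vports. The macro, as introduced in include/linux/mlx5/eswitch.h (assumed definition, from the mainline series):

        #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
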
1118     diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
1119     index 567ee54504bc..5e5022fa1d04 100644
1120     --- a/drivers/net/ethernet/atheros/alx/main.c
1121     +++ b/drivers/net/ethernet/atheros/alx/main.c
1122     @@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
1123     struct pci_dev *pdev = to_pci_dev(dev);
1124     struct alx_priv *alx = pci_get_drvdata(pdev);
1125     struct alx_hw *hw = &alx->hw;
1126     + int err;
1127    
1128     alx_reset_phy(hw);
1129    
1130     if (!netif_running(alx->dev))
1131     return 0;
1132     netif_device_attach(alx->dev);
1133     - return __alx_open(alx, true);
1134     +
1135     + rtnl_lock();
1136     + err = __alx_open(alx, true);
1137     + rtnl_unlock();
1138     +
1139     + return err;
1140     }
1141    
1142     static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
1143     diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1144     index b4c9268100bb..068f991395dc 100644
1145     --- a/drivers/net/ethernet/cadence/macb_main.c
1146     +++ b/drivers/net/ethernet/cadence/macb_main.c
1147     @@ -3732,6 +3732,8 @@ static int at91ether_init(struct platform_device *pdev)
1148     int err;
1149     u32 reg;
1150    
1151     + bp->queues[0].bp = bp;
1152     +
1153     dev->netdev_ops = &at91ether_netdev_ops;
1154     dev->ethtool_ops = &macb_ethtool_ops;
1155    
1156     diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
1157     index 2220c771092b..678835136bf8 100644
1158     --- a/drivers/net/ethernet/cadence/macb_ptp.c
1159     +++ b/drivers/net/ethernet/cadence/macb_ptp.c
1160     @@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1161    
1162     if (delta > TSU_NSEC_MAX_VAL) {
1163     gem_tsu_get_time(&bp->ptp_clock_info, &now);
1164     - if (sign)
1165     - now = timespec64_sub(now, then);
1166     - else
1167     - now = timespec64_add(now, then);
1168     + now = timespec64_add(now, then);
1169    
1170     gem_tsu_set_time(&bp->ptp_clock_info,
1171     (const struct timespec64 *)&now);
1172     diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1173     index 2edfdbdaae48..b25fd543b6f0 100644
1174     --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1175     +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1176     @@ -51,6 +51,7 @@
1177     #include <linux/sched.h>
1178     #include <linux/slab.h>
1179     #include <linux/uaccess.h>
1180     +#include <linux/nospec.h>
1181    
1182     #include "common.h"
1183     #include "cxgb3_ioctl.h"
1184     @@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1185    
1186     if (t.qset_idx >= nqsets)
1187     return -EINVAL;
1188     + t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
1189    
1190     q = &adapter->params.sge.qset[q1 + t.qset_idx];
1191     t.rspq_size = q->rspq_size;
1192     diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
1193     index 8a8b12b720ef..454e57ef047a 100644
1194     --- a/drivers/net/ethernet/cisco/enic/enic_main.c
1195     +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
1196     @@ -1920,7 +1920,7 @@ static int enic_open(struct net_device *netdev)
1197     {
1198     struct enic *enic = netdev_priv(netdev);
1199     unsigned int i;
1200     - int err;
1201     + int err, ret;
1202    
1203     err = enic_request_intr(enic);
1204     if (err) {
1205     @@ -1977,10 +1977,9 @@ static int enic_open(struct net_device *netdev)
1206    
1207     err_out_free_rq:
1208     for (i = 0; i < enic->rq_count; i++) {
1209     - err = vnic_rq_disable(&enic->rq[i]);
1210     - if (err)
1211     - return err;
1212     - vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1213     + ret = vnic_rq_disable(&enic->rq[i]);
1214     + if (!ret)
1215     + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1216     }
1217     enic_dev_notify_unset(enic);
1218     err_out_free_intr:
1219     diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
1220     index e2e5cdc7119c..4c0f7eda1166 100644
1221     --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
1222     +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
1223     @@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
1224     {
1225     struct hinic_rq *rq = rxq->rq;
1226    
1227     + irq_set_affinity_hint(rq->irq, NULL);
1228     free_irq(rq->irq, rxq);
1229     rx_del_napi(rxq);
1230     }
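
Clearing the affinity hint before free_irq() matters because the hint is a pointer into driver memory exported via /proc/irq/<n>/affinity_hint; freeing the interrupt without resetting it leaves a stale pointer behind. Teardown therefore mirrors setup:

        irq_set_affinity_hint(rq->irq, NULL);   /* drop the exported hint */
        free_irq(rq->irq, rxq);                 /* then release the line  */
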
1231     diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
1232     index f174c72480ab..8d3522c94c3f 100644
1233     --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
1234     +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
1235     @@ -2199,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
1236     return true;
1237     }
1238    
1239     -#define I40E_XDP_PASS 0
1240     -#define I40E_XDP_CONSUMED 1
1241     -#define I40E_XDP_TX 2
1242     +#define I40E_XDP_PASS 0
1243     +#define I40E_XDP_CONSUMED BIT(0)
1244     +#define I40E_XDP_TX BIT(1)
1245     +#define I40E_XDP_REDIR BIT(2)
1246    
1247     static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
1248     struct i40e_ring *xdp_ring);
1249     @@ -2235,7 +2236,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
1250     break;
1251     case XDP_REDIRECT:
1252     err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
1253     - result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
1254     + result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
1255     break;
1256     default:
1257     bpf_warn_invalid_xdp_action(act);
1258     @@ -2298,7 +2299,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1259     unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1260     struct sk_buff *skb = rx_ring->skb;
1261     u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1262     - bool failure = false, xdp_xmit = false;
1263     + unsigned int xdp_xmit = 0;
1264     + bool failure = false;
1265     struct xdp_buff xdp;
1266    
1267     xdp.rxq = &rx_ring->xdp_rxq;
1268     @@ -2359,8 +2361,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1269     }
1270    
1271     if (IS_ERR(skb)) {
1272     - if (PTR_ERR(skb) == -I40E_XDP_TX) {
1273     - xdp_xmit = true;
1274     + unsigned int xdp_res = -PTR_ERR(skb);
1275     +
1276     + if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
1277     + xdp_xmit |= xdp_res;
1278     i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
1279     } else {
1280     rx_buffer->pagecnt_bias++;
1281     @@ -2414,12 +2418,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1282     total_rx_packets++;
1283     }
1284    
1285     - if (xdp_xmit) {
1286     + if (xdp_xmit & I40E_XDP_REDIR)
1287     + xdp_do_flush_map();
1288     +
1289     + if (xdp_xmit & I40E_XDP_TX) {
1290     struct i40e_ring *xdp_ring =
1291     rx_ring->vsi->xdp_rings[rx_ring->queue_index];
1292    
1293     i40e_xdp_ring_update_tail(xdp_ring);
1294     - xdp_do_flush_map();
1295     }
1296    
1297     rx_ring->skb = skb;
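
The point of splitting the old boolean into TX/REDIR bits: with a single flag, a poll that only redirected frames would also bump the XDP TX ring tail, and one that only did XDP_TX would also run xdp_do_flush_map(), which should only follow an actual xdp_do_redirect(). The end-of-poll sequence now keeps the two independent (ixgbe below gets the mirror-image change):

        if (xdp_xmit & I40E_XDP_REDIR)
                xdp_do_flush_map();                     /* only after a redirect */
        if (xdp_xmit & I40E_XDP_TX)
                i40e_xdp_ring_update_tail(xdp_ring);    /* one tail bump per poll */
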
1298     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1299     index 2ecd55856c50..a820a6cd831a 100644
1300     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1301     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1302     @@ -2257,9 +2257,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
1303     return skb;
1304     }
1305    
1306     -#define IXGBE_XDP_PASS 0
1307     -#define IXGBE_XDP_CONSUMED 1
1308     -#define IXGBE_XDP_TX 2
1309     +#define IXGBE_XDP_PASS 0
1310     +#define IXGBE_XDP_CONSUMED BIT(0)
1311     +#define IXGBE_XDP_TX BIT(1)
1312     +#define IXGBE_XDP_REDIR BIT(2)
1313    
1314     static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
1315     struct xdp_buff *xdp);
1316     @@ -2288,7 +2289,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
1317     case XDP_REDIRECT:
1318     err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
1319     if (!err)
1320     - result = IXGBE_XDP_TX;
1321     + result = IXGBE_XDP_REDIR;
1322     else
1323     result = IXGBE_XDP_CONSUMED;
1324     break;
1325     @@ -2348,7 +2349,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1326     unsigned int mss = 0;
1327     #endif /* IXGBE_FCOE */
1328     u16 cleaned_count = ixgbe_desc_unused(rx_ring);
1329     - bool xdp_xmit = false;
1330     + unsigned int xdp_xmit = 0;
1331     struct xdp_buff xdp;
1332    
1333     xdp.rxq = &rx_ring->xdp_rxq;
1334     @@ -2391,8 +2392,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1335     }
1336    
1337     if (IS_ERR(skb)) {
1338     - if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
1339     - xdp_xmit = true;
1340     + unsigned int xdp_res = -PTR_ERR(skb);
1341     +
1342     + if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
1343     + xdp_xmit |= xdp_res;
1344     ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
1345     } else {
1346     rx_buffer->pagecnt_bias++;
1347     @@ -2464,7 +2467,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1348     total_rx_packets++;
1349     }
1350    
1351     - if (xdp_xmit) {
1352     + if (xdp_xmit & IXGBE_XDP_REDIR)
1353     + xdp_do_flush_map();
1354     +
1355     + if (xdp_xmit & IXGBE_XDP_TX) {
1356     struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
1357    
1358     /* Force memory writes to complete before letting h/w
1359     @@ -2472,8 +2478,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1360     */
1361     wmb();
1362     writel(ring->next_to_use, ring->tail);
1363     -
1364     - xdp_do_flush_map();
1365     }
1366    
1367     u64_stats_update_begin(&rx_ring->syncp);
1368     diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
1369     index 17a904cc6a5e..0ad2f3f7da85 100644
1370     --- a/drivers/net/ethernet/marvell/mvneta.c
1371     +++ b/drivers/net/ethernet/marvell/mvneta.c
1372     @@ -1932,7 +1932,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
1373     rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1374     index = rx_desc - rxq->descs;
1375     data = rxq->buf_virt_addr[index];
1376     - phys_addr = rx_desc->buf_phys_addr;
1377     + phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
1378    
1379     if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1380     (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1381     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1382     index 21cd1703a862..33ab34dc6d96 100644
1383     --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1384     +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1385     @@ -803,6 +803,7 @@ static void cmd_work_handler(struct work_struct *work)
1386     unsigned long flags;
1387     bool poll_cmd = ent->polling;
1388     int alloc_ret;
1389     + int cmd_mode;
1390    
1391     sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
1392     down(sem);
1393     @@ -849,6 +850,7 @@ static void cmd_work_handler(struct work_struct *work)
1394     set_signature(ent, !cmd->checksum_disabled);
1395     dump_command(dev, ent, 1);
1396     ent->ts1 = ktime_get_ns();
1397     + cmd_mode = cmd->mode;
1398    
1399     if (ent->callback)
1400     schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
1401     @@ -873,7 +875,7 @@ static void cmd_work_handler(struct work_struct *work)
1402     iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
1403     mmiowb();
1404     /* if not in polling don't use ent after this point */
1405     - if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
1406     + if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
1407     poll_timeout(ent);
1408     /* make sure we read the descriptor after ownership is SW */
1409     rmb();
1410     @@ -1274,7 +1276,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
1411     {
1412     struct mlx5_core_dev *dev = filp->private_data;
1413     struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1414     - char outlen_str[8];
1415     + char outlen_str[8] = {0};
1416     int outlen;
1417     void *ptr;
1418     int err;
1419     @@ -1289,8 +1291,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
1420     if (copy_from_user(outlen_str, buf, count))
1421     return -EFAULT;
1422    
1423     - outlen_str[7] = 0;
1424     -
1425     err = sscanf(outlen_str, "%d", &outlen);
1426     if (err < 0)
1427     return err;
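
Two independent fixes in this file: cmd_work_handler() now samples cmd->mode before ringing the doorbell, since once the command is posted the entry may complete (and be freed) at any time in event mode — see the in-line comment — and a concurrent switch between event and polling mode must not change which completion path services this command; and outlen_str is zero-initialized so that a short copy_from_user() still hands sscanf() a NUL-terminated string instead of trailing stack garbage (the old code only forced a terminator at index 7):

        char outlen_str[8] = {0};       /* terminated for any short copy */
        /* ... */
        if (copy_from_user(outlen_str, buf, count))
                return -EFAULT;
        err = sscanf(outlen_str, "%d", &outlen);
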
1428     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1429     index b29c1d93f058..d3a1a2281e77 100644
1430     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1431     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1432     @@ -2612,7 +2612,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
1433     mlx5e_activate_channels(&priv->channels);
1434     netif_tx_start_all_queues(priv->netdev);
1435    
1436     - if (MLX5_VPORT_MANAGER(priv->mdev))
1437     + if (MLX5_ESWITCH_MANAGER(priv->mdev))
1438     mlx5e_add_sqs_fwd_rules(priv);
1439    
1440     mlx5e_wait_channels_min_rx_wqes(&priv->channels);
1441     @@ -2623,7 +2623,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
1442     {
1443     mlx5e_redirect_rqts_to_drop(priv);
1444    
1445     - if (MLX5_VPORT_MANAGER(priv->mdev))
1446     + if (MLX5_ESWITCH_MANAGER(priv->mdev))
1447     mlx5e_remove_sqs_fwd_rules(priv);
1448    
1449     /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
1450     @@ -4315,7 +4315,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
1451     mlx5e_set_netdev_dev_addr(netdev);
1452    
1453     #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
1454     - if (MLX5_VPORT_MANAGER(mdev))
1455     + if (MLX5_ESWITCH_MANAGER(mdev))
1456     netdev->switchdev_ops = &mlx5e_switchdev_ops;
1457     #endif
1458    
1459     @@ -4465,7 +4465,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
1460    
1461     mlx5e_enable_async_events(priv);
1462    
1463     - if (MLX5_VPORT_MANAGER(priv->mdev))
1464     + if (MLX5_ESWITCH_MANAGER(priv->mdev))
1465     mlx5e_register_vport_reps(priv);
1466    
1467     if (netdev->reg_state != NETREG_REGISTERED)
1468     @@ -4500,7 +4500,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
1469    
1470     queue_work(priv->wq, &priv->set_rx_mode_work);
1471    
1472     - if (MLX5_VPORT_MANAGER(priv->mdev))
1473     + if (MLX5_ESWITCH_MANAGER(priv->mdev))
1474     mlx5e_unregister_vport_reps(priv);
1475    
1476     mlx5e_disable_async_events(priv);
1477     @@ -4684,7 +4684,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
1478     return NULL;
1479    
1480     #ifdef CONFIG_MLX5_ESWITCH
1481     - if (MLX5_VPORT_MANAGER(mdev)) {
1482     + if (MLX5_ESWITCH_MANAGER(mdev)) {
1483     rpriv = mlx5e_alloc_nic_rep_priv(mdev);
1484     if (!rpriv) {
1485     mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
1486     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1487     index 876c3e4c6193..286565862341 100644
1488     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1489     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1490     @@ -790,7 +790,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
1491     struct mlx5e_rep_priv *rpriv = priv->ppriv;
1492     struct mlx5_eswitch_rep *rep;
1493    
1494     - if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
1495     + if (!MLX5_ESWITCH_MANAGER(priv->mdev))
1496     return false;
1497    
1498     rep = rpriv->rep;
1499     @@ -804,8 +804,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
1500     static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
1501     {
1502     struct mlx5e_rep_priv *rpriv = priv->ppriv;
1503     - struct mlx5_eswitch_rep *rep = rpriv->rep;
1504     + struct mlx5_eswitch_rep *rep;
1505    
1506     + if (!MLX5_ESWITCH_MANAGER(priv->mdev))
1507     + return false;
1508     +
1509     + rep = rpriv->rep;
1510     if (rep && rep->vport != FDB_UPLINK_VPORT)
1511     return true;
1512    
1513     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1514     index 1352d13eedb3..c3a18ddf5dba 100644
1515     --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1516     +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1517     @@ -1604,7 +1604,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
1518     if (!ESW_ALLOWED(esw))
1519     return 0;
1520    
1521     - if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
1522     + if (!MLX5_ESWITCH_MANAGER(esw->dev) ||
1523     !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1524     esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1525     return -EOPNOTSUPP;
1526     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1527     index 35e256eb2f6e..2feb33dcad2f 100644
1528     --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1529     +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1530     @@ -983,8 +983,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
1531     if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1532     return -EOPNOTSUPP;
1533    
1534     - if (!MLX5_CAP_GEN(dev, vport_group_manager))
1535     - return -EOPNOTSUPP;
1536     + if (!MLX5_ESWITCH_MANAGER(dev))
1537     + return -EPERM;
1538    
1539     if (dev->priv.eswitch->mode == SRIOV_NONE)
1540     return -EOPNOTSUPP;
1541     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1542     index c39c1692e674..bd0ffc347bd7 100644
1543     --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1544     +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1545     @@ -32,6 +32,7 @@
1546    
1547     #include <linux/mutex.h>
1548     #include <linux/mlx5/driver.h>
1549     +#include <linux/mlx5/eswitch.h>
1550    
1551     #include "mlx5_core.h"
1552     #include "fs_core.h"
1553     @@ -2631,7 +2632,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
1554     goto err;
1555     }
1556    
1557     - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
1558     + if (MLX5_ESWITCH_MANAGER(dev)) {
1559     if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
1560     err = init_fdb_root_ns(steering);
1561     if (err)
1562     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
1563     index afd9f4fa22f4..41ad24f0de2c 100644
1564     --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
1565     +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
1566     @@ -32,6 +32,7 @@
1567    
1568     #include <linux/mlx5/driver.h>
1569     #include <linux/mlx5/cmd.h>
1570     +#include <linux/mlx5/eswitch.h>
1571     #include <linux/module.h>
1572     #include "mlx5_core.h"
1573     #include "../../mlxfw/mlxfw.h"
1574     @@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
1575     }
1576    
1577     if (MLX5_CAP_GEN(dev, vport_group_manager) &&
1578     - MLX5_CAP_GEN(dev, eswitch_flow_table)) {
1579     + MLX5_ESWITCH_MANAGER(dev)) {
1580     err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
1581     if (err)
1582     return err;
1583     }
1584    
1585     - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
1586     + if (MLX5_ESWITCH_MANAGER(dev)) {
1587     err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
1588     if (err)
1589     return err;
1590     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
1591     index 7cb67122e8b5..98359559c77e 100644
1592     --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
1593     +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
1594     @@ -33,6 +33,7 @@
1595     #include <linux/etherdevice.h>
1596     #include <linux/mlx5/driver.h>
1597     #include <linux/mlx5/mlx5_ifc.h>
1598     +#include <linux/mlx5/eswitch.h>
1599     #include "mlx5_core.h"
1600     #include "lib/mpfs.h"
1601    
1602     @@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
1603     int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
1604     struct mlx5_mpfs *mpfs;
1605    
1606     - if (!MLX5_VPORT_MANAGER(dev))
1607     + if (!MLX5_ESWITCH_MANAGER(dev))
1608     return 0;
1609    
1610     mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
1611     @@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
1612     {
1613     struct mlx5_mpfs *mpfs = dev->priv.mpfs;
1614    
1615     - if (!MLX5_VPORT_MANAGER(dev))
1616     + if (!MLX5_ESWITCH_MANAGER(dev))
1617     return;
1618    
1619     WARN_ON(!hlist_empty(mpfs->hash));
1620     @@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
1621     u32 index;
1622     int err;
1623    
1624     - if (!MLX5_VPORT_MANAGER(dev))
1625     + if (!MLX5_ESWITCH_MANAGER(dev))
1626     return 0;
1627    
1628     mutex_lock(&mpfs->lock);
1629     @@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
1630     int err = 0;
1631     u32 index;
1632    
1633     - if (!MLX5_VPORT_MANAGER(dev))
1634     + if (!MLX5_ESWITCH_MANAGER(dev))
1635     return 0;
1636    
1637     mutex_lock(&mpfs->lock);
1638     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
1639     index fa9d0760dd36..31a9cbd85689 100644
1640     --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
1641     +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
1642     @@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
1643     static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
1644     int inlen)
1645     {
1646     - u32 out[MLX5_ST_SZ_DW(qtct_reg)];
1647     + u32 out[MLX5_ST_SZ_DW(qetc_reg)];
1648    
1649     if (!MLX5_CAP_GEN(mdev, ets))
1650     return -EOPNOTSUPP;
1651     @@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
1652     static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
1653     int outlen)
1654     {
1655     - u32 in[MLX5_ST_SZ_DW(qtct_reg)];
1656     + u32 in[MLX5_ST_SZ_DW(qetc_reg)];
1657    
1658     if (!MLX5_CAP_GEN(mdev, ets))
1659     return -EOPNOTSUPP;
1660     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
1661     index 2a8b529ce6dd..a0674962f02c 100644
1662     --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
1663     +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
1664     @@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
1665     return -EBUSY;
1666     }
1667    
1668     + if (!MLX5_ESWITCH_MANAGER(dev))
1669     + goto enable_vfs_hca;
1670     +
1671     err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
1672     if (err) {
1673     mlx5_core_warn(dev,
1674     @@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
1675     return err;
1676     }
1677    
1678     +enable_vfs_hca:
1679     for (vf = 0; vf < num_vfs; vf++) {
1680     err = mlx5_core_enable_hca(dev, vf + 1);
1681     if (err) {
1682     @@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
1683     }
1684    
1685     out:
1686     - mlx5_eswitch_disable_sriov(dev->priv.eswitch);
1687     + if (MLX5_ESWITCH_MANAGER(dev))
1688     + mlx5_eswitch_disable_sriov(dev->priv.eswitch);
1689    
1690     if (mlx5_wait_for_vf_pages(dev))
1691     mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
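
The mlx5 hunks above all pull in the same direction: scattered tests of MLX5_VPORT_MANAGER() and the vport_group_manager/eswitch_flow_table capability bits become one MLX5_ESWITCH_MANAGER() predicate, and SR-IOV enable/disable now skips e-switch programming entirely on functions that do not manage an e-switch. A minimal sketch of the resulting control flow (sriov_enable_sketch is illustrative; the predicate and types are the mlx5 ones used above):

    #include <linux/mlx5/driver.h>
    #include <linux/mlx5/eswitch.h>

    static int sriov_enable_sketch(struct mlx5_core_dev *dev)
    {
            if (!MLX5_ESWITCH_MANAGER(dev))
                    goto enable_vfs_hca;    /* no e-switch on this function */

            /* legacy e-switch setup would run here */

    enable_vfs_hca:
            /* the VF HCAs are enabled in either case */
            return 0;
    }
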
1692     diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
1693     index 35fb31f682af..1a781281c57a 100644
1694     --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
1695     +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
1696     @@ -194,6 +194,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
1697     if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1698     return -EOPNOTSUPP;
1699    
1700     + if (tcf_block_shared(f->block))
1701     + return -EOPNOTSUPP;
1702     +
1703     switch (f->command) {
1704     case TC_BLOCK_BIND:
1705     return tcf_block_cb_register(f->block,
1706     diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
1707     index 91935405f586..84f7a5dbea9d 100644
1708     --- a/drivers/net/ethernet/netronome/nfp/flower/match.c
1709     +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
1710     @@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
1711     NFP_FLOWER_MASK_MPLS_Q;
1712    
1713     frame->mpls_lse = cpu_to_be32(t_mpls);
1714     + } else if (dissector_uses_key(flow->dissector,
1715     + FLOW_DISSECTOR_KEY_BASIC)) {
1716     + /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
1717     + * bit, which indicates an mpls ether type without any
1718     + * mpls fields.
1719     + */
1720     + struct flow_dissector_key_basic *key_basic;
1721     +
1722     + key_basic = skb_flow_dissector_target(flow->dissector,
1723     + FLOW_DISSECTOR_KEY_BASIC,
1724     + flow->key);
1725     + if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
1726     + key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
1727     + frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
1728     }
1729     }
1730    
1731     diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
1732     index 114d2ab02a38..4de30d0f9491 100644
1733     --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
1734     +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
1735     @@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
1736     case cpu_to_be16(ETH_P_ARP):
1737     return -EOPNOTSUPP;
1738    
1739     + case cpu_to_be16(ETH_P_MPLS_UC):
1740     + case cpu_to_be16(ETH_P_MPLS_MC):
1741     + if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
1742     + key_layer |= NFP_FLOWER_LAYER_MAC;
1743     + key_size += sizeof(struct nfp_flower_mac_mpls);
1744     + }
1745     + break;
1746     +
1747     /* Will be included in layer 2. */
1748     case cpu_to_be16(ETH_P_8021Q):
1749     break;
1750     @@ -593,6 +601,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
1751     if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1752     return -EOPNOTSUPP;
1753    
1754     + if (tcf_block_shared(f->block))
1755     + return -EOPNOTSUPP;
1756     +
1757     switch (f->command) {
1758     case TC_BLOCK_BIND:
1759     return tcf_block_cb_register(f->block,
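
Both nfp hunks add the same guard: a TC block shared between several qdiscs cannot be offloaded, because the hardware rules are bound to a single port. Sketched for a generic setup_tc_block handler (only tcf_block_shared() and the binder-type check are taken from the hunks; the function name is illustrative):

    #include <net/pkt_cls.h>

    static int sketch_setup_tc_block(struct net_device *netdev,
                                     struct tc_block_offload *f)
    {
            if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                    return -EOPNOTSUPP;

            /* A shared block feeds multiple qdiscs; a per-port offload
             * cannot represent that, so leave it to software. */
            if (tcf_block_shared(f->block))
                    return -EOPNOTSUPP;

            /* register/unregister the block callback here */
            return 0;
    }
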
1760     diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
1761     index 449777f21237..e82986df9b8e 100644
1762     --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
1763     +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
1764     @@ -700,9 +700,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
1765     p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
1766    
1767     memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
1768     - ARRAY_SIZE(p_local->local_chassis_id));
1769     + sizeof(p_local->local_chassis_id));
1770     memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
1771     - ARRAY_SIZE(p_local->local_port_id));
1772     + sizeof(p_local->local_port_id));
1773     }
1774    
1775     static void
1776     @@ -714,9 +714,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
1777     p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
1778    
1779     memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
1780     - ARRAY_SIZE(p_remote->peer_chassis_id));
1781     + sizeof(p_remote->peer_chassis_id));
1782     memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
1783     - ARRAY_SIZE(p_remote->peer_port_id));
1784     + sizeof(p_remote->peer_port_id));
1785     }
1786    
1787     static int
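
The qed_dcbx fix is the classic ARRAY_SIZE()/sizeof() mix-up: ARRAY_SIZE() yields an element count, so using it as a memcpy() length truncates the copy whenever the elements are wider than one byte. With the LLDP IDs held in u32 arrays, the old code copied only a quarter of each ID (field sizes below are illustrative):

    #include <linux/string.h>

    struct lldp_id_sketch {
            u32 chassis_id[4];
    };

    static void copy_id_sketch(u8 *dst, const struct lldp_id_sketch *s)
    {
            /* ARRAY_SIZE(s->chassis_id) is 4 elements, but the data is
             * 16 bytes; sizeof() gives the byte count memcpy() needs. */
            memcpy(dst, s->chassis_id, sizeof(s->chassis_id));
    }
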
1788     diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
1789     index d2ad5e92c74f..5644b24d85b0 100644
1790     --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
1791     +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
1792     @@ -1789,7 +1789,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
1793     DP_INFO(p_hwfn, "Failed to update driver state\n");
1794    
1795     rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
1796     - QED_OV_ESWITCH_VEB);
1797     + QED_OV_ESWITCH_NONE);
1798     if (rc)
1799     DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
1800     }
1801     diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
1802     index 7870ae2a6f7e..261f21d6b0b0 100644
1803     --- a/drivers/net/ethernet/qlogic/qed/qed_main.c
1804     +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
1805     @@ -780,6 +780,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
1806     /* We want a minimum of one slowpath and one fastpath vector per hwfn */
1807     cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
1808    
1809     + if (is_kdump_kernel()) {
1810     + DP_INFO(cdev,
1811     + "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
1812     + cdev->int_params.in.min_msix_cnt);
1813     + cdev->int_params.in.num_vectors =
1814     + cdev->int_params.in.min_msix_cnt;
1815     + }
1816     +
1817     rc = qed_set_int_mode(cdev, false);
1818     if (rc) {
1819     DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
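
Clamping the MSI-X request to the computed minimum matters because the kdump (crash-capture) kernel boots with very little memory and few CPUs; asking for a full per-CPU vector set there can fail and leave the device unusable for writing the dump. The decision is a one-liner around is_kdump_kernel():

    #include <linux/crash_dump.h>

    static u16 msix_request_sketch(u16 wanted, u16 minimum)
    {
            /* In the capture kernel, request only the floor needed
             * for one slowpath plus one fastpath vector per hwfn. */
            return is_kdump_kernel() ? minimum : wanted;
    }
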
1820     diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1821     index 5acb91b3564c..419c681ea2be 100644
1822     --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1823     +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1824     @@ -4400,6 +4400,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
1825     static int qed_sriov_enable(struct qed_dev *cdev, int num)
1826     {
1827     struct qed_iov_vf_init_params params;
1828     + struct qed_hwfn *hwfn;
1829     + struct qed_ptt *ptt;
1830     int i, j, rc;
1831    
1832     if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
1833     @@ -4412,8 +4414,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
1834    
1835     /* Initialize HW for VF access */
1836     for_each_hwfn(cdev, j) {
1837     - struct qed_hwfn *hwfn = &cdev->hwfns[j];
1838     - struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
1839     + hwfn = &cdev->hwfns[j];
1840     + ptt = qed_ptt_acquire(hwfn);
1841    
1842     /* Make sure not to use more than 16 queues per VF */
1843     params.num_queues = min_t(int,
1844     @@ -4449,6 +4451,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
1845     goto err;
1846     }
1847    
1848     + hwfn = QED_LEADING_HWFN(cdev);
1849     + ptt = qed_ptt_acquire(hwfn);
1850     + if (!ptt) {
1851     + DP_ERR(hwfn, "Failed to acquire ptt\n");
1852     + rc = -EBUSY;
1853     + goto err;
1854     + }
1855     +
1856     + rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
1857     + if (rc)
1858     + DP_INFO(cdev, "Failed to update eswitch mode\n");
1859     + qed_ptt_release(hwfn, ptt);
1860     +
1861     return num;
1862    
1863     err:
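
Paired with the qed_dev.c hunk above, this stops advertising VEB to the management firmware at probe time (the device now starts as QED_OV_ESWITCH_NONE) and only switches to VEB once VFs are really created. The MFW call needs a PF translation window, a limited resource, so it sits in the acquire/check/release bracket used throughout qed; in isolation:

    struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

    if (!ptt)
            return -EBUSY;          /* all PTT windows are in use */

    rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
    if (rc)
            DP_INFO(cdev, "Failed to update eswitch mode\n");

    qed_ptt_release(hwfn, ptt);
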
1864     diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
1865     index 02adb513f475..013ff567283c 100644
1866     --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
1867     +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
1868     @@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
1869     {
1870     struct qede_ptp *ptp = edev->ptp;
1871    
1872     - if (!ptp)
1873     - return -EIO;
1874     + if (!ptp) {
1875     + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1876     + SOF_TIMESTAMPING_RX_SOFTWARE |
1877     + SOF_TIMESTAMPING_SOFTWARE;
1878     + info->phc_index = -1;
1879     +
1880     + return 0;
1881     + }
1882    
1883     info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1884     SOF_TIMESTAMPING_RX_SOFTWARE |
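
Returning -EIO whenever the PHC is absent made plain `ethtool -T` fail on such devices; the fix instead reports software-only timestamping with no PHC, which is the minimal valid answer any clock-less driver can give:

    #include <linux/ethtool.h>
    #include <linux/net_tstamp.h>

    static int sketch_get_ts_info(struct net_device *dev,
                                  struct ethtool_ts_info *info)
    {
            info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                    SOF_TIMESTAMPING_RX_SOFTWARE |
                                    SOF_TIMESTAMPING_SOFTWARE;
            info->phc_index = -1;   /* no PTP hardware clock */
            return 0;
    }
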
1885     diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
1886     index c72adf8b52ea..9165e2b0c590 100644
1887     --- a/drivers/net/ethernet/sfc/farch.c
1888     +++ b/drivers/net/ethernet/sfc/farch.c
1889     @@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
1890     if (!state)
1891     return -ENOMEM;
1892     efx->filter_state = state;
1893     + init_rwsem(&state->lock);
1894    
1895     table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1896     table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
1897     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1898     index b65e2d144698..1e1cc5256eca 100644
1899     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1900     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1901     @@ -927,6 +927,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1902     static int stmmac_init_phy(struct net_device *dev)
1903     {
1904     struct stmmac_priv *priv = netdev_priv(dev);
1905     + u32 tx_cnt = priv->plat->tx_queues_to_use;
1906     struct phy_device *phydev;
1907     char phy_id_fmt[MII_BUS_ID_SIZE + 3];
1908     char bus_id[MII_BUS_ID_SIZE];
1909     @@ -967,6 +968,15 @@ static int stmmac_init_phy(struct net_device *dev)
1910     phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
1911     SUPPORTED_1000baseT_Full);
1912    
1913     + /*
1914     + * Half-duplex mode is not supported with multiqueue;
1915     + * half-duplex can only work with a single queue.
1916     + */
1917     + if (tx_cnt > 1)
1918     + phydev->supported &= ~(SUPPORTED_1000baseT_Half |
1919     + SUPPORTED_100baseT_Half |
1920     + SUPPORTED_10baseT_Half);
1921     +
1922     /*
1923     * Broken HW is sometimes missing the pull-up resistor on the
1924     * MDIO line, which results in reads to non-existent devices returning
1925     diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
1926     index 7a16d40a72d1..b9221fc1674d 100644
1927     --- a/drivers/net/ethernet/sun/sungem.c
1928     +++ b/drivers/net/ethernet/sun/sungem.c
1929     @@ -60,8 +60,7 @@
1930     #include <linux/sungem_phy.h>
1931     #include "sungem.h"
1932    
1933     -/* Stripping FCS is causing problems, disabled for now */
1934     -#undef STRIP_FCS
1935     +#define STRIP_FCS
1936    
1937     #define DEFAULT_MSG (NETIF_MSG_DRV | \
1938     NETIF_MSG_PROBE | \
1939     @@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
1940     writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
1941     writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
1942     val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
1943     - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
1944     + (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
1945     writel(val, gp->regs + RXDMA_CFG);
1946     if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
1947     writel(((5 & RXDMA_BLANK_IPKTS) |
1948     @@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
1949     struct net_device *dev = gp->dev;
1950     int entry, drops, work_done = 0;
1951     u32 done;
1952     - __sum16 csum;
1953    
1954     if (netif_msg_rx_status(gp))
1955     printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
1956     @@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
1957     skb = copy_skb;
1958     }
1959    
1960     - csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
1961     - skb->csum = csum_unfold(csum);
1962     - skb->ip_summed = CHECKSUM_COMPLETE;
1963     + if (likely(dev->features & NETIF_F_RXCSUM)) {
1964     + __sum16 csum;
1965     +
1966     + csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
1967     + skb->csum = csum_unfold(csum);
1968     + skb->ip_summed = CHECKSUM_COMPLETE;
1969     + }
1970     skb->protocol = eth_type_trans(skb, gp->dev);
1971    
1972     napi_gro_receive(&gp->napi, skb);
1973     @@ -1761,7 +1763,7 @@ static void gem_init_dma(struct gem *gp)
1974     writel(0, gp->regs + TXDMA_KICK);
1975    
1976     val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
1977     - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
1978     + (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
1979     writel(val, gp->regs + RXDMA_CFG);
1980    
1981     writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
1982     @@ -2985,8 +2987,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1983     pci_set_drvdata(pdev, dev);
1984    
1985     /* We can do scatter/gather and HW checksum */
1986     - dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
1987     - dev->features |= dev->hw_features | NETIF_F_RXCSUM;
1988     + dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
1989     + dev->features = dev->hw_features;
1990     if (pci_using_dac)
1991     dev->features |= NETIF_F_HIGHDMA;
1992    
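
The sungem change re-enables FCS stripping and fixes the RXDMA checksum-start offset so the hardware sum covers exactly the bytes left in the skb; on top of that, CHECKSUM_COMPLETE is now claimed only while RX checksum offload is enabled, and NETIF_F_RXCSUM moves into hw_features so ethtool can toggle it. The runtime gate, as in the hunk:

    if (likely(dev->features & NETIF_F_RXCSUM)) {
            /* 1's-complement sum reported by the NIC descriptor */
            skb->csum = csum_unfold(csum);
            skb->ip_summed = CHECKSUM_COMPLETE;
    }
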
1993     diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
1994     index b919e89a9b93..4b3986dda52e 100644
1995     --- a/drivers/net/geneve.c
1996     +++ b/drivers/net/geneve.c
1997     @@ -474,7 +474,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
1998     out_unlock:
1999     rcu_read_unlock();
2000     out:
2001     - NAPI_GRO_CB(skb)->flush |= flush;
2002     + skb_gro_flush_final(skb, pp, flush);
2003    
2004     return pp;
2005     }
2006     diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
2007     index 960f06141472..eaeee3201e8f 100644
2008     --- a/drivers/net/hyperv/hyperv_net.h
2009     +++ b/drivers/net/hyperv/hyperv_net.h
2010     @@ -211,7 +211,7 @@ int netvsc_recv_callback(struct net_device *net,
2011     void netvsc_channel_cb(void *context);
2012     int netvsc_poll(struct napi_struct *napi, int budget);
2013    
2014     -void rndis_set_subchannel(struct work_struct *w);
2015     +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
2016     int rndis_filter_open(struct netvsc_device *nvdev);
2017     int rndis_filter_close(struct netvsc_device *nvdev);
2018     struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
2019     diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
2020     index 04f611e6f678..c418113c6b20 100644
2021     --- a/drivers/net/hyperv/netvsc.c
2022     +++ b/drivers/net/hyperv/netvsc.c
2023     @@ -66,6 +66,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
2024     VM_PKT_DATA_INBAND, 0);
2025     }
2026    
2027     +/* Worker to set up sub-channels on initial setup
2028     + * Initial hotplug event occurs in softirq context
2029     + * and can't wait for channels.
2030     + */
2031     +static void netvsc_subchan_work(struct work_struct *w)
2032     +{
2033     + struct netvsc_device *nvdev =
2034     + container_of(w, struct netvsc_device, subchan_work);
2035     + struct rndis_device *rdev;
2036     + int i, ret;
2037     +
2038     + /* Avoid deadlock with device removal already under RTNL */
2039     + if (!rtnl_trylock()) {
2040     + schedule_work(w);
2041     + return;
2042     + }
2043     +
2044     + rdev = nvdev->extension;
2045     + if (rdev) {
2046     + ret = rndis_set_subchannel(rdev->ndev, nvdev);
2047     + if (ret == 0) {
2048     + netif_device_attach(rdev->ndev);
2049     + } else {
2050     + /* fallback to only primary channel */
2051     + for (i = 1; i < nvdev->num_chn; i++)
2052     + netif_napi_del(&nvdev->chan_table[i].napi);
2053     +
2054     + nvdev->max_chn = 1;
2055     + nvdev->num_chn = 1;
2056     + }
2057     + }
2058     +
2059     + rtnl_unlock();
2060     +}
2061     +
2062     static struct netvsc_device *alloc_net_device(void)
2063     {
2064     struct netvsc_device *net_device;
2065     @@ -82,7 +117,7 @@ static struct netvsc_device *alloc_net_device(void)
2066    
2067     init_completion(&net_device->channel_init_wait);
2068     init_waitqueue_head(&net_device->subchan_open);
2069     - INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
2070     + INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
2071    
2072     return net_device;
2073     }
2074     diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
2075     index eb8dccd24abf..82c3c8e200f0 100644
2076     --- a/drivers/net/hyperv/netvsc_drv.c
2077     +++ b/drivers/net/hyperv/netvsc_drv.c
2078     @@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev,
2079     if (IS_ERR(nvdev))
2080     return PTR_ERR(nvdev);
2081    
2082     - /* Note: enable and attach happen when sub-channels setup */
2083     + if (nvdev->num_chn > 1) {
2084     + ret = rndis_set_subchannel(ndev, nvdev);
2085     +
2086     + /* if unavailable, just proceed with one queue */
2087     + if (ret) {
2088     + nvdev->max_chn = 1;
2089     + nvdev->num_chn = 1;
2090     + }
2091     + }
2092     +
2093     + /* In any case device is now ready */
2094     + netif_device_attach(ndev);
2095    
2096     + /* Note: enable and attach happen when sub-channels setup */
2097     netif_carrier_off(ndev);
2098    
2099     if (netif_running(ndev)) {
2100     @@ -2064,6 +2076,9 @@ static int netvsc_probe(struct hv_device *dev,
2101    
2102     memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
2103    
2104     + if (nvdev->num_chn > 1)
2105     + schedule_work(&nvdev->subchan_work);
2106     +
2107     /* hw_features computed in rndis_netdev_set_hwcaps() */
2108     net->features = net->hw_features |
2109     NETIF_F_HIGHDMA | NETIF_F_SG |
2110     diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
2111     index e7ca5b5f39ed..f362cda85425 100644
2112     --- a/drivers/net/hyperv/rndis_filter.c
2113     +++ b/drivers/net/hyperv/rndis_filter.c
2114     @@ -1061,29 +1061,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
2115     * This breaks overlap of processing the host message for the
2116     * new primary channel with the initialization of sub-channels.
2117     */
2118     -void rndis_set_subchannel(struct work_struct *w)
2119     +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
2120     {
2121     - struct netvsc_device *nvdev
2122     - = container_of(w, struct netvsc_device, subchan_work);
2123     struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
2124     - struct net_device_context *ndev_ctx;
2125     - struct rndis_device *rdev;
2126     - struct net_device *ndev;
2127     - struct hv_device *hv_dev;
2128     + struct net_device_context *ndev_ctx = netdev_priv(ndev);
2129     + struct hv_device *hv_dev = ndev_ctx->device_ctx;
2130     + struct rndis_device *rdev = nvdev->extension;
2131     int i, ret;
2132    
2133     - if (!rtnl_trylock()) {
2134     - schedule_work(w);
2135     - return;
2136     - }
2137     -
2138     - rdev = nvdev->extension;
2139     - if (!rdev)
2140     - goto unlock; /* device was removed */
2141     -
2142     - ndev = rdev->ndev;
2143     - ndev_ctx = netdev_priv(ndev);
2144     - hv_dev = ndev_ctx->device_ctx;
2145     + ASSERT_RTNL();
2146    
2147     memset(init_packet, 0, sizeof(struct nvsp_message));
2148     init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
2149     @@ -1099,13 +1085,13 @@ void rndis_set_subchannel(struct work_struct *w)
2150     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2151     if (ret) {
2152     netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
2153     - goto failed;
2154     + return ret;
2155     }
2156    
2157     wait_for_completion(&nvdev->channel_init_wait);
2158     if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
2159     netdev_err(ndev, "sub channel request failed\n");
2160     - goto failed;
2161     + return -EIO;
2162     }
2163    
2164     nvdev->num_chn = 1 +
2165     @@ -1124,21 +1110,7 @@ void rndis_set_subchannel(struct work_struct *w)
2166     for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
2167     ndev_ctx->tx_table[i] = i % nvdev->num_chn;
2168    
2169     - netif_device_attach(ndev);
2170     - rtnl_unlock();
2171     - return;
2172     -
2173     -failed:
2174     - /* fallback to only primary channel */
2175     - for (i = 1; i < nvdev->num_chn; i++)
2176     - netif_napi_del(&nvdev->chan_table[i].napi);
2177     -
2178     - nvdev->max_chn = 1;
2179     - nvdev->num_chn = 1;
2180     -
2181     - netif_device_attach(ndev);
2182     -unlock:
2183     - rtnl_unlock();
2184     + return 0;
2185     }
2186    
2187     static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
2188     @@ -1329,21 +1301,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
2189     netif_napi_add(net, &net_device->chan_table[i].napi,
2190     netvsc_poll, NAPI_POLL_WEIGHT);
2191    
2192     - if (net_device->num_chn > 1)
2193     - schedule_work(&net_device->subchan_work);
2194     + return net_device;
2195    
2196     out:
2197     - /* if unavailable, just proceed with one queue */
2198     - if (ret) {
2199     - net_device->max_chn = 1;
2200     - net_device->num_chn = 1;
2201     - }
2202     -
2203     - /* No sub channels, device is ready */
2204     - if (net_device->num_chn == 1)
2205     - netif_device_attach(net);
2206     -
2207     - return net_device;
2208     + /* setting up multiple channels failed */
2209     + net_device->max_chn = 1;
2210     + net_device->num_chn = 1;
2211    
2212     err_dev_remv:
2213     rndis_filter_device_remove(dev, net_device);
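
The netvsc rework makes rndis_set_subchannel() a plain function that expects RTNL, called either directly from netvsc_attach() or from a small worker scheduled by netvsc_probe(); the worker takes RTNL with rtnl_trylock() so it can never deadlock against a removal path that already holds RTNL and is flushing this very work. The trylock-and-reschedule idiom in isolation:

    #include <linux/rtnetlink.h>
    #include <linux/workqueue.h>

    static void sketch_work(struct work_struct *w)
    {
            if (!rtnl_trylock()) {
                    /* Removal may hold RTNL and wait for us; requeue
                     * and retry instead of blocking the workqueue. */
                    schedule_work(w);
                    return;
            }
            /* ... touch netdev state under RTNL ... */
            rtnl_unlock();
    }
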
2214     diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
2215     index 4377c26f714d..6641fd5355e0 100644
2216     --- a/drivers/net/ipvlan/ipvlan_main.c
2217     +++ b/drivers/net/ipvlan/ipvlan_main.c
2218     @@ -594,7 +594,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
2219     ipvlan->phy_dev = phy_dev;
2220     ipvlan->dev = dev;
2221     ipvlan->sfeatures = IPVLAN_FEATURES;
2222     - ipvlan_adjust_mtu(ipvlan, phy_dev);
2223     + if (!tb[IFLA_MTU])
2224     + ipvlan_adjust_mtu(ipvlan, phy_dev);
2225     INIT_LIST_HEAD(&ipvlan->addrs);
2226     spin_lock_init(&ipvlan->addrs_lock);
2227    
2228     diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
2229     index 0867f7275852..8a76c1e5de8d 100644
2230     --- a/drivers/net/usb/lan78xx.c
2231     +++ b/drivers/net/usb/lan78xx.c
2232     @@ -3193,6 +3193,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2233     pkt_cnt = 0;
2234     count = 0;
2235     length = 0;
2236     + spin_lock_irqsave(&tqp->lock, flags);
2237     for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2238     if (skb_is_gso(skb)) {
2239     if (pkt_cnt) {
2240     @@ -3201,7 +3202,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2241     }
2242     count = 1;
2243     length = skb->len - TX_OVERHEAD;
2244     - skb2 = skb_dequeue(tqp);
2245     + __skb_unlink(skb, tqp);
2246     + spin_unlock_irqrestore(&tqp->lock, flags);
2247     goto gso_skb;
2248     }
2249    
2250     @@ -3210,6 +3212,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2251     skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2252     pkt_cnt++;
2253     }
2254     + spin_unlock_irqrestore(&tqp->lock, flags);
2255    
2256     /* copy to a single skb */
2257     skb = alloc_skb(skb_totallen, GFP_ATOMIC);
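
The lan78xx hunks close a race in the TX bottom half: the queue was walked through skb->next with no lock held, while other contexts could add or remove skbs concurrently. The walk is now bracketed by the queue's own spinlock, and the GSO skb is detached with __skb_unlink(), the variant for callers that already hold the lock:

    #include <linux/skbuff.h>

    static struct sk_buff *sketch_pull_first_gso(struct sk_buff_head *q)
    {
            struct sk_buff *skb, *found = NULL;
            unsigned long flags;

            spin_lock_irqsave(&q->lock, flags);
            skb_queue_walk(q, skb) {
                    if (skb_is_gso(skb)) {
                            __skb_unlink(skb, q);   /* lock already held */
                            found = skb;
                            break;
                    }
            }
            spin_unlock_irqrestore(&q->lock, flags);
            return found;
    }
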
2258     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2259     index 094680871687..04c22f508ed9 100644
2260     --- a/drivers/net/usb/qmi_wwan.c
2261     +++ b/drivers/net/usb/qmi_wwan.c
2262     @@ -1246,6 +1246,7 @@ static const struct usb_device_id products[] = {
2263     {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
2264     {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
2265     {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
2266     + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */
2267     {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
2268     {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
2269     {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
2270     diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
2271     index 86f7196f9d91..2a58607a6aea 100644
2272     --- a/drivers/net/usb/r8152.c
2273     +++ b/drivers/net/usb/r8152.c
2274     @@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev)
2275     #ifdef CONFIG_PM_SLEEP
2276     unregister_pm_notifier(&tp->pm_notifier);
2277     #endif
2278     - napi_disable(&tp->napi);
2279     + if (!test_bit(RTL8152_UNPLUG, &tp->flags))
2280     + napi_disable(&tp->napi);
2281     clear_bit(WORK_ENABLE, &tp->flags);
2282     usb_kill_urb(tp->intr_urb);
2283     cancel_delayed_work_sync(&tp->schedule);
2284     diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2285     index 8c7207535179..11a3915e92e9 100644
2286     --- a/drivers/net/virtio_net.c
2287     +++ b/drivers/net/virtio_net.c
2288     @@ -50,6 +50,10 @@ module_param(napi_tx, bool, 0644);
2289     /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
2290     #define VIRTIO_XDP_HEADROOM 256
2291    
2292     +/* Separating two types of XDP xmit */
2293     +#define VIRTIO_XDP_TX BIT(0)
2294     +#define VIRTIO_XDP_REDIR BIT(1)
2295     +
2296     /* RX packet size EWMA. The average packet size is used to determine the packet
2297     * buffer size when refilling RX rings. As the entire RX ring may be refilled
2298     * at once, the weight is chosen so that the EWMA will be insensitive to short-
2299     @@ -547,7 +551,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
2300     struct receive_queue *rq,
2301     void *buf, void *ctx,
2302     unsigned int len,
2303     - bool *xdp_xmit)
2304     + unsigned int *xdp_xmit)
2305     {
2306     struct sk_buff *skb;
2307     struct bpf_prog *xdp_prog;
2308     @@ -615,14 +619,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
2309     trace_xdp_exception(vi->dev, xdp_prog, act);
2310     goto err_xdp;
2311     }
2312     - *xdp_xmit = true;
2313     + *xdp_xmit |= VIRTIO_XDP_TX;
2314     rcu_read_unlock();
2315     goto xdp_xmit;
2316     case XDP_REDIRECT:
2317     err = xdp_do_redirect(dev, &xdp, xdp_prog);
2318     if (err)
2319     goto err_xdp;
2320     - *xdp_xmit = true;
2321     + *xdp_xmit |= VIRTIO_XDP_REDIR;
2322     rcu_read_unlock();
2323     goto xdp_xmit;
2324     default:
2325     @@ -684,7 +688,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
2326     void *buf,
2327     void *ctx,
2328     unsigned int len,
2329     - bool *xdp_xmit)
2330     + unsigned int *xdp_xmit)
2331     {
2332     struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2333     u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
2334     @@ -772,7 +776,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
2335     put_page(xdp_page);
2336     goto err_xdp;
2337     }
2338     - *xdp_xmit = true;
2339     + *xdp_xmit |= VIRTIO_XDP_REDIR;
2340     if (unlikely(xdp_page != page))
2341     put_page(page);
2342     rcu_read_unlock();
2343     @@ -784,7 +788,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
2344     put_page(xdp_page);
2345     goto err_xdp;
2346     }
2347     - *xdp_xmit = true;
2348     + *xdp_xmit |= VIRTIO_XDP_TX;
2349     if (unlikely(xdp_page != page))
2350     put_page(page);
2351     rcu_read_unlock();
2352     @@ -893,7 +897,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
2353     }
2354    
2355     static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
2356     - void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
2357     + void *buf, unsigned int len, void **ctx,
2358     + unsigned int *xdp_xmit)
2359     {
2360     struct net_device *dev = vi->dev;
2361     struct sk_buff *skb;
2362     @@ -1186,7 +1191,8 @@ static void refill_work(struct work_struct *work)
2363     }
2364     }
2365    
2366     -static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
2367     +static int virtnet_receive(struct receive_queue *rq, int budget,
2368     + unsigned int *xdp_xmit)
2369     {
2370     struct virtnet_info *vi = rq->vq->vdev->priv;
2371     unsigned int len, received = 0, bytes = 0;
2372     @@ -1275,7 +1281,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
2373     struct virtnet_info *vi = rq->vq->vdev->priv;
2374     struct send_queue *sq;
2375     unsigned int received, qp;
2376     - bool xdp_xmit = false;
2377     + unsigned int xdp_xmit = 0;
2378    
2379     virtnet_poll_cleantx(rq);
2380    
2381     @@ -1285,12 +1291,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
2382     if (received < budget)
2383     virtqueue_napi_complete(napi, rq->vq, received);
2384    
2385     - if (xdp_xmit) {
2386     + if (xdp_xmit & VIRTIO_XDP_REDIR)
2387     + xdp_do_flush_map();
2388     +
2389     + if (xdp_xmit & VIRTIO_XDP_TX) {
2390     qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
2391     smp_processor_id();
2392     sq = &vi->sq[qp];
2393     virtqueue_kick(sq->vq);
2394     - xdp_do_flush_map();
2395     }
2396    
2397     return received;
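
virtio_net used one bool to mean "some XDP transmit happened" and then did both completion steps every time. With two flag bits, each step runs only when its kind of transmit occurred: redirected frames need the redirect maps flushed, locally queued XDP_TX frames need this device's send queue kicked. In outline (sq is the send queue the poll routine picked for this CPU):

    unsigned int xdp_xmit = 0;

    /* receive path records what actually happened: */
    xdp_xmit |= VIRTIO_XDP_TX;      /* on XDP_TX */
    xdp_xmit |= VIRTIO_XDP_REDIR;   /* on XDP_REDIRECT */

    /* end of the NAPI poll does only the necessary work: */
    if (xdp_xmit & VIRTIO_XDP_REDIR)
            xdp_do_flush_map();
    if (xdp_xmit & VIRTIO_XDP_TX)
            virtqueue_kick(sq->vq);
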
2398     diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2399     index fab7a4db249e..4b170599fa5e 100644
2400     --- a/drivers/net/vxlan.c
2401     +++ b/drivers/net/vxlan.c
2402     @@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
2403     flush = 0;
2404    
2405     out:
2406     - skb_gro_remcsum_cleanup(skb, &grc);
2407     - skb->remcsum_offload = 0;
2408     - NAPI_GRO_CB(skb)->flush |= flush;
2409     + skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
2410    
2411     return pp;
2412     }
2413     diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
2414     index 762a29cdf7ad..b23983737011 100644
2415     --- a/drivers/net/wireless/realtek/rtlwifi/base.c
2416     +++ b/drivers/net/wireless/realtek/rtlwifi/base.c
2417     @@ -485,18 +485,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
2418    
2419     }
2420    
2421     -void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
2422     +void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
2423     {
2424     struct rtl_priv *rtlpriv = rtl_priv(hw);
2425    
2426     del_timer_sync(&rtlpriv->works.watchdog_timer);
2427    
2428     - cancel_delayed_work(&rtlpriv->works.watchdog_wq);
2429     - cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
2430     - cancel_delayed_work(&rtlpriv->works.ps_work);
2431     - cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
2432     - cancel_delayed_work(&rtlpriv->works.fwevt_wq);
2433     - cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
2434     + cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq);
2435     + if (ips_wq)
2436     + cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
2437     + else
2438     + cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
2439     + cancel_delayed_work_sync(&rtlpriv->works.ps_work);
2440     + cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq);
2441     + cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq);
2442     + cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq);
2443     }
2444     EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
2445    
2446     diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
2447     index acc924635818..92b8cad6b563 100644
2448     --- a/drivers/net/wireless/realtek/rtlwifi/base.h
2449     +++ b/drivers/net/wireless/realtek/rtlwifi/base.h
2450     @@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
2451     void rtl_deinit_rfkill(struct ieee80211_hw *hw);
2452    
2453     void rtl_watch_dog_timer_callback(struct timer_list *t);
2454     -void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
2455     +void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq);
2456    
2457     bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
2458     int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
2459     diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
2460     index cfea57efa7f4..4bf7967590ca 100644
2461     --- a/drivers/net/wireless/realtek/rtlwifi/core.c
2462     +++ b/drivers/net/wireless/realtek/rtlwifi/core.c
2463     @@ -130,7 +130,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
2464     firmware->size);
2465     rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
2466     }
2467     - rtlpriv->rtlhal.fwsize = firmware->size;
2468     release_firmware(firmware);
2469     }
2470    
2471     @@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
2472     /* reset sec info */
2473     rtl_cam_reset_sec_info(hw);
2474    
2475     - rtl_deinit_deferred_work(hw);
2476     + rtl_deinit_deferred_work(hw, false);
2477     }
2478     rtlpriv->intf_ops->adapter_stop(hw);
2479    
2480     diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
2481     index 57bb8f049e59..4dc3e3122f5d 100644
2482     --- a/drivers/net/wireless/realtek/rtlwifi/pci.c
2483     +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
2484     @@ -2375,7 +2375,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
2485     ieee80211_unregister_hw(hw);
2486     rtlmac->mac80211_registered = 0;
2487     } else {
2488     - rtl_deinit_deferred_work(hw);
2489     + rtl_deinit_deferred_work(hw, false);
2490     rtlpriv->intf_ops->adapter_stop(hw);
2491     }
2492     rtlpriv->cfg->ops->disable_interrupt(hw);
2493     diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
2494     index 71af24e2e051..479a4cfc245d 100644
2495     --- a/drivers/net/wireless/realtek/rtlwifi/ps.c
2496     +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
2497     @@ -71,7 +71,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
2498     struct rtl_priv *rtlpriv = rtl_priv(hw);
2499    
2500     /*<1> Stop all timer */
2501     - rtl_deinit_deferred_work(hw);
2502     + rtl_deinit_deferred_work(hw, true);
2503    
2504     /*<2> Disable Interrupt */
2505     rtlpriv->cfg->ops->disable_interrupt(hw);
2506     @@ -292,7 +292,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
2507     struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2508     enum rf_pwrstate rtstate;
2509    
2510     - cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
2511     + cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
2512    
2513     mutex_lock(&rtlpriv->locks.ips_mutex);
2514     if (ppsc->inactiveps) {
2515     diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
2516     index ce3103bb8ebb..6771b2742b78 100644
2517     --- a/drivers/net/wireless/realtek/rtlwifi/usb.c
2518     +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
2519     @@ -1132,7 +1132,7 @@ void rtl_usb_disconnect(struct usb_interface *intf)
2520     ieee80211_unregister_hw(hw);
2521     rtlmac->mac80211_registered = 0;
2522     } else {
2523     - rtl_deinit_deferred_work(hw);
2524     + rtl_deinit_deferred_work(hw, false);
2525     rtlpriv->intf_ops->adapter_stop(hw);
2526     }
2527     /*deinit rfkill */
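
Switching teardown to cancel_delayed_work_sync() guarantees no handler is still running when rtl_deinit_deferred_work() returns, with one deliberate exception: rtl_ps_disable_nic() is itself called from the ips_nic_off_wq handler, and a work item that sync-cancels itself waits on its own completion forever, hence the ips_wq flag. The rule in miniature (sketch_dwork stands for the delayed_work embedding the handler):

    #include <linux/workqueue.h>

    static struct delayed_work sketch_dwork;

    static void sketch_ips_handler(struct work_struct *w)
    {
            /* fine: only clears a pending (re)queue, does not wait */
            cancel_delayed_work(&sketch_dwork);

            /* cancel_delayed_work_sync(&sketch_dwork) here would wait
             * for this very handler to return: self-deadlock. */
    }
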
2528     diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2529     index 4dd0668003e7..1d5082d30187 100644
2530     --- a/drivers/net/xen-netfront.c
2531     +++ b/drivers/net/xen-netfront.c
2532     @@ -1810,7 +1810,7 @@ static int talk_to_netback(struct xenbus_device *dev,
2533     err = xen_net_read_mac(dev, info->netdev->dev_addr);
2534     if (err) {
2535     xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2536     - goto out;
2537     + goto out_unlocked;
2538     }
2539    
2540     rtnl_lock();
2541     @@ -1925,6 +1925,7 @@ static int talk_to_netback(struct xenbus_device *dev,
2542     xennet_destroy_queues(info);
2543     out:
2544     rtnl_unlock();
2545     +out_unlocked:
2546     device_unregister(&dev->dev);
2547     return err;
2548     }
2549     @@ -1950,10 +1951,6 @@ static int xennet_connect(struct net_device *dev)
2550     /* talk_to_netback() sets the correct number of queues */
2551     num_queues = dev->real_num_tx_queues;
2552    
2553     - rtnl_lock();
2554     - netdev_update_features(dev);
2555     - rtnl_unlock();
2556     -
2557     if (dev->reg_state == NETREG_UNINITIALIZED) {
2558     err = register_netdev(dev);
2559     if (err) {
2560     @@ -1963,6 +1960,10 @@ static int xennet_connect(struct net_device *dev)
2561     }
2562     }
2563    
2564     + rtnl_lock();
2565     + netdev_update_features(dev);
2566     + rtnl_unlock();
2567     +
2568     /*
2569     * All public and private state should now be sane. Get
2570     * ready to start sending and receiving packets and give the driver
2571     diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
2572     index da4b457a14e0..4690814cfc51 100644
2573     --- a/drivers/pci/host/pci-hyperv.c
2574     +++ b/drivers/pci/host/pci-hyperv.c
2575     @@ -1077,6 +1077,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
2576     struct pci_bus *pbus;
2577     struct pci_dev *pdev;
2578     struct cpumask *dest;
2579     + unsigned long flags;
2580     struct compose_comp_ctxt comp;
2581     struct tran_int_desc *int_desc;
2582     struct {
2583     @@ -1168,14 +1169,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
2584     * the channel callback directly when channel->target_cpu is
2585     * the current CPU. When the higher level interrupt code
2586     * calls us with interrupt enabled, let's add the
2587     - * local_bh_disable()/enable() to avoid race.
2588     + * local_irq_save()/restore() to avoid race:
2589     + * hv_pci_onchannelcallback() can also run in tasklet.
2590     */
2591     - local_bh_disable();
2592     + local_irq_save(flags);
2593    
2594     if (hbus->hdev->channel->target_cpu == smp_processor_id())
2595     hv_pci_onchannelcallback(hbus);
2596    
2597     - local_bh_enable();
2598     + local_irq_restore(flags);
2599    
2600     if (hpdev->state == hv_pcichild_ejecting) {
2601     dev_err_once(&hbus->hdev->device,
2602     diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
2603     index 06e8406c4440..9dc7cf211da0 100644
2604     --- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
2605     +++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
2606     @@ -1411,7 +1411,7 @@ static struct pinctrl_desc mtk_desc = {
2607    
2608     static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
2609     {
2610     - struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
2611     + struct mtk_pinctrl *hw = gpiochip_get_data(chip);
2612     int value, err;
2613    
2614     err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value);
2615     @@ -1423,7 +1423,7 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
2616    
2617     static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
2618     {
2619     - struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
2620     + struct mtk_pinctrl *hw = gpiochip_get_data(chip);
2621    
2622     mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value);
2623     }
2624     @@ -1463,11 +1463,20 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
2625     if (ret < 0)
2626     return ret;
2627    
2628     - ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
2629     - chip->ngpio);
2630     - if (ret < 0) {
2631     - gpiochip_remove(chip);
2632     - return ret;
2633     + /* Just for backward compatible for these old pinctrl nodes without
2634     + * "gpio-ranges" property. Otherwise, called directly from a
2635     + * DeviceTree-supported pinctrl driver is DEPRECATED.
2636     + * Please see Section 2.1 of
2637     + * Documentation/devicetree/bindings/gpio/gpio.txt on how to
2638     + * bind pinctrl and gpio drivers via the "gpio-ranges" property.
2639     + */
2640     + if (!of_find_property(np, "gpio-ranges", NULL)) {
2641     + ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
2642     + chip->ngpio);
2643     + if (ret < 0) {
2644     + gpiochip_remove(chip);
2645     + return ret;
2646     + }
2647     }
2648    
2649     return 0;
2650     @@ -1561,7 +1570,7 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
2651     err = mtk_build_groups(hw);
2652     if (err) {
2653     dev_err(&pdev->dev, "Failed to build groups\n");
2654     - return 0;
2655     + return err;
2656     }
2657    
2658     /* Setup functions descriptions per SoC types */
2659     diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
2660     index b1bb7263532b..049b374aa4ae 100644
2661     --- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
2662     +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
2663     @@ -22,12 +22,12 @@
2664     #include "sh_pfc.h"
2665    
2666     #define CPU_ALL_PORT(fn, sfx) \
2667     - PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
2668     - PORT_GP_CFG_28(1, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
2669     - PORT_GP_CFG_17(2, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
2670     - PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
2671     - PORT_GP_CFG_6(4, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
2672     - PORT_GP_CFG_15(5, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH)
2673     + PORT_GP_22(0, fn, sfx), \
2674     + PORT_GP_28(1, fn, sfx), \
2675     + PORT_GP_17(2, fn, sfx), \
2676     + PORT_GP_17(3, fn, sfx), \
2677     + PORT_GP_6(4, fn, sfx), \
2678     + PORT_GP_15(5, fn, sfx)
2679     /*
2680     * F_() : just information
2681     * FM() : macro for FN_xxx / xxx_MARK
2682     diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
2683     index 78b98b3e7efa..b7f75339683e 100644
2684     --- a/drivers/s390/net/qeth_core.h
2685     +++ b/drivers/s390/net/qeth_core.h
2686     @@ -831,6 +831,17 @@ struct qeth_trap_id {
2687     /*some helper functions*/
2688     #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
2689    
2690     +static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
2691     + unsigned int elements)
2692     +{
2693     + unsigned int i;
2694     +
2695     + for (i = 0; i < elements; i++)
2696     + memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
2697     + buf->element[14].sflags = 0;
2698     + buf->element[15].sflags = 0;
2699     +}
2700     +
2701     /**
2702     * qeth_get_elements_for_range() - find number of SBALEs to cover range.
2703     * @start: Start of the address range.
2704     diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
2705     index dffd820731f2..b2eebcffd502 100644
2706     --- a/drivers/s390/net/qeth_core_main.c
2707     +++ b/drivers/s390/net/qeth_core_main.c
2708     @@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
2709     struct qeth_qdio_out_buffer *buf,
2710     enum iucv_tx_notify notification);
2711     static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
2712     -static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2713     - struct qeth_qdio_out_buffer *buf,
2714     - enum qeth_qdio_buffer_states newbufstate);
2715     static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
2716    
2717     struct workqueue_struct *qeth_wq;
2718     @@ -488,6 +485,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
2719     struct qaob *aob;
2720     struct qeth_qdio_out_buffer *buffer;
2721     enum iucv_tx_notify notification;
2722     + unsigned int i;
2723    
2724     aob = (struct qaob *) phys_to_virt(phys_aob_addr);
2725     QETH_CARD_TEXT(card, 5, "haob");
2726     @@ -512,10 +510,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
2727     qeth_notify_skbs(buffer->q, buffer, notification);
2728    
2729     buffer->aob = NULL;
2730     - qeth_clear_output_buffer(buffer->q, buffer,
2731     - QETH_QDIO_BUF_HANDLED_DELAYED);
2732     + /* Free dangling allocations. The attached skbs are handled by
2733     + * qeth_cleanup_handled_pending().
2734     + */
2735     + for (i = 0;
2736     + i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
2737     + i++) {
2738     + if (aob->sba[i] && buffer->is_header[i])
2739     + kmem_cache_free(qeth_core_header_cache,
2740     + (void *) aob->sba[i]);
2741     + }
2742     + atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
2743    
2744     - /* from here on: do not touch buffer anymore */
2745     qdio_release_aob(aob);
2746     }
2747    
2748     @@ -3759,6 +3765,10 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2749     QETH_CARD_TEXT(queue->card, 5, "aob");
2750     QETH_CARD_TEXT_(queue->card, 5, "%lx",
2751     virt_to_phys(buffer->aob));
2752     +
2753     + /* prepare the queue slot for re-use: */
2754     + qeth_scrub_qdio_buffer(buffer->buffer,
2755     + QETH_MAX_BUFFER_ELEMENTS(card));
2756     if (qeth_init_qdio_out_buf(queue, bidx)) {
2757     QETH_CARD_TEXT(card, 2, "outofbuf");
2758     qeth_schedule_recovery(card);
2759     @@ -4835,7 +4845,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
2760     goto out;
2761     }
2762    
2763     - ccw_device_get_id(CARD_RDEV(card), &id);
2764     + ccw_device_get_id(CARD_DDEV(card), &id);
2765     request->resp_buf_len = sizeof(*response);
2766     request->resp_version = DIAG26C_VERSION2;
2767     request->op_code = DIAG26C_GET_MAC;
2768     diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
2769     index b8079f2a65b3..16dc8b83ca6f 100644
2770     --- a/drivers/s390/net/qeth_l2_main.c
2771     +++ b/drivers/s390/net/qeth_l2_main.c
2772     @@ -141,7 +141,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
2773    
2774     static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
2775     {
2776     - enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
2777     + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
2778     IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
2779     int rc;
2780    
2781     @@ -158,7 +158,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
2782    
2783     static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
2784     {
2785     - enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
2786     + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
2787     IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
2788     int rc;
2789    
2790     @@ -523,27 +523,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
2791     return -ERESTARTSYS;
2792     }
2793    
2794     + /* avoid racing against concurrent state change: */
2795     + if (!mutex_trylock(&card->conf_mutex))
2796     + return -EAGAIN;
2797     +
2798     if (!qeth_card_hw_is_reachable(card)) {
2799     ether_addr_copy(dev->dev_addr, addr->sa_data);
2800     - return 0;
2801     + goto out_unlock;
2802     }
2803    
2804     /* don't register the same address twice */
2805     if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
2806     (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
2807     - return 0;
2808     + goto out_unlock;
2809    
2810     /* add the new address, switch over, drop the old */
2811     rc = qeth_l2_send_setmac(card, addr->sa_data);
2812     if (rc)
2813     - return rc;
2814     + goto out_unlock;
2815     ether_addr_copy(old_addr, dev->dev_addr);
2816     ether_addr_copy(dev->dev_addr, addr->sa_data);
2817    
2818     if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
2819     qeth_l2_remove_mac(card, old_addr);
2820     card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
2821     - return 0;
2822     +
2823     +out_unlock:
2824     + mutex_unlock(&card->conf_mutex);
2825     + return rc;
2826     }
2827    
2828     static void qeth_promisc_to_bridge(struct qeth_card *card)
2829     diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
2830     index eeaf6739215f..dd4eb986f693 100644
2831     --- a/drivers/vhost/net.c
2832     +++ b/drivers/vhost/net.c
2833     @@ -1219,7 +1219,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
2834     if (ubufs)
2835     vhost_net_ubuf_put_wait_and_free(ubufs);
2836     err_ubufs:
2837     - sockfd_put(sock);
2838     + if (sock)
2839     + sockfd_put(sock);
2840     err_vq:
2841     mutex_unlock(&vq->mutex);
2842     err:
2843     diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
2844     index 26f6b4f41ce6..00458e985cc3 100644
2845     --- a/fs/autofs4/dev-ioctl.c
2846     +++ b/fs/autofs4/dev-ioctl.c
2847     @@ -148,6 +148,15 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param)
2848     cmd);
2849     goto out;
2850     }
2851     + } else {
2852     + unsigned int inr = _IOC_NR(cmd);
2853     +
2854     + if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD ||
2855     + inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD ||
2856     + inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) {
2857     + err = -EINVAL;
2858     + goto out;
2859     + }
2860     }
2861    
2862     err = 0;
2863     @@ -284,7 +293,8 @@ static int autofs_dev_ioctl_openmount(struct file *fp,
2864     dev_t devid;
2865     int err, fd;
2866    
2867     - /* param->path has already been checked */
2868     + /* param->path has been checked in validate_dev_ioctl() */
2869     +
2870     if (!param->openmount.devid)
2871     return -EINVAL;
2872    
2873     @@ -446,10 +456,7 @@ static int autofs_dev_ioctl_requester(struct file *fp,
2874     dev_t devid;
2875     int err = -ENOENT;
2876    
2877     - if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
2878     - err = -EINVAL;
2879     - goto out;
2880     - }
2881     + /* param->path has been checked in validate_dev_ioctl() */
2882    
2883     devid = sbi->sb->s_dev;
2884    
2885     @@ -534,10 +541,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
2886     unsigned int devid, magic;
2887     int err = -ENOENT;
2888    
2889     - if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
2890     - err = -EINVAL;
2891     - goto out;
2892     - }
2893     + /* param->path has been checked in validate_dev_ioctl() */
2894    
2895     name = param->path;
2896     type = param->ismountpoint.in.type;
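These three autofs hunks move the "this ioctl needs a non-empty path" check out of the individual handlers and into validate_dev_ioctl(), keyed off _IOC_NR(cmd), so a malformed param->size can no longer reach autofs_dev_ioctl_openmount() and friends. A condensed user-space sketch of the idea; the command numbers below are illustrative only:

#include <errno.h>
#include <stdbool.h>

#define CMD_OPENMOUNT    0x74	/* illustrative numbers only */
#define CMD_REQUESTER    0x7b
#define CMD_ISMOUNTPOINT 0x7e

static bool cmd_requires_path(unsigned int nr)
{
	return nr == CMD_OPENMOUNT || nr == CMD_REQUESTER ||
	       nr == CMD_ISMOUNTPOINT;
}

/* One choke point instead of a size check in every handler: commands that
 * need a path are rejected here when no path payload accompanies them. */
static int validate(unsigned int nr, unsigned long payload_len)
{
	if (payload_len == 0 && cmd_requires_path(nr))
		return -EINVAL;
	return 0;
}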
2897     diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
2898     index 7e288d97adcb..9fed1c05f1f4 100644
2899     --- a/fs/reiserfs/prints.c
2900     +++ b/fs/reiserfs/prints.c
2901     @@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key)
2902     }
2903    
2904     /* %k */
2905     -static void sprintf_le_key(char *buf, struct reiserfs_key *key)
2906     +static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key)
2907     {
2908     if (key)
2909     - sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
2910     - le32_to_cpu(key->k_objectid), le_offset(key),
2911     - le_type(key));
2912     + return scnprintf(buf, size, "[%d %d %s %s]",
2913     + le32_to_cpu(key->k_dir_id),
2914     + le32_to_cpu(key->k_objectid), le_offset(key),
2915     + le_type(key));
2916     else
2917     - sprintf(buf, "[NULL]");
2918     + return scnprintf(buf, size, "[NULL]");
2919     }
2920    
2921     /* %K */
2922     -static void sprintf_cpu_key(char *buf, struct cpu_key *key)
2923     +static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key)
2924     {
2925     if (key)
2926     - sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
2927     - key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
2928     - cpu_type(key));
2929     + return scnprintf(buf, size, "[%d %d %s %s]",
2930     + key->on_disk_key.k_dir_id,
2931     + key->on_disk_key.k_objectid,
2932     + reiserfs_cpu_offset(key), cpu_type(key));
2933     else
2934     - sprintf(buf, "[NULL]");
2935     + return scnprintf(buf, size, "[NULL]");
2936     }
2937    
2938     -static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
2939     +static int scnprintf_de_head(char *buf, size_t size,
2940     + struct reiserfs_de_head *deh)
2941     {
2942     if (deh)
2943     - sprintf(buf,
2944     - "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
2945     - deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
2946     - deh_location(deh), deh_state(deh));
2947     + return scnprintf(buf, size,
2948     + "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
2949     + deh_offset(deh), deh_dir_id(deh),
2950     + deh_objectid(deh), deh_location(deh),
2951     + deh_state(deh));
2952     else
2953     - sprintf(buf, "[NULL]");
2954     + return scnprintf(buf, size, "[NULL]");
2955    
2956     }
2957    
2958     -static void sprintf_item_head(char *buf, struct item_head *ih)
2959     +static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih)
2960     {
2961     if (ih) {
2962     - strcpy(buf,
2963     - (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
2964     - sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
2965     - sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
2966     - "free_space(entry_count) %d",
2967     - ih_item_len(ih), ih_location(ih), ih_free_space(ih));
2968     + char *p = buf;
2969     + char * const end = buf + size;
2970     +
2971     + p += scnprintf(p, end - p, "%s",
2972     + (ih_version(ih) == KEY_FORMAT_3_6) ?
2973     + "*3.6* " : "*3.5*");
2974     +
2975     + p += scnprintf_le_key(p, end - p, &ih->ih_key);
2976     +
2977     + p += scnprintf(p, end - p,
2978     + ", item_len %d, item_location %d, free_space(entry_count) %d",
2979     + ih_item_len(ih), ih_location(ih),
2980     + ih_free_space(ih));
2981     + return p - buf;
2982     } else
2983     - sprintf(buf, "[NULL]");
2984     + return scnprintf(buf, size, "[NULL]");
2985     }
2986    
2987     -static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
2988     +static int scnprintf_direntry(char *buf, size_t size,
2989     + struct reiserfs_dir_entry *de)
2990     {
2991     char name[20];
2992    
2993     memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
2994     name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
2995     - sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
2996     + return scnprintf(buf, size, "\"%s\"==>[%d %d]",
2997     + name, de->de_dir_id, de->de_objectid);
2998     }
2999    
3000     -static void sprintf_block_head(char *buf, struct buffer_head *bh)
3001     +static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh)
3002     {
3003     - sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
3004     - B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
3005     + return scnprintf(buf, size,
3006     + "level=%d, nr_items=%d, free_space=%d rdkey ",
3007     + B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
3008     }
3009    
3010     -static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
3011     +static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
3012     {
3013     - sprintf(buf,
3014     - "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
3015     - bh->b_bdev, bh->b_size,
3016     - (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
3017     - bh->b_state, bh->b_page,
3018     - buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
3019     - buffer_dirty(bh) ? "DIRTY" : "CLEAN",
3020     - buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
3021     + return scnprintf(buf, size,
3022     + "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
3023     + bh->b_bdev, bh->b_size,
3024     + (unsigned long long)bh->b_blocknr,
3025     + atomic_read(&(bh->b_count)),
3026     + bh->b_state, bh->b_page,
3027     + buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
3028     + buffer_dirty(bh) ? "DIRTY" : "CLEAN",
3029     + buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
3030     }
3031    
3032     -static void sprintf_disk_child(char *buf, struct disk_child *dc)
3033     +static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc)
3034     {
3035     - sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
3036     - dc_size(dc));
3037     + return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]",
3038     + dc_block_number(dc), dc_size(dc));
3039     }
3040    
3041     static char *is_there_reiserfs_struct(char *fmt, int *what)
3042     @@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args)
3043     char *fmt1 = fmt_buf;
3044     char *k;
3045     char *p = error_buf;
3046     + char * const end = &error_buf[sizeof(error_buf)];
3047     int what;
3048    
3049     spin_lock(&error_lock);
3050    
3051     - strcpy(fmt1, fmt);
3052     + if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) {
3053     + strscpy(error_buf, "format string too long", end - error_buf);
3054     + goto out_unlock;
3055     + }
3056    
3057     while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
3058     *k = 0;
3059    
3060     - p += vsprintf(p, fmt1, args);
3061     + p += vscnprintf(p, end - p, fmt1, args);
3062    
3063     switch (what) {
3064     case 'k':
3065     - sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
3066     + p += scnprintf_le_key(p, end - p,
3067     + va_arg(args, struct reiserfs_key *));
3068     break;
3069     case 'K':
3070     - sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
3071     + p += scnprintf_cpu_key(p, end - p,
3072     + va_arg(args, struct cpu_key *));
3073     break;
3074     case 'h':
3075     - sprintf_item_head(p, va_arg(args, struct item_head *));
3076     + p += scnprintf_item_head(p, end - p,
3077     + va_arg(args, struct item_head *));
3078     break;
3079     case 't':
3080     - sprintf_direntry(p,
3081     - va_arg(args,
3082     - struct reiserfs_dir_entry *));
3083     + p += scnprintf_direntry(p, end - p,
3084     + va_arg(args, struct reiserfs_dir_entry *));
3085     break;
3086     case 'y':
3087     - sprintf_disk_child(p,
3088     - va_arg(args, struct disk_child *));
3089     + p += scnprintf_disk_child(p, end - p,
3090     + va_arg(args, struct disk_child *));
3091     break;
3092     case 'z':
3093     - sprintf_block_head(p,
3094     - va_arg(args, struct buffer_head *));
3095     + p += scnprintf_block_head(p, end - p,
3096     + va_arg(args, struct buffer_head *));
3097     break;
3098     case 'b':
3099     - sprintf_buffer_head(p,
3100     - va_arg(args, struct buffer_head *));
3101     + p += scnprintf_buffer_head(p, end - p,
3102     + va_arg(args, struct buffer_head *));
3103     break;
3104     case 'a':
3105     - sprintf_de_head(p,
3106     - va_arg(args,
3107     - struct reiserfs_de_head *));
3108     + p += scnprintf_de_head(p, end - p,
3109     + va_arg(args, struct reiserfs_de_head *));
3110     break;
3111     }
3112    
3113     - p += strlen(p);
3114     fmt1 = k + 2;
3115     }
3116     - vsprintf(p, fmt1, args);
3117     + p += vscnprintf(p, end - p, fmt1, args);
3118     +out_unlock:
3119     spin_unlock(&error_lock);
3120    
3121     }
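The whole reiserfs conversion rests on one property of scnprintf(): unlike snprintf(), it returns the number of characters actually written (excluding the trailing NUL), so the p += scnprintf(p, end - p, ...) chain can never advance p past end even when output is truncated. A self-contained user-space sketch of that pattern, with a stand-in scnprintf() built on vsnprintf():

#include <stdarg.h>
#include <stdio.h>

/* User-space stand-in for the kernel's scnprintf(): returns the number of
 * characters actually stored, never more than size - 1, whereas snprintf()
 * returns the length the output would have had. */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (i < 0)
		return 0;
	return (size_t)i < size ? i : (int)(size - 1);
}

int main(void)
{
	char buf[24];
	char *p = buf;
	char * const end = buf + sizeof(buf);

	/* chained, bounded appends: p can reach end but never pass it */
	p += scnprintf(p, end - p, "[%d %d]", 12, 34);
	p += scnprintf(p, end - p, ", item_len %d, location %d", 4096, 7);
	printf("%s (used %ld of %zu)\n", buf, (long)(p - buf), sizeof(buf));
	return 0;
}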
3122     diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
3123     index a031897fca76..ca1d2cc2cdfa 100644
3124     --- a/include/linux/arm-smccc.h
3125     +++ b/include/linux/arm-smccc.h
3126     @@ -80,6 +80,11 @@
3127     ARM_SMCCC_SMC_32, \
3128     0, 0x8000)
3129    
3130     +#define ARM_SMCCC_ARCH_WORKAROUND_2 \
3131     + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
3132     + ARM_SMCCC_SMC_32, \
3133     + 0, 0x7fff)
3134     +
3135     #ifndef __ASSEMBLY__
3136    
3137     #include <linux/linkage.h>
3138     @@ -291,5 +296,10 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
3139     */
3140     #define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
3141    
3142     +/* Return codes defined in ARM DEN 0070A */
3143     +#define SMCCC_RET_SUCCESS 0
3144     +#define SMCCC_RET_NOT_SUPPORTED -1
3145     +#define SMCCC_RET_NOT_REQUIRED -2
3146     +
3147     #endif /*__ASSEMBLY__*/
3148     #endif /*__LINUX_ARM_SMCCC_H*/
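ARM_SMCCC_ARCH_WORKAROUND_2 is the firmware entry point behind the ssbd= parameter documented at the top of this patch, and the SMCCC_RET_* codes are how firmware reports its capability. A rough sketch of how a caller might probe it, assuming an SMCCC v1.1 SMC conduit; the real arm64 code also handles the HVC conduit and checks the SMCCC version first:

#include <linux/arm-smccc.h>

static int probe_ssbd_mitigation(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	switch ((int)res.a0) {
	case SMCCC_RET_SUCCESS:
		return 1;	/* firmware mitigation available */
	case SMCCC_RET_NOT_REQUIRED:
		return 0;	/* CPU not affected */
	case SMCCC_RET_NOT_SUPPORTED:
	default:
		return -1;	/* no firmware support */
	}
}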
3149     diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
3150     index 0c27515d2cf6..8124815eb121 100644
3151     --- a/include/linux/atmdev.h
3152     +++ b/include/linux/atmdev.h
3153     @@ -214,6 +214,7 @@ struct atmphy_ops {
3154     struct atm_skb_data {
3155     struct atm_vcc *vcc; /* ATM VCC */
3156     unsigned long atm_options; /* ATM layer options */
3157     + unsigned int acct_truesize; /* truesize accounted to vcc */
3158     };
3159    
3160     #define VCC_HTABLE_SIZE 32
3161     @@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
3162    
3163     void atm_dev_release_vccs(struct atm_dev *dev);
3164    
3165     +static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
3166     +{
3167     + /*
3168     + * Because ATM skbs may not belong to a sock (and we don't
3169     + * necessarily want to), skb->truesize may be adjusted,
3170     + * escaping the hack in pskb_expand_head() which avoids
3171     + * doing so for some cases. So stash the value of truesize
3172     + * at the time we accounted it, and atm_pop_raw() can use
3173     + * that value later, in case it changes.
3174     + */
3175     + refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
3176     + ATM_SKB(skb)->acct_truesize = skb->truesize;
3177     + ATM_SKB(skb)->atm_options = vcc->atm_options;
3178     +}
3179    
3180     static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
3181     {
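The helper's comment is the key: skb->truesize can legitimately change between charge and completion, so the release side must subtract the stashed acct_truesize rather than the live value (the net/atm/raw.c hunk further down does exactly that). A sketch of the paired bookkeeping, with my_vcc_xmit/my_vcc_pop as hypothetical driver hooks rather than real atm code:

static void my_vcc_xmit(struct atm_vcc *vcc, struct sk_buff *skb)
{
	atm_account_tx(vcc, skb);	/* charges and stashes acct_truesize */
	vcc->send(vcc, skb);
}

static void my_vcc_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct sock *sk = sk_atm(vcc);

	/* release exactly what was charged, not skb->truesize, which may
	 * have been adjusted in the meantime */
	WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize,
				      &sk->sk_wmem_alloc));
	dev_kfree_skb_any(skb);
	sk->sk_write_space(sk);
}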
3182     diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
3183     index 0bd432a4d7bd..24251762c20c 100644
3184     --- a/include/linux/backing-dev-defs.h
3185     +++ b/include/linux/backing-dev-defs.h
3186     @@ -22,7 +22,6 @@ struct dentry;
3187     */
3188     enum wb_state {
3189     WB_registered, /* bdi_register() was done */
3190     - WB_shutting_down, /* wb_shutdown() in progress */
3191     WB_writeback_running, /* Writeback is in progress */
3192     WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
3193     WB_start_all, /* nr_pages == 0 (all) work pending */
3194     @@ -189,6 +188,7 @@ struct backing_dev_info {
3195     #ifdef CONFIG_CGROUP_WRITEBACK
3196     struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
3197     struct rb_root cgwb_congested_tree; /* their congested states */
3198     + struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
3199     #else
3200     struct bdi_writeback_congested *wb_congested;
3201     #endif
3202     diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
3203     index 17b18b91ebac..1602bf4ab4cd 100644
3204     --- a/include/linux/blk_types.h
3205     +++ b/include/linux/blk_types.h
3206     @@ -186,6 +186,8 @@ struct bio {
3207     * throttling rules. Don't do it again. */
3208     #define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
3209     * of this bio. */
3210     +#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */
3211     +
3212     /* See BVEC_POOL_OFFSET below before adding new flags */
3213    
3214     /*
3215     diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
3216     index b4bf73f5e38f..f1fa516bcf51 100644
3217     --- a/include/linux/compiler-gcc.h
3218     +++ b/include/linux/compiler-gcc.h
3219     @@ -65,6 +65,18 @@
3220     #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
3221     #endif
3222    
3223     +/*
3224     + * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
3225     + * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
3226     + * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
3227     + * defined so the gnu89 semantics are the default.
3228     + */
3229     +#ifdef __GNUC_STDC_INLINE__
3230     +# define __gnu_inline __attribute__((gnu_inline))
3231     +#else
3232     +# define __gnu_inline
3233     +#endif
3234     +
3235     /*
3236     * Force always-inline if the user requests it so via the .config,
3237     * or if gcc is too old.
3238     @@ -72,19 +84,22 @@
3239     * -Wunused-function. This turns out to avoid the need for complex #ifdef
3240     * directives. Suppress the warning in clang as well by using "unused"
3241     * function attribute, which is redundant but not harmful for gcc.
3242     + * Prefer gnu_inline, so that extern inline functions do not emit an
3243     + * externally visible function. This makes extern inline behave as per gnu89
3244     + * semantics rather than c99. This prevents multiple symbol definition errors
3245     + * of extern inline functions at link time.
3246     + * A lot of inline functions can cause havoc with function tracing.
3247     */
3248     #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
3249     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
3250     -#define inline inline __attribute__((always_inline,unused)) notrace
3251     -#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
3252     -#define __inline __inline __attribute__((always_inline,unused)) notrace
3253     +#define inline \
3254     + inline __attribute__((always_inline, unused)) notrace __gnu_inline
3255     #else
3256     -/* A lot of inline functions can cause havoc with function tracing */
3257     -#define inline inline __attribute__((unused)) notrace
3258     -#define __inline__ __inline__ __attribute__((unused)) notrace
3259     -#define __inline __inline __attribute__((unused)) notrace
3260     +#define inline inline __attribute__((unused)) notrace __gnu_inline
3261     #endif
3262    
3263     +#define __inline__ inline
3264     +#define __inline inline
3265     #define __always_inline inline __attribute__((always_inline))
3266     #define noinline __attribute__((noinline))
3267    
3268     diff --git a/include/linux/filter.h b/include/linux/filter.h
3269     index fc4e8f91b03d..b49658f9001e 100644
3270     --- a/include/linux/filter.h
3271     +++ b/include/linux/filter.h
3272     @@ -453,15 +453,16 @@ struct sock_fprog_kern {
3273     };
3274    
3275     struct bpf_binary_header {
3276     - unsigned int pages;
3277     - u8 image[];
3278     + u32 pages;
3279     + /* Some arches need word alignment for their instructions */
3280     + u8 image[] __aligned(4);
3281     };
3282    
3283     struct bpf_prog {
3284     u16 pages; /* Number of allocated pages */
3285     u16 jited:1, /* Is our filter JIT'ed? */
3286     jit_requested:1,/* archs need to JIT the prog */
3287     - locked:1, /* Program image locked? */
3288     + undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
3289     gpl_compatible:1, /* Is filter GPL compatible? */
3290     cb_access:1, /* Is control block accessed? */
3291     dst_needed:1, /* Do we need dst entry? */
3292     @@ -644,50 +645,27 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
3293    
3294     #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
3295    
3296     -#ifdef CONFIG_ARCH_HAS_SET_MEMORY
3297     -static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
3298     -{
3299     - fp->locked = 1;
3300     - WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
3301     -}
3302     -
3303     -static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
3304     -{
3305     - if (fp->locked) {
3306     - WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
3307     - /* In case set_memory_rw() fails, we want to be the first
3308     - * to crash here instead of some random place later on.
3309     - */
3310     - fp->locked = 0;
3311     - }
3312     -}
3313     -
3314     -static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
3315     -{
3316     - WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
3317     -}
3318     -
3319     -static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
3320     -{
3321     - WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
3322     -}
3323     -#else
3324     static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
3325     {
3326     + fp->undo_set_mem = 1;
3327     + set_memory_ro((unsigned long)fp, fp->pages);
3328     }
3329    
3330     static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
3331     {
3332     + if (fp->undo_set_mem)
3333     + set_memory_rw((unsigned long)fp, fp->pages);
3334     }
3335    
3336     static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
3337     {
3338     + set_memory_ro((unsigned long)hdr, hdr->pages);
3339     }
3340    
3341     static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
3342     {
3343     + set_memory_rw((unsigned long)hdr, hdr->pages);
3344     }
3345     -#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
3346    
3347     static inline struct bpf_binary_header *
3348     bpf_jit_binary_hdr(const struct bpf_prog *fp)
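Dropping the CONFIG_ARCH_HAS_SET_MEMORY #ifdef works because include/linux/set_memory.h already supplies no-op fallbacks when an architecture cannot change page permissions, so the wrappers can call set_memory_ro()/set_memory_rw() unconditionally and only the undo_set_mem bookkeeping bit remains. The fallbacks look roughly like this (paraphrased from set_memory.h):

#ifndef CONFIG_ARCH_HAS_SET_MEMORY
static inline int set_memory_ro(unsigned long addr, int numpages)
{
	return 0;
}

static inline int set_memory_rw(unsigned long addr, int numpages)
{
	return 0;
}
#endif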
3349     diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
3350     index d3c9db492b30..fab5121ffb8f 100644
3351     --- a/include/linux/mlx5/eswitch.h
3352     +++ b/include/linux/mlx5/eswitch.h
3353     @@ -8,6 +8,8 @@
3354    
3355     #include <linux/mlx5/driver.h>
3356    
3357     +#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
3358     +
3359     enum {
3360     SRIOV_NONE,
3361     SRIOV_LEGACY,
3362     diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
3363     index 1aad455538f4..5b662ea2e32a 100644
3364     --- a/include/linux/mlx5/mlx5_ifc.h
3365     +++ b/include/linux/mlx5/mlx5_ifc.h
3366     @@ -905,7 +905,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
3367     u8 vnic_env_queue_counters[0x1];
3368     u8 ets[0x1];
3369     u8 nic_flow_table[0x1];
3370     - u8 eswitch_flow_table[0x1];
3371     + u8 eswitch_manager[0x1];
3372     u8 device_memory[0x1];
3373     u8 mcam_reg[0x1];
3374     u8 pcam_reg[0x1];
3375     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3376     index cf44503ea81a..5ad916d31471 100644
3377     --- a/include/linux/netdevice.h
3378     +++ b/include/linux/netdevice.h
3379     @@ -2735,11 +2735,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp,
3380     if (PTR_ERR(pp) != -EINPROGRESS)
3381     NAPI_GRO_CB(skb)->flush |= flush;
3382     }
3383     +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
3384     + struct sk_buff **pp,
3385     + int flush,
3386     + struct gro_remcsum *grc)
3387     +{
3388     + if (PTR_ERR(pp) != -EINPROGRESS) {
3389     + NAPI_GRO_CB(skb)->flush |= flush;
3390     + skb_gro_remcsum_cleanup(skb, grc);
3391     + skb->remcsum_offload = 0;
3392     + }
3393     +}
3394     #else
3395     static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
3396     {
3397     NAPI_GRO_CB(skb)->flush |= flush;
3398     }
3399     +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
3400     + struct sk_buff **pp,
3401     + int flush,
3402     + struct gro_remcsum *grc)
3403     +{
3404     + NAPI_GRO_CB(skb)->flush |= flush;
3405     + skb_gro_remcsum_cleanup(skb, grc);
3406     + skb->remcsum_offload = 0;
3407     +}
3408     #endif
3409    
3410     static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
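The new helper folds the three-statement cleanup (flush flag, remcsum cleanup, remcsum_offload reset) that GRO handlers with remote checksum offload previously open-coded into one call; the net/ipv4/fou.c hunk further down is the first user. A skeletal receive path showing where it sits; demo_gro_receive and the elided matching logic are illustrative only:

static struct sk_buff **demo_gro_receive(struct sock *sk,
					 struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct gro_remcsum grc;
	int flush = 1;

	skb_gro_remcsum_init(&grc);

	/* ... parse the encapsulation header; on a match, clear flush
	 * and hand off to the inner protocol handler, which sets pp ... */

	/* one call now covers the -EINPROGRESS case, the flush flag,
	 * and the remote-checksum-offload cleanup */
	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
	return pp;
}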
3411     diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
3412     index e828d31be5da..3b4fbf690957 100644
3413     --- a/include/net/pkt_cls.h
3414     +++ b/include/net/pkt_cls.h
3415     @@ -111,6 +111,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
3416     {
3417     }
3418    
3419     +static inline bool tcf_block_shared(struct tcf_block *block)
3420     +{
3421     + return false;
3422     +}
3423     +
3424     static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
3425     {
3426     return NULL;
3427     diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
3428     index 6ef6746a7871..78509e3f68da 100644
3429     --- a/kernel/bpf/core.c
3430     +++ b/kernel/bpf/core.c
3431     @@ -1513,6 +1513,17 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
3432     return 0;
3433     }
3434    
3435     +static void bpf_prog_select_func(struct bpf_prog *fp)
3436     +{
3437     +#ifndef CONFIG_BPF_JIT_ALWAYS_ON
3438     + u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
3439     +
3440     + fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
3441     +#else
3442     + fp->bpf_func = __bpf_prog_ret0_warn;
3443     +#endif
3444     +}
3445     +
3446     /**
3447     * bpf_prog_select_runtime - select exec runtime for BPF program
3448     * @fp: bpf_prog populated with internal BPF program
3449     @@ -1523,13 +1534,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
3450     */
3451     struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
3452     {
3453     -#ifndef CONFIG_BPF_JIT_ALWAYS_ON
3454     - u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
3455     + /* In case of BPF to BPF calls, verifier did all the prep
3456     + * work with regards to JITing, etc.
3457     + */
3458     + if (fp->bpf_func)
3459     + goto finalize;
3460    
3461     - fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
3462     -#else
3463     - fp->bpf_func = __bpf_prog_ret0_warn;
3464     -#endif
3465     + bpf_prog_select_func(fp);
3466    
3467     /* eBPF JITs can rewrite the program in case constant
3468     * blinding is active. However, in case of error during
3469     @@ -1550,6 +1561,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
3470     if (*err)
3471     return fp;
3472     }
3473     +
3474     +finalize:
3475     bpf_prog_lock_ro(fp);
3476    
3477     /* The tail call compatibility check can only be done at
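With bpf-to-bpf calls, the verifier may already have JITed the program (and set fp->bpf_func) before bpf_prog_select_runtime() runs, so the function now skips runtime selection in that case but still falls through to bpf_prog_lock_ro(), which previously only ran on the path that selected a runtime. Condensed control flow, illustrative rather than the exact kernel code:

struct bpf_prog *select_runtime_sketch(struct bpf_prog *fp, int *err)
{
	if (fp->bpf_func)	/* verifier already JITed the subprogs */
		goto finalize;

	bpf_prog_select_func(fp);	/* pick an interpreter entry point */
	/* ... optionally JIT via bpf_int_jit_compile(), setting *err ... */

finalize:
	bpf_prog_lock_ro(fp);	/* image made read-only on every path */
	return fp;
}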
3478     diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
3479     index 95a84b2f10ce..fc7ee4357381 100644
3480     --- a/kernel/bpf/sockmap.c
3481     +++ b/kernel/bpf/sockmap.c
3482     @@ -112,6 +112,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
3483     static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
3484     static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
3485     int offset, size_t size, int flags);
3486     +static void bpf_tcp_close(struct sock *sk, long timeout);
3487    
3488     static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
3489     {
3490     @@ -133,7 +134,42 @@ static bool bpf_tcp_stream_read(const struct sock *sk)
3491     return !empty;
3492     }
3493    
3494     -static struct proto tcp_bpf_proto;
3495     +enum {
3496     + SOCKMAP_IPV4,
3497     + SOCKMAP_IPV6,
3498     + SOCKMAP_NUM_PROTS,
3499     +};
3500     +
3501     +enum {
3502     + SOCKMAP_BASE,
3503     + SOCKMAP_TX,
3504     + SOCKMAP_NUM_CONFIGS,
3505     +};
3506     +
3507     +static struct proto *saved_tcpv6_prot __read_mostly;
3508     +static DEFINE_SPINLOCK(tcpv6_prot_lock);
3509     +static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
3510     +static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
3511     + struct proto *base)
3512     +{
3513     + prot[SOCKMAP_BASE] = *base;
3514     + prot[SOCKMAP_BASE].close = bpf_tcp_close;
3515     + prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg;
3516     + prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read;
3517     +
3518     + prot[SOCKMAP_TX] = prot[SOCKMAP_BASE];
3519     + prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg;
3520     + prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage;
3521     +}
3522     +
3523     +static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
3524     +{
3525     + int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
3526     + int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
3527     +
3528     + sk->sk_prot = &bpf_tcp_prots[family][conf];
3529     +}
3530     +
3531     static int bpf_tcp_init(struct sock *sk)
3532     {
3533     struct smap_psock *psock;
3534     @@ -153,14 +189,17 @@ static int bpf_tcp_init(struct sock *sk)
3535     psock->save_close = sk->sk_prot->close;
3536     psock->sk_proto = sk->sk_prot;
3537    
3538     - if (psock->bpf_tx_msg) {
3539     - tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
3540     - tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
3541     - tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
3542     - tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
3543     + /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
3544     + if (sk->sk_family == AF_INET6 &&
3545     + unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
3546     + spin_lock_bh(&tcpv6_prot_lock);
3547     + if (likely(sk->sk_prot != saved_tcpv6_prot)) {
3548     + build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
3549     + smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
3550     + }
3551     + spin_unlock_bh(&tcpv6_prot_lock);
3552     }
3553     -
3554     - sk->sk_prot = &tcp_bpf_proto;
3555     + update_sk_prot(sk, psock);
3556     rcu_read_unlock();
3557     return 0;
3558     }
3559     @@ -432,7 +471,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
3560     while (sg[i].length) {
3561     free += sg[i].length;
3562     sk_mem_uncharge(sk, sg[i].length);
3563     - put_page(sg_page(&sg[i]));
3564     + if (!md->skb)
3565     + put_page(sg_page(&sg[i]));
3566     sg[i].length = 0;
3567     sg[i].page_link = 0;
3568     sg[i].offset = 0;
3569     @@ -441,6 +481,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
3570     if (i == MAX_SKB_FRAGS)
3571     i = 0;
3572     }
3573     + if (md->skb)
3574     + consume_skb(md->skb);
3575    
3576     return free;
3577     }
3578     @@ -1070,8 +1112,7 @@ static void bpf_tcp_msg_add(struct smap_psock *psock,
3579    
3580     static int bpf_tcp_ulp_register(void)
3581     {
3582     - tcp_bpf_proto = tcp_prot;
3583     - tcp_bpf_proto.close = bpf_tcp_close;
3584     + build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
3585     /* Once BPF TX ULP is registered it is never unregistered. It
3586     * will be in the ULP list for the lifetime of the system. Doing
3587     * duplicate registers is not a problem.
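Instead of patching one global tcp_bpf_proto at socket-init time (which racily mutated a structure other sockets were already using), the fix precomputes a [family][config] matrix of protos once and merely points each socket at the right immutable entry; the IPv6 base is rebuilt lazily under a spinlock whenever tcpv6_prot's address changes. A condensed sketch of the lookup, with my_-prefixed names standing in for the kernel's:

enum { MY_V4, MY_V6, MY_NUM_FAMILIES };
enum { MY_BASE, MY_TX, MY_NUM_CONFIGS };

static struct proto my_prots[MY_NUM_FAMILIES][MY_NUM_CONFIGS];

/* Nothing is written after build time, so no socket can observe a
 * half-patched proto the way the old single global allowed. */
static void my_update_sk_prot(struct sock *sk, bool has_tx_prog)
{
	int family = sk->sk_family == AF_INET6 ? MY_V6 : MY_V4;
	int conf = has_tx_prog ? MY_TX : MY_BASE;

	sk->sk_prot = &my_prots[family][conf];
}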
3588     diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
3589     index 016ef9025827..74fa60b4b438 100644
3590     --- a/kernel/bpf/syscall.c
3591     +++ b/kernel/bpf/syscall.c
3592     @@ -1328,9 +1328,7 @@ static int bpf_prog_load(union bpf_attr *attr)
3593     if (err < 0)
3594     goto free_used_maps;
3595    
3596     - /* eBPF program is ready to be JITed */
3597     - if (!prog->bpf_func)
3598     - prog = bpf_prog_select_runtime(prog, &err);
3599     + prog = bpf_prog_select_runtime(prog, &err);
3600     if (err < 0)
3601     goto free_used_maps;
3602    
3603     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3604     index 56212edd6f23..1b586f31cbfd 100644
3605     --- a/kernel/bpf/verifier.c
3606     +++ b/kernel/bpf/verifier.c
3607     @@ -5349,6 +5349,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
3608     if (insn->code != (BPF_JMP | BPF_CALL) ||
3609     insn->src_reg != BPF_PSEUDO_CALL)
3610     continue;
3611     + /* Upon error here we cannot fall back to interpreter but
3612     + * need a hard reject of the program. Thus -EFAULT is
3613     + * propagated in any case.
3614     + */
3615     subprog = find_subprog(env, i + insn->imm + 1);
3616     if (subprog < 0) {
3617     WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
3618     @@ -5369,7 +5373,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
3619    
3620     func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
3621     if (!func)
3622     - return -ENOMEM;
3623     + goto out_undo_insn;
3624    
3625     for (i = 0; i <= env->subprog_cnt; i++) {
3626     subprog_start = subprog_end;
3627     @@ -5424,7 +5428,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
3628     tmp = bpf_int_jit_compile(func[i]);
3629     if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
3630     verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
3631     - err = -EFAULT;
3632     + err = -ENOTSUPP;
3633     goto out_free;
3634     }
3635     cond_resched();
3636     @@ -5466,6 +5470,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
3637     if (func[i])
3638     bpf_jit_free(func[i]);
3639     kfree(func);
3640     +out_undo_insn:
3641     /* cleanup main prog to be interpreted */
3642     prog->jit_requested = 0;
3643     for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
3644     @@ -5492,6 +5497,8 @@ static int fixup_call_args(struct bpf_verifier_env *env)
3645     err = jit_subprogs(env);
3646     if (err == 0)
3647     return 0;
3648     + if (err == -EFAULT)
3649     + return err;
3650     }
3651     #ifndef CONFIG_BPF_JIT_ALWAYS_ON
3652     for (i = 0; i < prog->len; i++, insn++) {
3653     diff --git a/mm/backing-dev.c b/mm/backing-dev.c
3654     index 8fe3ebd6ac00..048d0651aa98 100644
3655     --- a/mm/backing-dev.c
3656     +++ b/mm/backing-dev.c
3657     @@ -359,15 +359,8 @@ static void wb_shutdown(struct bdi_writeback *wb)
3658     spin_lock_bh(&wb->work_lock);
3659     if (!test_and_clear_bit(WB_registered, &wb->state)) {
3660     spin_unlock_bh(&wb->work_lock);
3661     - /*
3662     - * Wait for wb shutdown to finish if someone else is just
3663     - * running wb_shutdown(). Otherwise we could proceed to wb /
3664     - * bdi destruction before wb_shutdown() is finished.
3665     - */
3666     - wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
3667     return;
3668     }
3669     - set_bit(WB_shutting_down, &wb->state);
3670     spin_unlock_bh(&wb->work_lock);
3671    
3672     cgwb_remove_from_bdi_list(wb);
3673     @@ -379,12 +372,6 @@ static void wb_shutdown(struct bdi_writeback *wb)
3674     mod_delayed_work(bdi_wq, &wb->dwork, 0);
3675     flush_delayed_work(&wb->dwork);
3676     WARN_ON(!list_empty(&wb->work_list));
3677     - /*
3678     - * Make sure bit gets cleared after shutdown is finished. Matches with
3679     - * the barrier provided by test_and_clear_bit() above.
3680     - */
3681     - smp_wmb();
3682     - clear_and_wake_up_bit(WB_shutting_down, &wb->state);
3683     }
3684    
3685     static void wb_exit(struct bdi_writeback *wb)
3686     @@ -508,10 +495,12 @@ static void cgwb_release_workfn(struct work_struct *work)
3687     struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
3688     release_work);
3689    
3690     + mutex_lock(&wb->bdi->cgwb_release_mutex);
3691     wb_shutdown(wb);
3692    
3693     css_put(wb->memcg_css);
3694     css_put(wb->blkcg_css);
3695     + mutex_unlock(&wb->bdi->cgwb_release_mutex);
3696    
3697     fprop_local_destroy_percpu(&wb->memcg_completions);
3698     percpu_ref_exit(&wb->refcnt);
3699     @@ -697,6 +686,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
3700    
3701     INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
3702     bdi->cgwb_congested_tree = RB_ROOT;
3703     + mutex_init(&bdi->cgwb_release_mutex);
3704    
3705     ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
3706     if (!ret) {
3707     @@ -717,7 +707,10 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
3708     spin_lock_irq(&cgwb_lock);
3709     radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
3710     cgwb_kill(*slot);
3711     + spin_unlock_irq(&cgwb_lock);
3712    
3713     + mutex_lock(&bdi->cgwb_release_mutex);
3714     + spin_lock_irq(&cgwb_lock);
3715     while (!list_empty(&bdi->wb_list)) {
3716     wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
3717     bdi_node);
3718     @@ -726,6 +719,7 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
3719     spin_lock_irq(&cgwb_lock);
3720     }
3721     spin_unlock_irq(&cgwb_lock);
3722     + mutex_unlock(&bdi->cgwb_release_mutex);
3723     }
3724    
3725     /**
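The replacement for the WB_shutting_down bit is a lock-ordering contract: cgwb_release_mutex is an outer, sleepable lock taken by both the release worker and cgwb_bdi_unregister(), while cgwb_lock stays the inner spinlock. Unregister first kills all entries under the spinlock alone, then re-enters in mutex-then-spinlock order so it cannot overlap a release in flight. Roughly, as a sketch:

static void cgwb_bdi_unregister_sketch(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	/* ... cgwb_kill() every radix-tree slot ... */
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);	/* outer lock: excludes
						 * cgwb_release_workfn() */
	spin_lock_irq(&cgwb_lock);
	/* ... wb_shutdown() each remaining wb, dropping the spinlock
	 * around the (sleeping) shutdown and re-taking it after ... */
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}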
3726     diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
3727     index 5505ee6ebdbe..d3a5ec02e64c 100644
3728     --- a/net/8021q/vlan.c
3729     +++ b/net/8021q/vlan.c
3730     @@ -688,7 +688,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
3731     out_unlock:
3732     rcu_read_unlock();
3733     out:
3734     - NAPI_GRO_CB(skb)->flush |= flush;
3735     + skb_gro_flush_final(skb, pp, flush);
3736    
3737     return pp;
3738     }
3739     diff --git a/net/atm/br2684.c b/net/atm/br2684.c
3740     index fd94bea36ee8..82c8d33bd8ba 100644
3741     --- a/net/atm/br2684.c
3742     +++ b/net/atm/br2684.c
3743     @@ -252,8 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
3744    
3745     ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
3746     pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
3747     - refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
3748     - ATM_SKB(skb)->atm_options = atmvcc->atm_options;
3749     + atm_account_tx(atmvcc, skb);
3750     dev->stats.tx_packets++;
3751     dev->stats.tx_bytes += skb->len;
3752    
3753     diff --git a/net/atm/clip.c b/net/atm/clip.c
3754     index f07dbc632222..0edebf8decc0 100644
3755     --- a/net/atm/clip.c
3756     +++ b/net/atm/clip.c
3757     @@ -381,8 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
3758     memcpy(here, llc_oui, sizeof(llc_oui));
3759     ((__be16 *) here)[3] = skb->protocol;
3760     }
3761     - refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
3762     - ATM_SKB(skb)->atm_options = vcc->atm_options;
3763     + atm_account_tx(vcc, skb);
3764     entry->vccs->last_use = jiffies;
3765     pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
3766     old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
3767     diff --git a/net/atm/common.c b/net/atm/common.c
3768     index fc78a0508ae1..a7a68e509628 100644
3769     --- a/net/atm/common.c
3770     +++ b/net/atm/common.c
3771     @@ -630,10 +630,9 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
3772     goto out;
3773     }
3774     pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
3775     - refcount_add(skb->truesize, &sk->sk_wmem_alloc);
3776     + atm_account_tx(vcc, skb);
3777    
3778     skb->dev = NULL; /* for paths shared with net_device interfaces */
3779     - ATM_SKB(skb)->atm_options = vcc->atm_options;
3780     if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
3781     kfree_skb(skb);
3782     error = -EFAULT;
3783     diff --git a/net/atm/lec.c b/net/atm/lec.c
3784     index 3138a869b5c0..19ad2fd04983 100644
3785     --- a/net/atm/lec.c
3786     +++ b/net/atm/lec.c
3787     @@ -182,9 +182,8 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
3788     struct net_device *dev = skb->dev;
3789    
3790     ATM_SKB(skb)->vcc = vcc;
3791     - ATM_SKB(skb)->atm_options = vcc->atm_options;
3792     + atm_account_tx(vcc, skb);
3793    
3794     - refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
3795     if (vcc->send(vcc, skb) < 0) {
3796     dev->stats.tx_dropped++;
3797     return;
3798     diff --git a/net/atm/mpc.c b/net/atm/mpc.c
3799     index 31e0dcb970f8..44ddcdd5fd35 100644
3800     --- a/net/atm/mpc.c
3801     +++ b/net/atm/mpc.c
3802     @@ -555,8 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
3803     sizeof(struct llc_snap_hdr));
3804     }
3805    
3806     - refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
3807     - ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
3808     + atm_account_tx(entry->shortcut, skb);
3809     entry->shortcut->send(entry->shortcut, skb);
3810     entry->packets_fwded++;
3811     mpc->in_ops->put(entry);
3812     diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
3813     index 21d9d341a619..af8c4b38b746 100644
3814     --- a/net/atm/pppoatm.c
3815     +++ b/net/atm/pppoatm.c
3816     @@ -350,8 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
3817     return 1;
3818     }
3819    
3820     - refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
3821     - ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
3822     + atm_account_tx(vcc, skb);
3823     pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
3824     skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
3825     ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
3826     diff --git a/net/atm/raw.c b/net/atm/raw.c
3827     index ee10e8d46185..b3ba44aab0ee 100644
3828     --- a/net/atm/raw.c
3829     +++ b/net/atm/raw.c
3830     @@ -35,8 +35,8 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
3831     struct sock *sk = sk_atm(vcc);
3832    
3833     pr_debug("(%d) %d -= %d\n",
3834     - vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
3835     - WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
3836     + vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
3837     + WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
3838     dev_kfree_skb_any(skb);
3839     sk->sk_write_space(sk);
3840     }
3841     diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
3842     index 499123afcab5..9d37d91b34e5 100644
3843     --- a/net/bridge/netfilter/ebtables.c
3844     +++ b/net/bridge/netfilter/ebtables.c
3845     @@ -396,6 +396,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
3846     watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
3847     if (IS_ERR(watcher))
3848     return PTR_ERR(watcher);
3849     +
3850     + if (watcher->family != NFPROTO_BRIDGE) {
3851     + module_put(watcher->me);
3852     + return -ENOENT;
3853     + }
3854     +
3855     w->u.watcher = watcher;
3856    
3857     par->target = watcher;
3858     @@ -717,6 +723,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
3859     goto cleanup_watchers;
3860     }
3861    
3862     + /* Reject UNSPEC, xtables verdicts/return values are incompatible */
3863     + if (target->family != NFPROTO_BRIDGE) {
3864     + module_put(target->me);
3865     + ret = -ENOENT;
3866     + goto cleanup_watchers;
3867     + }
3868     +
3869     t->u.target = target;
3870     if (t->u.target == &ebt_standard_target) {
3871     if (gap < sizeof(struct ebt_standard_target)) {
3872     diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
3873     index a04e1e88bf3a..50537ff961a7 100644
3874     --- a/net/core/dev_ioctl.c
3875     +++ b/net/core/dev_ioctl.c
3876     @@ -285,16 +285,9 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3877     if (ifr->ifr_qlen < 0)
3878     return -EINVAL;
3879     if (dev->tx_queue_len ^ ifr->ifr_qlen) {
3880     - unsigned int orig_len = dev->tx_queue_len;
3881     -
3882     - dev->tx_queue_len = ifr->ifr_qlen;
3883     - err = call_netdevice_notifiers(
3884     - NETDEV_CHANGE_TX_QUEUE_LEN, dev);
3885     - err = notifier_to_errno(err);
3886     - if (err) {
3887     - dev->tx_queue_len = orig_len;
3888     + err = dev_change_tx_queue_len(dev, ifr->ifr_qlen);
3889     + if (err)
3890     return err;
3891     - }
3892     }
3893     return 0;
3894    
3895     diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
3896     index 8b5ba6dffac7..12877a1514e7 100644
3897     --- a/net/dccp/ccids/ccid3.c
3898     +++ b/net/dccp/ccids/ccid3.c
3899     @@ -600,7 +600,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
3900     {
3901     struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
3902     struct dccp_sock *dp = dccp_sk(sk);
3903     - ktime_t now = ktime_get_real();
3904     + ktime_t now = ktime_get();
3905     s64 delta = 0;
3906    
3907     switch (fbtype) {
3908     @@ -625,15 +625,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
3909     case CCID3_FBACK_PERIODIC:
3910     delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
3911     if (delta <= 0)
3912     - DCCP_BUG("delta (%ld) <= 0", (long)delta);
3913     - else
3914     - hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
3915     + delta = 1;
3916     + hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
3917     break;
3918     default:
3919     return;
3920     }
3921    
3922     - ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
3923     + ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
3924     hc->rx_x_recv, hc->rx_pinv);
3925    
3926     hc->rx_tstamp_last_feedback = now;
3927     @@ -680,7 +679,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
3928     static u32 ccid3_first_li(struct sock *sk)
3929     {
3930     struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
3931     - u32 x_recv, p, delta;
3932     + u32 x_recv, p;
3933     + s64 delta;
3934     u64 fval;
3935    
3936     if (hc->rx_rtt == 0) {
3937     @@ -688,7 +688,9 @@ static u32 ccid3_first_li(struct sock *sk)
3938     hc->rx_rtt = DCCP_FALLBACK_RTT;
3939     }
3940    
3941     - delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
3942     + delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
3943     + if (delta <= 0)
3944     + delta = 1;
3945     x_recv = scaled_div32(hc->rx_bytes_recv, delta);
3946     if (x_recv == 0) { /* would also trigger divide-by-zero */
3947     DCCP_WARN("X_recv==0\n");
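Two independent fixes combine in the ccid3 hunks: switching from ktime_get_real() to the monotonic ktime_get() removes negative intervals caused by wall-clock steps, and any remaining non-positive delta is clamped to 1 microsecond so the subsequent division can never see zero. A user-space sketch of the clamp; the kernel's scaled_div32() applies additional fixed-point scaling beyond this:

#include <stdint.h>
#include <stdio.h>

/* Treat a non-positive elapsed time as 1us so the rate computation
 * cannot divide by zero or go negative. */
static uint32_t rx_rate(uint32_t bytes_recv, int64_t delta_us)
{
	if (delta_us <= 0)
		delta_us = 1;
	return (uint32_t)(((uint64_t)bytes_recv * 1000000) / delta_us);
}

int main(void)
{
	printf("%u B/s\n", rx_rate(1500, 0));		/* clamped, no crash */
	printf("%u B/s\n", rx_rate(1500, 125000));
	return 0;
}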
3948     diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
3949     index 40c851693f77..0c9478b91fa5 100644
3950     --- a/net/dns_resolver/dns_key.c
3951     +++ b/net/dns_resolver/dns_key.c
3952     @@ -86,35 +86,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
3953     opt++;
3954     kdebug("options: '%s'", opt);
3955     do {
3956     + int opt_len, opt_nlen;
3957     const char *eq;
3958     - int opt_len, opt_nlen, opt_vlen, tmp;
3959     + char optval[128];
3960    
3961     next_opt = memchr(opt, '#', end - opt) ?: end;
3962     opt_len = next_opt - opt;
3963     - if (opt_len <= 0 || opt_len > 128) {
3964     + if (opt_len <= 0 || opt_len > sizeof(optval)) {
3965     pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
3966     opt_len);
3967     return -EINVAL;
3968     }
3969    
3970     - eq = memchr(opt, '=', opt_len) ?: end;
3971     - opt_nlen = eq - opt;
3972     - eq++;
3973     - opt_vlen = next_opt - eq; /* will be -1 if no value */
3974     + eq = memchr(opt, '=', opt_len);
3975     + if (eq) {
3976     + opt_nlen = eq - opt;
3977     + eq++;
3978     + memcpy(optval, eq, next_opt - eq);
3979     + optval[next_opt - eq] = '\0';
3980     + } else {
3981     + opt_nlen = opt_len;
3982     + optval[0] = '\0';
3983     + }
3984    
3985     - tmp = opt_vlen >= 0 ? opt_vlen : 0;
3986     - kdebug("option '%*.*s' val '%*.*s'",
3987     - opt_nlen, opt_nlen, opt, tmp, tmp, eq);
3988     + kdebug("option '%*.*s' val '%s'",
3989     + opt_nlen, opt_nlen, opt, optval);
3990    
3991     /* see if it's an error number representing a DNS error
3992     * that's to be recorded as the result in this key */
3993     if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
3994     memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
3995     kdebug("dns error number option");
3996     - if (opt_vlen <= 0)
3997     - goto bad_option_value;
3998    
3999     - ret = kstrtoul(eq, 10, &derrno);
4000     + ret = kstrtoul(optval, 10, &derrno);
4001     if (ret < 0)
4002     goto bad_option_value;
4003    
4004     diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
4005     index 1540db65241a..c9ec1603666b 100644
4006     --- a/net/ipv4/fou.c
4007     +++ b/net/ipv4/fou.c
4008     @@ -448,9 +448,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
4009     out_unlock:
4010     rcu_read_unlock();
4011     out:
4012     - NAPI_GRO_CB(skb)->flush |= flush;
4013     - skb_gro_remcsum_cleanup(skb, &grc);
4014     - skb->remcsum_offload = 0;
4015     + skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
4016    
4017     return pp;
4018     }
4019     diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
4020     index 1859c473b21a..6a7d980105f6 100644
4021     --- a/net/ipv4/gre_offload.c
4022     +++ b/net/ipv4/gre_offload.c
4023     @@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
4024     out_unlock:
4025     rcu_read_unlock();
4026     out:
4027     - NAPI_GRO_CB(skb)->flush |= flush;
4028     + skb_gro_flush_final(skb, pp, flush);
4029    
4030     return pp;
4031     }
4032     diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
4033     index 31ff46daae97..3647167c8fa3 100644
4034     --- a/net/ipv4/inet_hashtables.c
4035     +++ b/net/ipv4/inet_hashtables.c
4036     @@ -243,9 +243,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
4037     bool dev_match = (sk->sk_bound_dev_if == dif ||
4038     sk->sk_bound_dev_if == sdif);
4039    
4040     - if (exact_dif && !dev_match)
4041     + if (!dev_match)
4042     return -1;
4043     - if (sk->sk_bound_dev_if && dev_match)
4044     + if (sk->sk_bound_dev_if)
4045     score += 4;
4046     }
4047     if (sk->sk_incoming_cpu == raw_smp_processor_id())
4048     diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
4049     index 4b195bac8ac0..2f600f261690 100644
4050     --- a/net/ipv4/sysctl_net_ipv4.c
4051     +++ b/net/ipv4/sysctl_net_ipv4.c
4052     @@ -263,8 +263,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
4053     ipv4.sysctl_tcp_fastopen);
4054     struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
4055     struct tcp_fastopen_context *ctxt;
4056     - int ret;
4057     u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
4058     + __le32 key[4];
4059     + int ret, i;
4060    
4061     tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
4062     if (!tbl.data)
4063     @@ -273,11 +274,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
4064     rcu_read_lock();
4065     ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
4066     if (ctxt)
4067     - memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
4068     + memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
4069     else
4070     - memset(user_key, 0, sizeof(user_key));
4071     + memset(key, 0, sizeof(key));
4072     rcu_read_unlock();
4073    
4074     + for (i = 0; i < ARRAY_SIZE(key); i++)
4075     + user_key[i] = le32_to_cpu(key[i]);
4076     +
4077     snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
4078     user_key[0], user_key[1], user_key[2], user_key[3]);
4079     ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
4080     @@ -288,13 +292,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
4081     ret = -EINVAL;
4082     goto bad_key;
4083     }
4084     - tcp_fastopen_reset_cipher(net, NULL, user_key,
4085     +
4086     + for (i = 0; i < ARRAY_SIZE(user_key); i++)
4087     + key[i] = cpu_to_le32(user_key[i]);
4088     +
4089     + tcp_fastopen_reset_cipher(net, NULL, key,
4090     TCP_FASTOPEN_KEY_LENGTH);
4091     }
4092    
4093     bad_key:
4094     pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
4095     - user_key[0], user_key[1], user_key[2], user_key[3],
4096     + user_key[0], user_key[1], user_key[2], user_key[3],
4097     (char *)tbl.data, ret);
4098     kfree(tbl.data);
4099     return ret;
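The sysctl fix is an endianness round trip: the fastopen key is stored in a fixed little-endian byte layout, so the procfs path converts through __le32 in both directions, making the printed hex string (and the parsed input) identical on big- and little-endian hosts. A user-space sketch of the two conversions:

#include <inttypes.h>
#include <stdio.h>

/* Store the key as little-endian bytes; convert to host order only for
 * the human-readable hex form, and back again when parsing. */
static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void host_to_le32(uint32_t v, uint8_t *p)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

int main(void)
{
	uint8_t key[16] = { 0x01, 0x02, 0x03, 0x04 };	/* stored layout */
	uint32_t user_key[4];
	int i;

	for (i = 0; i < 4; i++)			/* print path */
		user_key[i] = le32_to_host(key + 4 * i);
	printf("%08" PRIx32 "-%08" PRIx32 "-%08" PRIx32 "-%08" PRIx32 "\n",
	       user_key[0], user_key[1], user_key[2], user_key[3]);

	for (i = 0; i < 4; i++)			/* parse path */
		host_to_le32(user_key[i], key + 4 * i);
	return 0;
}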
4100     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
4101     index e51c644484dc..1f25ebab25d2 100644
4102     --- a/net/ipv4/tcp_input.c
4103     +++ b/net/ipv4/tcp_input.c
4104     @@ -3149,6 +3149,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
4105    
4106     if (tcp_is_reno(tp)) {
4107     tcp_remove_reno_sacks(sk, pkts_acked);
4108     +
4109     + /* If any of the cumulatively ACKed segments was
4110     + * retransmitted, non-SACK case cannot confirm that
4111     + * progress was due to original transmission due to
4112     + * lack of TCPCB_SACKED_ACKED bits even if some of
4113     + * the packets may have been never retransmitted.
4114     + */
4115     + if (flag & FLAG_RETRANS_DATA_ACKED)
4116     + flag &= ~FLAG_ORIG_SACK_ACKED;
4117     } else {
4118     int delta;
4119    
4120     diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
4121     index ea6e6e7df0ee..cde2719fcb89 100644
4122     --- a/net/ipv4/udp_offload.c
4123     +++ b/net/ipv4/udp_offload.c
4124     @@ -295,7 +295,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
4125     out_unlock:
4126     rcu_read_unlock();
4127     out:
4128     - NAPI_GRO_CB(skb)->flush |= flush;
4129     + skb_gro_flush_final(skb, pp, flush);
4130     return pp;
4131     }
4132     EXPORT_SYMBOL(udp_gro_receive);
4133     diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
4134     index 2febe26de6a1..595ad408dba0 100644
4135     --- a/net/ipv6/inet6_hashtables.c
4136     +++ b/net/ipv6/inet6_hashtables.c
4137     @@ -113,9 +113,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
4138     bool dev_match = (sk->sk_bound_dev_if == dif ||
4139     sk->sk_bound_dev_if == sdif);
4140    
4141     - if (exact_dif && !dev_match)
4142     + if (!dev_match)
4143     return -1;
4144     - if (sk->sk_bound_dev_if && dev_match)
4145     + if (sk->sk_bound_dev_if)
4146     score++;
4147     }
4148     if (sk->sk_incoming_cpu == raw_smp_processor_id())
4149     diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
4150     index 5e0332014c17..eeb4d3098ff4 100644
4151     --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
4152     +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
4153     @@ -585,6 +585,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
4154     fq->q.meat == fq->q.len &&
4155     nf_ct_frag6_reasm(fq, skb, dev))
4156     ret = 0;
4157     + else
4158     + skb_dst_drop(skb);
4159    
4160     out_unlock:
4161     spin_unlock_bh(&fq->q.lock);
4162     diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
4163     index 33fb35cbfac1..558fe8cc6d43 100644
4164     --- a/net/ipv6/seg6_hmac.c
4165     +++ b/net/ipv6/seg6_hmac.c
4166     @@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void)
4167     return -ENOMEM;
4168    
4169     for_each_possible_cpu(cpu) {
4170     - tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL);
4171     + tfm = crypto_alloc_shash(algo->name, 0, 0);
4172     if (IS_ERR(tfm))
4173     return PTR_ERR(tfm);
4174     p_tfm = per_cpu_ptr(algo->tfms, cpu);
4175     diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
4176     index 3057e453bf31..83918119ceb8 100644
4177     --- a/net/netfilter/ipvs/ip_vs_lblc.c
4178     +++ b/net/netfilter/ipvs/ip_vs_lblc.c
4179     @@ -371,6 +371,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
4180     tbl->counter = 1;
4181     tbl->dead = false;
4182     tbl->svc = svc;
4183     + atomic_set(&tbl->entries, 0);
4184    
4185     /*
4186     * Hook periodic timer for garbage collection
4187     diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
4188     index 92adc04557ed..bc2bc5eebcb8 100644
4189     --- a/net/netfilter/ipvs/ip_vs_lblcr.c
4190     +++ b/net/netfilter/ipvs/ip_vs_lblcr.c
4191     @@ -534,6 +534,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
4192     tbl->counter = 1;
4193     tbl->dead = false;
4194     tbl->svc = svc;
4195     + atomic_set(&tbl->entries, 0);
4196    
4197     /*
4198     * Hook periodic timer for garbage collection
4199     diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
4200     index 2ceefa183cee..6a196e438b6c 100644
4201     --- a/net/nfc/llcp_commands.c
4202     +++ b/net/nfc/llcp_commands.c
4203     @@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
4204     pr_debug("Fragment %zd bytes remaining %zd",
4205     frag_len, remaining_len);
4206    
4207     - pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
4208     + pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
4209     frag_len + LLCP_HEADER_SIZE, &err);
4210     if (pdu == NULL) {
4211     - pr_err("Could not allocate PDU\n");
4212     - continue;
4213     + pr_err("Could not allocate PDU (error=%d)\n", err);
4214     + len -= remaining_len;
4215     + if (len == 0)
4216     + len = err;
4217     + break;
4218     }
4219    
4220     pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
4221     diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
4222     index 9696ef96b719..1a30e165eeb4 100644
4223     --- a/net/nsh/nsh.c
4224     +++ b/net/nsh/nsh.c
4225     @@ -104,7 +104,7 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
4226     __skb_pull(skb, nsh_len);
4227    
4228     skb_reset_mac_header(skb);
4229     - skb_reset_mac_len(skb);
4230     + skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0;
4231     skb->protocol = proto;
4232    
4233     features &= NETIF_F_SG;
4234     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
4235     index 38d132d007ba..cb0f02785749 100644
4236     --- a/net/packet/af_packet.c
4237     +++ b/net/packet/af_packet.c
4238     @@ -2294,6 +2294,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
4239     if (po->stats.stats1.tp_drops)
4240     status |= TP_STATUS_LOSING;
4241     }
4242     +
4243     + if (do_vnet &&
4244     + virtio_net_hdr_from_skb(skb, h.raw + macoff -
4245     + sizeof(struct virtio_net_hdr),
4246     + vio_le(), true, 0))
4247     + goto drop_n_account;
4248     +
4249     po->stats.stats1.tp_packets++;
4250     if (copy_skb) {
4251     status |= TP_STATUS_COPY;
4252     @@ -2301,15 +2308,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
4253     }
4254     spin_unlock(&sk->sk_receive_queue.lock);
4255    
4256     - if (do_vnet) {
4257     - if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
4258     - sizeof(struct virtio_net_hdr),
4259     - vio_le(), true, 0)) {
4260     - spin_lock(&sk->sk_receive_queue.lock);
4261     - goto drop_n_account;
4262     - }
4263     - }
4264     -
4265     skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
4266    
4267     if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
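
The af_packet hunks reorder tpacket_rcv(): the virtio_net_hdr conversion used to run after the receive-queue lock was dropped and after tp_packets had been bumped, so a conversion failure had to re-take the lock and jump to drop_n_account with the accounting already claiming a delivered frame. Doing the fallible conversion under the lock, before any counters are updated, makes a failure a clean drop. A runnable user-space model of that ordering, all names illustrative:

    #include <pthread.h>
    #include <stdio.h>

    /* build_metadata() stands in for virtio_net_hdr_from_skb(): it runs
     * under the queue lock and before the packet counter is bumped, so
     * failure never publishes a half-accounted frame. */
    struct ring {
        pthread_mutex_t lock;
        unsigned long packets;
        unsigned long drops;
    };

    static int build_metadata(int ok) { return ok ? 0 : -1; }

    static void receive(struct ring *r, int metadata_ok)
    {
        pthread_mutex_lock(&r->lock);
        if (build_metadata(metadata_ok) < 0) {
            r->drops++;                      /* drop_n_account path */
            pthread_mutex_unlock(&r->lock);
            return;
        }
        r->packets++;                        /* publish only after success */
        pthread_mutex_unlock(&r->lock);
    }

    int main(void)
    {
        struct ring r = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        receive(&r, 1);
        receive(&r, 0);
        printf("packets=%lu drops=%lu\n", r.packets, r.drops);  /* 1 1 */
        return 0;
    }
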
4268     diff --git a/net/rds/loop.c b/net/rds/loop.c
4269     index f2bf78de5688..dac6218a460e 100644
4270     --- a/net/rds/loop.c
4271     +++ b/net/rds/loop.c
4272     @@ -193,4 +193,5 @@ struct rds_transport rds_loop_transport = {
4273     .inc_copy_to_user = rds_message_inc_copy_to_user,
4274     .inc_free = rds_loop_inc_free,
4275     .t_name = "loopback",
4276     + .t_type = RDS_TRANS_LOOP,
4277     };
4278     diff --git a/net/rds/rds.h b/net/rds/rds.h
4279     index b04c333d9d1c..f2272fb8cd45 100644
4280     --- a/net/rds/rds.h
4281     +++ b/net/rds/rds.h
4282     @@ -479,6 +479,11 @@ struct rds_notifier {
4283     int n_status;
4284     };
4285    
4286     +/* Available as part of RDS core, so doesn't need to participate
4287     + * in get_preferred transport etc
4288     + */
4289     +#define RDS_TRANS_LOOP 3
4290     +
4291     /**
4292     * struct rds_transport - transport specific behavioural hooks
4293     *
4294     diff --git a/net/rds/recv.c b/net/rds/recv.c
4295     index dc67458b52f0..192ac6f78ded 100644
4296     --- a/net/rds/recv.c
4297     +++ b/net/rds/recv.c
4298     @@ -103,6 +103,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
4299     rds_stats_add(s_recv_bytes_added_to_socket, delta);
4300     else
4301     rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
4302     +
4303     + /* loop transport doesn't send/recv congestion updates */
4304     + if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
4305     + return;
4306     +
4307     now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
4308    
4309     rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
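
The three RDS hunks work together: the loopback transport delivers messages locally and never exchanges congestion-map updates, so once a socket over loopback was marked congested nothing would ever clear the flag. Tagging rds_loop_transport with a new t_type lets rds_recv_rcvbuf_delta() skip congestion tracking for it. A runnable user-space model; the structs are illustrative, the constant mirrors the patch:

    #include <stdbool.h>
    #include <stdio.h>

    #define RDS_TRANS_LOOP 3

    struct transport { int t_type; };

    static bool track_congestion(const struct transport *t,
                                 long rcv_bytes, long rcvbuf)
    {
        if (t->t_type == RDS_TRANS_LOOP)
            return false;       /* the added early return */
        return rcv_bytes > rcvbuf;
    }

    int main(void)
    {
        struct transport tcp = { 0 }, loop = { RDS_TRANS_LOOP };

        printf("%d %d\n", track_congestion(&tcp, 10, 5),
               track_congestion(&loop, 10, 5));  /* 1 0 */
        return 0;
    }
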
4310     diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
4311     index 8527cfdc446d..20d7d36b2fc9 100644
4312     --- a/net/sched/act_ife.c
4313     +++ b/net/sched/act_ife.c
4314     @@ -415,7 +415,8 @@ static void tcf_ife_cleanup(struct tc_action *a)
4315     spin_unlock_bh(&ife->tcf_lock);
4316    
4317     p = rcu_dereference_protected(ife->params, 1);
4318     - kfree_rcu(p, rcu);
4319     + if (p)
4320     + kfree_rcu(p, rcu);
4321     }
4322    
4323     /* under ife->tcf_lock for existing action */
4324     @@ -516,8 +517,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
4325     saddr = nla_data(tb[TCA_IFE_SMAC]);
4326     }
4327    
4328     - ife->tcf_action = parm->action;
4329     -
4330     if (parm->flags & IFE_ENCODE) {
4331     if (daddr)
4332     ether_addr_copy(p->eth_dst, daddr);
4333     @@ -543,10 +542,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
4334     NULL, NULL);
4335     if (err) {
4336     metadata_parse_err:
4337     - if (exists)
4338     - tcf_idr_release(*a, bind);
4339     if (ret == ACT_P_CREATED)
4340     - _tcf_ife_cleanup(*a);
4341     + tcf_idr_release(*a, bind);
4342    
4343     if (exists)
4344     spin_unlock_bh(&ife->tcf_lock);
4345     @@ -567,7 +564,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
4346     err = use_all_metadata(ife);
4347     if (err) {
4348     if (ret == ACT_P_CREATED)
4349     - _tcf_ife_cleanup(*a);
4350     + tcf_idr_release(*a, bind);
4351    
4352     if (exists)
4353     spin_unlock_bh(&ife->tcf_lock);
4354     @@ -576,6 +573,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
4355     }
4356     }
4357    
4358     + ife->tcf_action = parm->action;
4359     if (exists)
4360     spin_unlock_bh(&ife->tcf_lock);
4361    
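
The act_ife hunks tighten the init error handling in three ways: ife->tcf_action is assigned only after every fallible step has succeeded, both failure paths release a newly created action through tcf_idr_release() instead of the bespoke _tcf_ife_cleanup(), and tcf_ife_cleanup() tolerates params that were never assigned (hence the NULL check before kfree_rcu()). A runnable user-space model of the publish-last pattern, all names illustrative:

    #include <errno.h>
    #include <stdio.h>

    struct act { int verdict; int refs; };

    static int load_metadata(struct act *a, int ok)
    {
        (void)a;
        return ok ? 0 : -ENOMEM;
    }

    static void release_ref(struct act *a) { a->refs--; }

    static int action_init(struct act *a, int verdict, int metadata_ok)
    {
        int err = load_metadata(a, metadata_ok);

        if (err) {
            release_ref(a);      /* single teardown path */
            return err;
        }
        a->verdict = verdict;    /* publish last, when init cannot fail */
        return 0;
    }

    int main(void)
    {
        struct act a = { .verdict = -1, .refs = 1 };

        printf("%d refs=%d verdict=%d\n",
               action_init(&a, 7, 0), a.refs, a.verdict);
        /* -12 refs=0 verdict=-1: failed init leaves no verdict behind */
        return 0;
    }
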
4362     diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
4363     index c98a61e980ba..9c4c2bb547d7 100644
4364     --- a/net/sched/sch_blackhole.c
4365     +++ b/net/sched/sch_blackhole.c
4366     @@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
4367     struct sk_buff **to_free)
4368     {
4369     qdisc_drop(skb, sch, to_free);
4370     - return NET_XMIT_SUCCESS;
4371     + return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
4372     }
4373    
4374     static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
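
qdisc_drop() frees the skb and accounts a drop, but blackhole_enqueue() then reported plain NET_XMIT_SUCCESS, so a parent qdisc (e.g. TBF or HTB with a blackhole child) would increment its own qlen/backlog for a packet that no longer exists, and its queue length drifted out of sync. ORing in __NET_XMIT_BYPASS tells the parent the packet never entered a queue, while the core strips bits above NET_XMIT_MASK before the result reaches dev_queue_xmit() callers. A runnable user-space model of that flag layout (values illustrative, mirroring the kernel's convention):

    #include <stdio.h>

    #define NET_XMIT_SUCCESS   0x00
    #define NET_XMIT_MASK      0x0f
    #define __NET_XMIT_BYPASS  0x20  /* internal: skip parent accounting */

    int main(void)
    {
        int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

        printf("parent sees queued: %s\n",
               ret == NET_XMIT_SUCCESS ? "yes" : "no");  /* no */
        printf("caller sees: %d\n", ret & NET_XMIT_MASK); /* 0 = success */
        return 0;
    }
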
4375     diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
4376     index 092bebc70048..7afd66949a91 100644
4377     --- a/net/strparser/strparser.c
4378     +++ b/net/strparser/strparser.c
4379     @@ -35,7 +35,6 @@ struct _strp_msg {
4380     */
4381     struct strp_msg strp;
4382     int accum_len;
4383     - int early_eaten;
4384     };
4385    
4386     static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
4387     @@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
4388     head = strp->skb_head;
4389     if (head) {
4390     /* Message already in progress */
4391     -
4392     - stm = _strp_msg(head);
4393     - if (unlikely(stm->early_eaten)) {
4394     - /* Already some number of bytes on the receive sock
4395     - * data saved in skb_head, just indicate they
4396     - * are consumed.
4397     - */
4398     - eaten = orig_len <= stm->early_eaten ?
4399     - orig_len : stm->early_eaten;
4400     - stm->early_eaten -= eaten;
4401     -
4402     - return eaten;
4403     - }
4404     -
4405     if (unlikely(orig_offset)) {
4406     /* Getting data with a non-zero offset when a message is
4407     * in progress is not expected. If it does happen, we
4408     @@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
4409     }
4410    
4411     stm->accum_len += cand_len;
4412     + eaten += cand_len;
4413     strp->need_bytes = stm->strp.full_len -
4414     stm->accum_len;
4415     - stm->early_eaten = cand_len;
4416     STRP_STATS_ADD(strp->stats.bytes, cand_len);
4417     desc->count = 0; /* Stop reading socket */
4418     break;
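
The strparser hunks remove the early_eaten mechanism: bytes absorbed into a partially received message were not reported as consumed in the current pass but parked in early_eaten, to be claimed on the next __strp_recv() callback. If the TCP receive buffer filled in the meantime, that next callback never arrived and the stream stalled. The fix adds cand_len to eaten immediately so the data is consumed in the same pass. A runnable user-space model, struct illustrative:

    #include <stdio.h>

    struct parser { long accum_len, full_len, need_bytes; };

    /* Returns how many of cand_len bytes the caller should treat as
     * consumed right now ("eaten += cand_len"), with no deferral. */
    static long absorb(struct parser *p, long cand_len)
    {
        p->accum_len += cand_len;
        p->need_bytes = p->full_len - p->accum_len;
        return cand_len;
    }

    int main(void)
    {
        struct parser p = { .accum_len = 100, .full_len = 1000 };

        printf("eaten=%ld need=%ld\n", absorb(&p, 400), p.need_bytes);
        /* eaten=400 need=500 */
        return 0;
    }
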
4419     diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
4420     index 5fe29121b9a8..9a7f91232de8 100644
4421     --- a/net/tls/tls_sw.c
4422     +++ b/net/tls/tls_sw.c
4423     @@ -440,7 +440,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
4424     ret = tls_push_record(sk, msg->msg_flags, record_type);
4425     if (!ret)
4426     continue;
4427     - if (ret == -EAGAIN)
4428     + if (ret < 0)
4429     goto send_end;
4430    
4431     copied -= try_to_copy;
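
tls_push_record() can fail with more than -EAGAIN (allocation or crypto errors, for instance), and the old test let any other negative value fall through into the partial-send recovery path as if progress were still possible. Any negative return now aborts the send. A runnable user-space model of the stricter check:

    #include <errno.h>
    #include <stdio.h>

    static int push_record(int err) { return err; }

    static int send_loop(int push_err)
    {
        int ret = push_record(push_err);

        if (!ret)
            return 0;    /* record fully pushed */
        if (ret < 0)
            return ret;  /* was: if (ret == -EAGAIN) */
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", send_loop(-EAGAIN), send_loop(-ENOMEM));
        /* -11 -12: both errors now end the send */
        return 0;
    }
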
4432     diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
4433     index 8e03bd3f3668..5d3cce9e8744 100644
4434     --- a/net/vmw_vsock/virtio_transport.c
4435     +++ b/net/vmw_vsock/virtio_transport.c
4436     @@ -201,7 +201,7 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
4437     return -ENODEV;
4438     }
4439    
4440     - if (le32_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
4441     + if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
4442     return virtio_transport_send_pkt_loopback(vsock, pkt);
4443    
4444     if (pkt->reply)
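
In virtio_transport_send_pkt() the header field dst_cid is a little-endian 64-bit value, but it was decoded with le32_to_cpu(); on a big-endian host the truncate-then-swap yields the wrong CID, so packets destined for the local guest missed the loopback path. A runnable user-space model of the decode difference, with byte-swap builtins standing in for the le*_to_cpu helpers on a big-endian CPU:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* __le64 cid = 3 as raw memory seen by a big-endian CPU */
        uint64_t raw_be = 0x0300000000000000ULL;

        uint64_t good = __builtin_bswap64(raw_be);            /* le64_to_cpu */
        uint32_t bad  = __builtin_bswap32((uint32_t)raw_be);  /* le32_to_cpu */

        printf("le64_to_cpu=%llu le32_to_cpu=%u\n",
               (unsigned long long)good, bad);  /* 3 vs 0 */
        return 0;
    }
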
4445     diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
4446     index a4c1b76240df..2d9b4795edb2 100644
4447     --- a/virt/kvm/arm/arm.c
4448     +++ b/virt/kvm/arm/arm.c
4449     @@ -1490,6 +1490,10 @@ static int init_hyp_mode(void)
4450     }
4451     }
4452    
4453     + err = hyp_map_aux_data();
4454     + if (err)
4455     + kvm_err("Cannot map host auxilary data: %d\n", err);
4456     +
4457     return 0;
4458    
4459     out_err:
4460     diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
4461     index c4762bef13c6..c95ab4c5a475 100644
4462     --- a/virt/kvm/arm/psci.c
4463     +++ b/virt/kvm/arm/psci.c
4464     @@ -405,7 +405,7 @@ static int kvm_psci_call(struct kvm_vcpu *vcpu)
4465     int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
4466     {
4467     u32 func_id = smccc_get_function(vcpu);
4468     - u32 val = PSCI_RET_NOT_SUPPORTED;
4469     + u32 val = SMCCC_RET_NOT_SUPPORTED;
4470     u32 feature;
4471    
4472     switch (func_id) {
4473     @@ -417,7 +417,21 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
4474     switch(feature) {
4475     case ARM_SMCCC_ARCH_WORKAROUND_1:
4476     if (kvm_arm_harden_branch_predictor())
4477     - val = 0;
4478     + val = SMCCC_RET_SUCCESS;
4479     + break;
4480     + case ARM_SMCCC_ARCH_WORKAROUND_2:
4481     + switch (kvm_arm_have_ssbd()) {
4482     + case KVM_SSBD_FORCE_DISABLE:
4483     + case KVM_SSBD_UNKNOWN:
4484     + break;
4485     + case KVM_SSBD_KERNEL:
4486     + val = SMCCC_RET_SUCCESS;
4487     + break;
4488     + case KVM_SSBD_FORCE_ENABLE:
4489     + case KVM_SSBD_MITIGATED:
4490     + val = SMCCC_RET_NOT_REQUIRED;
4491     + break;
4492     + }
4493     break;
4494     }
4495     break;
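
The PSCI/SMCCC hunk teaches KVM's hypercall handler to answer an ARM_SMCCC_ARCH_WORKAROUND_2 feature query from a guest: NOT_SUPPORTED when the host has no usable mitigation (force-disabled or undetected), SUCCESS when the host mitigates dynamically and the guest should invoke the workaround itself, and NOT_REQUIRED when the host is always-on or unaffected, so the guest can skip the calls. A runnable user-space model of that mapping (SMCCC_RET_* values as in include/linux/arm-smccc.h; KVM_SSBD_* states mirror the patch):

    #include <stdio.h>

    #define SMCCC_RET_SUCCESS        0   /* workaround present, call it    */
    #define SMCCC_RET_NOT_SUPPORTED (-1) /* no mitigation available        */
    #define SMCCC_RET_NOT_REQUIRED  (-2) /* mitigated without guest action */

    enum kvm_ssbd { KVM_SSBD_UNKNOWN = -1, KVM_SSBD_FORCE_DISABLE,
                    KVM_SSBD_KERNEL, KVM_SSBD_FORCE_ENABLE, KVM_SSBD_MITIGATED };

    static int workaround2_reply(enum kvm_ssbd state)
    {
        switch (state) {
        case KVM_SSBD_KERNEL:
            return SMCCC_RET_SUCCESS;
        case KVM_SSBD_FORCE_ENABLE:
        case KVM_SSBD_MITIGATED:
            return SMCCC_RET_NOT_REQUIRED;
        case KVM_SSBD_FORCE_DISABLE:
        case KVM_SSBD_UNKNOWN:
        default:
            return SMCCC_RET_NOT_SUPPORTED;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n", workaround2_reply(KVM_SSBD_KERNEL),
               workaround2_reply(KVM_SSBD_MITIGATED),
               workaround2_reply(KVM_SSBD_UNKNOWN));  /* 0 -2 -1 */
        return 0;
    }
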