Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.20/0112-4.20.13-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3289 - (show annotations) (download)
Mon Mar 4 10:38:38 2019 UTC (5 years, 1 month ago) by niro
File size: 283893 byte(s)
linux-4.20.13
1 diff --git a/Makefile b/Makefile
2 index 0a92b4e116210..c83abc1e689b4 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 20
9 -SUBLEVEL = 12
10 +SUBLEVEL = 13
11 EXTRAVERSION =
12 NAME = Shy Crocodile
13
14 diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
15 index f393b663413e4..2ad77fb43639c 100644
16 --- a/arch/arc/include/asm/cache.h
17 +++ b/arch/arc/include/asm/cache.h
18 @@ -52,6 +52,17 @@
19 #define cache_line_size() SMP_CACHE_BYTES
20 #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
21
22 +/*
23 + * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
24 + * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit
25 + * alignment for any atomic64_t embedded in buffer.
26 + * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
27 + * value of 4 (and not 8) in ARC ABI.
28 + */
29 +#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
30 +#define ARCH_SLAB_MINALIGN 8
31 +#endif
32 +
33 extern void arc_cache_init(void);
34 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
35 extern void read_decode_cache_bcr(void);
36 diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
37 index 8b90d25a15cca..1f945d0f40daa 100644
38 --- a/arch/arc/kernel/head.S
39 +++ b/arch/arc/kernel/head.S
40 @@ -17,6 +17,7 @@
41 #include <asm/entry.h>
42 #include <asm/arcregs.h>
43 #include <asm/cache.h>
44 +#include <asm/irqflags.h>
45
46 .macro CPU_EARLY_SETUP
47
48 @@ -47,6 +48,15 @@
49 sr r5, [ARC_REG_DC_CTRL]
50
51 1:
52 +
53 +#ifdef CONFIG_ISA_ARCV2
54 + ; Unaligned access is disabled at reset, so re-enable early as
55 + ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
56 + ; by default
57 + lr r5, [status32]
58 + bset r5, r5, STATUS_AD_BIT
59 + kflag r5
60 +#endif
61 .endm
62
63 .section .init.text, "ax",@progbits
64 @@ -93,9 +103,9 @@ ENTRY(stext)
65 #ifdef CONFIG_ARC_UBOOT_SUPPORT
66 ; Uboot - kernel ABI
67 ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
68 - ; r1 = magic number (board identity, unused as of now
69 + ; r1 = magic number (always zero as of now)
70 ; r2 = pointer to uboot provided cmdline or external DTB in mem
71 - ; These are handled later in setup_arch()
72 + ; These are handled later in handle_uboot_args()
73 st r0, [@uboot_tag]
74 st r2, [@uboot_arg]
75 #endif
76 diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
77 index eea8c5ce63350..80dd1a716ca72 100644
78 --- a/arch/arc/kernel/setup.c
79 +++ b/arch/arc/kernel/setup.c
80 @@ -452,43 +452,80 @@ void setup_processor(void)
81 arc_chk_core_config();
82 }
83
84 -static inline int is_kernel(unsigned long addr)
85 +static inline bool uboot_arg_invalid(unsigned long addr)
86 {
87 - if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
88 - return 1;
89 - return 0;
90 + /*
91 + * Check that it is a untranslated address (although MMU is not enabled
92 + * yet, it being a high address ensures this is not by fluke)
93 + */
94 + if (addr < PAGE_OFFSET)
95 + return true;
96 +
97 + /* Check that address doesn't clobber resident kernel image */
98 + return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
99 }
100
101 -void __init setup_arch(char **cmdline_p)
102 +#define IGNORE_ARGS "Ignore U-boot args: "
103 +
104 +/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
105 +#define UBOOT_TAG_NONE 0
106 +#define UBOOT_TAG_CMDLINE 1
107 +#define UBOOT_TAG_DTB 2
108 +
109 +void __init handle_uboot_args(void)
110 {
111 + bool use_embedded_dtb = true;
112 + bool append_cmdline = false;
113 +
114 #ifdef CONFIG_ARC_UBOOT_SUPPORT
115 - /* make sure that uboot passed pointer to cmdline/dtb is valid */
116 - if (uboot_tag && is_kernel((unsigned long)uboot_arg))
117 - panic("Invalid uboot arg\n");
118 + /* check that we know this tag */
119 + if (uboot_tag != UBOOT_TAG_NONE &&
120 + uboot_tag != UBOOT_TAG_CMDLINE &&
121 + uboot_tag != UBOOT_TAG_DTB) {
122 + pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
123 + goto ignore_uboot_args;
124 + }
125 +
126 + if (uboot_tag != UBOOT_TAG_NONE &&
127 + uboot_arg_invalid((unsigned long)uboot_arg)) {
128 + pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
129 + goto ignore_uboot_args;
130 + }
131 +
132 + /* see if U-boot passed an external Device Tree blob */
133 + if (uboot_tag == UBOOT_TAG_DTB) {
134 + machine_desc = setup_machine_fdt((void *)uboot_arg);
135
136 - /* See if u-boot passed an external Device Tree blob */
137 - machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
138 - if (!machine_desc)
139 + /* external Device Tree blob is invalid - use embedded one */
140 + use_embedded_dtb = !machine_desc;
141 + }
142 +
143 + if (uboot_tag == UBOOT_TAG_CMDLINE)
144 + append_cmdline = true;
145 +
146 +ignore_uboot_args:
147 #endif
148 - {
149 - /* No, so try the embedded one */
150 +
151 + if (use_embedded_dtb) {
152 machine_desc = setup_machine_fdt(__dtb_start);
153 if (!machine_desc)
154 panic("Embedded DT invalid\n");
155 + }
156
157 - /*
158 - * If we are here, it is established that @uboot_arg didn't
159 - * point to DT blob. Instead if u-boot says it is cmdline,
160 - * append to embedded DT cmdline.
161 - * setup_machine_fdt() would have populated @boot_command_line
162 - */
163 - if (uboot_tag == 1) {
164 - /* Ensure a whitespace between the 2 cmdlines */
165 - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
166 - strlcat(boot_command_line, uboot_arg,
167 - COMMAND_LINE_SIZE);
168 - }
169 + /*
170 + * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
171 + * append processing can only happen after.
172 + */
173 + if (append_cmdline) {
174 + /* Ensure a whitespace between the 2 cmdlines */
175 + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
176 + strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
177 }
178 +}
179 +
180 +void __init setup_arch(char **cmdline_p)
181 +{
182 + handle_uboot_args();
183
184 /* Save unparsed command line copy for /proc/cmdline */
185 *cmdline_p = boot_command_line;
186 diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
187 index 2c118a6ab3587..0dc23fc227ed2 100644
188 --- a/arch/arm/probes/kprobes/opt-arm.c
189 +++ b/arch/arm/probes/kprobes/opt-arm.c
190 @@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
191 }
192
193 /* Copy arch-dep-instance from template. */
194 - memcpy(code, (unsigned char *)optprobe_template_entry,
195 + memcpy(code, (unsigned long *)&optprobe_template_entry,
196 TMPL_END_IDX * sizeof(kprobe_opcode_t));
197
198 /* Adjust buffer according to instruction. */
199 diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
200 index 951c4231bdb85..4c47b3fd958b6 100644
201 --- a/arch/mips/configs/ath79_defconfig
202 +++ b/arch/mips/configs/ath79_defconfig
203 @@ -71,6 +71,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
204 # CONFIG_SERIAL_8250_PCI is not set
205 CONFIG_SERIAL_8250_NR_UARTS=1
206 CONFIG_SERIAL_8250_RUNTIME_UARTS=1
207 +CONFIG_SERIAL_OF_PLATFORM=y
208 CONFIG_SERIAL_AR933X=y
209 CONFIG_SERIAL_AR933X_CONSOLE=y
210 # CONFIG_HW_RANDOM is not set
211 diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
212 index 4c41ed0a637e5..415a08376c362 100644
213 --- a/arch/mips/jazz/jazzdma.c
214 +++ b/arch/mips/jazz/jazzdma.c
215 @@ -74,14 +74,15 @@ static int __init vdma_init(void)
216 get_order(VDMA_PGTBL_SIZE));
217 BUG_ON(!pgtbl);
218 dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
219 - pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
220 + pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
221
222 /*
223 * Clear the R4030 translation table
224 */
225 vdma_pgtbl_init();
226
227 - r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
228 + r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
229 + CPHYSADDR((unsigned long)pgtbl));
230 r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
231 r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
232
233 diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
234 index aeb7b1b0f2024..252c00985c973 100644
235 --- a/arch/mips/net/ebpf_jit.c
236 +++ b/arch/mips/net/ebpf_jit.c
237 @@ -343,12 +343,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
238 const struct bpf_prog *prog = ctx->skf;
239 int stack_adjust = ctx->stack_size;
240 int store_offset = stack_adjust - 8;
241 + enum reg_val_type td;
242 int r0 = MIPS_R_V0;
243
244 - if (dest_reg == MIPS_R_RA &&
245 - get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
246 + if (dest_reg == MIPS_R_RA) {
247 /* Don't let zero extended value escape. */
248 - emit_instr(ctx, sll, r0, r0, 0);
249 + td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
250 + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
251 + emit_instr(ctx, sll, r0, r0, 0);
252 + }
253
254 if (ctx->flags & EBPF_SAVE_RA) {
255 emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
256 diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
257 index 2582df1c529bb..0964c236e3e5a 100644
258 --- a/arch/parisc/kernel/ptrace.c
259 +++ b/arch/parisc/kernel/ptrace.c
260 @@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
261
262 long do_syscall_trace_enter(struct pt_regs *regs)
263 {
264 - if (test_thread_flag(TIF_SYSCALL_TRACE) &&
265 - tracehook_report_syscall_entry(regs)) {
266 + if (test_thread_flag(TIF_SYSCALL_TRACE)) {
267 + int rc = tracehook_report_syscall_entry(regs);
268 +
269 /*
270 - * Tracing decided this syscall should not happen or the
271 - * debugger stored an invalid system call number. Skip
272 - * the system call and the system call restart handling.
273 + * As tracesys_next does not set %r28 to -ENOSYS
274 + * when %r20 is set to -1, initialize it here.
275 */
276 - regs->gr[20] = -1UL;
277 - goto out;
278 + regs->gr[28] = -ENOSYS;
279 +
280 + if (rc) {
281 + /*
282 + * A nonzero return code from
283 + * tracehook_report_syscall_entry() tells us
284 + * to prevent the syscall execution. Skip
285 + * the syscall call and the syscall restart handling.
286 + *
287 + * Note that the tracer may also just change
288 + * regs->gr[20] to an invalid syscall number,
289 + * that is handled by tracesys_next.
290 + */
291 + regs->gr[20] = -1UL;
292 + return -1;
293 + }
294 }
295
296 /* Do the secure computing check after ptrace. */
297 @@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
298 regs->gr[24] & 0xffffffff,
299 regs->gr[23] & 0xffffffff);
300
301 -out:
302 /*
303 * Sign extend the syscall number to 64bit since it may have been
304 * modified by a compat ptrace call
305 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
306 index 3b67b9533c82f..438512759e827 100644
307 --- a/arch/powerpc/kernel/head_8xx.S
308 +++ b/arch/powerpc/kernel/head_8xx.S
309 @@ -927,11 +927,12 @@ start_here:
310
311 /* set up the PTE pointers for the Abatron bdiGDB.
312 */
313 - tovirt(r6,r6)
314 lis r5, abatron_pteptrs@h
315 ori r5, r5, abatron_pteptrs@l
316 stw r5, 0xf0(0) /* Must match your Abatron config file */
317 tophys(r5,r5)
318 + lis r6, swapper_pg_dir@h
319 + ori r6, r6, swapper_pg_dir@l
320 stw r6, 0(r5)
321
322 /* Now turn on the MMU for real! */
323 diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
324 index a153257bf7d98..d62fa148558b9 100644
325 --- a/arch/s390/kvm/vsie.c
326 +++ b/arch/s390/kvm/vsie.c
327 @@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
328 scb_s->crycbd = 0;
329
330 apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
331 - if (!apie_h && !key_msk)
332 + if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
333 return 0;
334
335 if (!crycb_addr)
336 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
337 index e5c0174e330e5..5a0cbc717997b 100644
338 --- a/arch/x86/include/asm/kvm_host.h
339 +++ b/arch/x86/include/asm/kvm_host.h
340 @@ -299,6 +299,7 @@ union kvm_mmu_extended_role {
341 unsigned int cr4_smap:1;
342 unsigned int cr4_smep:1;
343 unsigned int cr4_la57:1;
344 + unsigned int maxphyaddr:6;
345 };
346 };
347
348 @@ -397,6 +398,7 @@ struct kvm_mmu {
349 void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
350 u64 *spte, const void *pte);
351 hpa_t root_hpa;
352 + gpa_t root_cr3;
353 union kvm_mmu_role mmu_role;
354 u8 root_level;
355 u8 shadow_root_level;
356 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
357 index 7bcfa61375c09..98d13c6a64be0 100644
358 --- a/arch/x86/kvm/cpuid.c
359 +++ b/arch/x86/kvm/cpuid.c
360 @@ -337,6 +337,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
361 unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
362 unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
363 unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
364 + unsigned f_la57 = 0;
365
366 /* cpuid 1.edx */
367 const u32 kvm_cpuid_1_edx_x86_features =
368 @@ -491,7 +492,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
369 // TSC_ADJUST is emulated
370 entry->ebx |= F(TSC_ADJUST);
371 entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
372 + f_la57 = entry->ecx & F(LA57);
373 cpuid_mask(&entry->ecx, CPUID_7_ECX);
374 + /* Set LA57 based on hardware capability. */
375 + entry->ecx |= f_la57;
376 entry->ecx |= f_umip;
377 /* PKU is not yet implemented for shadow paging. */
378 if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
379 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
380 index 7c03c0f35444f..e763e5445e3ca 100644
381 --- a/arch/x86/kvm/mmu.c
382 +++ b/arch/x86/kvm/mmu.c
383 @@ -3517,6 +3517,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
384 &invalid_list);
385 mmu->root_hpa = INVALID_PAGE;
386 }
387 + mmu->root_cr3 = 0;
388 }
389
390 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
391 @@ -3572,6 +3573,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
392 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
393 } else
394 BUG();
395 + vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
396
397 return 0;
398 }
399 @@ -3580,10 +3582,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
400 {
401 struct kvm_mmu_page *sp;
402 u64 pdptr, pm_mask;
403 - gfn_t root_gfn;
404 + gfn_t root_gfn, root_cr3;
405 int i;
406
407 - root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
408 + root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
409 + root_gfn = root_cr3 >> PAGE_SHIFT;
410
411 if (mmu_check_root(vcpu, root_gfn))
412 return 1;
413 @@ -3608,7 +3611,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
414 ++sp->root_count;
415 spin_unlock(&vcpu->kvm->mmu_lock);
416 vcpu->arch.mmu->root_hpa = root;
417 - return 0;
418 + goto set_root_cr3;
419 }
420
421 /*
422 @@ -3674,6 +3677,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
423 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
424 }
425
426 +set_root_cr3:
427 + vcpu->arch.mmu->root_cr3 = root_cr3;
428 +
429 return 0;
430 }
431
432 @@ -4125,7 +4131,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
433 struct kvm_mmu_root_info root;
434 struct kvm_mmu *mmu = vcpu->arch.mmu;
435
436 - root.cr3 = mmu->get_cr3(vcpu);
437 + root.cr3 = mmu->root_cr3;
438 root.hpa = mmu->root_hpa;
439
440 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
441 @@ -4138,6 +4144,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
442 }
443
444 mmu->root_hpa = root.hpa;
445 + mmu->root_cr3 = root.cr3;
446
447 return i < KVM_MMU_NUM_PREV_ROOTS;
448 }
449 @@ -4731,6 +4738,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
450 ext.cr4_pse = !!is_pse(vcpu);
451 ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
452 ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
453 + ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
454
455 ext.valid = 1;
456
457 @@ -5477,11 +5485,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
458 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
459
460 vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
461 + vcpu->arch.root_mmu.root_cr3 = 0;
462 vcpu->arch.root_mmu.translate_gpa = translate_gpa;
463 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
464 vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
465
466 vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
467 + vcpu->arch.guest_mmu.root_cr3 = 0;
468 vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
469 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
470 vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
471 diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
472 index 2f6787fc71066..c54a493e139a7 100644
473 --- a/arch/x86/xen/enlighten_pv.c
474 +++ b/arch/x86/xen/enlighten_pv.c
475 @@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
476 val = native_read_msr_safe(msr, err);
477 switch (msr) {
478 case MSR_IA32_APICBASE:
479 -#ifdef CONFIG_X86_X2APIC
480 - if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
481 -#endif
482 - val &= ~X2APIC_ENABLE;
483 + val &= ~X2APIC_ENABLE;
484 break;
485 }
486 return val;
487 diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
488 index bb3d96dea6dba..26d4164d394fb 100644
489 --- a/drivers/acpi/bus.c
490 +++ b/drivers/acpi/bus.c
491 @@ -1054,18 +1054,6 @@ void __init acpi_early_init(void)
492 goto error0;
493 }
494
495 - /*
496 - * ACPI 2.0 requires the EC driver to be loaded and work before
497 - * the EC device is found in the namespace (i.e. before
498 - * acpi_load_tables() is called).
499 - *
500 - * This is accomplished by looking for the ECDT table, and getting
501 - * the EC parameters out of that.
502 - *
503 - * Ignore the result. Not having an ECDT is not fatal.
504 - */
505 - status = acpi_ec_ecdt_probe();
506 -
507 #ifdef CONFIG_X86
508 if (!acpi_ioapic) {
509 /* compatible (0) means level (3) */
510 @@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void)
511 goto error1;
512 }
513
514 + /*
515 + * ACPI 2.0 requires the EC driver to be loaded and work before the EC
516 + * device is found in the namespace.
517 + *
518 + * This is accomplished by looking for the ECDT table and getting the EC
519 + * parameters out of that.
520 + *
521 + * Do that before calling acpi_initialize_objects() which may trigger EC
522 + * address space accesses.
523 + */
524 + acpi_ec_ecdt_probe();
525 +
526 status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
527 if (ACPI_FAILURE(status)) {
528 printk(KERN_ERR PREFIX
529 diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
530 index 8535e7999769b..2a2d7ec772526 100644
531 --- a/drivers/acpi/nfit/core.c
532 +++ b/drivers/acpi/nfit/core.c
533 @@ -724,6 +724,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
534 struct acpi_nfit_memory_map *memdev;
535 struct acpi_nfit_desc *acpi_desc;
536 struct nfit_mem *nfit_mem;
537 + u16 physical_id;
538
539 mutex_lock(&acpi_desc_lock);
540 list_for_each_entry(acpi_desc, &acpi_descs, list) {
541 @@ -731,10 +732,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
542 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
543 memdev = __to_nfit_memdev(nfit_mem);
544 if (memdev->device_handle == device_handle) {
545 + *flags = memdev->flags;
546 + physical_id = memdev->physical_id;
547 mutex_unlock(&acpi_desc->init_mutex);
548 mutex_unlock(&acpi_desc_lock);
549 - *flags = memdev->flags;
550 - return memdev->physical_id;
551 + return physical_id;
552 }
553 }
554 mutex_unlock(&acpi_desc->init_mutex);
555 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
556 index 29f102dcfec49..329ce9072ee9f 100644
557 --- a/drivers/atm/he.c
558 +++ b/drivers/atm/he.c
559 @@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
560 instead of '/ 512', use '>> 9' to prevent a call
561 to divdu3 on x86 platforms
562 */
563 - rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
564 + rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
565
566 if (rate_cps < 10)
567 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
568 diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
569 index 2fe225a697df8..3487e03d4bc61 100644
570 --- a/drivers/clk/at91/at91sam9x5.c
571 +++ b/drivers/clk/at91/at91sam9x5.c
572 @@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
573 return;
574
575 at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
576 - nck(at91sam9x5_systemck),
577 - nck(at91sam9x35_periphck), 0);
578 + nck(at91sam9x5_systemck), 31, 0);
579 if (!at91sam9x5_pmc)
580 return;
581
582 @@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
583 parent_names[1] = "mainck";
584 parent_names[2] = "plladivck";
585 parent_names[3] = "utmick";
586 - parent_names[4] = "mck";
587 + parent_names[4] = "masterck";
588 for (i = 0; i < 2; i++) {
589 char name[6];
590
591 diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
592 index d69ad96fe988b..cd0ef7274fdbf 100644
593 --- a/drivers/clk/at91/sama5d2.c
594 +++ b/drivers/clk/at91/sama5d2.c
595 @@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
596 parent_names[1] = "mainck";
597 parent_names[2] = "plladivck";
598 parent_names[3] = "utmick";
599 - parent_names[4] = "mck";
600 + parent_names[4] = "masterck";
601 for (i = 0; i < 3; i++) {
602 char name[6];
603
604 @@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
605 parent_names[1] = "mainck";
606 parent_names[2] = "plladivck";
607 parent_names[3] = "utmick";
608 - parent_names[4] = "mck";
609 + parent_names[4] = "masterck";
610 parent_names[5] = "audiopll_pmcck";
611 for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
612 hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
613 diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
614 index e358be7f6c8d5..b645a9d59cdbd 100644
615 --- a/drivers/clk/at91/sama5d4.c
616 +++ b/drivers/clk/at91/sama5d4.c
617 @@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
618 parent_names[1] = "mainck";
619 parent_names[2] = "plladivck";
620 parent_names[3] = "utmick";
621 - parent_names[4] = "mck";
622 + parent_names[4] = "masterck";
623 for (i = 0; i < 3; i++) {
624 char name[6];
625
626 diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
627 index 242c3370544e6..9ed46d188cb5b 100644
628 --- a/drivers/cpufreq/scmi-cpufreq.c
629 +++ b/drivers/cpufreq/scmi-cpufreq.c
630 @@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
631
632 cpufreq_cooling_unregister(priv->cdev);
633 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
634 - kfree(priv);
635 dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
636 + kfree(priv);
637
638 return 0;
639 }
640 diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
641 index 00e954f22bc92..74401e0adb29c 100644
642 --- a/drivers/gpio/gpio-mt7621.c
643 +++ b/drivers/gpio/gpio-mt7621.c
644 @@ -30,6 +30,7 @@
645 #define GPIO_REG_EDGE 0xA0
646
647 struct mtk_gc {
648 + struct irq_chip irq_chip;
649 struct gpio_chip chip;
650 spinlock_t lock;
651 int bank;
652 @@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
653 return 0;
654 }
655
656 -static struct irq_chip mediatek_gpio_irq_chip = {
657 - .irq_unmask = mediatek_gpio_irq_unmask,
658 - .irq_mask = mediatek_gpio_irq_mask,
659 - .irq_mask_ack = mediatek_gpio_irq_mask,
660 - .irq_set_type = mediatek_gpio_irq_type,
661 -};
662 -
663 static int
664 mediatek_gpio_xlate(struct gpio_chip *chip,
665 const struct of_phandle_args *spec, u32 *flags)
666 @@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
667 return ret;
668 }
669
670 + rg->irq_chip.name = dev_name(dev);
671 + rg->irq_chip.parent_device = dev;
672 + rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
673 + rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
674 + rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
675 + rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
676 +
677 if (mtk->gpio_irq) {
678 /*
679 * Manually request the irq here instead of passing
680 @@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
681 return ret;
682 }
683
684 - ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
685 + ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
686 0, handle_simple_irq, IRQ_TYPE_NONE);
687 if (ret) {
688 dev_err(dev, "failed to add gpiochip_irqchip\n");
689 return ret;
690 }
691
692 - gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
693 + gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
694 mtk->gpio_irq, NULL);
695 }
696
697 @@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
698 mtk->gpio_irq = irq_of_parse_and_map(np, 0);
699 mtk->dev = dev;
700 platform_set_drvdata(pdev, mtk);
701 - mediatek_gpio_irq_chip.name = dev_name(dev);
702
703 for (i = 0; i < MTK_BANK_CNT; i++) {
704 ret = mediatek_gpio_bank_probe(dev, np, i);
705 diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
706 index e9600b556f397..bcc6be4a5cb2e 100644
707 --- a/drivers/gpio/gpio-pxa.c
708 +++ b/drivers/gpio/gpio-pxa.c
709 @@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
710 {
711 switch (gpio_type) {
712 case PXA3XX_GPIO:
713 + case MMP2_GPIO:
714 return false;
715
716 default:
717 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
718 index 8f3d44e5e7878..722b1421d8f39 100644
719 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
720 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
721 @@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
722 }
723
724 if (amdgpu_device_is_px(dev)) {
725 + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
726 pm_runtime_use_autosuspend(dev->dev);
727 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
728 pm_runtime_set_active(dev->dev);
729 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
730 index 8c9abaa7601a7..62df4bd0a0fc2 100644
731 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
732 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
733 @@ -637,12 +637,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
734 struct ttm_bo_global *glob = adev->mman.bdev.glob;
735 struct amdgpu_vm_bo_base *bo_base;
736
737 +#if 0
738 if (vm->bulk_moveable) {
739 spin_lock(&glob->lru_lock);
740 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
741 spin_unlock(&glob->lru_lock);
742 return;
743 }
744 +#endif
745
746 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
747
748 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
749 index e3843c5929edf..fffece5e42c56 100644
750 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
751 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
752 @@ -1074,8 +1074,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
753 * the GPU device is not already present in the topology device
754 * list then return NULL. This means a new topology device has to
755 * be created for this GPU.
756 - * TODO: Rather than assiging @gpu to first topology device withtout
757 - * gpu attached, it will better to have more stringent check.
758 */
759 static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
760 {
761 @@ -1083,12 +1081,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
762 struct kfd_topology_device *out_dev = NULL;
763
764 down_write(&topology_lock);
765 - list_for_each_entry(dev, &topology_device_list, list)
766 + list_for_each_entry(dev, &topology_device_list, list) {
767 + /* Discrete GPUs need their own topology device list
768 + * entries. Don't assign them to CPU/APU nodes.
769 + */
770 + if (!gpu->device_info->needs_iommu_device &&
771 + dev->node_props.cpu_cores_count)
772 + continue;
773 +
774 if (!dev->gpu && (dev->node_props.simd_count > 0)) {
775 dev->gpu = gpu;
776 out_dev = dev;
777 break;
778 }
779 + }
780 up_write(&topology_lock);
781 return out_dev;
782 }
783 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
784 index 315a245aedc29..d92120b62e89f 100644
785 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
786 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
787 @@ -704,12 +704,13 @@ static int dm_suspend(void *handle)
788 struct amdgpu_display_manager *dm = &adev->dm;
789 int ret = 0;
790
791 + WARN_ON(adev->dm.cached_state);
792 + adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
793 +
794 s3_handle_mst(adev->ddev, true);
795
796 amdgpu_dm_irq_suspend(adev);
797
798 - WARN_ON(adev->dm.cached_state);
799 - adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
800
801 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
802
803 diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
804 index 4443a916a0fb6..e84275f15e7ad 100644
805 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
806 +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
807 @@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
808
809 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
810
811 - if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
812 + if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
813 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
814 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
815 /* un-mute audio */
816 @@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
817 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
818 pipe_ctx->stream_res.stream_enc, true);
819 if (pipe_ctx->stream_res.audio) {
820 + struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
821 +
822 if (option != KEEP_ACQUIRED_RESOURCE ||
823 !dc->debug.az_endpoint_mute_only) {
824 /*only disalbe az_endpoint if power down or free*/
825 @@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
826 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
827 pipe_ctx->stream_res.audio = NULL;
828 }
829 + if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
830 + /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
831 + pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
832
833 /* TODO: notify audio driver for if audio modes list changed
834 * add audio mode list change flag */
835 diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
836 index 9e2e998b198f0..e0c02a9889b2c 100644
837 --- a/drivers/gpu/drm/i915/intel_fbdev.c
838 +++ b/drivers/gpu/drm/i915/intel_fbdev.c
839 @@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
840 bool *enabled, int width, int height)
841 {
842 struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
843 - unsigned long conn_configured, conn_seq, mask;
844 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
845 + unsigned long conn_configured, conn_seq;
846 int i, j;
847 bool *save_enabled;
848 bool fallback = true, ret = true;
849 @@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
850 drm_modeset_backoff(&ctx);
851
852 memcpy(save_enabled, enabled, count);
853 - mask = GENMASK(count - 1, 0);
854 + conn_seq = GENMASK(count - 1, 0);
855 conn_configured = 0;
856 retry:
857 - conn_seq = conn_configured;
858 for (i = 0; i < count; i++) {
859 struct drm_fb_helper_connector *fb_conn;
860 struct drm_connector *connector;
861 @@ -371,7 +370,8 @@ retry:
862 if (conn_configured & BIT(i))
863 continue;
864
865 - if (conn_seq == 0 && !connector->has_tile)
866 + /* First pass, only consider tiled connectors */
867 + if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
868 continue;
869
870 if (connector->status == connector_status_connected)
871 @@ -475,8 +475,10 @@ retry:
872 conn_configured |= BIT(i);
873 }
874
875 - if ((conn_configured & mask) != mask && conn_configured != conn_seq)
876 + if (conn_configured != conn_seq) { /* repeat until no more are found */
877 + conn_seq = conn_configured;
878 goto retry;
879 + }
880
881 /*
882 * If the BIOS didn't enable everything it could, fall back to have the
883 diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
884 index bf5f294f172fa..611ac340fb289 100644
885 --- a/drivers/gpu/drm/meson/meson_drv.c
886 +++ b/drivers/gpu/drm/meson/meson_drv.c
887 @@ -368,8 +368,10 @@ static int meson_probe_remote(struct platform_device *pdev,
888 remote_node = of_graph_get_remote_port_parent(ep);
889 if (!remote_node ||
890 remote_node == parent || /* Ignore parent endpoint */
891 - !of_device_is_available(remote_node))
892 + !of_device_is_available(remote_node)) {
893 + of_node_put(remote_node);
894 continue;
895 + }
896
897 count += meson_probe_remote(pdev, match, remote, remote_node);
898
899 @@ -388,10 +390,13 @@ static int meson_drv_probe(struct platform_device *pdev)
900
901 for_each_endpoint_of_node(np, ep) {
902 remote = of_graph_get_remote_port_parent(ep);
903 - if (!remote || !of_device_is_available(remote))
904 + if (!remote || !of_device_is_available(remote)) {
905 + of_node_put(remote);
906 continue;
907 + }
908
909 count += meson_probe_remote(pdev, &match, np, remote);
910 + of_node_put(remote);
911 }
912
913 if (count && !match)
914 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
915 index dec1e081f5295..6a8fb6fd183c3 100644
916 --- a/drivers/gpu/drm/radeon/radeon_kms.c
917 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
918 @@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
919 }
920
921 if (radeon_is_px(dev)) {
922 + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
923 pm_runtime_use_autosuspend(dev->dev);
924 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
925 pm_runtime_set_active(dev->dev);
926 diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
927 index bf49c55b0f2c7..9f27d5464804b 100644
928 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c
929 +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
930 @@ -704,17 +704,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
931 remote = of_graph_get_remote_port_parent(ep);
932 if (!remote)
933 continue;
934 + of_node_put(remote);
935
936 /* does this node match any registered engines? */
937 list_for_each_entry(frontend, &drv->frontend_list, list) {
938 if (remote == frontend->node) {
939 - of_node_put(remote);
940 of_node_put(port);
941 + of_node_put(ep);
942 return frontend;
943 }
944 }
945 }
946 -
947 + of_node_put(port);
948 return ERR_PTR(-EINVAL);
949 }
950
951 diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
952 index c3040079b1cb6..4adec4ab7d066 100644
953 --- a/drivers/hwmon/nct6775.c
954 +++ b/drivers/hwmon/nct6775.c
955 @@ -44,8 +44,8 @@
956 * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3
957 * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3
958 * (0xd451)
959 - * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3
960 - * (0xd459)
961 + * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3
962 + * (0xd429)
963 *
964 * #temp lists the number of monitored temperature sources (first value) plus
965 * the number of directly connectable temperature sensors (second value).
966 @@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
967 #define SIO_NCT6795_ID 0xd350
968 #define SIO_NCT6796_ID 0xd420
969 #define SIO_NCT6797_ID 0xd450
970 -#define SIO_NCT6798_ID 0xd458
971 +#define SIO_NCT6798_ID 0xd428
972 #define SIO_ID_MASK 0xFFF8
973
974 enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
975 @@ -4508,7 +4508,8 @@ static int __maybe_unused nct6775_resume(struct device *dev)
976
977 if (data->kind == nct6791 || data->kind == nct6792 ||
978 data->kind == nct6793 || data->kind == nct6795 ||
979 - data->kind == nct6796)
980 + data->kind == nct6796 || data->kind == nct6797 ||
981 + data->kind == nct6798)
982 nct6791_enable_io_mapping(sioreg);
983
984 superio_exit(sioreg);
985 @@ -4644,7 +4645,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
986
987 if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
988 sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
989 - sio_data->kind == nct6796)
990 + sio_data->kind == nct6796 || sio_data->kind == nct6797 ||
991 + sio_data->kind == nct6798)
992 nct6791_enable_io_mapping(sioaddr);
993
994 superio_exit(sioaddr);
995 diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
996 index 8844c9565d2a4..7053be59ad2e4 100644
997 --- a/drivers/hwmon/tmp421.c
998 +++ b/drivers/hwmon/tmp421.c
999 @@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
1000 .data = (void *)2
1001 },
1002 {
1003 - .compatible = "ti,tmp422",
1004 + .compatible = "ti,tmp442",
1005 .data = (void *)3
1006 },
1007 { },
1008 diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
1009 index 691c6f0489386..2428c7d89c6be 100644
1010 --- a/drivers/infiniband/hw/mthca/mthca_provider.c
1011 +++ b/drivers/infiniband/hw/mthca/mthca_provider.c
1012 @@ -533,7 +533,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
1013 {
1014 struct mthca_ucontext *context;
1015
1016 - qp = kmalloc(sizeof *qp, GFP_KERNEL);
1017 + qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1018 if (!qp)
1019 return ERR_PTR(-ENOMEM);
1020
1021 @@ -599,7 +599,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
1022 if (pd->uobject)
1023 return ERR_PTR(-EINVAL);
1024
1025 - qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
1026 + qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
1027 if (!qp)
1028 return ERR_PTR(-ENOMEM);
1029
1030 diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1031 index eed0eb3bb04c6..0466f2ac9ad08 100644
1032 --- a/drivers/infiniband/ulp/srp/ib_srp.c
1033 +++ b/drivers/infiniband/ulp/srp/ib_srp.c
1034 @@ -2942,7 +2942,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
1035 {
1036 struct srp_target_port *target = host_to_target(scmnd->device->host);
1037 struct srp_rdma_ch *ch;
1038 - int i, j;
1039 u8 status;
1040
1041 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
1042 @@ -2954,15 +2953,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
1043 if (status)
1044 return FAILED;
1045
1046 - for (i = 0; i < target->ch_count; i++) {
1047 - ch = &target->ch[i];
1048 - for (j = 0; j < target->req_ring_size; ++j) {
1049 - struct srp_request *req = &ch->req_ring[j];
1050 -
1051 - srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
1052 - }
1053 - }
1054 -
1055 return SUCCESS;
1056 }
1057
1058 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
1059 index 4ac378e489023..40ca1e8fa09fc 100644
1060 --- a/drivers/isdn/hardware/avm/b1.c
1061 +++ b/drivers/isdn/hardware/avm/b1.c
1062 @@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
1063 int i, j;
1064
1065 for (j = 0; j < AVM_MAXVERSION; j++)
1066 - cinfo->version[j] = "\0\0" + 1;
1067 + cinfo->version[j] = "";
1068 for (i = 0, j = 0;
1069 j < AVM_MAXVERSION && i < cinfo->versionlen;
1070 j++, i += cinfo->versionbuf[i] + 1)
1071 diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
1072 index 1b2239c1d5694..dc1cded716c1a 100644
1073 --- a/drivers/isdn/i4l/isdn_tty.c
1074 +++ b/drivers/isdn/i4l/isdn_tty.c
1075 @@ -1437,15 +1437,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
1076 {
1077 modem_info *info = (modem_info *) tty->driver_data;
1078
1079 + mutex_lock(&modem_info_mutex);
1080 if (!old_termios)
1081 isdn_tty_change_speed(info);
1082 else {
1083 if (tty->termios.c_cflag == old_termios->c_cflag &&
1084 tty->termios.c_ispeed == old_termios->c_ispeed &&
1085 - tty->termios.c_ospeed == old_termios->c_ospeed)
1086 + tty->termios.c_ospeed == old_termios->c_ospeed) {
1087 + mutex_unlock(&modem_info_mutex);
1088 return;
1089 + }
1090 isdn_tty_change_speed(info);
1091 }
1092 + mutex_unlock(&modem_info_mutex);
1093 }
1094
1095 /*
1096 diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
1097 index a2e74feee2b2f..fd64df5a57a5e 100644
1098 --- a/drivers/leds/leds-lp5523.c
1099 +++ b/drivers/leds/leds-lp5523.c
1100 @@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
1101
1102 /* Let the programs run for couple of ms and check the engine status */
1103 usleep_range(3000, 6000);
1104 - lp55xx_read(chip, LP5523_REG_STATUS, &status);
1105 + ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
1106 + if (ret)
1107 + return ret;
1108 status &= LP5523_ENG_STATUS_MASK;
1109
1110 if (status != LP5523_ENG_STATUS_MASK) {
1111 diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
1112 index 8c5dfdce4326c..f461460a2aeb9 100644
1113 --- a/drivers/mfd/Kconfig
1114 +++ b/drivers/mfd/Kconfig
1115 @@ -102,6 +102,7 @@ config MFD_AAT2870_CORE
1116 config MFD_AT91_USART
1117 tristate "AT91 USART Driver"
1118 select MFD_CORE
1119 + depends on ARCH_AT91 || COMPILE_TEST
1120 help
1121 Select this to get support for AT91 USART IP. This is a wrapper
1122 over at91-usart-serial driver and usart-spi-driver. Only one function
1123 diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
1124 index 30d09d1771717..11ab17f64c649 100644
1125 --- a/drivers/mfd/ab8500-core.c
1126 +++ b/drivers/mfd/ab8500-core.c
1127 @@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
1128 mutex_unlock(&ab8500->lock);
1129 dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
1130
1131 - return ret;
1132 + return (ret < 0) ? ret : 0;
1133 }
1134
1135 static int ab8500_get_register(struct device *dev, u8 bank,
1136 diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
1137 index 0be511dd93d01..f8e0fa97bb31e 100644
1138 --- a/drivers/mfd/axp20x.c
1139 +++ b/drivers/mfd/axp20x.c
1140 @@ -640,9 +640,9 @@ static const struct mfd_cell axp221_cells[] = {
1141
1142 static const struct mfd_cell axp223_cells[] = {
1143 {
1144 - .name = "axp221-pek",
1145 - .num_resources = ARRAY_SIZE(axp22x_pek_resources),
1146 - .resources = axp22x_pek_resources,
1147 + .name = "axp221-pek",
1148 + .num_resources = ARRAY_SIZE(axp22x_pek_resources),
1149 + .resources = axp22x_pek_resources,
1150 }, {
1151 .name = "axp22x-adc",
1152 .of_compatible = "x-powers,axp221-adc",
1153 @@ -650,7 +650,7 @@ static const struct mfd_cell axp223_cells[] = {
1154 .name = "axp20x-battery-power-supply",
1155 .of_compatible = "x-powers,axp221-battery-power-supply",
1156 }, {
1157 - .name = "axp20x-regulator",
1158 + .name = "axp20x-regulator",
1159 }, {
1160 .name = "axp20x-ac-power-supply",
1161 .of_compatible = "x-powers,axp221-ac-power-supply",
1162 @@ -666,9 +666,9 @@ static const struct mfd_cell axp223_cells[] = {
1163
1164 static const struct mfd_cell axp152_cells[] = {
1165 {
1166 - .name = "axp20x-pek",
1167 - .num_resources = ARRAY_SIZE(axp152_pek_resources),
1168 - .resources = axp152_pek_resources,
1169 + .name = "axp20x-pek",
1170 + .num_resources = ARRAY_SIZE(axp152_pek_resources),
1171 + .resources = axp152_pek_resources,
1172 },
1173 };
1174
1175 @@ -697,87 +697,101 @@ static const struct resource axp288_charger_resources[] = {
1176
1177 static const struct mfd_cell axp288_cells[] = {
1178 {
1179 - .name = "axp288_adc",
1180 - .num_resources = ARRAY_SIZE(axp288_adc_resources),
1181 - .resources = axp288_adc_resources,
1182 - },
1183 - {
1184 - .name = "axp288_extcon",
1185 - .num_resources = ARRAY_SIZE(axp288_extcon_resources),
1186 - .resources = axp288_extcon_resources,
1187 - },
1188 - {
1189 - .name = "axp288_charger",
1190 - .num_resources = ARRAY_SIZE(axp288_charger_resources),
1191 - .resources = axp288_charger_resources,
1192 - },
1193 - {
1194 - .name = "axp288_fuel_gauge",
1195 - .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
1196 - .resources = axp288_fuel_gauge_resources,
1197 - },
1198 - {
1199 - .name = "axp221-pek",
1200 - .num_resources = ARRAY_SIZE(axp288_power_button_resources),
1201 - .resources = axp288_power_button_resources,
1202 - },
1203 - {
1204 - .name = "axp288_pmic_acpi",
1205 + .name = "axp288_adc",
1206 + .num_resources = ARRAY_SIZE(axp288_adc_resources),
1207 + .resources = axp288_adc_resources,
1208 + }, {
1209 + .name = "axp288_extcon",
1210 + .num_resources = ARRAY_SIZE(axp288_extcon_resources),
1211 + .resources = axp288_extcon_resources,
1212 + }, {
1213 + .name = "axp288_charger",
1214 + .num_resources = ARRAY_SIZE(axp288_charger_resources),
1215 + .resources = axp288_charger_resources,
1216 + }, {
1217 + .name = "axp288_fuel_gauge",
1218 + .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
1219 + .resources = axp288_fuel_gauge_resources,
1220 + }, {
1221 + .name = "axp221-pek",
1222 + .num_resources = ARRAY_SIZE(axp288_power_button_resources),
1223 + .resources = axp288_power_button_resources,
1224 + }, {
1225 + .name = "axp288_pmic_acpi",
1226 },
1227 };
1228
1229 static const struct mfd_cell axp803_cells[] = {
1230 {
1231 - .name = "axp221-pek",
1232 - .num_resources = ARRAY_SIZE(axp803_pek_resources),
1233 - .resources = axp803_pek_resources,
1234 + .name = "axp221-pek",
1235 + .num_resources = ARRAY_SIZE(axp803_pek_resources),
1236 + .resources = axp803_pek_resources,
1237 + }, {
1238 + .name = "axp20x-gpio",
1239 + .of_compatible = "x-powers,axp813-gpio",
1240 + }, {
1241 + .name = "axp813-adc",
1242 + .of_compatible = "x-powers,axp813-adc",
1243 + }, {
1244 + .name = "axp20x-battery-power-supply",
1245 + .of_compatible = "x-powers,axp813-battery-power-supply",
1246 + }, {
1247 + .name = "axp20x-ac-power-supply",
1248 + .of_compatible = "x-powers,axp813-ac-power-supply",
1249 + .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
1250 + .resources = axp20x_ac_power_supply_resources,
1251 },
1252 - { .name = "axp20x-regulator" },
1253 + { .name = "axp20x-regulator" },
1254 };
1255
1256 static const struct mfd_cell axp806_self_working_cells[] = {
1257 {
1258 - .name = "axp221-pek",
1259 - .num_resources = ARRAY_SIZE(axp806_pek_resources),
1260 - .resources = axp806_pek_resources,
1261 + .name = "axp221-pek",
1262 + .num_resources = ARRAY_SIZE(axp806_pek_resources),
1263 + .resources = axp806_pek_resources,
1264 },
1265 - { .name = "axp20x-regulator" },
1266 + { .name = "axp20x-regulator" },
1267 };
1268
1269 static const struct mfd_cell axp806_cells[] = {
1270 {
1271 - .id = 2,
1272 - .name = "axp20x-regulator",
1273 + .id = 2,
1274 + .name = "axp20x-regulator",
1275 },
1276 };
1277
1278 static const struct mfd_cell axp809_cells[] = {
1279 {
1280 - .name = "axp221-pek",
1281 - .num_resources = ARRAY_SIZE(axp809_pek_resources),
1282 - .resources = axp809_pek_resources,
1283 + .name = "axp221-pek",
1284 + .num_resources = ARRAY_SIZE(axp809_pek_resources),
1285 + .resources = axp809_pek_resources,
1286 }, {
1287 - .id = 1,
1288 - .name = "axp20x-regulator",
1289 + .id = 1,
1290 + .name = "axp20x-regulator",
1291 },
1292 };
1293
1294 static const struct mfd_cell axp813_cells[] = {
1295 {
1296 - .name = "axp221-pek",
1297 - .num_resources = ARRAY_SIZE(axp803_pek_resources),
1298 - .resources = axp803_pek_resources,
1299 + .name = "axp221-pek",
1300 + .num_resources = ARRAY_SIZE(axp803_pek_resources),
1301 + .resources = axp803_pek_resources,
1302 }, {
1303 - .name = "axp20x-regulator",
1304 + .name = "axp20x-regulator",
1305 }, {
1306 - .name = "axp20x-gpio",
1307 - .of_compatible = "x-powers,axp813-gpio",
1308 + .name = "axp20x-gpio",
1309 + .of_compatible = "x-powers,axp813-gpio",
1310 }, {
1311 - .name = "axp813-adc",
1312 - .of_compatible = "x-powers,axp813-adc",
1313 + .name = "axp813-adc",
1314 + .of_compatible = "x-powers,axp813-adc",
1315 }, {
1316 .name = "axp20x-battery-power-supply",
1317 .of_compatible = "x-powers,axp813-battery-power-supply",
1318 + }, {
1319 + .name = "axp20x-ac-power-supply",
1320 + .of_compatible = "x-powers,axp813-ac-power-supply",
1321 + .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
1322 + .resources = axp20x_ac_power_supply_resources,
1323 },
1324 };
1325
1326 diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
1327 index 503979c81dae1..fab3cdc27ed64 100644
1328 --- a/drivers/mfd/bd9571mwv.c
1329 +++ b/drivers/mfd/bd9571mwv.c
1330 @@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
1331 };
1332
1333 static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
1334 + regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
1335 regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
1336 regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
1337 regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
1338 diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
1339 index b99a194ce5a4a..2d0fee488c5aa 100644
1340 --- a/drivers/mfd/cros_ec_dev.c
1341 +++ b/drivers/mfd/cros_ec_dev.c
1342 @@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
1343
1344 cros_ec_debugfs_remove(ec);
1345
1346 + mfd_remove_devices(ec->dev);
1347 cdev_del(&ec->cdev);
1348 device_unregister(&ec->class_dev);
1349 return 0;
1350 diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
1351 index 5970b8def5487..aec20e1c7d3d5 100644
1352 --- a/drivers/mfd/db8500-prcmu.c
1353 +++ b/drivers/mfd/db8500-prcmu.c
1354 @@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
1355 .irq_unmask = prcmu_irq_unmask,
1356 };
1357
1358 -static __init char *fw_project_name(u32 project)
1359 +static char *fw_project_name(u32 project)
1360 {
1361 switch (project) {
1362 case PRCMU_FW_PROJECT_U8500:
1363 @@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
1364 INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
1365 }
1366
1367 -static void __init init_prcm_registers(void)
1368 +static void init_prcm_registers(void)
1369 {
1370 u32 val;
1371
1372 diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
1373 index f475e848252fa..d0bf50e3568d7 100644
1374 --- a/drivers/mfd/mc13xxx-core.c
1375 +++ b/drivers/mfd/mc13xxx-core.c
1376 @@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
1377
1378 mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
1379
1380 - mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
1381 + ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
1382 + if (ret)
1383 + goto out;
1384
1385 adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
1386 MC13XXX_ADC0_CHRGRAWDIV;
1387 diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
1388 index 77b64bd64df36..ab24e176ef448 100644
1389 --- a/drivers/mfd/mt6397-core.c
1390 +++ b/drivers/mfd/mt6397-core.c
1391 @@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
1392
1393 default:
1394 dev_err(&pdev->dev, "unsupported chip: %d\n", id);
1395 - ret = -ENODEV;
1396 - break;
1397 + return -ENODEV;
1398 }
1399
1400 if (ret) {
1401 diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
1402 index 52fafea06067e..8d420c37b2a61 100644
1403 --- a/drivers/mfd/qcom_rpm.c
1404 +++ b/drivers/mfd/qcom_rpm.c
1405 @@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
1406 return -EFAULT;
1407 }
1408
1409 + writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
1410 + writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
1411 + writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
1412 +
1413 dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
1414 fw_version[1],
1415 fw_version[2]);
1416 diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
1417 index c2d47d78705b8..fd111296b9592 100644
1418 --- a/drivers/mfd/ti_am335x_tscadc.c
1419 +++ b/drivers/mfd/ti_am335x_tscadc.c
1420 @@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
1421 cell->pdata_size = sizeof(tscadc);
1422 }
1423
1424 - err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
1425 - tscadc->used_cells, NULL, 0, NULL);
1426 + err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
1427 + tscadc->cells, tscadc->used_cells, NULL,
1428 + 0, NULL);
1429 if (err < 0)
1430 goto err_disable_clk;
1431
1432 diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
1433 index 910f569ff77c1..8bcdecf494d05 100644
1434 --- a/drivers/mfd/tps65218.c
1435 +++ b/drivers/mfd/tps65218.c
1436 @@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
1437
1438 mutex_init(&tps->tps_lock);
1439
1440 - ret = regmap_add_irq_chip(tps->regmap, tps->irq,
1441 - IRQF_ONESHOT, 0, &tps65218_irq_chip,
1442 - &tps->irq_data);
1443 + ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
1444 + IRQF_ONESHOT, 0, &tps65218_irq_chip,
1445 + &tps->irq_data);
1446 if (ret < 0)
1447 return ret;
1448
1449 @@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
1450 ARRAY_SIZE(tps65218_cells), NULL, 0,
1451 regmap_irq_get_domain(tps->irq_data));
1452
1453 - if (ret < 0)
1454 - goto err_irq;
1455 -
1456 - return 0;
1457 -
1458 -err_irq:
1459 - regmap_del_irq_chip(tps->irq, tps->irq_data);
1460 -
1461 return ret;
1462 }
1463
1464 -static int tps65218_remove(struct i2c_client *client)
1465 -{
1466 - struct tps65218 *tps = i2c_get_clientdata(client);
1467 -
1468 - regmap_del_irq_chip(tps->irq, tps->irq_data);
1469 -
1470 - return 0;
1471 -}
1472 -
1473 static const struct i2c_device_id tps65218_id_table[] = {
1474 { "tps65218", TPS65218 },
1475 { },
1476 @@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
1477 .of_match_table = of_tps65218_match_table,
1478 },
1479 .probe = tps65218_probe,
1480 - .remove = tps65218_remove,
1481 .id_table = tps65218_id_table,
1482 };
1483
1484 diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
1485 index 4be3d239da9ec..299016bc46d90 100644
1486 --- a/drivers/mfd/twl-core.c
1487 +++ b/drivers/mfd/twl-core.c
1488 @@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
1489 * letting it generate the right frequencies for USB, MADC, and
1490 * other purposes.
1491 */
1492 -static inline int __init protect_pm_master(void)
1493 +static inline int protect_pm_master(void)
1494 {
1495 int e = 0;
1496
1497 @@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
1498 return e;
1499 }
1500
1501 -static inline int __init unprotect_pm_master(void)
1502 +static inline int unprotect_pm_master(void)
1503 {
1504 int e = 0;
1505
1506 diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
1507 index 1ee68bd440fbc..16c6e2accfaa5 100644
1508 --- a/drivers/mfd/wm5110-tables.c
1509 +++ b/drivers/mfd/wm5110-tables.c
1510 @@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
1511 { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
1512 { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
1513 { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
1514 + { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
1515 { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
1516 { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
1517 { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
1518 @@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
1519 case ARIZONA_ASRC_ENABLE:
1520 case ARIZONA_ASRC_STATUS:
1521 case ARIZONA_ASRC_RATE1:
1522 + case ARIZONA_ASRC_RATE2:
1523 case ARIZONA_ISRC_1_CTRL_1:
1524 case ARIZONA_ISRC_1_CTRL_2:
1525 case ARIZONA_ISRC_1_CTRL_3:
1526 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1527 index a70bb1bb90e7d..a6eacf2099c30 100644
1528 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
1529 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1530 @@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
1531 goto err_device_destroy;
1532 }
1533
1534 - clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1535 - /* Make sure we don't have a race with AENQ Links state handler */
1536 - if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1537 - netif_carrier_on(adapter->netdev);
1538 -
1539 rc = ena_enable_msix_and_set_admin_interrupts(adapter,
1540 adapter->num_queues);
1541 if (rc) {
1542 @@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
1543 }
1544
1545 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
1546 +
1547 + clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1548 + if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1549 + netif_carrier_on(adapter->netdev);
1550 +
1551 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1552 dev_err(&pdev->dev,
1553 "Device reset completed successfully, Driver info: %s\n",
1554 diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1555 index 6e0f47f2c8a37..3e53be0fcd7ec 100644
1556 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1557 +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1558 @@ -2051,6 +2051,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1559 bool nonlinear = skb_is_nonlinear(skb);
1560 struct rtnl_link_stats64 *percpu_stats;
1561 struct dpaa_percpu_priv *percpu_priv;
1562 + struct netdev_queue *txq;
1563 struct dpaa_priv *priv;
1564 struct qm_fd fd;
1565 int offset = 0;
1566 @@ -2100,6 +2101,11 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1567 if (unlikely(err < 0))
1568 goto skb_to_fd_failed;
1569
1570 + txq = netdev_get_tx_queue(net_dev, queue_mapping);
1571 +
1572 + /* LLTX requires to do our own update of trans_start */
1573 + txq->trans_start = jiffies;
1574 +
1575 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1576 fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
1577 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1578 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1579 index ad1779fc410e6..a78bfafd212c8 100644
1580 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1581 +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1582 @@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
1583 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
1584 int i;
1585
1586 - vf_cb->mac_cb = NULL;
1587 -
1588 - kfree(vf_cb);
1589 -
1590 for (i = 0; i < handle->q_num; i++)
1591 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
1592 +
1593 + kfree(vf_cb);
1594 }
1595
1596 static int hns_ae_wait_flow_down(struct hnae_handle *handle)
1597 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1598 index db00bf1c23f5a..d47d4f86ac11d 100644
1599 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1600 +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1601 @@ -620,6 +620,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
1602 }
1603 #endif
1604
1605 +#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
1606 +
1607 /* We reach this function only after checking that any of
1608 * the (IPv4 | IPv6) bits are set in cqe->status.
1609 */
1610 @@ -627,9 +629,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
1611 netdev_features_t dev_features)
1612 {
1613 __wsum hw_checksum = 0;
1614 + void *hdr;
1615 +
1616 + /* CQE csum doesn't cover padding octets in short ethernet
1617 + * frames. And the pad field is appended prior to calculating
1618 + * and appending the FCS field.
1619 + *
1620 + * Detecting these padded frames requires to verify and parse
1621 + * IP headers, so we simply force all those small frames to skip
1622 + * checksum complete.
1623 + */
1624 + if (short_frame(skb->len))
1625 + return -EINVAL;
1626
1627 - void *hdr = (u8 *)va + sizeof(struct ethhdr);
1628 -
1629 + hdr = (u8 *)va + sizeof(struct ethhdr);
1630 hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
1631
1632 if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
1633 @@ -822,6 +835,11 @@ xdp_drop_no_cnt:
1634 skb_record_rx_queue(skb, cq_ring);
1635
1636 if (likely(dev->features & NETIF_F_RXCSUM)) {
1637 + /* TODO: For IP non TCP/UDP packets when csum complete is
1638 + * not an option (not supported or any other reason) we can
1639 + * actually check cqe IPOK status bit and report
1640 + * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
1641 + */
1642 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
1643 MLX4_CQE_STATUS_UDP)) &&
1644 (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
1645 diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
1646 index 4b4351141b94c..76b84d08a058b 100644
1647 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c
1648 +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
1649 @@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
1650 int i;
1651
1652 if (chunk->nsg > 0)
1653 - pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
1654 + pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
1655 PCI_DMA_BIDIRECTIONAL);
1656
1657 for (i = 0; i < chunk->npages; ++i)
1658 - __free_pages(sg_page(&chunk->mem[i]),
1659 - get_order(chunk->mem[i].length));
1660 + __free_pages(sg_page(&chunk->sg[i]),
1661 + get_order(chunk->sg[i].length));
1662 }
1663
1664 static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
1665 @@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
1666
1667 for (i = 0; i < chunk->npages; ++i)
1668 dma_free_coherent(&dev->persist->pdev->dev,
1669 - chunk->mem[i].length,
1670 - lowmem_page_address(sg_page(&chunk->mem[i])),
1671 - sg_dma_address(&chunk->mem[i]));
1672 + chunk->buf[i].size,
1673 + chunk->buf[i].addr,
1674 + chunk->buf[i].dma_addr);
1675 }
1676
1677 void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
1678 @@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
1679 return 0;
1680 }
1681
1682 -static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
1683 - int order, gfp_t gfp_mask)
1684 +static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
1685 + int order, gfp_t gfp_mask)
1686 {
1687 - void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
1688 - &sg_dma_address(mem), gfp_mask);
1689 - if (!buf)
1690 + buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
1691 + &buf->dma_addr, gfp_mask);
1692 + if (!buf->addr)
1693 return -ENOMEM;
1694
1695 - if (offset_in_page(buf)) {
1696 - dma_free_coherent(dev, PAGE_SIZE << order,
1697 - buf, sg_dma_address(mem));
1698 + if (offset_in_page(buf->addr)) {
1699 + dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
1700 + buf->dma_addr);
1701 return -ENOMEM;
1702 }
1703
1704 - sg_set_buf(mem, buf, PAGE_SIZE << order);
1705 - sg_dma_len(mem) = PAGE_SIZE << order;
1706 + buf->size = PAGE_SIZE << order;
1707 return 0;
1708 }
1709
1710 @@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1711
1712 while (npages > 0) {
1713 if (!chunk) {
1714 - chunk = kmalloc_node(sizeof(*chunk),
1715 + chunk = kzalloc_node(sizeof(*chunk),
1716 gfp_mask & ~(__GFP_HIGHMEM |
1717 __GFP_NOWARN),
1718 dev->numa_node);
1719 if (!chunk) {
1720 - chunk = kmalloc(sizeof(*chunk),
1721 + chunk = kzalloc(sizeof(*chunk),
1722 gfp_mask & ~(__GFP_HIGHMEM |
1723 __GFP_NOWARN));
1724 if (!chunk)
1725 goto fail;
1726 }
1727 + chunk->coherent = coherent;
1728
1729 - sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
1730 - chunk->npages = 0;
1731 - chunk->nsg = 0;
1732 + if (!coherent)
1733 + sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
1734 list_add_tail(&chunk->list, &icm->chunk_list);
1735 }
1736
1737 @@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1738
1739 if (coherent)
1740 ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
1741 - &chunk->mem[chunk->npages],
1742 - cur_order, mask);
1743 + &chunk->buf[chunk->npages],
1744 + cur_order, mask);
1745 else
1746 - ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
1747 + ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
1748 cur_order, mask,
1749 dev->numa_node);
1750
1751 @@ -205,7 +204,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1752 if (coherent)
1753 ++chunk->nsg;
1754 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
1755 - chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
1756 + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
1757 chunk->npages,
1758 PCI_DMA_BIDIRECTIONAL);
1759
1760 @@ -220,7 +219,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1761 }
1762
1763 if (!coherent && chunk) {
1764 - chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
1765 + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
1766 chunk->npages,
1767 PCI_DMA_BIDIRECTIONAL);
1768
1769 @@ -320,7 +319,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
1770 u64 idx;
1771 struct mlx4_icm_chunk *chunk;
1772 struct mlx4_icm *icm;
1773 - struct page *page = NULL;
1774 + void *addr = NULL;
1775
1776 if (!table->lowmem)
1777 return NULL;
1778 @@ -336,28 +335,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
1779
1780 list_for_each_entry(chunk, &icm->chunk_list, list) {
1781 for (i = 0; i < chunk->npages; ++i) {
1782 + dma_addr_t dma_addr;
1783 + size_t len;
1784 +
1785 + if (table->coherent) {
1786 + len = chunk->buf[i].size;
1787 + dma_addr = chunk->buf[i].dma_addr;
1788 + addr = chunk->buf[i].addr;
1789 + } else {
1790 + struct page *page;
1791 +
1792 + len = sg_dma_len(&chunk->sg[i]);
1793 + dma_addr = sg_dma_address(&chunk->sg[i]);
1794 +
1795 + /* XXX: we should never do this for highmem
1796 + * allocation. This function either needs
1797 + * to be split, or the kernel virtual address
1798 + * return needs to be made optional.
1799 + */
1800 + page = sg_page(&chunk->sg[i]);
1801 + addr = lowmem_page_address(page);
1802 + }
1803 +
1804 if (dma_handle && dma_offset >= 0) {
1805 - if (sg_dma_len(&chunk->mem[i]) > dma_offset)
1806 - *dma_handle = sg_dma_address(&chunk->mem[i]) +
1807 - dma_offset;
1808 - dma_offset -= sg_dma_len(&chunk->mem[i]);
1809 + if (len > dma_offset)
1810 + *dma_handle = dma_addr + dma_offset;
1811 + dma_offset -= len;
1812 }
1813 +
1814 /*
1815 * DMA mapping can merge pages but not split them,
1816 * so if we found the page, dma_handle has already
1817 * been assigned to.
1818 */
1819 - if (chunk->mem[i].length > offset) {
1820 - page = sg_page(&chunk->mem[i]);
1821 + if (len > offset)
1822 goto out;
1823 - }
1824 - offset -= chunk->mem[i].length;
1825 + offset -= len;
1826 }
1827 }
1828
1829 + addr = NULL;
1830 out:
1831 mutex_unlock(&table->mutex);
1832 - return page ? lowmem_page_address(page) + offset : NULL;
1833 + return addr ? addr + offset : NULL;
1834 }
1835
1836 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
1837 diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
1838 index c9169a490557c..d199874b1c074 100644
1839 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h
1840 +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
1841 @@ -47,11 +47,21 @@ enum {
1842 MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
1843 };
1844
1845 +struct mlx4_icm_buf {
1846 + void *addr;
1847 + size_t size;
1848 + dma_addr_t dma_addr;
1849 +};
1850 +
1851 struct mlx4_icm_chunk {
1852 struct list_head list;
1853 int npages;
1854 int nsg;
1855 - struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
1856 + bool coherent;
1857 + union {
1858 + struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
1859 + struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
1860 + };
1861 };
1862
1863 struct mlx4_icm {
1864 @@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
1865
1866 static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
1867 {
1868 - return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
1869 + if (iter->chunk->coherent)
1870 + return iter->chunk->buf[iter->page_idx].dma_addr;
1871 + else
1872 + return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
1873 }
1874
1875 static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
1876 {
1877 - return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
1878 + if (iter->chunk->coherent)
1879 + return iter->chunk->buf[iter->page_idx].size;
1880 + else
1881 + return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
1882 }
1883
1884 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
1885 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1886 index 1183248029264..7c72b3b5eedfa 100644
1887 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
1888 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1889 @@ -636,6 +636,7 @@ enum {
1890 MLX5E_STATE_ASYNC_EVENTS_ENABLED,
1891 MLX5E_STATE_OPENED,
1892 MLX5E_STATE_DESTROYING,
1893 + MLX5E_STATE_XDP_TX_ENABLED,
1894 };
1895
1896 struct mlx5e_rqt {
1897 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1898 index ad6d471d00dd4..4a33c9a7cac7e 100644
1899 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1900 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1901 @@ -262,7 +262,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1902 int sq_num;
1903 int i;
1904
1905 - if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
1906 + /* this flag is sufficient, no need to test internal sq state */
1907 + if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
1908 return -ENETDOWN;
1909
1910 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1911 @@ -275,9 +276,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1912
1913 sq = &priv->channels.c[sq_num]->xdpsq;
1914
1915 - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
1916 - return -ENETDOWN;
1917 -
1918 for (i = 0; i < n; i++) {
1919 struct xdp_frame *xdpf = frames[i];
1920 struct mlx5e_xdp_info xdpi;
1921 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1922 index 6dfab045925f0..4d096623178b9 100644
1923 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1924 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1925 @@ -49,6 +49,23 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
1926 int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1927 u32 flags);
1928
1929 +static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
1930 +{
1931 + set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1932 +}
1933 +
1934 +static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
1935 +{
1936 + clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1937 + /* let other device's napi(s) see our new state */
1938 + synchronize_rcu();
1939 +}
1940 +
1941 +static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
1942 +{
1943 + return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1944 +}
1945 +
1946 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
1947 {
1948 struct mlx5_wq_cyc *wq = &sq->wq;
1949 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1950 index 9577d06578398..1d66a4e22d64f 100644
1951 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1952 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1953 @@ -2903,6 +2903,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
1954
1955 mlx5e_build_tx2sq_maps(priv);
1956 mlx5e_activate_channels(&priv->channels);
1957 + mlx5e_xdp_tx_enable(priv);
1958 netif_tx_start_all_queues(priv->netdev);
1959
1960 if (MLX5_ESWITCH_MANAGER(priv->mdev))
1961 @@ -2924,6 +2925,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
1962 */
1963 netif_tx_stop_all_queues(priv->netdev);
1964 netif_tx_disable(priv->netdev);
1965 + mlx5e_xdp_tx_disable(priv);
1966 mlx5e_deactivate_channels(&priv->channels);
1967 }
1968
1969 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1970 index 820fe85100b08..4dccc84fdcf2c 100644
1971 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1972 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1973 @@ -143,6 +143,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
1974
1975 s->tx_packets += sq_stats->packets;
1976 s->tx_bytes += sq_stats->bytes;
1977 + s->tx_queue_dropped += sq_stats->dropped;
1978 }
1979 }
1980 }
1981 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1982 index 4e0151918db13..3fba80a8b436f 100644
1983 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1984 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1985 @@ -98,6 +98,7 @@ struct mlx5e_tc_flow_parse_attr {
1986 struct ip_tunnel_info tun_info;
1987 struct mlx5_flow_spec spec;
1988 int num_mod_hdr_actions;
1989 + int max_mod_hdr_actions;
1990 void *mod_hdr_actions;
1991 int mirred_ifindex;
1992 };
1993 @@ -1888,9 +1889,9 @@ static struct mlx5_fields fields[] = {
1994 OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
1995 };
1996
1997 -/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
1998 - * max from the SW pedit action. On success, it says how many HW actions were
1999 - * actually parsed.
2000 +/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
2001 + * max from the SW pedit action. On success, attr->num_mod_hdr_actions
2002 + * says how many HW actions were actually parsed.
2003 */
2004 static int offload_pedit_fields(struct pedit_headers *masks,
2005 struct pedit_headers *vals,
2006 @@ -1914,9 +1915,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
2007 add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
2008
2009 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2010 - action = parse_attr->mod_hdr_actions;
2011 - max_actions = parse_attr->num_mod_hdr_actions;
2012 - nactions = 0;
2013 + action = parse_attr->mod_hdr_actions +
2014 + parse_attr->num_mod_hdr_actions * action_size;
2015 +
2016 + max_actions = parse_attr->max_mod_hdr_actions;
2017 + nactions = parse_attr->num_mod_hdr_actions;
2018
2019 for (i = 0; i < ARRAY_SIZE(fields); i++) {
2020 f = &fields[i];
2021 @@ -2027,7 +2030,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
2022 if (!parse_attr->mod_hdr_actions)
2023 return -ENOMEM;
2024
2025 - parse_attr->num_mod_hdr_actions = max_actions;
2026 + parse_attr->max_mod_hdr_actions = max_actions;
2027 return 0;
2028 }
2029
2030 @@ -2073,9 +2076,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2031 goto out_err;
2032 }
2033
2034 - err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
2035 - if (err)
2036 - goto out_err;
2037 + if (!parse_attr->mod_hdr_actions) {
2038 + err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
2039 + if (err)
2040 + goto out_err;
2041 + }
2042
2043 err = offload_pedit_fields(masks, vals, parse_attr, extack);
2044 if (err < 0)
2045 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2046 index 6dacaeba2fbff..0b03d65474e93 100644
2047 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2048 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2049 @@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
2050 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
2051 contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
2052 if (unlikely(contig_wqebbs_room < num_wqebbs)) {
2053 +#ifdef CONFIG_MLX5_EN_IPSEC
2054 + struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
2055 +#endif
2056 mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
2057 mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
2058 +#ifdef CONFIG_MLX5_EN_IPSEC
2059 + wqe->eth = cur_eth;
2060 +#endif
2061 }
2062
2063 /* fill wqe */
2064 diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
2065 index 8a291eb36c64c..7338c9bac4e6a 100644
2066 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
2067 +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
2068 @@ -78,6 +78,7 @@ config MLXSW_SPECTRUM
2069 depends on IPV6 || IPV6=n
2070 depends on NET_IPGRE || NET_IPGRE=n
2071 depends on IPV6_GRE || IPV6_GRE=n
2072 + depends on VXLAN || VXLAN=n
2073 select GENERIC_ALLOCATOR
2074 select PARMAN
2075 select MLXFW
2076 diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
2077 index c7901a3f2a794..a903e97793f9a 100644
2078 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
2079 +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
2080 @@ -1367,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
2081 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
2082
2083 if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
2084 - break;
2085 + return 0;
2086 cond_resched();
2087 } while (time_before(jiffies, end));
2088 - return 0;
2089 + return -EBUSY;
2090 }
2091
2092 static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
2093 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
2094 index e3c6fe8b1d406..1dcf152b28138 100644
2095 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
2096 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
2097 @@ -75,7 +75,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
2098 act_set = mlxsw_afa_block_first_set(rulei->act_block);
2099 mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
2100
2101 - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
2102 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
2103 + if (err)
2104 + goto err_ptce2_write;
2105 +
2106 + return 0;
2107 +
2108 +err_ptce2_write:
2109 + cregion->ops->entry_remove(cregion, centry);
2110 + return err;
2111 }
2112
2113 static void
2114 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2115 index c514af438fc28..b606db9833e9e 100644
2116 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2117 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2118 @@ -1219,7 +1219,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
2119 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
2120 {
2121 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
2122 - MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
2123 + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
2124 }
2125
2126 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
2127 @@ -1276,7 +1276,7 @@ out:
2128 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2129 const char *mac, u16 fid, bool adding,
2130 enum mlxsw_reg_sfd_rec_action action,
2131 - bool dynamic)
2132 + enum mlxsw_reg_sfd_rec_policy policy)
2133 {
2134 char *sfd_pl;
2135 u8 num_rec;
2136 @@ -1287,8 +1287,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2137 return -ENOMEM;
2138
2139 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
2140 - mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
2141 - mac, fid, action, local_port);
2142 + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
2143 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
2144 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
2145 if (err)
2146 @@ -1307,7 +1306,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2147 bool dynamic)
2148 {
2149 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
2150 - MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
2151 + MLXSW_REG_SFD_REC_ACTION_NOP,
2152 + mlxsw_sp_sfd_rec_policy(dynamic));
2153 }
2154
2155 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
2156 @@ -1315,7 +1315,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
2157 {
2158 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
2159 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
2160 - false);
2161 + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
2162 }
2163
2164 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
2165 diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2166 index c6f4bab67a5fc..9e728ec82c218 100644
2167 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2168 +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2169 @@ -1603,6 +1603,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
2170 cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
2171 rx_prod.bd_prod = cpu_to_le16(bd_prod);
2172 rx_prod.cqe_prod = cpu_to_le16(cq_prod);
2173 +
2174 + /* Make sure chain element is updated before ringing the doorbell */
2175 + dma_wmb();
2176 +
2177 DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
2178 }
2179
2180 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
2181 index 6c5092e7771cd..c5e25580a43fa 100644
2182 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
2183 +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
2184 @@ -263,6 +263,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
2185 struct stmmac_extra_stats *x, u32 chan)
2186 {
2187 u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
2188 + u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
2189 int ret = 0;
2190
2191 /* ABNORMAL interrupts */
2192 @@ -282,8 +283,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
2193 x->normal_irq_n++;
2194
2195 if (likely(intr_status & XGMAC_RI)) {
2196 - u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
2197 - if (likely(value & XGMAC_RIE)) {
2198 + if (likely(intr_en & XGMAC_RIE)) {
2199 x->rx_normal_irq_n++;
2200 ret |= handle_rx;
2201 }
2202 @@ -295,7 +295,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
2203 }
2204
2205 /* Clear interrupts */
2206 - writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
2207 + writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
2208
2209 return ret;
2210 }
2211 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2212 index c4a35e932f052..5d83d6a7694b0 100644
2213 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2214 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2215 @@ -3525,27 +3525,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget)
2216 struct stmmac_channel *ch =
2217 container_of(napi, struct stmmac_channel, napi);
2218 struct stmmac_priv *priv = ch->priv_data;
2219 - int work_done = 0, work_rem = budget;
2220 + int work_done, rx_done = 0, tx_done = 0;
2221 u32 chan = ch->index;
2222
2223 priv->xstats.napi_poll++;
2224
2225 - if (ch->has_tx) {
2226 - int done = stmmac_tx_clean(priv, work_rem, chan);
2227 + if (ch->has_tx)
2228 + tx_done = stmmac_tx_clean(priv, budget, chan);
2229 + if (ch->has_rx)
2230 + rx_done = stmmac_rx(priv, budget, chan);
2231
2232 - work_done += done;
2233 - work_rem -= done;
2234 - }
2235 -
2236 - if (ch->has_rx) {
2237 - int done = stmmac_rx(priv, work_rem, chan);
2238 + work_done = max(rx_done, tx_done);
2239 + work_done = min(work_done, budget);
2240
2241 - work_done += done;
2242 - work_rem -= done;
2243 - }
2244 + if (work_done < budget && napi_complete_done(napi, work_done)) {
2245 + int stat;
2246
2247 - if (work_done < budget && napi_complete_done(napi, work_done))
2248 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
2249 + stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2250 + &priv->xstats, chan);
2251 + if (stat && napi_reschedule(napi))
2252 + stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2253 + }
2254
2255 return work_done;
2256 }
2257 @@ -4194,6 +4195,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2258 return ret;
2259 }
2260
2261 + /* Rx Watchdog is available in the COREs newer than the 3.40.
2262 + * In some cases, for example on bugged HW this feature
2263 + * has to be disabled and this can be done by passing the
2264 + * riwt_off field from the platform.
2265 + */
2266 + if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
2267 + (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
2268 + priv->use_riwt = 1;
2269 + dev_info(priv->device,
2270 + "Enable RX Mitigation via HW Watchdog Timer\n");
2271 + }
2272 +
2273 return 0;
2274 }
2275
2276 @@ -4326,18 +4339,6 @@ int stmmac_dvr_probe(struct device *device,
2277 if (flow_ctrl)
2278 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
2279
2280 - /* Rx Watchdog is available in the COREs newer than the 3.40.
2281 - * In some case, for example on bugged HW this feature
2282 - * has to be disable and this can be done by passing the
2283 - * riwt_off field from the platform.
2284 - */
2285 - if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
2286 - (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
2287 - priv->use_riwt = 1;
2288 - dev_info(priv->device,
2289 - "Enable RX Mitigation via HW Watchdog Timer\n");
2290 - }
2291 -
2292 /* Setup channels NAPI */
2293 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
2294
2295 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
2296 index c54a50dbd5ac2..d819e8eaba122 100644
2297 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
2298 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
2299 @@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
2300 */
2301 static void stmmac_pci_remove(struct pci_dev *pdev)
2302 {
2303 + int i;
2304 +
2305 stmmac_dvr_remove(&pdev->dev);
2306 +
2307 + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
2308 + if (pci_resource_len(pdev, i) == 0)
2309 + continue;
2310 + pcim_iounmap_regions(pdev, BIT(i));
2311 + break;
2312 + }
2313 +
2314 pci_disable_device(pdev);
2315 }
2316
2317 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2318 index 531294f4978bc..58ea18af9813a 100644
2319 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2320 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2321 @@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
2322 /* Queue 0 is not AVB capable */
2323 if (queue <= 0 || queue >= tx_queues_count)
2324 return -EINVAL;
2325 + if (!priv->dma_cap.av)
2326 + return -EOPNOTSUPP;
2327 if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
2328 return -EOPNOTSUPP;
2329
2330 diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
2331 index a0cd1c41cf5f0..2e6e11d8cf5cb 100644
2332 --- a/drivers/net/geneve.c
2333 +++ b/drivers/net/geneve.c
2334 @@ -1426,9 +1426,13 @@ static void geneve_link_config(struct net_device *dev,
2335 }
2336 #if IS_ENABLED(CONFIG_IPV6)
2337 case AF_INET6: {
2338 - struct rt6_info *rt = rt6_lookup(geneve->net,
2339 - &info->key.u.ipv6.dst, NULL, 0,
2340 - NULL, 0);
2341 + struct rt6_info *rt;
2342 +
2343 + if (!__in6_dev_get(dev))
2344 + break;
2345 +
2346 + rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
2347 + NULL, 0);
2348
2349 if (rt && rt->dst.dev)
2350 ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
2351 diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
2352 index 51611c7a23d1c..22dfbd4c6aaf4 100644
2353 --- a/drivers/net/phy/micrel.c
2354 +++ b/drivers/net/phy/micrel.c
2355 @@ -1076,6 +1076,7 @@ static struct phy_driver ksphy_driver[] = {
2356 .driver_data = &ksz9021_type,
2357 .probe = kszphy_probe,
2358 .config_init = ksz9031_config_init,
2359 + .soft_reset = genphy_soft_reset,
2360 .read_status = ksz9031_read_status,
2361 .ack_interrupt = kszphy_ack_interrupt,
2362 .config_intr = kszphy_config_intr,
2363 diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
2364 index 9b8dd0d0ee42c..b60c82065fd11 100644
2365 --- a/drivers/net/phy/phylink.c
2366 +++ b/drivers/net/phy/phylink.c
2367 @@ -475,6 +475,17 @@ static void phylink_run_resolve(struct phylink *pl)
2368 queue_work(system_power_efficient_wq, &pl->resolve);
2369 }
2370
2371 +static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
2372 +{
2373 + unsigned long state = pl->phylink_disable_state;
2374 +
2375 + set_bit(bit, &pl->phylink_disable_state);
2376 + if (state == 0) {
2377 + queue_work(system_power_efficient_wq, &pl->resolve);
2378 + flush_work(&pl->resolve);
2379 + }
2380 +}
2381 +
2382 static void phylink_fixed_poll(struct timer_list *t)
2383 {
2384 struct phylink *pl = container_of(t, struct phylink, link_poll);
2385 @@ -928,9 +939,7 @@ void phylink_stop(struct phylink *pl)
2386 if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
2387 del_timer_sync(&pl->link_poll);
2388
2389 - set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
2390 - queue_work(system_power_efficient_wq, &pl->resolve);
2391 - flush_work(&pl->resolve);
2392 + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
2393 }
2394 EXPORT_SYMBOL_GPL(phylink_stop);
2395
2396 @@ -1637,9 +1646,7 @@ static void phylink_sfp_link_down(void *upstream)
2397
2398 ASSERT_RTNL();
2399
2400 - set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
2401 - queue_work(system_power_efficient_wq, &pl->resolve);
2402 - flush_work(&pl->resolve);
2403 + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
2404 }
2405
2406 static void phylink_sfp_link_up(void *upstream)
2407 diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
2408 index ad9db652874dc..fef701bfad62e 100644
2409 --- a/drivers/net/phy/sfp-bus.c
2410 +++ b/drivers/net/phy/sfp-bus.c
2411 @@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
2412 return ret;
2413 }
2414 }
2415 + bus->socket_ops->attach(bus->sfp);
2416 if (bus->started)
2417 bus->socket_ops->start(bus->sfp);
2418 bus->netdev->sfp_bus = bus;
2419 @@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
2420 if (bus->registered) {
2421 if (bus->started)
2422 bus->socket_ops->stop(bus->sfp);
2423 + bus->socket_ops->detach(bus->sfp);
2424 if (bus->phydev && ops && ops->disconnect_phy)
2425 ops->disconnect_phy(bus->upstream);
2426 }
2427 diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
2428 index fd8bb998ae52d..68c8fbf099f87 100644
2429 --- a/drivers/net/phy/sfp.c
2430 +++ b/drivers/net/phy/sfp.c
2431 @@ -184,6 +184,7 @@ struct sfp {
2432
2433 struct gpio_desc *gpio[GPIO_MAX];
2434
2435 + bool attached;
2436 unsigned int state;
2437 struct delayed_work poll;
2438 struct delayed_work timeout;
2439 @@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
2440 */
2441 switch (sfp->sm_mod_state) {
2442 default:
2443 - if (event == SFP_E_INSERT) {
2444 + if (event == SFP_E_INSERT && sfp->attached) {
2445 sfp_module_tx_disable(sfp);
2446 sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
2447 }
2448 @@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
2449 mutex_unlock(&sfp->sm_mutex);
2450 }
2451
2452 +static void sfp_attach(struct sfp *sfp)
2453 +{
2454 + sfp->attached = true;
2455 + if (sfp->state & SFP_F_PRESENT)
2456 + sfp_sm_event(sfp, SFP_E_INSERT);
2457 +}
2458 +
2459 +static void sfp_detach(struct sfp *sfp)
2460 +{
2461 + sfp->attached = false;
2462 + sfp_sm_event(sfp, SFP_E_REMOVE);
2463 +}
2464 +
2465 static void sfp_start(struct sfp *sfp)
2466 {
2467 sfp_sm_event(sfp, SFP_E_DEV_UP);
2468 @@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
2469 }
2470
2471 static const struct sfp_socket_ops sfp_module_ops = {
2472 + .attach = sfp_attach,
2473 + .detach = sfp_detach,
2474 .start = sfp_start,
2475 .stop = sfp_stop,
2476 .module_info = sfp_module_info,
2477 @@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
2478 dev_info(sfp->dev, "Host maximum power %u.%uW\n",
2479 sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
2480
2481 - sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
2482 - if (!sfp->sfp_bus)
2483 - return -ENOMEM;
2484 -
2485 /* Get the initial state, and always signal TX disable,
2486 * since the network interface will not be up.
2487 */
2488 @@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
2489 sfp->state |= SFP_F_RATE_SELECT;
2490 sfp_set_state(sfp, sfp->state);
2491 sfp_module_tx_disable(sfp);
2492 - rtnl_lock();
2493 - if (sfp->state & SFP_F_PRESENT)
2494 - sfp_sm_event(sfp, SFP_E_INSERT);
2495 - rtnl_unlock();
2496
2497 for (i = 0; i < GPIO_MAX; i++) {
2498 if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
2499 @@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
2500 dev_warn(sfp->dev,
2501 "No tx_disable pin: SFP modules will always be emitting.\n");
2502
2503 + sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
2504 + if (!sfp->sfp_bus)
2505 + return -ENOMEM;
2506 +
2507 return 0;
2508 }
2509
2510 diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
2511 index 31b0acf337e27..64f54b0bbd8c4 100644
2512 --- a/drivers/net/phy/sfp.h
2513 +++ b/drivers/net/phy/sfp.h
2514 @@ -7,6 +7,8 @@
2515 struct sfp;
2516
2517 struct sfp_socket_ops {
2518 + void (*attach)(struct sfp *sfp);
2519 + void (*detach)(struct sfp *sfp);
2520 void (*start)(struct sfp *sfp);
2521 void (*stop)(struct sfp *sfp);
2522 int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
2523 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2524 index 364f514d56d87..86db1205a3968 100644
2525 --- a/drivers/net/team/team.c
2526 +++ b/drivers/net/team/team.c
2527 @@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
2528 }
2529 }
2530
2531 -static bool __team_option_inst_tmp_find(const struct list_head *opts,
2532 - const struct team_option_inst *needle)
2533 -{
2534 - struct team_option_inst *opt_inst;
2535 -
2536 - list_for_each_entry(opt_inst, opts, tmp_list)
2537 - if (opt_inst == needle)
2538 - return true;
2539 - return false;
2540 -}
2541 -
2542 static int __team_options_register(struct team *team,
2543 const struct team_option *option,
2544 size_t option_count)
2545 @@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2546 int err = 0;
2547 int i;
2548 struct nlattr *nl_option;
2549 - LIST_HEAD(opt_inst_list);
2550
2551 rtnl_lock();
2552
2553 @@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2554 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2555 struct nlattr *attr;
2556 struct nlattr *attr_data;
2557 + LIST_HEAD(opt_inst_list);
2558 enum team_option_type opt_type;
2559 int opt_port_ifindex = 0; /* != 0 for per-port options */
2560 u32 opt_array_index = 0;
2561 @@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2562 if (err)
2563 goto team_put;
2564 opt_inst->changed = true;
2565 -
2566 - /* dumb/evil user-space can send us duplicate opt,
2567 - * keep only the last one
2568 - */
2569 - if (__team_option_inst_tmp_find(&opt_inst_list,
2570 - opt_inst))
2571 - continue;
2572 -
2573 list_add(&opt_inst->tmp_list, &opt_inst_list);
2574 }
2575 if (!opt_found) {
2576 err = -ENOENT;
2577 goto team_put;
2578 }
2579 - }
2580
2581 - err = team_nl_send_event_options_get(team, &opt_inst_list);
2582 + err = team_nl_send_event_options_get(team, &opt_inst_list);
2583 + if (err)
2584 + break;
2585 + }
2586
2587 team_put:
2588 team_nl_team_put(team);
2589 diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
2590 index 4d6409605207c..af13d8cf94ad4 100644
2591 --- a/drivers/net/wan/fsl_ucc_hdlc.c
2592 +++ b/drivers/net/wan/fsl_ucc_hdlc.c
2593 @@ -1049,6 +1049,54 @@ static const struct net_device_ops uhdlc_ops = {
2594 .ndo_tx_timeout = uhdlc_tx_timeout,
2595 };
2596
2597 +static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
2598 +{
2599 + struct device_node *np;
2600 + struct platform_device *pdev;
2601 + struct resource *res;
2602 + static int siram_init_flag;
2603 + int ret = 0;
2604 +
2605 + np = of_find_compatible_node(NULL, NULL, name);
2606 + if (!np)
2607 + return -EINVAL;
2608 +
2609 + pdev = of_find_device_by_node(np);
2610 + if (!pdev) {
2611 + pr_err("%pOFn: failed to lookup pdev\n", np);
2612 + of_node_put(np);
2613 + return -EINVAL;
2614 + }
2615 +
2616 + of_node_put(np);
2617 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2618 + if (!res) {
2619 + ret = -EINVAL;
2620 + goto error_put_device;
2621 + }
2622 + *ptr = ioremap(res->start, resource_size(res));
2623 + if (!*ptr) {
2624 + ret = -ENOMEM;
2625 + goto error_put_device;
2626 + }
2627 +
2628 + /* We've remapped the addresses, and we don't need the device any
2629 + * more, so we should release it.
2630 + */
2631 + put_device(&pdev->dev);
2632 +
2633 + if (init_flag && siram_init_flag == 0) {
2634 + memset_io(*ptr, 0, resource_size(res));
2635 + siram_init_flag = 1;
2636 + }
2637 + return 0;
2638 +
2639 +error_put_device:
2640 + put_device(&pdev->dev);
2641 +
2642 + return ret;
2643 +}
2644 +
2645 static int ucc_hdlc_probe(struct platform_device *pdev)
2646 {
2647 struct device_node *np = pdev->dev.of_node;
2648 @@ -1143,6 +1191,15 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
2649 ret = ucc_of_parse_tdm(np, utdm, ut_info);
2650 if (ret)
2651 goto free_utdm;
2652 +
2653 + ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
2654 + (void __iomem **)&utdm->si_regs);
2655 + if (ret)
2656 + goto free_utdm;
2657 + ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
2658 + (void __iomem **)&utdm->siram);
2659 + if (ret)
2660 + goto unmap_si_regs;
2661 }
2662
2663 if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
2664 @@ -1151,7 +1208,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
2665 ret = uhdlc_init(uhdlc_priv);
2666 if (ret) {
2667 dev_err(&pdev->dev, "Failed to init uhdlc\n");
2668 - goto free_utdm;
2669 + goto undo_uhdlc_init;
2670 }
2671
2672 dev = alloc_hdlcdev(uhdlc_priv);
2673 @@ -1181,6 +1238,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
2674 free_dev:
2675 free_netdev(dev);
2676 undo_uhdlc_init:
2677 + iounmap(utdm->siram);
2678 +unmap_si_regs:
2679 + iounmap(utdm->si_regs);
2680 free_utdm:
2681 if (uhdlc_priv->tsa)
2682 kfree(utdm);
2683 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2684 index ea517864186b4..76f25008491a5 100644
2685 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2686 +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
2687 @@ -159,39 +159,49 @@ static const struct ieee80211_ops mt76x0u_ops = {
2688 .wake_tx_queue = mt76_wake_tx_queue,
2689 };
2690
2691 -static int mt76x0u_register_device(struct mt76x02_dev *dev)
2692 +static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
2693 {
2694 - struct ieee80211_hw *hw = dev->mt76.hw;
2695 int err;
2696
2697 - err = mt76u_alloc_queues(&dev->mt76);
2698 - if (err < 0)
2699 - goto out_err;
2700 -
2701 - err = mt76u_mcu_init_rx(&dev->mt76);
2702 - if (err < 0)
2703 - goto out_err;
2704 -
2705 mt76x0_chip_onoff(dev, true, true);
2706 - if (!mt76x02_wait_for_mac(&dev->mt76)) {
2707 - err = -ETIMEDOUT;
2708 - goto out_err;
2709 - }
2710 +
2711 + if (!mt76x02_wait_for_mac(&dev->mt76))
2712 + return -ETIMEDOUT;
2713
2714 err = mt76x0u_mcu_init(dev);
2715 if (err < 0)
2716 - goto out_err;
2717 + return err;
2718
2719 mt76x0_init_usb_dma(dev);
2720 err = mt76x0_init_hardware(dev);
2721 if (err < 0)
2722 - goto out_err;
2723 + return err;
2724
2725 mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
2726 mt76_wr(dev, MT_TXOP_CTRL_CFG,
2727 FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
2728 FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
2729
2730 + return 0;
2731 +}
2732 +
2733 +static int mt76x0u_register_device(struct mt76x02_dev *dev)
2734 +{
2735 + struct ieee80211_hw *hw = dev->mt76.hw;
2736 + int err;
2737 +
2738 + err = mt76u_alloc_queues(&dev->mt76);
2739 + if (err < 0)
2740 + goto out_err;
2741 +
2742 + err = mt76u_mcu_init_rx(&dev->mt76);
2743 + if (err < 0)
2744 + goto out_err;
2745 +
2746 + err = mt76x0u_init_hardware(dev);
2747 + if (err < 0)
2748 + goto out_err;
2749 +
2750 err = mt76x0_register_device(dev);
2751 if (err < 0)
2752 goto out_err;
2753 @@ -300,6 +310,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
2754
2755 mt76u_stop_queues(&dev->mt76);
2756 mt76x0u_mac_stop(dev);
2757 + clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
2758 + mt76x0_chip_onoff(dev, false, false);
2759 usb_kill_urb(usb->mcu.res.urb);
2760
2761 return 0;
2762 @@ -327,7 +339,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
2763 tasklet_enable(&usb->rx_tasklet);
2764 tasklet_enable(&usb->tx_tasklet);
2765
2766 - ret = mt76x0_init_hardware(dev);
2767 + ret = mt76x0u_init_hardware(dev);
2768 if (ret)
2769 goto err;
2770
2771 diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
2772 index a7f37063518ec..3d05bc1937d40 100644
2773 --- a/drivers/pinctrl/pinctrl-max77620.c
2774 +++ b/drivers/pinctrl/pinctrl-max77620.c
2775 @@ -34,14 +34,12 @@ enum max77620_pin_ppdrv {
2776 MAX77620_PIN_PP_DRV,
2777 };
2778
2779 -enum max77620_pinconf_param {
2780 - MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1,
2781 - MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
2782 - MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
2783 - MAX77620_SUSPEND_FPS_SOURCE,
2784 - MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
2785 - MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
2786 -};
2787 +#define MAX77620_ACTIVE_FPS_SOURCE (PIN_CONFIG_END + 1)
2788 +#define MAX77620_ACTIVE_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 2)
2789 +#define MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 3)
2790 +#define MAX77620_SUSPEND_FPS_SOURCE (PIN_CONFIG_END + 4)
2791 +#define MAX77620_SUSPEND_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 5)
2792 +#define MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 6)
2793
2794 struct max77620_pin_function {
2795 const char *name;
2796 diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
2797 index 7aae52a09ff03..4ffd56ff809eb 100644
2798 --- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
2799 +++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
2800 @@ -79,7 +79,7 @@ enum {
2801 .intr_cfg_reg = 0, \
2802 .intr_status_reg = 0, \
2803 .intr_target_reg = 0, \
2804 - .tile = NORTH, \
2805 + .tile = SOUTH, \
2806 .mux_bit = -1, \
2807 .pull_bit = pull, \
2808 .drv_bit = drv, \
2809 diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2810 index bf07735275a49..0fc382cb977bf 100644
2811 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2812 +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2813 @@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
2814 }
2815
2816 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2817 - unsigned int tid, int pg_idx, bool reply)
2818 + unsigned int tid, int pg_idx)
2819 {
2820 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
2821 GFP_KERNEL);
2822 @@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2823 req = (struct cpl_set_tcb_field *)skb->head;
2824 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
2825 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2826 - req->reply = V_NO_REPLY(reply ? 0 : 1);
2827 + req->reply = V_NO_REPLY(1);
2828 req->cpu_idx = 0;
2829 req->word = htons(31);
2830 req->mask = cpu_to_be64(0xF0000000);
2831 @@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2832 * @tid: connection id
2833 * @hcrc: header digest enabled
2834 * @dcrc: data digest enabled
2835 - * @reply: request reply from h/w
2836 * set up the iscsi digest settings for a connection identified by tid
2837 */
2838 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2839 - int hcrc, int dcrc, int reply)
2840 + int hcrc, int dcrc)
2841 {
2842 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
2843 GFP_KERNEL);
2844 @@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2845 req = (struct cpl_set_tcb_field *)skb->head;
2846 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
2847 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2848 - req->reply = V_NO_REPLY(reply ? 0 : 1);
2849 + req->reply = V_NO_REPLY(1);
2850 req->cpu_idx = 0;
2851 req->word = htons(31);
2852 req->mask = cpu_to_be64(0x0F000000);
2853 diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2854 index 064ef57351828..bd6cc014cab04 100644
2855 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2856 +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2857 @@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
2858 struct cxgbi_sock *csk;
2859
2860 csk = lookup_tid(t, tid);
2861 - if (!csk)
2862 + if (!csk) {
2863 pr_err("can't find conn. for tid %u.\n", tid);
2864 + return;
2865 + }
2866
2867 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2868 "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
2869 csk, csk->state, csk->flags, csk->tid, rpl->status);
2870
2871 - if (rpl->status != CPL_ERR_NONE)
2872 + if (rpl->status != CPL_ERR_NONE) {
2873 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
2874 csk, tid, rpl->status);
2875 + csk->err = -EINVAL;
2876 + }
2877 +
2878 + complete(&csk->cmpl);
2879
2880 __kfree_skb(skb);
2881 }
2882 @@ -1984,7 +1990,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2883 }
2884
2885 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2886 - int pg_idx, bool reply)
2887 + int pg_idx)
2888 {
2889 struct sk_buff *skb;
2890 struct cpl_set_tcb_field *req;
2891 @@ -2000,7 +2006,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2892 req = (struct cpl_set_tcb_field *)skb->head;
2893 INIT_TP_WR(req, csk->tid);
2894 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
2895 - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
2896 + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2897 req->word_cookie = htons(0);
2898 req->mask = cpu_to_be64(0x3 << 8);
2899 req->val = cpu_to_be64(pg_idx << 8);
2900 @@ -2009,12 +2015,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2901 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2902 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
2903
2904 + reinit_completion(&csk->cmpl);
2905 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2906 - return 0;
2907 + wait_for_completion(&csk->cmpl);
2908 +
2909 + return csk->err;
2910 }
2911
2912 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2913 - int hcrc, int dcrc, int reply)
2914 + int hcrc, int dcrc)
2915 {
2916 struct sk_buff *skb;
2917 struct cpl_set_tcb_field *req;
2918 @@ -2032,7 +2041,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2919 req = (struct cpl_set_tcb_field *)skb->head;
2920 INIT_TP_WR(req, tid);
2921 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2922 - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
2923 + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2924 req->word_cookie = htons(0);
2925 req->mask = cpu_to_be64(0x3 << 4);
2926 req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
2927 @@ -2042,8 +2051,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2928 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2929 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
2930
2931 + reinit_completion(&csk->cmpl);
2932 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2933 - return 0;
2934 + wait_for_completion(&csk->cmpl);
2935 +
2936 + return csk->err;
2937 }
2938
2939 static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
2940 diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
2941 index 75f876409fb9d..245742557c036 100644
2942 --- a/drivers/scsi/cxgbi/libcxgbi.c
2943 +++ b/drivers/scsi/cxgbi/libcxgbi.c
2944 @@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
2945 skb_queue_head_init(&csk->receive_queue);
2946 skb_queue_head_init(&csk->write_queue);
2947 timer_setup(&csk->retry_timer, NULL, 0);
2948 + init_completion(&csk->cmpl);
2949 rwlock_init(&csk->callback_lock);
2950 csk->cdev = cdev;
2951 csk->flags = 0;
2952 @@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2953 if (!err && conn->hdrdgst_en)
2954 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2955 conn->hdrdgst_en,
2956 - conn->datadgst_en, 0);
2957 + conn->datadgst_en);
2958 break;
2959 case ISCSI_PARAM_DATADGST_EN:
2960 err = iscsi_set_param(cls_conn, param, buf, buflen);
2961 if (!err && conn->datadgst_en)
2962 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2963 conn->hdrdgst_en,
2964 - conn->datadgst_en, 0);
2965 + conn->datadgst_en);
2966 break;
2967 case ISCSI_PARAM_MAX_R2T:
2968 return iscsi_tcp_set_max_r2t(conn, buf);
2969 @@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
2970
2971 ppm = csk->cdev->cdev2ppm(csk->cdev);
2972 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
2973 - ppm->tformat.pgsz_idx_dflt, 0);
2974 + ppm->tformat.pgsz_idx_dflt);
2975 if (err < 0)
2976 return err;
2977
2978 diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
2979 index 5d5d8b50d8426..1917ff57651d7 100644
2980 --- a/drivers/scsi/cxgbi/libcxgbi.h
2981 +++ b/drivers/scsi/cxgbi/libcxgbi.h
2982 @@ -149,6 +149,7 @@ struct cxgbi_sock {
2983 struct sk_buff_head receive_queue;
2984 struct sk_buff_head write_queue;
2985 struct timer_list retry_timer;
2986 + struct completion cmpl;
2987 int err;
2988 rwlock_t callback_lock;
2989 void *user_data;
2990 @@ -490,9 +491,9 @@ struct cxgbi_device {
2991 struct cxgbi_ppm *,
2992 struct cxgbi_task_tag_info *);
2993 int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
2994 - unsigned int, int, int, int);
2995 + unsigned int, int, int);
2996 int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
2997 - unsigned int, int, bool);
2998 + unsigned int, int);
2999
3000 void (*csk_release_offload_resources)(struct cxgbi_sock *);
3001 int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
3002 diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
3003 index 08c7b1e25fe48..dde84f7443136 100644
3004 --- a/drivers/scsi/isci/init.c
3005 +++ b/drivers/scsi/isci/init.c
3006 @@ -588,6 +588,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
3007 shost->max_lun = ~0;
3008 shost->max_cmd_len = MAX_COMMAND_SIZE;
3009
3010 + /* turn on DIF support */
3011 + scsi_host_set_prot(shost,
3012 + SHOST_DIF_TYPE1_PROTECTION |
3013 + SHOST_DIF_TYPE2_PROTECTION |
3014 + SHOST_DIF_TYPE3_PROTECTION);
3015 + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
3016 +
3017 err = scsi_add_host(shost, &pdev->dev);
3018 if (err)
3019 goto err_shost;
3020 @@ -675,13 +682,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3021 goto err_host_alloc;
3022 }
3023 pci_info->hosts[i] = h;
3024 -
3025 - /* turn on DIF support */
3026 - scsi_host_set_prot(to_shost(h),
3027 - SHOST_DIF_TYPE1_PROTECTION |
3028 - SHOST_DIF_TYPE2_PROTECTION |
3029 - SHOST_DIF_TYPE3_PROTECTION);
3030 - scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
3031 }
3032
3033 err = isci_setup_interrupts(pdev);
3034 diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
3035 index 2f0a4f2c5ff80..d4821b9dea45d 100644
3036 --- a/drivers/scsi/qedi/qedi_iscsi.c
3037 +++ b/drivers/scsi/qedi/qedi_iscsi.c
3038 @@ -954,6 +954,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
3039
3040 qedi_ep = ep->dd_data;
3041 if (qedi_ep->state == EP_STATE_IDLE ||
3042 + qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
3043 qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
3044 return -1;
3045
3046 @@ -1036,6 +1037,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
3047
3048 switch (qedi_ep->state) {
3049 case EP_STATE_OFLDCONN_START:
3050 + case EP_STATE_OFLDCONN_NONE:
3051 goto ep_release_conn;
3052 case EP_STATE_OFLDCONN_FAILED:
3053 break;
3054 @@ -1226,6 +1228,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
3055
3056 if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
3057 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
3058 + qedi_ep->state = EP_STATE_OFLDCONN_NONE;
3059 ret = -EIO;
3060 goto set_path_exit;
3061 }
3062 diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
3063 index 11260776212fa..892d70d545537 100644
3064 --- a/drivers/scsi/qedi/qedi_iscsi.h
3065 +++ b/drivers/scsi/qedi/qedi_iscsi.h
3066 @@ -59,6 +59,7 @@ enum {
3067 EP_STATE_OFLDCONN_FAILED = 0x2000,
3068 EP_STATE_CONNECT_FAILED = 0x4000,
3069 EP_STATE_DISCONN_TIMEDOUT = 0x8000,
3070 + EP_STATE_OFLDCONN_NONE = 0x10000,
3071 };
3072
3073 struct qedi_conn;
3074 diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
3075 index 15a50cc7e4b36..c8589926014d4 100644
3076 --- a/drivers/scsi/qla1280.c
3077 +++ b/drivers/scsi/qla1280.c
3078 @@ -4259,7 +4259,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3079 ha->devnum = devnum; /* specifies microcode load address */
3080
3081 #ifdef QLA_64BIT_PTR
3082 - if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
3083 + if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
3084 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
3085 printk(KERN_WARNING "scsi(%li): Unable to set a "
3086 "suitable DMA mask - aborting\n", ha->host_no);
3087 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
3088 index 051164f755a4c..a13396c56a6a1 100644
3089 --- a/drivers/scsi/qla4xxx/ql4_os.c
3090 +++ b/drivers/scsi/qla4xxx/ql4_os.c
3091 @@ -7237,6 +7237,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
3092
3093 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
3094 fw_ddb_entry);
3095 + if (rc)
3096 + goto free_sess;
3097
3098 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
3099 __func__, fnode_sess->dev.kobj.name);
3100 diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
3101 index 3f81bab48ac24..7d8442c377dfa 100644
3102 --- a/drivers/scsi/sd_zbc.c
3103 +++ b/drivers/scsi/sd_zbc.c
3104 @@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
3105 return -EOPNOTSUPP;
3106
3107 /*
3108 - * Get a reply buffer for the number of requested zones plus a header.
3109 - * For ATA, buffers must be aligned to 512B.
3110 + * Get a reply buffer for the number of requested zones plus a header,
3111 + * without exceeding the device maximum command size. For ATA disks,
3112 + * buffers must be aligned to 512B.
3113 */
3114 - buflen = roundup((nrz + 1) * 64, 512);
3115 + buflen = min(queue_max_hw_sectors(disk->queue) << 9,
3116 + roundup((nrz + 1) * 64, 512));
3117 buf = kmalloc(buflen, gfp_mask);
3118 if (!buf)
3119 return -ENOMEM;
3120 diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
3121 index 58087d3916d05..5417ce09b1054 100644
3122 --- a/drivers/scsi/ufs/ufs.h
3123 +++ b/drivers/scsi/ufs/ufs.h
3124 @@ -195,7 +195,7 @@ enum ufs_desc_def_size {
3125 QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
3126 QUERY_DESC_UNIT_DEF_SIZE = 0x23,
3127 QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
3128 - QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
3129 + QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
3130 QUERY_DESC_POWER_DEF_SIZE = 0x62,
3131 QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
3132 };
3133 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3134 index 1cb35ab8a4ec2..2772ff4357fc4 100644
3135 --- a/drivers/scsi/ufs/ufshcd.c
3136 +++ b/drivers/scsi/ufs/ufshcd.c
3137 @@ -7924,6 +7924,8 @@ out:
3138 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
3139 ktime_to_us(ktime_sub(ktime_get(), start)),
3140 hba->curr_dev_pwr_mode, hba->uic_link_state);
3141 + if (!ret)
3142 + hba->is_sys_suspended = false;
3143 return ret;
3144 }
3145 EXPORT_SYMBOL(ufshcd_system_resume);
3146 diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
3147 index f78c34647ca2d..76480df195a87 100644
3148 --- a/drivers/soc/fsl/qe/qe_tdm.c
3149 +++ b/drivers/soc/fsl/qe/qe_tdm.c
3150 @@ -44,10 +44,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
3151 const char *sprop;
3152 int ret = 0;
3153 u32 val;
3154 - struct resource *res;
3155 - struct device_node *np2;
3156 - static int siram_init_flag;
3157 - struct platform_device *pdev;
3158
3159 sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
3160 if (sprop) {
3161 @@ -124,57 +120,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
3162 utdm->siram_entry_id = val;
3163
3164 set_si_param(utdm, ut_info);
3165 -
3166 - np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
3167 - if (!np2)
3168 - return -EINVAL;
3169 -
3170 - pdev = of_find_device_by_node(np2);
3171 - if (!pdev) {
3172 - pr_err("%pOFn: failed to lookup pdev\n", np2);
3173 - of_node_put(np2);
3174 - return -EINVAL;
3175 - }
3176 -
3177 - of_node_put(np2);
3178 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3179 - utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
3180 - if (IS_ERR(utdm->si_regs)) {
3181 - ret = PTR_ERR(utdm->si_regs);
3182 - goto err_miss_siram_property;
3183 - }
3184 -
3185 - np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
3186 - if (!np2) {
3187 - ret = -EINVAL;
3188 - goto err_miss_siram_property;
3189 - }
3190 -
3191 - pdev = of_find_device_by_node(np2);
3192 - if (!pdev) {
3193 - ret = -EINVAL;
3194 - pr_err("%pOFn: failed to lookup pdev\n", np2);
3195 - of_node_put(np2);
3196 - goto err_miss_siram_property;
3197 - }
3198 -
3199 - of_node_put(np2);
3200 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3201 - utdm->siram = devm_ioremap_resource(&pdev->dev, res);
3202 - if (IS_ERR(utdm->siram)) {
3203 - ret = PTR_ERR(utdm->siram);
3204 - goto err_miss_siram_property;
3205 - }
3206 -
3207 - if (siram_init_flag == 0) {
3208 - memset_io(utdm->siram, 0, resource_size(res));
3209 - siram_init_flag = 1;
3210 - }
3211 -
3212 - return ret;
3213 -
3214 -err_miss_siram_property:
3215 - devm_iounmap(&pdev->dev, utdm->si_regs);
3216 return ret;
3217 }
3218 EXPORT_SYMBOL(ucc_of_parse_tdm);
3219 diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
3220 index d1cb0d78ab844..e44ca93dcdc68 100644
3221 --- a/drivers/staging/erofs/dir.c
3222 +++ b/drivers/staging/erofs/dir.c
3223 @@ -53,8 +53,11 @@ static int erofs_fill_dentries(struct dir_context *ctx,
3224 strnlen(de_name, maxsize - nameoff) :
3225 le16_to_cpu(de[1].nameoff) - nameoff;
3226
3227 - /* the corrupted directory found */
3228 - BUG_ON(de_namelen < 0);
3229 + /* a corrupted entry is found */
3230 + if (unlikely(de_namelen < 0)) {
3231 + DBG_BUGON(1);
3232 + return -EIO;
3233 + }
3234
3235 #ifdef CONFIG_EROFS_FS_DEBUG
3236 dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
3237 diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
3238 index 04c61a9d7b766..d7fbf5f4600f3 100644
3239 --- a/drivers/staging/erofs/inode.c
3240 +++ b/drivers/staging/erofs/inode.c
3241 @@ -133,7 +133,13 @@ static int fill_inline_data(struct inode *inode, void *data,
3242 return -ENOMEM;
3243
3244 m_pofs += vi->inode_isize + vi->xattr_isize;
3245 - BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
3246 +
3247 + /* inline symlink data shouldn't across page boundary as well */
3248 + if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
3249 + DBG_BUGON(1);
3250 + kfree(lnk);
3251 + return -EIO;
3252 + }
3253
3254 /* get in-page inline data */
3255 memcpy(lnk, data + m_pofs, inode->i_size);
3256 @@ -171,7 +177,7 @@ static int fill_inode(struct inode *inode, int isdir)
3257 return PTR_ERR(page);
3258 }
3259
3260 - BUG_ON(!PageUptodate(page));
3261 + DBG_BUGON(!PageUptodate(page));
3262 data = page_address(page);
3263
3264 err = read_inode(inode, data + ofs);
3265 diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
3266 index 3ac4599bbe011..8929443558676 100644
3267 --- a/drivers/staging/erofs/internal.h
3268 +++ b/drivers/staging/erofs/internal.h
3269 @@ -194,50 +194,70 @@ struct erofs_workgroup {
3270
3271 #define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
3272
3273 -static inline bool erofs_workgroup_try_to_freeze(
3274 - struct erofs_workgroup *grp, int v)
3275 +#if defined(CONFIG_SMP)
3276 +static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
3277 + int val)
3278 {
3279 -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
3280 - if (v != atomic_cmpxchg(&grp->refcount,
3281 - v, EROFS_LOCKED_MAGIC))
3282 - return false;
3283 preempt_disable();
3284 + if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
3285 + preempt_enable();
3286 + return false;
3287 + }
3288 + return true;
3289 +}
3290 +
3291 +static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
3292 + int orig_val)
3293 +{
3294 + /*
3295 + * other observers should notice all modifications
3296 + * in the freezing period.
3297 + */
3298 + smp_mb();
3299 + atomic_set(&grp->refcount, orig_val);
3300 + preempt_enable();
3301 +}
3302 +
3303 +static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
3304 +{
3305 + return atomic_cond_read_relaxed(&grp->refcount,
3306 + VAL != EROFS_LOCKED_MAGIC);
3307 +}
3308 #else
3309 +static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
3310 + int val)
3311 +{
3312 preempt_disable();
3313 - if (atomic_read(&grp->refcount) != v) {
3314 + /* no need to spin on UP platforms, let's just disable preemption. */
3315 + if (val != atomic_read(&grp->refcount)) {
3316 preempt_enable();
3317 return false;
3318 }
3319 -#endif
3320 return true;
3321 }
3322
3323 -static inline void erofs_workgroup_unfreeze(
3324 - struct erofs_workgroup *grp, int v)
3325 +static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
3326 + int orig_val)
3327 {
3328 -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
3329 - atomic_set(&grp->refcount, v);
3330 -#endif
3331 preempt_enable();
3332 }
3333
3334 +static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
3335 +{
3336 + int v = atomic_read(&grp->refcount);
3337 +
3338 + /* workgroup is never freezed on uniprocessor systems */
3339 + DBG_BUGON(v == EROFS_LOCKED_MAGIC);
3340 + return v;
3341 +}
3342 +#endif
3343 +
3344 static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
3345 {
3346 - const int locked = (int)EROFS_LOCKED_MAGIC;
3347 int o;
3348
3349 repeat:
3350 - o = atomic_read(&grp->refcount);
3351 -
3352 - /* spin if it is temporarily locked at the reclaim path */
3353 - if (unlikely(o == locked)) {
3354 -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
3355 - do
3356 - cpu_relax();
3357 - while (atomic_read(&grp->refcount) == locked);
3358 -#endif
3359 - goto repeat;
3360 - }
3361 + o = erofs_wait_on_workgroup_freezed(grp);
3362
3363 if (unlikely(o <= 0))
3364 return -1;
3365 diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
3366 index f69e619807a17..1c2eb69682efc 100644
3367 --- a/drivers/staging/erofs/super.c
3368 +++ b/drivers/staging/erofs/super.c
3369 @@ -40,7 +40,6 @@ static int __init erofs_init_inode_cache(void)
3370
3371 static void erofs_exit_inode_cache(void)
3372 {
3373 - BUG_ON(erofs_inode_cachep == NULL);
3374 kmem_cache_destroy(erofs_inode_cachep);
3375 }
3376
3377 @@ -303,8 +302,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
3378 int ret = 1; /* 0 - busy */
3379 struct address_space *const mapping = page->mapping;
3380
3381 - BUG_ON(!PageLocked(page));
3382 - BUG_ON(mapping->a_ops != &managed_cache_aops);
3383 + DBG_BUGON(!PageLocked(page));
3384 + DBG_BUGON(mapping->a_ops != &managed_cache_aops);
3385
3386 if (PagePrivate(page))
3387 ret = erofs_try_to_free_cached_page(mapping, page);
3388 @@ -317,10 +316,10 @@ static void managed_cache_invalidatepage(struct page *page,
3389 {
3390 const unsigned int stop = length + offset;
3391
3392 - BUG_ON(!PageLocked(page));
3393 + DBG_BUGON(!PageLocked(page));
3394
3395 - /* Check for overflow */
3396 - BUG_ON(stop > PAGE_SIZE || stop < length);
3397 + /* Check for potential overflow in debug mode */
3398 + DBG_BUGON(stop > PAGE_SIZE || stop < length);
3399
3400 if (offset == 0 && stop == PAGE_SIZE)
3401 while (!managed_cache_releasepage(page, GFP_NOFS))
3402 @@ -442,12 +441,6 @@ static int erofs_read_super(struct super_block *sb,
3403
3404 erofs_register_super(sb);
3405
3406 - /*
3407 - * We already have a positive dentry, which was instantiated
3408 - * by d_make_root. Just need to d_rehash it.
3409 - */
3410 - d_rehash(sb->s_root);
3411 -
3412 if (!silent)
3413 infoln("mounted on %s with opts: %s.", dev_name,
3414 (char *)data);
3415 @@ -655,7 +648,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
3416 unsigned int org_inject_rate = erofs_get_fault_rate(sbi);
3417 int err;
3418
3419 - BUG_ON(!sb_rdonly(sb));
3420 + DBG_BUGON(!sb_rdonly(sb));
3421 err = parse_options(sb, data);
3422 if (err)
3423 goto out;
3424 diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
3425 index 0956615b86f72..23856ba2742d8 100644
3426 --- a/drivers/staging/erofs/unzip_pagevec.h
3427 +++ b/drivers/staging/erofs/unzip_pagevec.h
3428 @@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
3429 erofs_vtptr_t t;
3430
3431 if (unlikely(ctor->index >= ctor->nr)) {
3432 - BUG_ON(ctor->next == NULL);
3433 + DBG_BUGON(!ctor->next);
3434 z_erofs_pagevec_ctor_pagedown(ctor, true);
3435 }
3436
3437 diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
3438 index 45e88bada907f..1c4b3e0343f58 100644
3439 --- a/drivers/staging/erofs/unzip_vle.c
3440 +++ b/drivers/staging/erofs/unzip_vle.c
3441 @@ -20,9 +20,6 @@ static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
3442
3443 void z_erofs_exit_zip_subsystem(void)
3444 {
3445 - BUG_ON(z_erofs_workqueue == NULL);
3446 - BUG_ON(z_erofs_workgroup_cachep == NULL);
3447 -
3448 destroy_workqueue(z_erofs_workqueue);
3449 kmem_cache_destroy(z_erofs_workgroup_cachep);
3450 }
3451 @@ -366,7 +363,10 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
3452 struct z_erofs_vle_work *work;
3453
3454 /* if multiref is disabled, grp should never be nullptr */
3455 - BUG_ON(grp != NULL);
3456 + if (unlikely(grp)) {
3457 + DBG_BUGON(1);
3458 + return ERR_PTR(-EINVAL);
3459 + }
3460
3461 /* no available workgroup, let's allocate one */
3462 grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
3463 @@ -745,7 +745,7 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
3464 bool cachemngd = false;
3465
3466 DBG_BUGON(PageUptodate(page));
3467 - BUG_ON(page->mapping == NULL);
3468 + DBG_BUGON(!page->mapping);
3469
3470 #ifdef EROFS_FS_HAS_MANAGED_CACHE
3471 if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
3472 @@ -803,7 +803,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
3473
3474 might_sleep();
3475 work = z_erofs_vle_grab_primary_work(grp);
3476 - BUG_ON(!READ_ONCE(work->nr_pages));
3477 + DBG_BUGON(!READ_ONCE(work->nr_pages));
3478
3479 mutex_lock(&work->lock);
3480 nr_pages = work->nr_pages;
3481 @@ -852,8 +852,8 @@ repeat:
3482 else
3483 pagenr = z_erofs_onlinepage_index(page);
3484
3485 - BUG_ON(pagenr >= nr_pages);
3486 - BUG_ON(pages[pagenr] != NULL);
3487 + DBG_BUGON(pagenr >= nr_pages);
3488 + DBG_BUGON(pages[pagenr]);
3489
3490 pages[pagenr] = page;
3491 }
3492 @@ -876,9 +876,8 @@ repeat:
3493 if (z_erofs_is_stagingpage(page))
3494 continue;
3495 #ifdef EROFS_FS_HAS_MANAGED_CACHE
3496 - else if (page->mapping == mngda) {
3497 - BUG_ON(PageLocked(page));
3498 - BUG_ON(!PageUptodate(page));
3499 + if (page->mapping == mngda) {
3500 + DBG_BUGON(!PageUptodate(page));
3501 continue;
3502 }
3503 #endif
3504 @@ -886,8 +885,8 @@ repeat:
3505 /* only non-head page could be reused as a compressed page */
3506 pagenr = z_erofs_onlinepage_index(page);
3507
3508 - BUG_ON(pagenr >= nr_pages);
3509 - BUG_ON(pages[pagenr] != NULL);
3510 + DBG_BUGON(pagenr >= nr_pages);
3511 + DBG_BUGON(pages[pagenr]);
3512 ++sparsemem_pages;
3513 pages[pagenr] = page;
3514
3515 @@ -897,9 +896,6 @@ repeat:
3516 llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
3517
3518 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
3519 - /* FIXME! this should be fixed in the future */
3520 - BUG_ON(grp->llen != llen);
3521 -
3522 err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
3523 pages, nr_pages, work->pageofs);
3524 goto out;
3525 @@ -914,10 +910,8 @@ repeat:
3526 if (err != -ENOTSUPP)
3527 goto out_percpu;
3528
3529 - if (sparsemem_pages >= nr_pages) {
3530 - BUG_ON(sparsemem_pages > nr_pages);
3531 + if (sparsemem_pages >= nr_pages)
3532 goto skip_allocpage;
3533 - }
3534
3535 for (i = 0; i < nr_pages; ++i) {
3536 if (pages[i] != NULL)
3537 @@ -1010,7 +1004,7 @@ static void z_erofs_vle_unzip_wq(struct work_struct *work)
3538 struct z_erofs_vle_unzip_io_sb, io.u.work);
3539 LIST_HEAD(page_pool);
3540
3541 - BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
3542 + DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
3543 z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
3544
3545 put_pages_list(&page_pool);
3546 @@ -1344,7 +1338,6 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
3547 continue;
3548 }
3549
3550 - BUG_ON(PagePrivate(page));
3551 set_page_private(page, (unsigned long)head);
3552 head = page;
3553 }
3554 diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
3555 index 1a428658cbea2..16ac335ee59f4 100644
3556 --- a/drivers/staging/erofs/unzip_vle_lz4.c
3557 +++ b/drivers/staging/erofs/unzip_vle_lz4.c
3558 @@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
3559 if (compressed_pages[j] != page)
3560 continue;
3561
3562 - BUG_ON(mirrored[j]);
3563 + DBG_BUGON(mirrored[j]);
3564 memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
3565 mirrored[j] = true;
3566 break;
3567 diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
3568 index d2e3ace910469..b535898ca753f 100644
3569 --- a/drivers/staging/erofs/utils.c
3570 +++ b/drivers/staging/erofs/utils.c
3571 @@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
3572 list_del(&page->lru);
3573 } else {
3574 page = alloc_pages(gfp | __GFP_NOFAIL, 0);
3575 -
3576 - BUG_ON(page == NULL);
3577 - BUG_ON(page->mapping != NULL);
3578 }
3579 return page;
3580 }
3581 @@ -58,7 +55,7 @@ repeat:
3582 /* decrease refcount added by erofs_workgroup_put */
3583 if (unlikely(oldcount == 1))
3584 atomic_long_dec(&erofs_global_shrink_cnt);
3585 - BUG_ON(index != grp->index);
3586 + DBG_BUGON(index != grp->index);
3587 }
3588 rcu_read_unlock();
3589 return grp;
3590 @@ -71,8 +68,11 @@ int erofs_register_workgroup(struct super_block *sb,
3591 struct erofs_sb_info *sbi;
3592 int err;
3593
3594 - /* grp->refcount should not < 1 */
3595 - BUG_ON(!atomic_read(&grp->refcount));
3596 + /* grp shouldn't be broken or used before */
3597 + if (unlikely(atomic_read(&grp->refcount) != 1)) {
3598 + DBG_BUGON(1);
3599 + return -EINVAL;
3600 + }
3601
3602 err = radix_tree_preload(GFP_NOFS);
3603 if (err)
3604 diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
3605 index 9cd404acdb82b..ac7620120491b 100644
3606 --- a/drivers/target/target_core_user.c
3607 +++ b/drivers/target/target_core_user.c
3608 @@ -148,7 +148,7 @@ struct tcmu_dev {
3609 size_t ring_size;
3610
3611 struct mutex cmdr_lock;
3612 - struct list_head cmdr_queue;
3613 + struct list_head qfull_queue;
3614
3615 uint32_t dbi_max;
3616 uint32_t dbi_thresh;
3617 @@ -159,6 +159,7 @@ struct tcmu_dev {
3618
3619 struct timer_list cmd_timer;
3620 unsigned int cmd_time_out;
3621 + struct list_head inflight_queue;
3622
3623 struct timer_list qfull_timer;
3624 int qfull_time_out;
3625 @@ -179,7 +180,7 @@ struct tcmu_dev {
3626 struct tcmu_cmd {
3627 struct se_cmd *se_cmd;
3628 struct tcmu_dev *tcmu_dev;
3629 - struct list_head cmdr_queue_entry;
3630 + struct list_head queue_entry;
3631
3632 uint16_t cmd_id;
3633
3634 @@ -192,6 +193,7 @@ struct tcmu_cmd {
3635 unsigned long deadline;
3636
3637 #define TCMU_CMD_BIT_EXPIRED 0
3638 +#define TCMU_CMD_BIT_INFLIGHT 1
3639 unsigned long flags;
3640 };
3641 /*
3642 @@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
3643 if (!tcmu_cmd)
3644 return NULL;
3645
3646 - INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
3647 + INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
3648 tcmu_cmd->se_cmd = se_cmd;
3649 tcmu_cmd->tcmu_dev = udev;
3650
3651 @@ -915,11 +917,13 @@ setup_timer:
3652 return 0;
3653
3654 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
3655 - mod_timer(timer, tcmu_cmd->deadline);
3656 + if (!timer_pending(timer))
3657 + mod_timer(timer, tcmu_cmd->deadline);
3658 +
3659 return 0;
3660 }
3661
3662 -static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
3663 +static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
3664 {
3665 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
3666 unsigned int tmo;
3667 @@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
3668 if (ret)
3669 return ret;
3670
3671 - list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
3672 + list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
3673 pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
3674 tcmu_cmd->cmd_id, udev->name);
3675 return 0;
3676 @@ -999,7 +1003,7 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
3677 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
3678 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
3679
3680 - if (!list_empty(&udev->cmdr_queue))
3681 + if (!list_empty(&udev->qfull_queue))
3682 goto queue;
3683
3684 mb = udev->mb_addr;
3685 @@ -1096,13 +1100,16 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
3686 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
3687 tcmu_flush_dcache_range(mb, sizeof(*mb));
3688
3689 + list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
3690 + set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
3691 +
3692 /* TODO: only if FLUSH and FUA? */
3693 uio_event_notify(&udev->uio_info);
3694
3695 return 0;
3696
3697 queue:
3698 - if (add_to_cmdr_queue(tcmu_cmd)) {
3699 + if (add_to_qfull_queue(tcmu_cmd)) {
3700 *scsi_err = TCM_OUT_OF_RESOURCES;
3701 return -1;
3702 }
3703 @@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
3704 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
3705 goto out;
3706
3707 + list_del_init(&cmd->queue_entry);
3708 +
3709 tcmu_cmd_reset_dbi_cur(cmd);
3710
3711 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
3712 @@ -1194,9 +1203,29 @@ out:
3713 tcmu_free_cmd(cmd);
3714 }
3715
3716 +static void tcmu_set_next_deadline(struct list_head *queue,
3717 + struct timer_list *timer)
3718 +{
3719 + struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
3720 + unsigned long deadline = 0;
3721 +
3722 + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
3723 + if (!time_after(jiffies, tcmu_cmd->deadline)) {
3724 + deadline = tcmu_cmd->deadline;
3725 + break;
3726 + }
3727 + }
3728 +
3729 + if (deadline)
3730 + mod_timer(timer, deadline);
3731 + else
3732 + del_timer(timer);
3733 +}
3734 +
3735 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3736 {
3737 struct tcmu_mailbox *mb;
3738 + struct tcmu_cmd *cmd;
3739 int handled = 0;
3740
3741 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
3742 @@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3743 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
3744
3745 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
3746 - struct tcmu_cmd *cmd;
3747
3748 tcmu_flush_dcache_range(entry, sizeof(*entry));
3749
3750 @@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3751 /* no more pending commands */
3752 del_timer(&udev->cmd_timer);
3753
3754 - if (list_empty(&udev->cmdr_queue)) {
3755 + if (list_empty(&udev->qfull_queue)) {
3756 /*
3757 * no more pending or waiting commands so try to
3758 * reclaim blocks if needed.
3759 @@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3760 tcmu_global_max_blocks)
3761 schedule_delayed_work(&tcmu_unmap_work, 0);
3762 }
3763 + } else if (udev->cmd_time_out) {
3764 + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
3765 }
3766
3767 return handled;
3768 @@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
3769 if (!time_after(jiffies, cmd->deadline))
3770 return 0;
3771
3772 - is_running = list_empty(&cmd->cmdr_queue_entry);
3773 + is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
3774 se_cmd = cmd->se_cmd;
3775
3776 if (is_running) {
3777 @@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
3778 */
3779 scsi_status = SAM_STAT_CHECK_CONDITION;
3780 } else {
3781 - list_del_init(&cmd->cmdr_queue_entry);
3782 -
3783 idr_remove(&udev->commands, id);
3784 tcmu_free_cmd(cmd);
3785 scsi_status = SAM_STAT_TASK_SET_FULL;
3786 }
3787 + list_del_init(&cmd->queue_entry);
3788
3789 pr_debug("Timing out cmd %u on dev %s that is %s.\n",
3790 id, udev->name, is_running ? "inflight" : "queued");
3791 @@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
3792
3793 INIT_LIST_HEAD(&udev->node);
3794 INIT_LIST_HEAD(&udev->timedout_entry);
3795 - INIT_LIST_HEAD(&udev->cmdr_queue);
3796 + INIT_LIST_HEAD(&udev->qfull_queue);
3797 + INIT_LIST_HEAD(&udev->inflight_queue);
3798 idr_init(&udev->commands);
3799
3800 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
3801 @@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
3802 return &udev->se_dev;
3803 }
3804
3805 -static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3806 +static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
3807 {
3808 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
3809 LIST_HEAD(cmds);
3810 @@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3811 sense_reason_t scsi_ret;
3812 int ret;
3813
3814 - if (list_empty(&udev->cmdr_queue))
3815 + if (list_empty(&udev->qfull_queue))
3816 return true;
3817
3818 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
3819
3820 - list_splice_init(&udev->cmdr_queue, &cmds);
3821 + list_splice_init(&udev->qfull_queue, &cmds);
3822
3823 - list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
3824 - list_del_init(&tcmu_cmd->cmdr_queue_entry);
3825 + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
3826 + list_del_init(&tcmu_cmd->queue_entry);
3827
3828 pr_debug("removing cmd %u on dev %s from queue\n",
3829 tcmu_cmd->cmd_id, udev->name);
3830 @@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3831 * cmd was requeued, so just put all cmds back in
3832 * the queue
3833 */
3834 - list_splice_tail(&cmds, &udev->cmdr_queue);
3835 + list_splice_tail(&cmds, &udev->qfull_queue);
3836 drained = false;
3837 - goto done;
3838 + break;
3839 }
3840 }
3841 - if (list_empty(&udev->cmdr_queue))
3842 - del_timer(&udev->qfull_timer);
3843 -done:
3844 +
3845 + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
3846 return drained;
3847 }
3848
3849 @@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
3850
3851 mutex_lock(&udev->cmdr_lock);
3852 tcmu_handle_completions(udev);
3853 - run_cmdr_queue(udev, false);
3854 + run_qfull_queue(udev, false);
3855 mutex_unlock(&udev->cmdr_lock);
3856
3857 return 0;
3858 @@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
3859 /* complete IO that has executed successfully */
3860 tcmu_handle_completions(udev);
3861 /* fail IO waiting to be queued */
3862 - run_cmdr_queue(udev, true);
3863 + run_qfull_queue(udev, true);
3864
3865 unlock:
3866 mutex_unlock(&udev->cmdr_lock);
3867 @@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
3868 mutex_lock(&udev->cmdr_lock);
3869
3870 idr_for_each_entry(&udev->commands, cmd, i) {
3871 - if (!list_empty(&cmd->cmdr_queue_entry))
3872 + if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
3873 continue;
3874
3875 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
3876 @@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
3877
3878 idr_remove(&udev->commands, i);
3879 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
3880 + list_del_init(&cmd->queue_entry);
3881 if (err_level == 1) {
3882 /*
3883 * Userspace was not able to start the
3884 @@ -2666,6 +2696,10 @@ static void check_timedout_devices(void)
3885
3886 mutex_lock(&udev->cmdr_lock);
3887 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
3888 +
3889 + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
3890 + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
3891 +
3892 mutex_unlock(&udev->cmdr_lock);
3893
3894 spin_lock_bh(&timed_out_udevs_lock);
3895 diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
3896 index 73a4adeab096b..11bd8b6422ebf 100644
3897 --- a/drivers/vhost/scsi.c
3898 +++ b/drivers/vhost/scsi.c
3899 @@ -1132,16 +1132,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
3900 struct vhost_virtqueue *vq,
3901 struct vhost_scsi_ctx *vc)
3902 {
3903 - struct virtio_scsi_ctrl_tmf_resp __user *resp;
3904 struct virtio_scsi_ctrl_tmf_resp rsp;
3905 + struct iov_iter iov_iter;
3906 int ret;
3907
3908 pr_debug("%s\n", __func__);
3909 memset(&rsp, 0, sizeof(rsp));
3910 rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
3911 - resp = vq->iov[vc->out].iov_base;
3912 - ret = __copy_to_user(resp, &rsp, sizeof(rsp));
3913 - if (!ret)
3914 +
3915 + iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
3916 +
3917 + ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
3918 + if (likely(ret == sizeof(rsp)))
3919 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
3920 else
3921 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
3922 @@ -1152,16 +1154,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
3923 struct vhost_virtqueue *vq,
3924 struct vhost_scsi_ctx *vc)
3925 {
3926 - struct virtio_scsi_ctrl_an_resp __user *resp;
3927 struct virtio_scsi_ctrl_an_resp rsp;
3928 + struct iov_iter iov_iter;
3929 int ret;
3930
3931 pr_debug("%s\n", __func__);
3932 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
3933 rsp.response = VIRTIO_SCSI_S_OK;
3934 - resp = vq->iov[vc->out].iov_base;
3935 - ret = __copy_to_user(resp, &rsp, sizeof(rsp));
3936 - if (!ret)
3937 +
3938 + iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
3939 +
3940 + ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
3941 + if (likely(ret == sizeof(rsp)))
3942 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
3943 else
3944 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
3945 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
3946 index 5eaeca805c95c..b214a72d5caad 100644
3947 --- a/drivers/vhost/vhost.c
3948 +++ b/drivers/vhost/vhost.c
3949 @@ -1035,8 +1035,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
3950 int type, ret;
3951
3952 ret = copy_from_iter(&type, sizeof(type), from);
3953 - if (ret != sizeof(type))
3954 + if (ret != sizeof(type)) {
3955 + ret = -EINVAL;
3956 goto done;
3957 + }
3958
3959 switch (type) {
3960 case VHOST_IOTLB_MSG:
3961 @@ -1055,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
3962
3963 iov_iter_advance(from, offset);
3964 ret = copy_from_iter(&msg, sizeof(msg), from);
3965 - if (ret != sizeof(msg))
3966 + if (ret != sizeof(msg)) {
3967 + ret = -EINVAL;
3968 goto done;
3969 + }
3970 if (vhost_process_iotlb_msg(dev, &msg)) {
3971 ret = -EFAULT;
3972 goto done;
3973 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
3974 index f9ef0673a083c..aded3213bfb51 100644
3975 --- a/drivers/video/backlight/pwm_bl.c
3976 +++ b/drivers/video/backlight/pwm_bl.c
3977 @@ -268,6 +268,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
3978
3979 memset(data, 0, sizeof(*data));
3980
3981 + /*
3982 + * These values are optional and set as 0 by default, the out values
3983 + * are modified only if a valid u32 value can be decoded.
3984 + */
3985 + of_property_read_u32(node, "post-pwm-on-delay-ms",
3986 + &data->post_pwm_on_delay);
3987 + of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
3988 +
3989 + data->enable_gpio = -EINVAL;
3990 +
3991 /*
3992 * Determine the number of brightness levels, if this property is not
3993 * set a default table of brightness levels will be used.
3994 @@ -380,15 +390,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
3995 data->max_brightness--;
3996 }
3997
3998 - /*
3999 - * These values are optional and set as 0 by default, the out values
4000 - * are modified only if a valid u32 value can be decoded.
4001 - */
4002 - of_property_read_u32(node, "post-pwm-on-delay-ms",
4003 - &data->post_pwm_on_delay);
4004 - of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
4005 -
4006 - data->enable_gpio = -EINVAL;
4007 return 0;
4008 }
4009
4010 diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
4011 index 5c4a764717c4d..81208cd3f4ecb 100644
4012 --- a/drivers/watchdog/mt7621_wdt.c
4013 +++ b/drivers/watchdog/mt7621_wdt.c
4014 @@ -17,6 +17,7 @@
4015 #include <linux/watchdog.h>
4016 #include <linux/moduleparam.h>
4017 #include <linux/platform_device.h>
4018 +#include <linux/mod_devicetable.h>
4019
4020 #include <asm/mach-ralink/ralink_regs.h>
4021
4022 diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
4023 index 98967f0a7d10e..db7c57d82cfdc 100644
4024 --- a/drivers/watchdog/rt2880_wdt.c
4025 +++ b/drivers/watchdog/rt2880_wdt.c
4026 @@ -18,6 +18,7 @@
4027 #include <linux/watchdog.h>
4028 #include <linux/moduleparam.h>
4029 #include <linux/platform_device.h>
4030 +#include <linux/mod_devicetable.h>
4031
4032 #include <asm/mach-ralink/ralink_regs.h>
4033
4034 diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
4035 index 2e5d845b50914..7aa64d1b119c2 100644
4036 --- a/drivers/xen/pvcalls-back.c
4037 +++ b/drivers/xen/pvcalls-back.c
4038 @@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
4039
4040 /* write the data, then modify the indexes */
4041 virt_wmb();
4042 - if (ret < 0)
4043 + if (ret < 0) {
4044 + atomic_set(&map->read, 0);
4045 intf->in_error = ret;
4046 - else
4047 + } else
4048 intf->in_prod = prod + ret;
4049 /* update the indexes, then notify the other end */
4050 virt_wmb();
4051 @@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
4052 static void pvcalls_sk_state_change(struct sock *sock)
4053 {
4054 struct sock_mapping *map = sock->sk_user_data;
4055 - struct pvcalls_data_intf *intf;
4056
4057 if (map == NULL)
4058 return;
4059
4060 - intf = map->ring;
4061 - intf->in_error = -ENOTCONN;
4062 + atomic_inc(&map->read);
4063 notify_remote_via_irq(map->irq);
4064 }
4065
4066 diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
4067 index 77224d8f3e6fe..91da7e44d5d4f 100644
4068 --- a/drivers/xen/pvcalls-front.c
4069 +++ b/drivers/xen/pvcalls-front.c
4070 @@ -31,6 +31,12 @@
4071 #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
4072 #define PVCALLS_FRONT_MAX_SPIN 5000
4073
4074 +static struct proto pvcalls_proto = {
4075 + .name = "PVCalls",
4076 + .owner = THIS_MODULE,
4077 + .obj_size = sizeof(struct sock),
4078 +};
4079 +
4080 struct pvcalls_bedata {
4081 struct xen_pvcalls_front_ring ring;
4082 grant_ref_t ref;
4083 @@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
4084 return ret;
4085 }
4086
4087 +static void free_active_ring(struct sock_mapping *map)
4088 +{
4089 + if (!map->active.ring)
4090 + return;
4091 +
4092 + free_pages((unsigned long)map->active.data.in,
4093 + map->active.ring->ring_order);
4094 + free_page((unsigned long)map->active.ring);
4095 +}
4096 +
4097 +static int alloc_active_ring(struct sock_mapping *map)
4098 +{
4099 + void *bytes;
4100 +
4101 + map->active.ring = (struct pvcalls_data_intf *)
4102 + get_zeroed_page(GFP_KERNEL);
4103 + if (!map->active.ring)
4104 + goto out;
4105 +
4106 + map->active.ring->ring_order = PVCALLS_RING_ORDER;
4107 + bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
4108 + PVCALLS_RING_ORDER);
4109 + if (!bytes)
4110 + goto out;
4111 +
4112 + map->active.data.in = bytes;
4113 + map->active.data.out = bytes +
4114 + XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
4115 +
4116 + return 0;
4117 +
4118 +out:
4119 + free_active_ring(map);
4120 + return -ENOMEM;
4121 +}
4122 +
4123 static int create_active(struct sock_mapping *map, int *evtchn)
4124 {
4125 void *bytes;
4126 @@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
4127 *evtchn = -1;
4128 init_waitqueue_head(&map->active.inflight_conn_req);
4129
4130 - map->active.ring = (struct pvcalls_data_intf *)
4131 - __get_free_page(GFP_KERNEL | __GFP_ZERO);
4132 - if (map->active.ring == NULL)
4133 - goto out_error;
4134 - map->active.ring->ring_order = PVCALLS_RING_ORDER;
4135 - bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
4136 - PVCALLS_RING_ORDER);
4137 - if (bytes == NULL)
4138 - goto out_error;
4139 + bytes = map->active.data.in;
4140 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
4141 map->active.ring->ref[i] = gnttab_grant_foreign_access(
4142 pvcalls_front_dev->otherend_id,
4143 @@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
4144 pvcalls_front_dev->otherend_id,
4145 pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
4146
4147 - map->active.data.in = bytes;
4148 - map->active.data.out = bytes +
4149 - XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
4150 -
4151 ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
4152 if (ret)
4153 goto out_error;
4154 @@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
4155 out_error:
4156 if (*evtchn >= 0)
4157 xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
4158 - free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
4159 - free_page((unsigned long)map->active.ring);
4160 return ret;
4161 }
4162
4163 @@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
4164 return PTR_ERR(map);
4165
4166 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
4167 + ret = alloc_active_ring(map);
4168 + if (ret < 0) {
4169 + pvcalls_exit_sock(sock);
4170 + return ret;
4171 + }
4172
4173 spin_lock(&bedata->socket_lock);
4174 ret = get_request(bedata, &req_id);
4175 if (ret < 0) {
4176 spin_unlock(&bedata->socket_lock);
4177 + free_active_ring(map);
4178 pvcalls_exit_sock(sock);
4179 return ret;
4180 }
4181 ret = create_active(map, &evtchn);
4182 if (ret < 0) {
4183 spin_unlock(&bedata->socket_lock);
4184 + free_active_ring(map);
4185 pvcalls_exit_sock(sock);
4186 return ret;
4187 }
4188 @@ -560,15 +595,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
4189 error = intf->in_error;
4190 /* get pointers before reading from the ring */
4191 virt_rmb();
4192 - if (error < 0)
4193 - return error;
4194
4195 size = pvcalls_queued(prod, cons, array_size);
4196 masked_prod = pvcalls_mask(prod, array_size);
4197 masked_cons = pvcalls_mask(cons, array_size);
4198
4199 if (size == 0)
4200 - return 0;
4201 + return error ?: size;
4202
4203 if (len > size)
4204 len = size;
4205 @@ -780,25 +813,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
4206 }
4207 }
4208
4209 - spin_lock(&bedata->socket_lock);
4210 - ret = get_request(bedata, &req_id);
4211 - if (ret < 0) {
4212 + map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
4213 + if (map2 == NULL) {
4214 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4215 (void *)&map->passive.flags);
4216 - spin_unlock(&bedata->socket_lock);
4217 + pvcalls_exit_sock(sock);
4218 + return -ENOMEM;
4219 + }
4220 + ret = alloc_active_ring(map2);
4221 + if (ret < 0) {
4222 + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4223 + (void *)&map->passive.flags);
4224 + kfree(map2);
4225 pvcalls_exit_sock(sock);
4226 return ret;
4227 }
4228 - map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
4229 - if (map2 == NULL) {
4230 + spin_lock(&bedata->socket_lock);
4231 + ret = get_request(bedata, &req_id);
4232 + if (ret < 0) {
4233 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4234 (void *)&map->passive.flags);
4235 spin_unlock(&bedata->socket_lock);
4236 + free_active_ring(map2);
4237 + kfree(map2);
4238 pvcalls_exit_sock(sock);
4239 - return -ENOMEM;
4240 + return ret;
4241 }
4242 +
4243 ret = create_active(map2, &evtchn);
4244 if (ret < 0) {
4245 + free_active_ring(map2);
4246 kfree(map2);
4247 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4248 (void *)&map->passive.flags);
4249 @@ -839,7 +883,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
4250
4251 received:
4252 map2->sock = newsock;
4253 - newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
4254 + newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
4255 if (!newsock->sk) {
4256 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
4257 map->passive.inflight_req_id = PVCALLS_INVALID_ID;
4258 @@ -1032,8 +1076,8 @@ int pvcalls_front_release(struct socket *sock)
4259 spin_lock(&bedata->socket_lock);
4260 list_del(&map->list);
4261 spin_unlock(&bedata->socket_lock);
4262 - if (READ_ONCE(map->passive.inflight_req_id) !=
4263 - PVCALLS_INVALID_ID) {
4264 + if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
4265 + READ_ONCE(map->passive.inflight_req_id) != 0) {
4266 pvcalls_front_free_map(bedata,
4267 map->passive.accept_map);
4268 }
4269 diff --git a/fs/afs/flock.c b/fs/afs/flock.c
4270 index 0568fd9868210..e432bd27a2e7b 100644
4271 --- a/fs/afs/flock.c
4272 +++ b/fs/afs/flock.c
4273 @@ -208,7 +208,7 @@ again:
4274 /* The new front of the queue now owns the state variables. */
4275 next = list_entry(vnode->pending_locks.next,
4276 struct file_lock, fl_u.afs.link);
4277 - vnode->lock_key = afs_file_key(next->fl_file);
4278 + vnode->lock_key = key_get(afs_file_key(next->fl_file));
4279 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
4280 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
4281 goto again;
4282 @@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
4283 /* The new front of the queue now owns the state variables. */
4284 next = list_entry(vnode->pending_locks.next,
4285 struct file_lock, fl_u.afs.link);
4286 - vnode->lock_key = afs_file_key(next->fl_file);
4287 + vnode->lock_key = key_get(afs_file_key(next->fl_file));
4288 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
4289 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
4290 afs_lock_may_be_available(vnode);
4291 diff --git a/fs/afs/inode.c b/fs/afs/inode.c
4292 index 6b17d36204142..1a4ce07fb406d 100644
4293 --- a/fs/afs/inode.c
4294 +++ b/fs/afs/inode.c
4295 @@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
4296 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
4297 valid = true;
4298 } else {
4299 - vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
4300 vnode->cb_v_break = vnode->volume->cb_v_break;
4301 valid = false;
4302 }
4303 @@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode)
4304 #endif
4305
4306 afs_put_permits(rcu_access_pointer(vnode->permit_cache));
4307 + key_put(vnode->lock_key);
4308 + vnode->lock_key = NULL;
4309 _leave("");
4310 }
4311
4312 diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
4313 index 07bc10f076aac..d443e2bfa0946 100644
4314 --- a/fs/afs/protocol_yfs.h
4315 +++ b/fs/afs/protocol_yfs.h
4316 @@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus {
4317 struct yfs_xdr_u64 max_quota;
4318 struct yfs_xdr_u64 file_quota;
4319 } __packed;
4320 +
4321 +enum yfs_lock_type {
4322 + yfs_LockNone = -1,
4323 + yfs_LockRead = 0,
4324 + yfs_LockWrite = 1,
4325 + yfs_LockExtend = 2,
4326 + yfs_LockRelease = 3,
4327 + yfs_LockMandatoryRead = 0x100,
4328 + yfs_LockMandatoryWrite = 0x101,
4329 + yfs_LockMandatoryExtend = 0x102,
4330 +};
4331 diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
4332 index a7b44863d502e..2c588f9bbbda2 100644
4333 --- a/fs/afs/rxrpc.c
4334 +++ b/fs/afs/rxrpc.c
4335 @@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls;
4336 static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
4337 static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
4338 static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
4339 +static void afs_delete_async_call(struct work_struct *);
4340 static void afs_process_async_call(struct work_struct *);
4341 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
4342 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
4343 @@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call)
4344 }
4345 }
4346
4347 +static struct afs_call *afs_get_call(struct afs_call *call,
4348 + enum afs_call_trace why)
4349 +{
4350 + int u = atomic_inc_return(&call->usage);
4351 +
4352 + trace_afs_call(call, why, u,
4353 + atomic_read(&call->net->nr_outstanding_calls),
4354 + __builtin_return_address(0));
4355 + return call;
4356 +}
4357 +
4358 /*
4359 * Queue the call for actual work.
4360 */
4361 static void afs_queue_call_work(struct afs_call *call)
4362 {
4363 if (call->type->work) {
4364 - int u = atomic_inc_return(&call->usage);
4365 -
4366 - trace_afs_call(call, afs_call_trace_work, u,
4367 - atomic_read(&call->net->nr_outstanding_calls),
4368 - __builtin_return_address(0));
4369 -
4370 INIT_WORK(&call->work, call->type->work);
4371
4372 + afs_get_call(call, afs_call_trace_work);
4373 if (!queue_work(afs_wq, &call->work))
4374 afs_put_call(call);
4375 }
4376 @@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
4377 }
4378 }
4379
4380 + /* If the call is going to be asynchronous, we need an extra ref for
4381 + * the call to hold itself so the caller need not hang on to its ref.
4382 + */
4383 + if (call->async)
4384 + afs_get_call(call, afs_call_trace_get);
4385 +
4386 /* create a call */
4387 rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
4388 (unsigned long)call,
4389 @@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
4390 goto error_do_abort;
4391 }
4392
4393 - /* at this point, an async call may no longer exist as it may have
4394 - * already completed */
4395 - if (call->async)
4396 + /* Note that at this point, we may have received the reply or an abort
4397 + * - and an asynchronous call may already have completed.
4398 + */
4399 + if (call->async) {
4400 + afs_put_call(call);
4401 return -EINPROGRESS;
4402 + }
4403
4404 return afs_wait_for_call_to_complete(call, ac);
4405
4406 error_do_abort:
4407 - call->state = AFS_CALL_COMPLETE;
4408 if (ret != -ECONNABORTED) {
4409 rxrpc_kernel_abort_call(call->net->socket, rxcall,
4410 RX_USER_ABORT, ret, "KSD");
4411 @@ -463,8 +478,24 @@ error_do_abort:
4412 error_kill_call:
4413 if (call->type->done)
4414 call->type->done(call);
4415 - afs_put_call(call);
4416 +
4417 + /* We need to dispose of the extra ref we grabbed for an async call.
4418 + * The call, however, might be queued on afs_async_calls and we need to
4419 + * make sure we don't get any more notifications that might requeue it.
4420 + */
4421 + if (call->rxcall) {
4422 + rxrpc_kernel_end_call(call->net->socket, call->rxcall);
4423 + call->rxcall = NULL;
4424 + }
4425 + if (call->async) {
4426 + if (cancel_work_sync(&call->async_work))
4427 + afs_put_call(call);
4428 + afs_put_call(call);
4429 + }
4430 +
4431 ac->error = ret;
4432 + call->state = AFS_CALL_COMPLETE;
4433 + afs_put_call(call);
4434 _leave(" = %d", ret);
4435 return ret;
4436 }
4437 diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
4438 index 12658c1363ae4..5aa57929e8c23 100644
4439 --- a/fs/afs/yfsclient.c
4440 +++ b/fs/afs/yfsclient.c
4441 @@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc,
4442 bp = xdr_encode_YFSFid(bp, &vnode->fid);
4443 bp = xdr_encode_string(bp, name, namesz);
4444 bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
4445 - bp = xdr_encode_u32(bp, 0); /* ViceLockType */
4446 + bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
4447 yfs_check_req(call, bp);
4448
4449 afs_use_fs_server(call, fc->cbi);
4450 diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
4451 index 041c27ea8de15..f74193da0e092 100644
4452 --- a/fs/ceph/snap.c
4453 +++ b/fs/ceph/snap.c
4454 @@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
4455 capsnap->size);
4456
4457 spin_lock(&mdsc->snap_flush_lock);
4458 - list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
4459 + if (list_empty(&ci->i_snap_flush_item))
4460 + list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
4461 spin_unlock(&mdsc->snap_flush_lock);
4462 return 1; /* caller may want to ceph_flush_snaps */
4463 }
4464 diff --git a/fs/proc/base.c b/fs/proc/base.c
4465 index ce34654794472..bde45ca75ba3e 100644
4466 --- a/fs/proc/base.c
4467 +++ b/fs/proc/base.c
4468 @@ -1084,10 +1084,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
4469
4470 task_lock(p);
4471 if (!p->vfork_done && process_shares_mm(p, mm)) {
4472 - pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
4473 - task_pid_nr(p), p->comm,
4474 - p->signal->oom_score_adj, oom_adj,
4475 - task_pid_nr(task), task->comm);
4476 p->signal->oom_score_adj = oom_adj;
4477 if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
4478 p->signal->oom_score_adj_min = (short)oom_adj;
4479 diff --git a/include/keys/user-type.h b/include/keys/user-type.h
4480 index e098cbe27db54..12babe9915944 100644
4481 --- a/include/keys/user-type.h
4482 +++ b/include/keys/user-type.h
4483 @@ -31,7 +31,7 @@
4484 struct user_key_payload {
4485 struct rcu_head rcu; /* RCU destructor */
4486 unsigned short datalen; /* length of this data */
4487 - char data[0]; /* actual data */
4488 + char data[0] __aligned(__alignof__(u64)); /* actual data */
4489 };
4490
4491 extern struct key_type key_type_user;
4492 diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
4493 index 3e7dafb3ea809..7ddaeb5182e33 100644
4494 --- a/include/linux/compiler-clang.h
4495 +++ b/include/linux/compiler-clang.h
4496 @@ -3,9 +3,8 @@
4497 #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
4498 #endif
4499
4500 -/* Some compiler specific definitions are overwritten here
4501 - * for Clang compiler
4502 - */
4503 +/* Compiler specific definitions for Clang compiler */
4504 +
4505 #define uninitialized_var(x) x = *(&(x))
4506
4507 /* same as gcc, this was present in clang-2.6 so we can assume it works
4508 diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
4509 index 977ddf2774f97..c61c4bb2bd15f 100644
4510 --- a/include/linux/compiler-gcc.h
4511 +++ b/include/linux/compiler-gcc.h
4512 @@ -58,10 +58,6 @@
4513 (typeof(ptr)) (__ptr + (off)); \
4514 })
4515
4516 -/* Make the optimizer believe the variable can be manipulated arbitrarily. */
4517 -#define OPTIMIZER_HIDE_VAR(var) \
4518 - __asm__ ("" : "=r" (var) : "0" (var))
4519 -
4520 /*
4521 * A trick to suppress uninitialized variable warning without generating any
4522 * code
4523 diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
4524 index 517bd14e12224..b17f3cd18334d 100644
4525 --- a/include/linux/compiler-intel.h
4526 +++ b/include/linux/compiler-intel.h
4527 @@ -5,9 +5,7 @@
4528
4529 #ifdef __ECC
4530
4531 -/* Some compiler specific definitions are overwritten here
4532 - * for Intel ECC compiler
4533 - */
4534 +/* Compiler specific definitions for Intel ECC compiler */
4535
4536 #include <asm/intrinsics.h>
4537
4538 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
4539 index fc5004a4b07d7..445348facea97 100644
4540 --- a/include/linux/compiler.h
4541 +++ b/include/linux/compiler.h
4542 @@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
4543 #endif
4544
4545 #ifndef OPTIMIZER_HIDE_VAR
4546 -#define OPTIMIZER_HIDE_VAR(var) barrier()
4547 +/* Make the optimizer believe the variable can be manipulated arbitrarily. */
4548 +#define OPTIMIZER_HIDE_VAR(var) \
4549 + __asm__ ("" : "=r" (var) : "0" (var))
4550 #endif
4551
4552 /* Not-quite-unique ID. */
4553 diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
4554 index 59ddf9af909e4..2dd0a9ed5b361 100644
4555 --- a/include/linux/qed/qed_chain.h
4556 +++ b/include/linux/qed/qed_chain.h
4557 @@ -663,6 +663,37 @@ out:
4558 static inline void qed_chain_set_prod(struct qed_chain *p_chain,
4559 u32 prod_idx, void *p_prod_elem)
4560 {
4561 + if (p_chain->mode == QED_CHAIN_MODE_PBL) {
4562 + u32 cur_prod, page_mask, page_cnt, page_diff;
4563 +
4564 + cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
4565 + p_chain->u.chain32.prod_idx;
4566 +
4567 + /* Assume that number of elements in a page is power of 2 */
4568 + page_mask = ~p_chain->elem_per_page_mask;
4569 +
4570 + /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
4571 + * reaches the first element of next page before the page index
4572 + * is incremented. See qed_chain_produce().
4573 + * Index wrap around is not a problem because the difference
4574 + * between current and given producer indices is always
4575 + * positive and lower than the chain's capacity.
4576 + */
4577 + page_diff = (((cur_prod - 1) & page_mask) -
4578 + ((prod_idx - 1) & page_mask)) /
4579 + p_chain->elem_per_page;
4580 +
4581 + page_cnt = qed_chain_get_page_cnt(p_chain);
4582 + if (is_chain_u16(p_chain))
4583 + p_chain->pbl.c.u16.prod_page_idx =
4584 + (p_chain->pbl.c.u16.prod_page_idx -
4585 + page_diff + page_cnt) % page_cnt;
4586 + else
4587 + p_chain->pbl.c.u32.prod_page_idx =
4588 + (p_chain->pbl.c.u32.prod_page_idx -
4589 + page_diff + page_cnt) % page_cnt;
4590 + }
4591 +
4592 if (is_chain_u16(p_chain))
4593 p_chain->u.chain16.prod_idx = (u16) prod_idx;
4594 else
4595 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
4596 index a6d820ad17f07..8e63c166765ef 100644
4597 --- a/include/linux/skbuff.h
4598 +++ b/include/linux/skbuff.h
4599 @@ -2418,7 +2418,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
4600
4601 if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
4602 skb_set_transport_header(skb, keys.control.thoff);
4603 - else
4604 + else if (offset_hint >= 0)
4605 skb_set_transport_header(skb, offset_hint);
4606 }
4607
4608 diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
4609 index cb462f9ab7dd5..e0348cb0a1dd7 100644
4610 --- a/include/linux/virtio_net.h
4611 +++ b/include/linux/virtio_net.h
4612 @@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
4613
4614 if (!skb_partial_csum_set(skb, start, off))
4615 return -EINVAL;
4616 + } else {
4617 + /* gso packets without NEEDS_CSUM do not set transport_offset.
4618 + * probe and drop if does not match one of the above types.
4619 + */
4620 + if (gso_type && skb->network_header) {
4621 + if (!skb->protocol)
4622 + virtio_net_hdr_set_proto(skb, hdr);
4623 +retry:
4624 + skb_probe_transport_header(skb, -1);
4625 + if (!skb_transport_header_was_set(skb)) {
4626 + /* UFO does not specify ipv4 or 6: try both */
4627 + if (gso_type & SKB_GSO_UDP &&
4628 + skb->protocol == htons(ETH_P_IP)) {
4629 + skb->protocol = htons(ETH_P_IPV6);
4630 + goto retry;
4631 + }
4632 + return -EINVAL;
4633 + }
4634 + }
4635 }
4636
4637 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
4638 diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
4639 index 77e2761d4f2f9..ff4eb9869e5ba 100644
4640 --- a/include/net/netfilter/nf_flow_table.h
4641 +++ b/include/net/netfilter/nf_flow_table.h
4642 @@ -84,7 +84,6 @@ struct flow_offload {
4643 struct nf_flow_route {
4644 struct {
4645 struct dst_entry *dst;
4646 - int ifindex;
4647 } tuple[FLOW_OFFLOAD_DIR_MAX];
4648 };
4649
4650 diff --git a/include/sound/soc.h b/include/sound/soc.h
4651 index 3e0ac310a3df9..e721082c84a36 100644
4652 --- a/include/sound/soc.h
4653 +++ b/include/sound/soc.h
4654 @@ -985,6 +985,12 @@ struct snd_soc_dai_link {
4655 /* Do not create a PCM for this DAI link (Backend link) */
4656 unsigned int ignore:1;
4657
4658 + /*
4659 + * This driver uses legacy platform naming. Set by the core, machine
4660 + * drivers should not modify this value.
4661 + */
4662 + unsigned int legacy_platform:1;
4663 +
4664 struct list_head list; /* DAI link list of the soc card */
4665 struct snd_soc_dobj dobj; /* For topology */
4666 };
4667 diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
4668 index 33d291888ba9c..e3f005eae1f76 100644
4669 --- a/include/trace/events/afs.h
4670 +++ b/include/trace/events/afs.h
4671 @@ -25,6 +25,7 @@
4672 enum afs_call_trace {
4673 afs_call_trace_alloc,
4674 afs_call_trace_free,
4675 + afs_call_trace_get,
4676 afs_call_trace_put,
4677 afs_call_trace_wake,
4678 afs_call_trace_work,
4679 @@ -159,6 +160,7 @@ enum afs_file_error {
4680 #define afs_call_traces \
4681 EM(afs_call_trace_alloc, "ALLOC") \
4682 EM(afs_call_trace_free, "FREE ") \
4683 + EM(afs_call_trace_get, "GET ") \
4684 EM(afs_call_trace_put, "PUT ") \
4685 EM(afs_call_trace_wake, "WAKE ") \
4686 E_(afs_call_trace_work, "WORK ")
4687 diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
4688 index 14565d703291b..e8baca85bac6a 100644
4689 --- a/include/uapi/linux/inet_diag.h
4690 +++ b/include/uapi/linux/inet_diag.h
4691 @@ -137,15 +137,21 @@ enum {
4692 INET_DIAG_TCLASS,
4693 INET_DIAG_SKMEMINFO,
4694 INET_DIAG_SHUTDOWN,
4695 - INET_DIAG_DCTCPINFO,
4696 - INET_DIAG_PROTOCOL, /* response attribute only */
4697 +
4698 + /*
4699 + * Next extenstions cannot be requested in struct inet_diag_req_v2:
4700 + * its field idiag_ext has only 8 bits.
4701 + */
4702 +
4703 + INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */
4704 + INET_DIAG_PROTOCOL, /* response attribute only */
4705 INET_DIAG_SKV6ONLY,
4706 INET_DIAG_LOCALS,
4707 INET_DIAG_PEERS,
4708 INET_DIAG_PAD,
4709 - INET_DIAG_MARK,
4710 - INET_DIAG_BBRINFO,
4711 - INET_DIAG_CLASS_ID,
4712 + INET_DIAG_MARK, /* only with CAP_NET_ADMIN */
4713 + INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */
4714 + INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
4715 INET_DIAG_MD5SIG,
4716 __INET_DIAG_MAX,
4717 };
4718 diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
4719 index 90daf285de032..d43b145358275 100644
4720 --- a/kernel/bpf/stackmap.c
4721 +++ b/kernel/bpf/stackmap.c
4722 @@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
4723
4724 if (nhdr->n_type == BPF_BUILD_ID &&
4725 nhdr->n_namesz == sizeof("GNU") &&
4726 - nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
4727 + nhdr->n_descsz > 0 &&
4728 + nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
4729 memcpy(build_id,
4730 note_start + note_offs +
4731 ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
4732 - BPF_BUILD_ID_SIZE);
4733 + nhdr->n_descsz);
4734 + memset(build_id + nhdr->n_descsz, 0,
4735 + BPF_BUILD_ID_SIZE - nhdr->n_descsz);
4736 return 0;
4737 }
4738 new_offs = note_offs + sizeof(Elf32_Nhdr) +
4739 @@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
4740 return -EFAULT; /* page not mapped */
4741
4742 ret = -EINVAL;
4743 - page_addr = page_address(page);
4744 + page_addr = kmap_atomic(page);
4745 ehdr = (Elf32_Ehdr *)page_addr;
4746
4747 /* compare magic x7f "ELF" */
4748 @@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
4749 else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
4750 ret = stack_map_get_build_id_64(page_addr, build_id);
4751 out:
4752 + kunmap_atomic(page_addr);
4753 put_page(page);
4754 return ret;
4755 }
4756 @@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
4757 for (i = 0; i < trace_nr; i++) {
4758 id_offs[i].status = BPF_STACK_BUILD_ID_IP;
4759 id_offs[i].ip = ips[i];
4760 + memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
4761 }
4762 return;
4763 }
4764 @@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
4765 /* per entry fall back to ips */
4766 id_offs[i].status = BPF_STACK_BUILD_ID_IP;
4767 id_offs[i].ip = ips[i];
4768 + memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
4769 continue;
4770 }
4771 id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
4772 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4773 index ff1c4b20cd0a6..b331562989bd2 100644
4774 --- a/kernel/trace/trace.c
4775 +++ b/kernel/trace/trace.c
4776 @@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
4777 const char tgid_space[] = " ";
4778 const char space[] = " ";
4779
4780 + print_event_info(buf, m);
4781 +
4782 seq_printf(m, "# %s _-----=> irqs-off\n",
4783 tgid ? tgid_space : space);
4784 seq_printf(m, "# %s / _----=> need-resched\n",
4785 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
4786 index fec67188c4d28..3387408a15c27 100644
4787 --- a/kernel/trace/trace_kprobe.c
4788 +++ b/kernel/trace/trace_kprobe.c
4789 @@ -878,22 +878,14 @@ static const struct file_operations kprobe_profile_ops = {
4790 static nokprobe_inline int
4791 fetch_store_strlen(unsigned long addr)
4792 {
4793 - mm_segment_t old_fs;
4794 int ret, len = 0;
4795 u8 c;
4796
4797 - old_fs = get_fs();
4798 - set_fs(KERNEL_DS);
4799 - pagefault_disable();
4800 -
4801 do {
4802 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
4803 + ret = probe_mem_read(&c, (u8 *)addr + len, 1);
4804 len++;
4805 } while (c && ret == 0 && len < MAX_STRING_SIZE);
4806
4807 - pagefault_enable();
4808 - set_fs(old_fs);
4809 -
4810 return (ret < 0) ? ret : len;
4811 }
4812
4813 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4814 index d4496d9d34f53..ee2bce59d2bff 100644
4815 --- a/mm/mempolicy.c
4816 +++ b/mm/mempolicy.c
4817 @@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
4818 nodemask_t *nodes)
4819 {
4820 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
4821 - const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
4822 + unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
4823
4824 if (copy > nbytes) {
4825 if (copy > PAGE_SIZE)
4826 @@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy,
4827 int uninitialized_var(pval);
4828 nodemask_t nodes;
4829
4830 - if (nmask != NULL && maxnode < MAX_NUMNODES)
4831 + if (nmask != NULL && maxnode < nr_node_ids)
4832 return -EINVAL;
4833
4834 err = do_get_mempolicy(&pval, &nodes, addr, flags);
4835 @@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
4836 unsigned long nr_bits, alloc_size;
4837 DECLARE_BITMAP(bm, MAX_NUMNODES);
4838
4839 - nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
4840 + nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
4841 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
4842
4843 if (nmask)
4844 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
4845 index b85ca809e5092..ffc83bebfe403 100644
4846 --- a/net/batman-adv/soft-interface.c
4847 +++ b/net/batman-adv/soft-interface.c
4848 @@ -227,6 +227,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
4849
4850 switch (ntohs(ethhdr->h_proto)) {
4851 case ETH_P_8021Q:
4852 + if (!pskb_may_pull(skb, sizeof(*vhdr)))
4853 + goto dropped;
4854 vhdr = vlan_eth_hdr(skb);
4855
4856 /* drop batman-in-batman packets to prevent loops */
4857 diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
4858 index e56ba3912a905..8b8abf88befbd 100644
4859 --- a/net/bridge/br_fdb.c
4860 +++ b/net/bridge/br_fdb.c
4861 @@ -1102,6 +1102,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
4862 err = -ENOMEM;
4863 goto err_unlock;
4864 }
4865 + if (swdev_notify)
4866 + fdb->added_by_user = 1;
4867 fdb->added_by_external_learn = 1;
4868 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
4869 } else {
4870 @@ -1121,6 +1123,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
4871 modified = true;
4872 }
4873
4874 + if (swdev_notify)
4875 + fdb->added_by_user = 1;
4876 +
4877 if (modified)
4878 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
4879 }
4880 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
4881 index 6bac0d6b7b941..024139b51d3a5 100644
4882 --- a/net/bridge/br_multicast.c
4883 +++ b/net/bridge/br_multicast.c
4884 @@ -1422,14 +1422,7 @@ static void br_multicast_query_received(struct net_bridge *br,
4885 return;
4886
4887 br_multicast_update_query_timer(br, query, max_delay);
4888 -
4889 - /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
4890 - * the arrival port for IGMP Queries where the source address
4891 - * is 0.0.0.0 should not be added to router port list.
4892 - */
4893 - if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
4894 - saddr->proto == htons(ETH_P_IPV6))
4895 - br_multicast_mark_router(br, port);
4896 + br_multicast_mark_router(br, port);
4897 }
4898
4899 static void br_ip4_multicast_query(struct net_bridge *br,
4900 diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
4901 index 664f886f464da..b59b81fc1ab66 100644
4902 --- a/net/ceph/messenger.c
4903 +++ b/net/ceph/messenger.c
4904 @@ -2071,6 +2071,8 @@ static int process_connect(struct ceph_connection *con)
4905 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
4906
4907 if (con->auth) {
4908 + int len = le32_to_cpu(con->in_reply.authorizer_len);
4909 +
4910 /*
4911 * Any connection that defines ->get_authorizer()
4912 * should also define ->add_authorizer_challenge() and
4913 @@ -2080,8 +2082,7 @@ static int process_connect(struct ceph_connection *con)
4914 */
4915 if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
4916 ret = con->ops->add_authorizer_challenge(
4917 - con, con->auth->authorizer_reply_buf,
4918 - le32_to_cpu(con->in_reply.authorizer_len));
4919 + con, con->auth->authorizer_reply_buf, len);
4920 if (ret < 0)
4921 return ret;
4922
4923 @@ -2091,10 +2092,12 @@ static int process_connect(struct ceph_connection *con)
4924 return 0;
4925 }
4926
4927 - ret = con->ops->verify_authorizer_reply(con);
4928 - if (ret < 0) {
4929 - con->error_msg = "bad authorize reply";
4930 - return ret;
4931 + if (len) {
4932 + ret = con->ops->verify_authorizer_reply(con);
4933 + if (ret < 0) {
4934 + con->error_msg = "bad authorize reply";
4935 + return ret;
4936 + }
4937 }
4938 }
4939
4940 diff --git a/net/core/filter.c b/net/core/filter.c
4941 index eb0007f30142b..16350f8c8815a 100644
4942 --- a/net/core/filter.c
4943 +++ b/net/core/filter.c
4944 @@ -3935,6 +3935,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4945 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
4946 break;
4947 case SO_MAX_PACING_RATE: /* 32bit version */
4948 + if (val != ~0U)
4949 + cmpxchg(&sk->sk_pacing_status,
4950 + SK_PACING_NONE,
4951 + SK_PACING_NEEDED);
4952 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
4953 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4954 sk->sk_max_pacing_rate);
4955 @@ -3948,7 +3952,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4956 sk->sk_rcvlowat = val ? : 1;
4957 break;
4958 case SO_MARK:
4959 - sk->sk_mark = val;
4960 + if (sk->sk_mark != val) {
4961 + sk->sk_mark = val;
4962 + sk_dst_reset(sk);
4963 + }
4964 break;
4965 default:
4966 ret = -EINVAL;
4967 @@ -4019,7 +4026,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4968 /* Only some options are supported */
4969 switch (optname) {
4970 case TCP_BPF_IW:
4971 - if (val <= 0 || tp->data_segs_out > 0)
4972 + if (val <= 0 || tp->data_segs_out > tp->syn_data)
4973 ret = -EINVAL;
4974 else
4975 tp->snd_cwnd = val;
4976 diff --git a/net/dsa/port.c b/net/dsa/port.c
4977 index ed0595459df13..792a13068c50b 100644
4978 --- a/net/dsa/port.c
4979 +++ b/net/dsa/port.c
4980 @@ -255,7 +255,7 @@ int dsa_port_vlan_add(struct dsa_port *dp,
4981 if (netif_is_bridge_master(vlan->obj.orig_dev))
4982 return -EOPNOTSUPP;
4983
4984 - if (br_vlan_enabled(dp->bridge_dev))
4985 + if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
4986 return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
4987
4988 return 0;
4989 @@ -273,7 +273,7 @@ int dsa_port_vlan_del(struct dsa_port *dp,
4990 if (netif_is_bridge_master(vlan->obj.orig_dev))
4991 return -EOPNOTSUPP;
4992
4993 - if (br_vlan_enabled(dp->bridge_dev))
4994 + if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
4995 return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
4996
4997 return 0;
4998 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
4999 index 1a4e9ff02762e..5731670c560b0 100644
5000 --- a/net/ipv4/inet_diag.c
5001 +++ b/net/ipv4/inet_diag.c
5002 @@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
5003 + nla_total_size(1) /* INET_DIAG_TOS */
5004 + nla_total_size(1) /* INET_DIAG_TCLASS */
5005 + nla_total_size(4) /* INET_DIAG_MARK */
5006 + + nla_total_size(4) /* INET_DIAG_CLASS_ID */
5007 + nla_total_size(sizeof(struct inet_diag_meminfo))
5008 + nla_total_size(sizeof(struct inet_diag_msg))
5009 + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
5010 @@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
5011 goto errout;
5012 }
5013
5014 - if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) {
5015 + if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
5016 + ext & (1 << (INET_DIAG_TCLASS - 1))) {
5017 u32 classid = 0;
5018
5019 #ifdef CONFIG_SOCK_CGROUP_DATA
5020 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
5021 #endif
5022 + /* Fallback to socket priority if class id isn't set.
5023 + * Classful qdiscs use it as direct reference to class.
5024 + * For cgroup2 classid is always zero.
5025 + */
5026 + if (!classid)
5027 + classid = sk->sk_priority;
5028
5029 if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
5030 goto errout;
5031 diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
5032 index fb1e7f237f531..3cd237b42f446 100644
5033 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
5034 +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
5035 @@ -56,7 +56,7 @@ struct clusterip_config {
5036 #endif
5037 enum clusterip_hashmode hash_mode; /* which hashing mode */
5038 u_int32_t hash_initval; /* hash initialization */
5039 - struct rcu_head rcu;
5040 + struct rcu_head rcu; /* for call_rcu_bh */
5041 struct net *net; /* netns for pernet list */
5042 char ifname[IFNAMSIZ]; /* device ifname */
5043 };
5044 @@ -72,6 +72,8 @@ struct clusterip_net {
5045
5046 #ifdef CONFIG_PROC_FS
5047 struct proc_dir_entry *procdir;
5048 + /* mutex protects the config->pde*/
5049 + struct mutex mutex;
5050 #endif
5051 };
5052
5053 @@ -118,17 +120,18 @@ clusterip_config_entry_put(struct clusterip_config *c)
5054
5055 local_bh_disable();
5056 if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
5057 + list_del_rcu(&c->list);
5058 + spin_unlock(&cn->lock);
5059 + local_bh_enable();
5060 /* In case anyone still accesses the file, the open/close
5061 * functions are also incrementing the refcount on their own,
5062 * so it's safe to remove the entry even if it's in use. */
5063 #ifdef CONFIG_PROC_FS
5064 + mutex_lock(&cn->mutex);
5065 if (cn->procdir)
5066 proc_remove(c->pde);
5067 + mutex_unlock(&cn->mutex);
5068 #endif
5069 - list_del_rcu(&c->list);
5070 - spin_unlock(&cn->lock);
5071 - local_bh_enable();
5072 -
5073 return;
5074 }
5075 local_bh_enable();
5076 @@ -278,9 +281,11 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
5077
5078 /* create proc dir entry */
5079 sprintf(buffer, "%pI4", &ip);
5080 + mutex_lock(&cn->mutex);
5081 c->pde = proc_create_data(buffer, 0600,
5082 cn->procdir,
5083 &clusterip_proc_fops, c);
5084 + mutex_unlock(&cn->mutex);
5085 if (!c->pde) {
5086 err = -ENOMEM;
5087 goto err;
5088 @@ -833,6 +838,7 @@ static int clusterip_net_init(struct net *net)
5089 pr_err("Unable to proc dir entry\n");
5090 return -ENOMEM;
5091 }
5092 + mutex_init(&cn->mutex);
5093 #endif /* CONFIG_PROC_FS */
5094
5095 return 0;
5096 @@ -841,9 +847,12 @@ static int clusterip_net_init(struct net *net)
5097 static void clusterip_net_exit(struct net *net)
5098 {
5099 struct clusterip_net *cn = clusterip_pernet(net);
5100 +
5101 #ifdef CONFIG_PROC_FS
5102 + mutex_lock(&cn->mutex);
5103 proc_remove(cn->procdir);
5104 cn->procdir = NULL;
5105 + mutex_unlock(&cn->mutex);
5106 #endif
5107 nf_unregister_net_hook(net, &cip_arp_ops);
5108 }
5109 diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
5110 index 8b075f0bc3516..6d0b1f3e927bd 100644
5111 --- a/net/ipv6/netfilter.c
5112 +++ b/net/ipv6/netfilter.c
5113 @@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
5114 struct sock *sk = sk_to_full_sk(skb->sk);
5115 unsigned int hh_len;
5116 struct dst_entry *dst;
5117 + int strict = (ipv6_addr_type(&iph->daddr) &
5118 + (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
5119 struct flowi6 fl6 = {
5120 .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
5121 - rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
5122 + strict ? skb_dst(skb)->dev->ifindex : 0,
5123 .flowi6_mark = skb->mark,
5124 .flowi6_uid = sock_net_uid(net, sk),
5125 .daddr = iph->daddr,
5126 diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
5127 index 8d0ba757a46ce..9b2f272ca1649 100644
5128 --- a/net/ipv6/seg6.c
5129 +++ b/net/ipv6/seg6.c
5130 @@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
5131 rcu_read_unlock();
5132
5133 genlmsg_end(msg, hdr);
5134 - genlmsg_reply(msg, info);
5135 -
5136 - return 0;
5137 + return genlmsg_reply(msg, info);
5138
5139 nla_put_failure:
5140 rcu_read_unlock();
5141 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
5142 index 1e03305c05492..e8a1dabef803e 100644
5143 --- a/net/ipv6/sit.c
5144 +++ b/net/ipv6/sit.c
5145 @@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
5146 }
5147
5148 err = 0;
5149 - if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
5150 + if (__in6_dev_get(skb->dev) &&
5151 + !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
5152 goto out;
5153
5154 if (t->parms.iph.daddr == 0)
5155 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
5156 index d01ec252cb81d..848dd38a907a1 100644
5157 --- a/net/ipv6/udp.c
5158 +++ b/net/ipv6/udp.c
5159 @@ -1322,10 +1322,7 @@ do_udp_sendmsg:
5160 ipc6.opt = opt;
5161
5162 fl6.flowi6_proto = sk->sk_protocol;
5163 - if (!ipv6_addr_any(daddr))
5164 - fl6.daddr = *daddr;
5165 - else
5166 - fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
5167 + fl6.daddr = *daddr;
5168 if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
5169 fl6.saddr = np->saddr;
5170 fl6.fl6_sport = inet->inet_sport;
5171 @@ -1353,6 +1350,9 @@ do_udp_sendmsg:
5172 }
5173 }
5174
5175 + if (ipv6_addr_any(&fl6.daddr))
5176 + fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
5177 +
5178 final_p = fl6_update_dst(&fl6, opt, &final);
5179 if (final_p)
5180 connected = false;
5181 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
5182 index 818aa00603495..517dad83c2fa7 100644
5183 --- a/net/mac80211/cfg.c
5184 +++ b/net/mac80211/cfg.c
5185 @@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
5186 BSS_CHANGED_P2P_PS |
5187 BSS_CHANGED_TXPOWER;
5188 int err;
5189 + int prev_beacon_int;
5190
5191 old = sdata_dereference(sdata->u.ap.beacon, sdata);
5192 if (old)
5193 @@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
5194
5195 sdata->needed_rx_chains = sdata->local->rx_chains;
5196
5197 + prev_beacon_int = sdata->vif.bss_conf.beacon_int;
5198 sdata->vif.bss_conf.beacon_int = params->beacon_interval;
5199
5200 if (params->he_cap)
5201 @@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
5202 if (!err)
5203 ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
5204 mutex_unlock(&local->mtx);
5205 - if (err)
5206 + if (err) {
5207 + sdata->vif.bss_conf.beacon_int = prev_beacon_int;
5208 return err;
5209 + }
5210
5211 /*
5212 * Apply control port protocol, this allows us to
5213 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
5214 index 7b8320d4a8e4b..3131356e290a0 100644
5215 --- a/net/mac80211/main.c
5216 +++ b/net/mac80211/main.c
5217 @@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
5218 * We need a bit of data queued to build aggregates properly, so
5219 * instruct the TCP stack to allow more than a single ms of data
5220 * to be queued in the stack. The value is a bit-shift of 1
5221 - * second, so 8 is ~4ms of queued data. Only affects local TCP
5222 + * second, so 7 is ~8ms of queued data. Only affects local TCP
5223 * sockets.
5224 * This is the default, anyhow - drivers may need to override it
5225 * for local reasons (longer buffers, longer completion time, or
5226 * similar).
5227 */
5228 - local->hw.tx_sk_pacing_shift = 8;
5229 + local->hw.tx_sk_pacing_shift = 7;
5230
5231 /* set up some defaults */
5232 local->hw.queues = 1;
5233 diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
5234 index 21526630bf655..e84103b405341 100644
5235 --- a/net/mac80211/mesh.h
5236 +++ b/net/mac80211/mesh.h
5237 @@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
5238 * @dst: mesh path destination mac address
5239 * @mpp: mesh proxy mac address
5240 * @rhash: rhashtable list pointer
5241 + * @walk_list: linked list containing all mesh_path objects.
5242 * @gate_list: list pointer for known gates list
5243 * @sdata: mesh subif
5244 * @next_hop: mesh neighbor to which frames for this destination will be
5245 @@ -105,6 +106,7 @@ struct mesh_path {
5246 u8 dst[ETH_ALEN];
5247 u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
5248 struct rhash_head rhash;
5249 + struct hlist_node walk_list;
5250 struct hlist_node gate_list;
5251 struct ieee80211_sub_if_data *sdata;
5252 struct sta_info __rcu *next_hop;
5253 @@ -133,12 +135,16 @@ struct mesh_path {
5254 * gate's mpath may or may not be resolved and active.
5255 * @gates_lock: protects updates to known_gates
5256 * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
5257 + * @walk_head: linked list containging all mesh_path objects
5258 + * @walk_lock: lock protecting walk_head
5259 * @entries: number of entries in the table
5260 */
5261 struct mesh_table {
5262 struct hlist_head known_gates;
5263 spinlock_t gates_lock;
5264 struct rhashtable rhead;
5265 + struct hlist_head walk_head;
5266 + spinlock_t walk_lock;
5267 atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
5268 };
5269
5270 diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
5271 index a5125624a76dc..c3a7396fb9556 100644
5272 --- a/net/mac80211/mesh_pathtbl.c
5273 +++ b/net/mac80211/mesh_pathtbl.c
5274 @@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
5275 return NULL;
5276
5277 INIT_HLIST_HEAD(&newtbl->known_gates);
5278 + INIT_HLIST_HEAD(&newtbl->walk_head);
5279 atomic_set(&newtbl->entries, 0);
5280 spin_lock_init(&newtbl->gates_lock);
5281 + spin_lock_init(&newtbl->walk_lock);
5282
5283 return newtbl;
5284 }
5285 @@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
5286 static struct mesh_path *
5287 __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
5288 {
5289 - int i = 0, ret;
5290 - struct mesh_path *mpath = NULL;
5291 - struct rhashtable_iter iter;
5292 -
5293 - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5294 - if (ret)
5295 - return NULL;
5296 -
5297 - rhashtable_walk_start(&iter);
5298 + int i = 0;
5299 + struct mesh_path *mpath;
5300
5301 - while ((mpath = rhashtable_walk_next(&iter))) {
5302 - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5303 - continue;
5304 - if (IS_ERR(mpath))
5305 - break;
5306 + hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
5307 if (i++ == idx)
5308 break;
5309 }
5310 - rhashtable_walk_stop(&iter);
5311 - rhashtable_walk_exit(&iter);
5312
5313 - if (IS_ERR(mpath) || !mpath)
5314 + if (!mpath)
5315 return NULL;
5316
5317 if (mpath_expired(mpath)) {
5318 @@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
5319 return ERR_PTR(-ENOMEM);
5320
5321 tbl = sdata->u.mesh.mesh_paths;
5322 + spin_lock_bh(&tbl->walk_lock);
5323 do {
5324 ret = rhashtable_lookup_insert_fast(&tbl->rhead,
5325 &new_mpath->rhash,
5326 @@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
5327 mpath = rhashtable_lookup_fast(&tbl->rhead,
5328 dst,
5329 mesh_rht_params);
5330 -
5331 + else if (!ret)
5332 + hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
5333 } while (unlikely(ret == -EEXIST && !mpath));
5334 + spin_unlock_bh(&tbl->walk_lock);
5335
5336 - if (ret && ret != -EEXIST)
5337 - return ERR_PTR(ret);
5338 -
5339 - /* At this point either new_mpath was added, or we found a
5340 - * matching entry already in the table; in the latter case
5341 - * free the unnecessary new entry.
5342 - */
5343 - if (ret == -EEXIST) {
5344 + if (ret) {
5345 kfree(new_mpath);
5346 +
5347 + if (ret != -EEXIST)
5348 + return ERR_PTR(ret);
5349 +
5350 new_mpath = mpath;
5351 }
5352 +
5353 sdata->u.mesh.mesh_paths_generation++;
5354 return new_mpath;
5355 }
5356 @@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
5357
5358 memcpy(new_mpath->mpp, mpp, ETH_ALEN);
5359 tbl = sdata->u.mesh.mpp_paths;
5360 +
5361 + spin_lock_bh(&tbl->walk_lock);
5362 ret = rhashtable_lookup_insert_fast(&tbl->rhead,
5363 &new_mpath->rhash,
5364 mesh_rht_params);
5365 + if (!ret)
5366 + hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
5367 + spin_unlock_bh(&tbl->walk_lock);
5368 +
5369 + if (ret)
5370 + kfree(new_mpath);
5371
5372 sdata->u.mesh.mpp_paths_generation++;
5373 return ret;
5374 @@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
5375 struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
5376 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
5377 struct mesh_path *mpath;
5378 - struct rhashtable_iter iter;
5379 - int ret;
5380 -
5381 - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5382 - if (ret)
5383 - return;
5384
5385 - rhashtable_walk_start(&iter);
5386 -
5387 - while ((mpath = rhashtable_walk_next(&iter))) {
5388 - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5389 - continue;
5390 - if (IS_ERR(mpath))
5391 - break;
5392 + rcu_read_lock();
5393 + hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
5394 if (rcu_access_pointer(mpath->next_hop) == sta &&
5395 mpath->flags & MESH_PATH_ACTIVE &&
5396 !(mpath->flags & MESH_PATH_FIXED)) {
5397 @@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
5398 WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
5399 }
5400 }
5401 - rhashtable_walk_stop(&iter);
5402 - rhashtable_walk_exit(&iter);
5403 + rcu_read_unlock();
5404 }
5405
5406 static void mesh_path_free_rcu(struct mesh_table *tbl,
5407 @@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
5408
5409 static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
5410 {
5411 + hlist_del_rcu(&mpath->walk_list);
5412 rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
5413 mesh_path_free_rcu(tbl, mpath);
5414 }
5415 @@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
5416 struct ieee80211_sub_if_data *sdata = sta->sdata;
5417 struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
5418 struct mesh_path *mpath;
5419 - struct rhashtable_iter iter;
5420 - int ret;
5421 -
5422 - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5423 - if (ret)
5424 - return;
5425 -
5426 - rhashtable_walk_start(&iter);
5427 -
5428 - while ((mpath = rhashtable_walk_next(&iter))) {
5429 - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5430 - continue;
5431 - if (IS_ERR(mpath))
5432 - break;
5433 + struct hlist_node *n;
5434
5435 + spin_lock_bh(&tbl->walk_lock);
5436 + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5437 if (rcu_access_pointer(mpath->next_hop) == sta)
5438 __mesh_path_del(tbl, mpath);
5439 }
5440 -
5441 - rhashtable_walk_stop(&iter);
5442 - rhashtable_walk_exit(&iter);
5443 + spin_unlock_bh(&tbl->walk_lock);
5444 }
5445
5446 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
5447 @@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
5448 {
5449 struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
5450 struct mesh_path *mpath;
5451 - struct rhashtable_iter iter;
5452 - int ret;
5453 -
5454 - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5455 - if (ret)
5456 - return;
5457 -
5458 - rhashtable_walk_start(&iter);
5459 -
5460 - while ((mpath = rhashtable_walk_next(&iter))) {
5461 - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5462 - continue;
5463 - if (IS_ERR(mpath))
5464 - break;
5465 + struct hlist_node *n;
5466
5467 + spin_lock_bh(&tbl->walk_lock);
5468 + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5469 if (ether_addr_equal(mpath->mpp, proxy))
5470 __mesh_path_del(tbl, mpath);
5471 }
5472 -
5473 - rhashtable_walk_stop(&iter);
5474 - rhashtable_walk_exit(&iter);
5475 + spin_unlock_bh(&tbl->walk_lock);
5476 }
5477
5478 static void table_flush_by_iface(struct mesh_table *tbl)
5479 {
5480 struct mesh_path *mpath;
5481 - struct rhashtable_iter iter;
5482 - int ret;
5483 -
5484 - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5485 - if (ret)
5486 - return;
5487 -
5488 - rhashtable_walk_start(&iter);
5489 + struct hlist_node *n;
5490
5491 - while ((mpath = rhashtable_walk_next(&iter))) {
5492 - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5493 - continue;
5494 - if (IS_ERR(mpath))
5495 - break;
5496 + spin_lock_bh(&tbl->walk_lock);
5497 + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5498 __mesh_path_del(tbl, mpath);
5499 }
5500 -
5501 - rhashtable_walk_stop(&iter);
5502 - rhashtable_walk_exit(&iter);
5503 + spin_unlock_bh(&tbl->walk_lock);
5504 }
5505
5506 /**
5507 @@ -675,7 +624,7 @@ static int table_path_del(struct mesh_table *tbl,
5508 {
5509 struct mesh_path *mpath;
5510
5511 - rcu_read_lock();
5512 + spin_lock_bh(&tbl->walk_lock);
5513 mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
5514 if (!mpath) {
5515 rcu_read_unlock();
5516 @@ -683,7 +632,7 @@ static int table_path_del(struct mesh_table *tbl,
5517 }
5518
5519 __mesh_path_del(tbl, mpath);
5520 - rcu_read_unlock();
5521 + spin_unlock_bh(&tbl->walk_lock);
5522 return 0;
5523 }
5524
5525 @@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
5526 struct mesh_table *tbl)
5527 {
5528 struct mesh_path *mpath;
5529 - struct rhashtable_iter iter;
5530 - int ret;
5531 + struct hlist_node *n;
5532
5533 - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
5534 - if (ret)
5535 - return;
5536 -
5537 - rhashtable_walk_start(&iter);
5538 -
5539 - while ((mpath = rhashtable_walk_next(&iter))) {
5540 - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5541 - continue;
5542 - if (IS_ERR(mpath))
5543 - break;
5544 + spin_lock_bh(&tbl->walk_lock);
5545 + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5546 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
5547 (!(mpath->flags & MESH_PATH_FIXED)) &&
5548 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
5549 __mesh_path_del(tbl, mpath);
5550 }
5551 -
5552 - rhashtable_walk_stop(&iter);
5553 - rhashtable_walk_exit(&iter);
5554 + spin_unlock_bh(&tbl->walk_lock);
5555 }
5556
5557 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
5558 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
5559 index 85c365fc7a0c1..46ecc417c4210 100644
5560 --- a/net/mac80211/rx.c
5561 +++ b/net/mac80211/rx.c
5562 @@ -2640,6 +2640,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
5563 struct ieee80211_sub_if_data *sdata = rx->sdata;
5564 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
5565 u16 ac, q, hdrlen;
5566 + int tailroom = 0;
5567
5568 hdr = (struct ieee80211_hdr *) skb->data;
5569 hdrlen = ieee80211_hdrlen(hdr->frame_control);
5570 @@ -2726,8 +2727,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
5571 if (!ifmsh->mshcfg.dot11MeshForwarding)
5572 goto out;
5573
5574 + if (sdata->crypto_tx_tailroom_needed_cnt)
5575 + tailroom = IEEE80211_ENCRYPT_TAILROOM;
5576 +
5577 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
5578 - sdata->encrypt_headroom, 0, GFP_ATOMIC);
5579 + sdata->encrypt_headroom,
5580 + tailroom, GFP_ATOMIC);
5581 if (!fwd_skb)
5582 goto out;
5583
5584 diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
5585 index b7a4816add765..cc91b4d6aa22f 100644
5586 --- a/net/netfilter/nf_flow_table_core.c
5587 +++ b/net/netfilter/nf_flow_table_core.c
5588 @@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
5589 {
5590 struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
5591 struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
5592 + struct dst_entry *other_dst = route->tuple[!dir].dst;
5593 struct dst_entry *dst = route->tuple[dir].dst;
5594
5595 ft->dir = dir;
5596 @@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
5597 ft->src_port = ctt->src.u.tcp.port;
5598 ft->dst_port = ctt->dst.u.tcp.port;
5599
5600 - ft->iifidx = route->tuple[dir].ifindex;
5601 - ft->oifidx = route->tuple[!dir].ifindex;
5602 + ft->iifidx = other_dst->dev->ifindex;
5603 + ft->oifidx = dst->dev->ifindex;
5604 ft->dst_cache = dst;
5605 }
5606
5607 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5608 index 6e548d7c9f67b..5114a0d2a41eb 100644
5609 --- a/net/netfilter/nf_tables_api.c
5610 +++ b/net/netfilter/nf_tables_api.c
5611 @@ -307,6 +307,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
5612 int err;
5613
5614 list_for_each_entry(rule, &ctx->chain->rules, list) {
5615 + if (!nft_is_active_next(ctx->net, rule))
5616 + continue;
5617 +
5618 err = nft_delrule(ctx, rule);
5619 if (err < 0)
5620 return err;
5621 @@ -4474,6 +4477,8 @@ err6:
5622 err5:
5623 kfree(trans);
5624 err4:
5625 + if (obj)
5626 + obj->use--;
5627 kfree(elem.priv);
5628 err3:
5629 if (nla[NFTA_SET_ELEM_DATA] != NULL)
5630 diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
5631 index 6f41dd74729d9..1f1d90c1716b5 100644
5632 --- a/net/netfilter/nfnetlink_osf.c
5633 +++ b/net/netfilter/nfnetlink_osf.c
5634 @@ -66,6 +66,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
5635 int ttl_check,
5636 struct nf_osf_hdr_ctx *ctx)
5637 {
5638 + const __u8 *optpinit = ctx->optp;
5639 unsigned int check_WSS = 0;
5640 int fmatch = FMATCH_WRONG;
5641 int foptsize, optnum;
5642 @@ -155,6 +156,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
5643 }
5644 }
5645
5646 + if (fmatch != FMATCH_OK)
5647 + ctx->optp = optpinit;
5648 +
5649 return fmatch == FMATCH_OK;
5650 }
5651
5652 diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
5653 index 7334e0b80a5ef..c90a4640723f5 100644
5654 --- a/net/netfilter/nft_compat.c
5655 +++ b/net/netfilter/nft_compat.c
5656 @@ -282,6 +282,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
5657 {
5658 struct xt_target *target = expr->ops->data;
5659 void *info = nft_expr_priv(expr);
5660 + struct module *me = target->me;
5661 struct xt_tgdtor_param par;
5662
5663 par.net = ctx->net;
5664 @@ -292,7 +293,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
5665 par.target->destroy(&par);
5666
5667 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
5668 - module_put(target->me);
5669 + module_put(me);
5670 }
5671
5672 static int nft_extension_dump_info(struct sk_buff *skb, int attr,
5673 diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
5674 index 974525eb92df7..6e6b9adf7d387 100644
5675 --- a/net/netfilter/nft_flow_offload.c
5676 +++ b/net/netfilter/nft_flow_offload.c
5677 @@ -12,6 +12,7 @@
5678 #include <net/netfilter/nf_conntrack_core.h>
5679 #include <linux/netfilter/nf_conntrack_common.h>
5680 #include <net/netfilter/nf_flow_table.h>
5681 +#include <net/netfilter/nf_conntrack_helper.h>
5682
5683 struct nft_flow_offload {
5684 struct nft_flowtable *flowtable;
5685 @@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
5686 memset(&fl, 0, sizeof(fl));
5687 switch (nft_pf(pkt)) {
5688 case NFPROTO_IPV4:
5689 - fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
5690 + fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
5691 + fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
5692 break;
5693 case NFPROTO_IPV6:
5694 - fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
5695 + fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
5696 + fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
5697 break;
5698 }
5699
5700 @@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
5701 return -ENOENT;
5702
5703 route->tuple[dir].dst = this_dst;
5704 - route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
5705 route->tuple[!dir].dst = other_dst;
5706 - route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
5707
5708 return 0;
5709 }
5710 @@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
5711 {
5712 struct nft_flow_offload *priv = nft_expr_priv(expr);
5713 struct nf_flowtable *flowtable = &priv->flowtable->data;
5714 + const struct nf_conn_help *help;
5715 enum ip_conntrack_info ctinfo;
5716 struct nf_flow_route route;
5717 struct flow_offload *flow;
5718 @@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
5719 goto out;
5720 }
5721
5722 - if (test_bit(IPS_HELPER_BIT, &ct->status))
5723 + help = nfct_help(ct);
5724 + if (help)
5725 goto out;
5726
5727 if (ctinfo == IP_CT_NEW ||
5728 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5729 index 3b1a78906bc0c..1cd1d83a4be08 100644
5730 --- a/net/packet/af_packet.c
5731 +++ b/net/packet/af_packet.c
5732 @@ -4292,7 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5733 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
5734 if (unlikely(rb->frames_per_block == 0))
5735 goto out;
5736 - if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
5737 + if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
5738 goto out;
5739 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
5740 req->tp_frame_nr))
5741 diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
5742 index 9ccc93f257db0..38bb882bb9587 100644
5743 --- a/net/sched/cls_tcindex.c
5744 +++ b/net/sched/cls_tcindex.c
5745 @@ -48,7 +48,7 @@ struct tcindex_data {
5746 u32 hash; /* hash table size; 0 if undefined */
5747 u32 alloc_hash; /* allocated size */
5748 u32 fall_through; /* 0: only classify if explicit match */
5749 - struct rcu_head rcu;
5750 + struct rcu_work rwork;
5751 };
5752
5753 static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
5754 @@ -221,17 +221,11 @@ found:
5755 return 0;
5756 }
5757
5758 -static int tcindex_destroy_element(struct tcf_proto *tp,
5759 - void *arg, struct tcf_walker *walker)
5760 -{
5761 - bool last;
5762 -
5763 - return tcindex_delete(tp, arg, &last, NULL);
5764 -}
5765 -
5766 -static void __tcindex_destroy(struct rcu_head *head)
5767 +static void tcindex_destroy_work(struct work_struct *work)
5768 {
5769 - struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
5770 + struct tcindex_data *p = container_of(to_rcu_work(work),
5771 + struct tcindex_data,
5772 + rwork);
5773
5774 kfree(p->perfect);
5775 kfree(p->h);
5776 @@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
5777 return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
5778 }
5779
5780 -static void __tcindex_partial_destroy(struct rcu_head *head)
5781 +static void tcindex_partial_destroy_work(struct work_struct *work)
5782 {
5783 - struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
5784 + struct tcindex_data *p = container_of(to_rcu_work(work),
5785 + struct tcindex_data,
5786 + rwork);
5787
5788 kfree(p->perfect);
5789 kfree(p);
5790 @@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
5791 kfree(cp->perfect);
5792 }
5793
5794 -static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
5795 +static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
5796 {
5797 int i, err = 0;
5798
5799 @@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
5800 TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
5801 if (err < 0)
5802 goto errout;
5803 +#ifdef CONFIG_NET_CLS_ACT
5804 + cp->perfect[i].exts.net = net;
5805 +#endif
5806 }
5807
5808 return 0;
5809 @@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5810 struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
5811 {
5812 struct tcindex_filter_result new_filter_result, *old_r = r;
5813 - struct tcindex_filter_result cr;
5814 struct tcindex_data *cp = NULL, *oldp;
5815 struct tcindex_filter *f = NULL; /* make gcc behave */
5816 + struct tcf_result cr = {};
5817 int err, balloc = 0;
5818 struct tcf_exts e;
5819
5820 @@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5821 if (p->perfect) {
5822 int i;
5823
5824 - if (tcindex_alloc_perfect_hash(cp) < 0)
5825 + if (tcindex_alloc_perfect_hash(net, cp) < 0)
5826 goto errout;
5827 for (i = 0; i < cp->hash; i++)
5828 cp->perfect[i].res = p->perfect[i].res;
5829 @@ -346,13 +345,10 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5830 cp->h = p->h;
5831
5832 err = tcindex_filter_result_init(&new_filter_result);
5833 - if (err < 0)
5834 - goto errout1;
5835 - err = tcindex_filter_result_init(&cr);
5836 if (err < 0)
5837 goto errout1;
5838 if (old_r)
5839 - cr.res = r->res;
5840 + cr = r->res;
5841
5842 if (tb[TCA_TCINDEX_HASH])
5843 cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
5844 @@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5845 err = -ENOMEM;
5846 if (!cp->perfect && !cp->h) {
5847 if (valid_perfect_hash(cp)) {
5848 - if (tcindex_alloc_perfect_hash(cp) < 0)
5849 + if (tcindex_alloc_perfect_hash(net, cp) < 0)
5850 goto errout_alloc;
5851 balloc = 1;
5852 } else {
5853 @@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5854 }
5855
5856 if (tb[TCA_TCINDEX_CLASSID]) {
5857 - cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
5858 - tcf_bind_filter(tp, &cr.res, base);
5859 + cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
5860 + tcf_bind_filter(tp, &cr, base);
5861 }
5862
5863 if (old_r && old_r != r) {
5864 @@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5865 }
5866
5867 oldp = p;
5868 - r->res = cr.res;
5869 + r->res = cr;
5870 tcf_exts_change(&r->exts, &e);
5871
5872 rcu_assign_pointer(tp->root, cp);
5873 @@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5874 ; /* nothing */
5875
5876 rcu_assign_pointer(*fp, f);
5877 + } else {
5878 + tcf_exts_destroy(&new_filter_result.exts);
5879 }
5880
5881 if (oldp)
5882 - call_rcu(&oldp->rcu, __tcindex_partial_destroy);
5883 + tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
5884 return 0;
5885
5886 errout_alloc:
5887 @@ -487,7 +485,6 @@ errout_alloc:
5888 else if (balloc == 2)
5889 kfree(cp->h);
5890 errout1:
5891 - tcf_exts_destroy(&cr.exts);
5892 tcf_exts_destroy(&new_filter_result.exts);
5893 errout:
5894 kfree(cp);
5895 @@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp,
5896 struct netlink_ext_ack *extack)
5897 {
5898 struct tcindex_data *p = rtnl_dereference(tp->root);
5899 - struct tcf_walker walker;
5900 + int i;
5901
5902 pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
5903 - walker.count = 0;
5904 - walker.skip = 0;
5905 - walker.fn = tcindex_destroy_element;
5906 - tcindex_walk(tp, &walker);
5907
5908 - call_rcu(&p->rcu, __tcindex_destroy);
5909 + if (p->perfect) {
5910 + for (i = 0; i < p->hash; i++) {
5911 + struct tcindex_filter_result *r = p->perfect + i;
5912 +
5913 + tcf_unbind_filter(tp, &r->res);
5914 + if (tcf_exts_get_net(&r->exts))
5915 + tcf_queue_work(&r->rwork,
5916 + tcindex_destroy_rexts_work);
5917 + else
5918 + __tcindex_destroy_rexts(r);
5919 + }
5920 + }
5921 +
5922 + for (i = 0; p->h && i < p->hash; i++) {
5923 + struct tcindex_filter *f, *next;
5924 + bool last;
5925 +
5926 + for (f = rtnl_dereference(p->h[i]); f; f = next) {
5927 + next = rtnl_dereference(f->next);
5928 + tcindex_delete(tp, &f->result, &last, NULL);
5929 + }
5930 + }
5931 +
5932 + tcf_queue_work(&p->rwork, tcindex_destroy_work);
5933 }
5934
5935
5936 diff --git a/net/sctp/diag.c b/net/sctp/diag.c
5937 index 078f01a8d582a..435847d98b51c 100644
5938 --- a/net/sctp/diag.c
5939 +++ b/net/sctp/diag.c
5940 @@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
5941 + nla_total_size(1) /* INET_DIAG_TOS */
5942 + nla_total_size(1) /* INET_DIAG_TCLASS */
5943 + nla_total_size(4) /* INET_DIAG_MARK */
5944 + + nla_total_size(4) /* INET_DIAG_CLASS_ID */
5945 + nla_total_size(addrlen * asoc->peer.transport_count)
5946 + nla_total_size(addrlen * addrcnt)
5947 + nla_total_size(sizeof(struct inet_diag_meminfo))
5948 diff --git a/net/sctp/offload.c b/net/sctp/offload.c
5949 index 123e9f2dc2265..edfcf16e704c4 100644
5950 --- a/net/sctp/offload.c
5951 +++ b/net/sctp/offload.c
5952 @@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
5953 {
5954 skb->ip_summed = CHECKSUM_NONE;
5955 skb->csum_not_inet = 0;
5956 + gso_reset_checksum(skb, ~0);
5957 return sctp_compute_cksum(skb, skb_transport_offset(skb));
5958 }
5959
5960 diff --git a/net/sctp/stream.c b/net/sctp/stream.c
5961 index f24633114dfdf..2936ed17bf9ef 100644
5962 --- a/net/sctp/stream.c
5963 +++ b/net/sctp/stream.c
5964 @@ -144,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
5965 }
5966 }
5967
5968 - for (i = outcnt; i < stream->outcnt; i++)
5969 + for (i = outcnt; i < stream->outcnt; i++) {
5970 kfree(SCTP_SO(stream, i)->ext);
5971 + SCTP_SO(stream, i)->ext = NULL;
5972 + }
5973 }
5974
5975 static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
5976 diff --git a/net/socket.c b/net/socket.c
5977 index 334fcc617ef27..93a45f15ee40d 100644
5978 --- a/net/socket.c
5979 +++ b/net/socket.c
5980 @@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
5981 EXPORT_SYMBOL(dlci_ioctl_set);
5982
5983 static long sock_do_ioctl(struct net *net, struct socket *sock,
5984 - unsigned int cmd, unsigned long arg,
5985 - unsigned int ifreq_size)
5986 + unsigned int cmd, unsigned long arg)
5987 {
5988 int err;
5989 void __user *argp = (void __user *)arg;
5990 @@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
5991 } else {
5992 struct ifreq ifr;
5993 bool need_copyout;
5994 - if (copy_from_user(&ifr, argp, ifreq_size))
5995 + if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
5996 return -EFAULT;
5997 err = dev_ioctl(net, cmd, &ifr, &need_copyout);
5998 if (!err && need_copyout)
5999 - if (copy_to_user(argp, &ifr, ifreq_size))
6000 + if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
6001 return -EFAULT;
6002 }
6003 return err;
6004 @@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
6005 err = open_related_ns(&net->ns, get_net_ns);
6006 break;
6007 default:
6008 - err = sock_do_ioctl(net, sock, cmd, arg,
6009 - sizeof(struct ifreq));
6010 + err = sock_do_ioctl(net, sock, cmd, arg);
6011 break;
6012 }
6013 return err;
6014 @@ -2750,8 +2748,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
6015 int err;
6016
6017 set_fs(KERNEL_DS);
6018 - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
6019 - sizeof(struct compat_ifreq));
6020 + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
6021 set_fs(old_fs);
6022 if (!err)
6023 err = compat_put_timeval(&ktv, up);
6024 @@ -2767,8 +2764,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
6025 int err;
6026
6027 set_fs(KERNEL_DS);
6028 - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
6029 - sizeof(struct compat_ifreq));
6030 + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
6031 set_fs(old_fs);
6032 if (!err)
6033 err = compat_put_timespec(&kts, up);
6034 @@ -2964,6 +2960,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
6035 return dev_ioctl(net, cmd, &ifreq, NULL);
6036 }
6037
6038 +static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
6039 + unsigned int cmd,
6040 + struct compat_ifreq __user *uifr32)
6041 +{
6042 + struct ifreq __user *uifr;
6043 + int err;
6044 +
6045 + /* Handle the fact that while struct ifreq has the same *layout* on
6046 + * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
6047 + * which are handled elsewhere, it still has different *size* due to
6048 + * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
6049 + * resulting in struct ifreq being 32 and 40 bytes respectively).
6050 + * As a result, if the struct happens to be at the end of a page and
6051 + * the next page isn't readable/writable, we get a fault. To prevent
6052 + * that, copy back and forth to the full size.
6053 + */
6054 +
6055 + uifr = compat_alloc_user_space(sizeof(*uifr));
6056 + if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
6057 + return -EFAULT;
6058 +
6059 + err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
6060 +
6061 + if (!err) {
6062 + switch (cmd) {
6063 + case SIOCGIFFLAGS:
6064 + case SIOCGIFMETRIC:
6065 + case SIOCGIFMTU:
6066 + case SIOCGIFMEM:
6067 + case SIOCGIFHWADDR:
6068 + case SIOCGIFINDEX:
6069 + case SIOCGIFADDR:
6070 + case SIOCGIFBRDADDR:
6071 + case SIOCGIFDSTADDR:
6072 + case SIOCGIFNETMASK:
6073 + case SIOCGIFPFLAGS:
6074 + case SIOCGIFTXQLEN:
6075 + case SIOCGMIIPHY:
6076 + case SIOCGMIIREG:
6077 + case SIOCGIFNAME:
6078 + if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
6079 + err = -EFAULT;
6080 + break;
6081 + }
6082 + }
6083 + return err;
6084 +}
6085 +
6086 static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
6087 struct compat_ifreq __user *uifr32)
6088 {
6089 @@ -3079,8 +3123,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
6090 }
6091
6092 set_fs(KERNEL_DS);
6093 - ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
6094 - sizeof(struct compat_ifreq));
6095 + ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
6096 set_fs(old_fs);
6097
6098 out:
6099 @@ -3180,21 +3223,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
6100 case SIOCSIFTXQLEN:
6101 case SIOCBRADDIF:
6102 case SIOCBRDELIF:
6103 + case SIOCGIFNAME:
6104 case SIOCSIFNAME:
6105 case SIOCGMIIPHY:
6106 case SIOCGMIIREG:
6107 case SIOCSMIIREG:
6108 - case SIOCSARP:
6109 - case SIOCGARP:
6110 - case SIOCDARP:
6111 - case SIOCATMARK:
6112 case SIOCBONDENSLAVE:
6113 case SIOCBONDRELEASE:
6114 case SIOCBONDSETHWADDR:
6115 case SIOCBONDCHANGEACTIVE:
6116 - case SIOCGIFNAME:
6117 - return sock_do_ioctl(net, sock, cmd, arg,
6118 - sizeof(struct compat_ifreq));
6119 + return compat_ifreq_ioctl(net, sock, cmd, argp);
6120 +
6121 + case SIOCSARP:
6122 + case SIOCGARP:
6123 + case SIOCDARP:
6124 + case SIOCATMARK:
6125 + return sock_do_ioctl(net, sock, cmd, arg);
6126 }
6127
6128 return -ENOIOCTLCMD;
6129 diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
6130 index ba765473d1f06..efeee5586b2ac 100644
6131 --- a/net/sunrpc/auth_gss/auth_gss.c
6132 +++ b/net/sunrpc/auth_gss/auth_gss.c
6133 @@ -1563,8 +1563,10 @@ gss_marshal(struct rpc_task *task, __be32 *p)
6134 cred_len = p++;
6135
6136 spin_lock(&ctx->gc_seq_lock);
6137 - req->rq_seqno = ctx->gc_seq++;
6138 + req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
6139 spin_unlock(&ctx->gc_seq_lock);
6140 + if (req->rq_seqno == MAXSEQ)
6141 + goto out_expired;
6142
6143 *p++ = htonl((u32) RPC_GSS_VERSION);
6144 *p++ = htonl((u32) ctx->gc_proc);
6145 @@ -1586,14 +1588,18 @@ gss_marshal(struct rpc_task *task, __be32 *p)
6146 mic.data = (u8 *)(p + 1);
6147 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
6148 if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
6149 - clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
6150 + goto out_expired;
6151 } else if (maj_stat != 0) {
6152 - printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
6153 + pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
6154 + task->tk_status = -EIO;
6155 goto out_put_ctx;
6156 }
6157 p = xdr_encode_opaque(p, NULL, mic.len);
6158 gss_put_ctx(ctx);
6159 return p;
6160 +out_expired:
6161 + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
6162 + task->tk_status = -EKEYEXPIRED;
6163 out_put_ctx:
6164 gss_put_ctx(ctx);
6165 return NULL;
6166 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
6167 index 24cbddc44c884..1ee04e0ec4bca 100644
6168 --- a/net/sunrpc/clnt.c
6169 +++ b/net/sunrpc/clnt.c
6170 @@ -1738,14 +1738,10 @@ rpc_xdr_encode(struct rpc_task *task)
6171 xdr_buf_init(&req->rq_rcv_buf,
6172 req->rq_rbuffer,
6173 req->rq_rcvsize);
6174 - req->rq_bytes_sent = 0;
6175
6176 p = rpc_encode_header(task);
6177 - if (p == NULL) {
6178 - printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
6179 - rpc_exit(task, -EIO);
6180 + if (p == NULL)
6181 return;
6182 - }
6183
6184 encode = task->tk_msg.rpc_proc->p_encode;
6185 if (encode == NULL)
6186 @@ -1770,10 +1766,17 @@ call_encode(struct rpc_task *task)
6187 /* Did the encode result in an error condition? */
6188 if (task->tk_status != 0) {
6189 /* Was the error nonfatal? */
6190 - if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM)
6191 + switch (task->tk_status) {
6192 + case -EAGAIN:
6193 + case -ENOMEM:
6194 rpc_delay(task, HZ >> 4);
6195 - else
6196 + break;
6197 + case -EKEYEXPIRED:
6198 + task->tk_action = call_refresh;
6199 + break;
6200 + default:
6201 rpc_exit(task, task->tk_status);
6202 + }
6203 return;
6204 }
6205
6206 @@ -2335,7 +2338,8 @@ rpc_encode_header(struct rpc_task *task)
6207 *p++ = htonl(clnt->cl_vers); /* program version */
6208 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
6209 p = rpcauth_marshcred(task, p);
6210 - req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
6211 + if (p)
6212 + req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
6213 return p;
6214 }
6215
6216 diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
6217 index 943f08be7c387..f1ec2110efebe 100644
6218 --- a/net/sunrpc/xprt.c
6219 +++ b/net/sunrpc/xprt.c
6220 @@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
6221 struct rpc_xprt *xprt = req->rq_xprt;
6222
6223 if (xprt_request_need_enqueue_transmit(task, req)) {
6224 + req->rq_bytes_sent = 0;
6225 spin_lock(&xprt->queue_lock);
6226 /*
6227 * Requests that carry congestion control credits are added
6228 diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
6229 index 919fddec01973..dffedf1df02ce 100644
6230 --- a/net/sunrpc/xprtrdma/verbs.c
6231 +++ b/net/sunrpc/xprtrdma/verbs.c
6232 @@ -912,17 +912,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
6233 for (i = 0; i <= buf->rb_sc_last; i++) {
6234 sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
6235 if (!sc)
6236 - goto out_destroy;
6237 + return -ENOMEM;
6238
6239 sc->sc_xprt = r_xprt;
6240 buf->rb_sc_ctxs[i] = sc;
6241 }
6242
6243 return 0;
6244 -
6245 -out_destroy:
6246 - rpcrdma_sendctxs_destroy(buf);
6247 - return -ENOMEM;
6248 }
6249
6250 /* The sendctx queue is not guaranteed to have a size that is a
6251 diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
6252 index a264cf2accd0f..d4de871e7d4d7 100644
6253 --- a/net/xdp/xdp_umem.c
6254 +++ b/net/xdp/xdp_umem.c
6255 @@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
6256 * not know if the device has more tx queues than rx, or the opposite.
6257 * This might also change during run time.
6258 */
6259 -static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
6260 - u16 queue_id)
6261 +static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
6262 + u16 queue_id)
6263 {
6264 + if (queue_id >= max_t(unsigned int,
6265 + dev->real_num_rx_queues,
6266 + dev->real_num_tx_queues))
6267 + return -EINVAL;
6268 +
6269 if (queue_id < dev->real_num_rx_queues)
6270 dev->_rx[queue_id].umem = umem;
6271 if (queue_id < dev->real_num_tx_queues)
6272 dev->_tx[queue_id].umem = umem;
6273 +
6274 + return 0;
6275 }
6276
6277 struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
6278 @@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
6279 goto out_rtnl_unlock;
6280 }
6281
6282 - xdp_reg_umem_at_qid(dev, umem, queue_id);
6283 + err = xdp_reg_umem_at_qid(dev, umem, queue_id);
6284 + if (err)
6285 + goto out_rtnl_unlock;
6286 +
6287 umem->dev = dev;
6288 umem->queue_id = queue_id;
6289 if (force_copy)
6290 diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
6291 index be0a961450bc2..f5ce993c78e42 100644
6292 --- a/samples/bpf/Makefile
6293 +++ b/samples/bpf/Makefile
6294 @@ -273,6 +273,7 @@ $(obj)/%.o: $(src)/%.c
6295 -Wno-gnu-variable-sized-type-not-at-end \
6296 -Wno-address-of-packed-member -Wno-tautological-compare \
6297 -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
6298 + -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
6299 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
6300 ifeq ($(DWARF2BTF),y)
6301 $(BTF_PAHOLE) -J $@
6302 diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
6303 new file mode 100644
6304 index 0000000000000..5cd7c1d1a5d56
6305 --- /dev/null
6306 +++ b/samples/bpf/asm_goto_workaround.h
6307 @@ -0,0 +1,16 @@
6308 +/* SPDX-License-Identifier: GPL-2.0 */
6309 +/* Copyright (c) 2019 Facebook */
6310 +#ifndef __ASM_GOTO_WORKAROUND_H
6311 +#define __ASM_GOTO_WORKAROUND_H
6312 +
6313 +/* this will bring in asm_volatile_goto macro definition
6314 + * if enabled by compiler and config options.
6315 + */
6316 +#include <linux/types.h>
6317 +
6318 +#ifdef asm_volatile_goto
6319 +#undef asm_volatile_goto
6320 +#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
6321 +#endif
6322 +
6323 +#endif
6324 diff --git a/security/keys/key.c b/security/keys/key.c
6325 index d97c9394b5dd4..249a6da4d2770 100644
6326 --- a/security/keys/key.c
6327 +++ b/security/keys/key.c
6328 @@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
6329
6330 spin_lock(&user->lock);
6331 if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
6332 - if (user->qnkeys + 1 >= maxkeys ||
6333 - user->qnbytes + quotalen >= maxbytes ||
6334 + if (user->qnkeys + 1 > maxkeys ||
6335 + user->qnbytes + quotalen > maxbytes ||
6336 user->qnbytes + quotalen < user->qnbytes)
6337 goto no_quota;
6338 }
6339 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
6340 index 41bcf57e96f21..99a55145ddcd2 100644
6341 --- a/security/keys/keyring.c
6342 +++ b/security/keys/keyring.c
6343 @@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring,
6344 BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
6345 (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
6346
6347 - if (ctx->index_key.description)
6348 - ctx->index_key.desc_len = strlen(ctx->index_key.description);
6349 -
6350 /* Check to see if this top-level keyring is what we are looking for
6351 * and whether it is valid or not.
6352 */
6353 @@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring,
6354 struct keyring_search_context ctx = {
6355 .index_key.type = type,
6356 .index_key.description = description,
6357 + .index_key.desc_len = strlen(description),
6358 .cred = current_cred(),
6359 .match_data.cmp = key_default_cmp,
6360 .match_data.raw_data = description,
6361 diff --git a/security/keys/proc.c b/security/keys/proc.c
6362 index 5af2934965d80..d38be9db2cc07 100644
6363 --- a/security/keys/proc.c
6364 +++ b/security/keys/proc.c
6365 @@ -166,8 +166,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
6366 int rc;
6367
6368 struct keyring_search_context ctx = {
6369 - .index_key.type = key->type,
6370 - .index_key.description = key->description,
6371 + .index_key = key->index_key,
6372 .cred = m->file->f_cred,
6373 .match_data.cmp = lookup_user_key_possessed,
6374 .match_data.raw_data = key,
6375 diff --git a/security/keys/request_key.c b/security/keys/request_key.c
6376 index 114f7408feee6..7385536986497 100644
6377 --- a/security/keys/request_key.c
6378 +++ b/security/keys/request_key.c
6379 @@ -545,6 +545,7 @@ struct key *request_key_and_link(struct key_type *type,
6380 struct keyring_search_context ctx = {
6381 .index_key.type = type,
6382 .index_key.description = description,
6383 + .index_key.desc_len = strlen(description),
6384 .cred = current_cred(),
6385 .match_data.cmp = key_default_cmp,
6386 .match_data.raw_data = description,
6387 diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
6388 index 424e1d90412ea..6797843154f03 100644
6389 --- a/security/keys/request_key_auth.c
6390 +++ b/security/keys/request_key_auth.c
6391 @@ -246,7 +246,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
6392 struct key *authkey;
6393 key_ref_t authkey_ref;
6394
6395 - sprintf(description, "%x", target_id);
6396 + ctx.index_key.desc_len = sprintf(description, "%x", target_id);
6397
6398 authkey_ref = search_process_keyrings(&ctx);
6399
6400 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6401 index c1042df5e346e..1bddfa7dc2169 100644
6402 --- a/sound/pci/hda/patch_realtek.c
6403 +++ b/sound/pci/hda/patch_realtek.c
6404 @@ -1855,6 +1855,8 @@ enum {
6405 ALC887_FIXUP_BASS_CHMAP,
6406 ALC1220_FIXUP_GB_DUAL_CODECS,
6407 ALC1220_FIXUP_CLEVO_P950,
6408 + ALC1220_FIXUP_SYSTEM76_ORYP5,
6409 + ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
6410 };
6411
6412 static void alc889_fixup_coef(struct hda_codec *codec,
6413 @@ -2056,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
6414 snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
6415 }
6416
6417 +static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
6418 + const struct hda_fixup *fix, int action);
6419 +
6420 +static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
6421 + const struct hda_fixup *fix,
6422 + int action)
6423 +{
6424 + alc1220_fixup_clevo_p950(codec, fix, action);
6425 + alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
6426 +}
6427 +
6428 static const struct hda_fixup alc882_fixups[] = {
6429 [ALC882_FIXUP_ABIT_AW9D_MAX] = {
6430 .type = HDA_FIXUP_PINS,
6431 @@ -2300,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = {
6432 .type = HDA_FIXUP_FUNC,
6433 .v.func = alc1220_fixup_clevo_p950,
6434 },
6435 + [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
6436 + .type = HDA_FIXUP_FUNC,
6437 + .v.func = alc1220_fixup_system76_oryp5,
6438 + },
6439 + [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
6440 + .type = HDA_FIXUP_PINS,
6441 + .v.pins = (const struct hda_pintbl[]) {
6442 + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
6443 + {}
6444 + },
6445 + .chained = true,
6446 + .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
6447 + },
6448 };
6449
6450 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
6451 @@ -2376,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
6452 SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
6453 SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
6454 SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
6455 + SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
6456 + SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
6457 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
6458 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
6459 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
6460 @@ -5573,6 +5601,7 @@ enum {
6461 ALC294_FIXUP_ASUS_HEADSET_MIC,
6462 ALC294_FIXUP_ASUS_SPK,
6463 ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
6464 + ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
6465 };
6466
6467 static const struct hda_fixup alc269_fixups[] = {
6468 @@ -6506,6 +6535,17 @@ static const struct hda_fixup alc269_fixups[] = {
6469 .chained = true,
6470 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6471 },
6472 + [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
6473 + .type = HDA_FIXUP_VERBS,
6474 + .v.verbs = (const struct hda_verb[]) {
6475 + /* Disable PCBEEP-IN passthrough */
6476 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
6477 + { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
6478 + { }
6479 + },
6480 + .chained = true,
6481 + .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
6482 + },
6483 };
6484
6485 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6486 @@ -7187,7 +7227,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6487 {0x12, 0x90a60130},
6488 {0x19, 0x03a11020},
6489 {0x21, 0x0321101f}),
6490 - SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
6491 + SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
6492 {0x12, 0x90a60130},
6493 {0x14, 0x90170110},
6494 {0x19, 0x04a11040},
6495 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
6496 index b29d0f65611eb..2d49492d60692 100644
6497 --- a/sound/soc/soc-core.c
6498 +++ b/sound/soc/soc-core.c
6499 @@ -1034,17 +1034,18 @@ static int snd_soc_init_platform(struct snd_soc_card *card,
6500 * this function should be removed in the future
6501 */
6502 /* convert Legacy platform link */
6503 - if (!platform) {
6504 + if (!platform || dai_link->legacy_platform) {
6505 platform = devm_kzalloc(card->dev,
6506 sizeof(struct snd_soc_dai_link_component),
6507 GFP_KERNEL);
6508 if (!platform)
6509 return -ENOMEM;
6510
6511 - dai_link->platform = platform;
6512 - platform->name = dai_link->platform_name;
6513 - platform->of_node = dai_link->platform_of_node;
6514 - platform->dai_name = NULL;
6515 + dai_link->platform = platform;
6516 + dai_link->legacy_platform = 1;
6517 + platform->name = dai_link->platform_name;
6518 + platform->of_node = dai_link->platform_of_node;
6519 + platform->dai_name = NULL;
6520 }
6521
6522 /* if there's no platform we match on the empty platform */
6523 diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h
6524 new file mode 100644
6525 index 0000000000000..0d18b1d1fbbc8
6526 --- /dev/null
6527 +++ b/tools/include/uapi/linux/pkt_sched.h
6528 @@ -0,0 +1,1163 @@
6529 +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
6530 +#ifndef __LINUX_PKT_SCHED_H
6531 +#define __LINUX_PKT_SCHED_H
6532 +
6533 +#include <linux/types.h>
6534 +
6535 +/* Logical priority bands not depending on specific packet scheduler.
6536 + Every scheduler will map them to real traffic classes, if it has
6537 + no more precise mechanism to classify packets.
6538 +
6539 + These numbers have no special meaning, though their coincidence
6540 + with obsolete IPv6 values is not occasional :-). New IPv6 drafts
6541 + preferred full anarchy inspired by diffserv group.
6542 +
6543 + Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
6544 + class, actually, as rule it will be handled with more care than
6545 + filler or even bulk.
6546 + */
6547 +
6548 +#define TC_PRIO_BESTEFFORT 0
6549 +#define TC_PRIO_FILLER 1
6550 +#define TC_PRIO_BULK 2
6551 +#define TC_PRIO_INTERACTIVE_BULK 4
6552 +#define TC_PRIO_INTERACTIVE 6
6553 +#define TC_PRIO_CONTROL 7
6554 +
6555 +#define TC_PRIO_MAX 15
6556 +
6557 +/* Generic queue statistics, available for all the elements.
6558 + Particular schedulers may have also their private records.
6559 + */
6560 +
6561 +struct tc_stats {
6562 + __u64 bytes; /* Number of enqueued bytes */
6563 + __u32 packets; /* Number of enqueued packets */
6564 + __u32 drops; /* Packets dropped because of lack of resources */
6565 + __u32 overlimits; /* Number of throttle events when this
6566 + * flow goes out of allocated bandwidth */
6567 + __u32 bps; /* Current flow byte rate */
6568 + __u32 pps; /* Current flow packet rate */
6569 + __u32 qlen;
6570 + __u32 backlog;
6571 +};
6572 +
6573 +struct tc_estimator {
6574 + signed char interval;
6575 + unsigned char ewma_log;
6576 +};
6577 +
6578 +/* "Handles"
6579 + ---------
6580 +
6581 + All the traffic control objects have 32bit identifiers, or "handles".
6582 +
6583 + They can be considered as opaque numbers from user API viewpoint,
6584 + but actually they always consist of two fields: major and
6585 + minor numbers, which are interpreted by kernel specially,
6586 + that may be used by applications, though not recommended.
6587 +
6588 + F.e. qdisc handles always have minor number equal to zero,
6589 + classes (or flows) have major equal to parent qdisc major, and
6590 + minor uniquely identifying class inside qdisc.
6591 +
6592 + Macros to manipulate handles:
6593 + */
6594 +
6595 +#define TC_H_MAJ_MASK (0xFFFF0000U)
6596 +#define TC_H_MIN_MASK (0x0000FFFFU)
6597 +#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
6598 +#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
6599 +#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
6600 +
6601 +#define TC_H_UNSPEC (0U)
6602 +#define TC_H_ROOT (0xFFFFFFFFU)
6603 +#define TC_H_INGRESS (0xFFFFFFF1U)
6604 +#define TC_H_CLSACT TC_H_INGRESS
6605 +
6606 +#define TC_H_MIN_PRIORITY 0xFFE0U
6607 +#define TC_H_MIN_INGRESS 0xFFF2U
6608 +#define TC_H_MIN_EGRESS 0xFFF3U
6609 +
6610 +/* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */
6611 +enum tc_link_layer {
6612 + TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
6613 + TC_LINKLAYER_ETHERNET,
6614 + TC_LINKLAYER_ATM,
6615 +};
6616 +#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
6617 +
6618 +struct tc_ratespec {
6619 + unsigned char cell_log;
6620 + __u8 linklayer; /* lower 4 bits */
6621 + unsigned short overhead;
6622 + short cell_align;
6623 + unsigned short mpu;
6624 + __u32 rate;
6625 +};
6626 +
6627 +#define TC_RTAB_SIZE 1024
6628 +
6629 +struct tc_sizespec {
6630 + unsigned char cell_log;
6631 + unsigned char size_log;
6632 + short cell_align;
6633 + int overhead;
6634 + unsigned int linklayer;
6635 + unsigned int mpu;
6636 + unsigned int mtu;
6637 + unsigned int tsize;
6638 +};
6639 +
6640 +enum {
6641 + TCA_STAB_UNSPEC,
6642 + TCA_STAB_BASE,
6643 + TCA_STAB_DATA,
6644 + __TCA_STAB_MAX
6645 +};
6646 +
6647 +#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
6648 +
6649 +/* FIFO section */
6650 +
6651 +struct tc_fifo_qopt {
6652 + __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
6653 +};
6654 +
6655 +/* SKBPRIO section */
6656 +
6657 +/*
6658 + * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
6659 + * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able
6660 + * to map one to one the DS field of IPV4 and IPV6 headers.
6661 + * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
6662 + */
6663 +
6664 +#define SKBPRIO_MAX_PRIORITY 64
6665 +
6666 +struct tc_skbprio_qopt {
6667 + __u32 limit; /* Queue length in packets. */
6668 +};
6669 +
6670 +/* PRIO section */
6671 +
6672 +#define TCQ_PRIO_BANDS 16
6673 +#define TCQ_MIN_PRIO_BANDS 2
6674 +
6675 +struct tc_prio_qopt {
6676 + int bands; /* Number of bands */
6677 + __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
6678 +};
6679 +
6680 +/* MULTIQ section */
6681 +
6682 +struct tc_multiq_qopt {
6683 + __u16 bands; /* Number of bands */
6684 + __u16 max_bands; /* Maximum number of queues */
6685 +};
6686 +
6687 +/* PLUG section */
6688 +
6689 +#define TCQ_PLUG_BUFFER 0
6690 +#define TCQ_PLUG_RELEASE_ONE 1
6691 +#define TCQ_PLUG_RELEASE_INDEFINITE 2
6692 +#define TCQ_PLUG_LIMIT 3
6693 +
6694 +struct tc_plug_qopt {
6695 + /* TCQ_PLUG_BUFFER: Inset a plug into the queue and
6696 + * buffer any incoming packets
6697 + * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
6698 + * to beginning of the next plug.
6699 + * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
6700 + * Stop buffering packets until the next TCQ_PLUG_BUFFER
6701 + * command is received (just act as a pass-thru queue).
6702 + * TCQ_PLUG_LIMIT: Increase/decrease queue size
6703 + */
6704 + int action;
6705 + __u32 limit;
6706 +};
6707 +
6708 +/* TBF section */
6709 +
6710 +struct tc_tbf_qopt {
6711 + struct tc_ratespec rate;
6712 + struct tc_ratespec peakrate;
6713 + __u32 limit;
6714 + __u32 buffer;
6715 + __u32 mtu;
6716 +};
6717 +
6718 +enum {
6719 + TCA_TBF_UNSPEC,
6720 + TCA_TBF_PARMS,
6721 + TCA_TBF_RTAB,
6722 + TCA_TBF_PTAB,
6723 + TCA_TBF_RATE64,
6724 + TCA_TBF_PRATE64,
6725 + TCA_TBF_BURST,
6726 + TCA_TBF_PBURST,
6727 + TCA_TBF_PAD,
6728 + __TCA_TBF_MAX,
6729 +};
6730 +
6731 +#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
6732 +
6733 +
6734 +/* TEQL section */
6735 +
6736 +/* TEQL does not require any parameters */
6737 +
6738 +/* SFQ section */
6739 +
6740 +struct tc_sfq_qopt {
6741 + unsigned quantum; /* Bytes per round allocated to flow */
6742 + int perturb_period; /* Period of hash perturbation */
6743 + __u32 limit; /* Maximal packets in queue */
6744 + unsigned divisor; /* Hash divisor */
6745 + unsigned flows; /* Maximal number of flows */
6746 +};
6747 +
6748 +struct tc_sfqred_stats {
6749 + __u32 prob_drop; /* Early drops, below max threshold */
6750 + __u32 forced_drop; /* Early drops, after max threshold */
6751 + __u32 prob_mark; /* Marked packets, below max threshold */
6752 + __u32 forced_mark; /* Marked packets, after max threshold */
6753 + __u32 prob_mark_head; /* Marked packets, below max threshold */
6754 + __u32 forced_mark_head;/* Marked packets, after max threshold */
6755 +};
6756 +
6757 +struct tc_sfq_qopt_v1 {
6758 + struct tc_sfq_qopt v0;
6759 + unsigned int depth; /* max number of packets per flow */
6760 + unsigned int headdrop;
6761 +/* SFQRED parameters */
6762 + __u32 limit; /* HARD maximal flow queue length (bytes) */
6763 + __u32 qth_min; /* Min average length threshold (bytes) */
6764 + __u32 qth_max; /* Max average length threshold (bytes) */
6765 + unsigned char Wlog; /* log(W) */
6766 + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
6767 + unsigned char Scell_log; /* cell size for idle damping */
6768 + unsigned char flags;
6769 + __u32 max_P; /* probability, high resolution */
6770 +/* SFQRED stats */
6771 + struct tc_sfqred_stats stats;
6772 +};
6773 +
6774 +
6775 +struct tc_sfq_xstats {
6776 + __s32 allot;
6777 +};
6778 +
6779 +/* RED section */
6780 +
6781 +enum {
6782 + TCA_RED_UNSPEC,
6783 + TCA_RED_PARMS,
6784 + TCA_RED_STAB,
6785 + TCA_RED_MAX_P,
6786 + __TCA_RED_MAX,
6787 +};
6788 +
6789 +#define TCA_RED_MAX (__TCA_RED_MAX - 1)
6790 +
6791 +struct tc_red_qopt {
6792 + __u32 limit; /* HARD maximal queue length (bytes) */
6793 + __u32 qth_min; /* Min average length threshold (bytes) */
6794 + __u32 qth_max; /* Max average length threshold (bytes) */
6795 + unsigned char Wlog; /* log(W) */
6796 + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
6797 + unsigned char Scell_log; /* cell size for idle damping */
6798 + unsigned char flags;
6799 +#define TC_RED_ECN 1
6800 +#define TC_RED_HARDDROP 2
6801 +#define TC_RED_ADAPTATIVE 4
6802 +};
6803 +
6804 +struct tc_red_xstats {
6805 + __u32 early; /* Early drops */
6806 + __u32 pdrop; /* Drops due to queue limits */
6807 + __u32 other; /* Drops due to drop() calls */
6808 + __u32 marked; /* Marked packets */
6809 +};
6810 +
6811 +/* GRED section */
6812 +
6813 +#define MAX_DPs 16
6814 +
6815 +enum {
6816 + TCA_GRED_UNSPEC,
6817 + TCA_GRED_PARMS,
6818 + TCA_GRED_STAB,
6819 + TCA_GRED_DPS,
6820 + TCA_GRED_MAX_P,
6821 + TCA_GRED_LIMIT,
6822 + TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
6823 + __TCA_GRED_MAX,
6824 +};
6825 +
6826 +#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
6827 +
6828 +enum {
6829 + TCA_GRED_VQ_ENTRY_UNSPEC,
6830 + TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
6831 + __TCA_GRED_VQ_ENTRY_MAX,
6832 +};
6833 +#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
6834 +
6835 +enum {
6836 + TCA_GRED_VQ_UNSPEC,
6837 + TCA_GRED_VQ_PAD,
6838 + TCA_GRED_VQ_DP, /* u32 */
6839 + TCA_GRED_VQ_STAT_BYTES, /* u64 */
6840 + TCA_GRED_VQ_STAT_PACKETS, /* u32 */
6841 + TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
6842 + TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
6843 + TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
6844 + TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
6845 + TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
6846 + TCA_GRED_VQ_STAT_PDROP, /* u32 */
6847 + TCA_GRED_VQ_STAT_OTHER, /* u32 */
6848 + TCA_GRED_VQ_FLAGS, /* u32 */
6849 + __TCA_GRED_VQ_MAX
6850 +};
6851 +
6852 +#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
6853 +
6854 +struct tc_gred_qopt {
6855 + __u32 limit; /* HARD maximal queue length (bytes) */
6856 + __u32 qth_min; /* Min average length threshold (bytes) */
6857 + __u32 qth_max; /* Max average length threshold (bytes) */
6858 + __u32 DP; /* up to 2^32 DPs */
6859 + __u32 backlog;
6860 + __u32 qave;
6861 + __u32 forced;
6862 + __u32 early;
6863 + __u32 other;
6864 + __u32 pdrop;
6865 + __u8 Wlog; /* log(W) */
6866 + __u8 Plog; /* log(P_max/(qth_max-qth_min)) */
6867 + __u8 Scell_log; /* cell size for idle damping */
6868 + __u8 prio; /* prio of this VQ */
6869 + __u32 packets;
6870 + __u32 bytesin;
6871 +};
6872 +
6873 +/* gred setup */
6874 +struct tc_gred_sopt {
6875 + __u32 DPs;
6876 + __u32 def_DP;
6877 + __u8 grio;
6878 + __u8 flags;
6879 + __u16 pad1;
6880 +};
6881 +
6882 +/* CHOKe section */
6883 +
6884 +enum {
6885 + TCA_CHOKE_UNSPEC,
6886 + TCA_CHOKE_PARMS,
6887 + TCA_CHOKE_STAB,
6888 + TCA_CHOKE_MAX_P,
6889 + __TCA_CHOKE_MAX,
6890 +};
6891 +
6892 +#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
6893 +
6894 +struct tc_choke_qopt {
6895 + __u32 limit; /* Hard queue length (packets) */
6896 + __u32 qth_min; /* Min average threshold (packets) */
6897 + __u32 qth_max; /* Max average threshold (packets) */
6898 + unsigned char Wlog; /* log(W) */
6899 + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
6900 + unsigned char Scell_log; /* cell size for idle damping */
6901 + unsigned char flags; /* see RED flags */
6902 +};
6903 +
6904 +struct tc_choke_xstats {
6905 + __u32 early; /* Early drops */
6906 + __u32 pdrop; /* Drops due to queue limits */
6907 + __u32 other; /* Drops due to drop() calls */
6908 + __u32 marked; /* Marked packets */
6909 + __u32 matched; /* Drops due to flow match */
6910 +};
6911 +
6912 +/* HTB section */
6913 +#define TC_HTB_NUMPRIO 8
6914 +#define TC_HTB_MAXDEPTH 8
6915 +#define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */
6916 +
6917 +struct tc_htb_opt {
6918 + struct tc_ratespec rate;
6919 + struct tc_ratespec ceil;
6920 + __u32 buffer;
6921 + __u32 cbuffer;
6922 + __u32 quantum;
6923 + __u32 level; /* out only */
6924 + __u32 prio;
6925 +};
6926 +struct tc_htb_glob {
6927 + __u32 version; /* to match HTB/TC */
6928 + __u32 rate2quantum; /* bps->quantum divisor */
6929 + __u32 defcls; /* default class number */
6930 + __u32 debug; /* debug flags */
6931 +
6932 + /* stats */
6933 + __u32 direct_pkts; /* count of non shaped packets */
6934 +};
6935 +enum {
6936 + TCA_HTB_UNSPEC,
6937 + TCA_HTB_PARMS,
6938 + TCA_HTB_INIT,
6939 + TCA_HTB_CTAB,
6940 + TCA_HTB_RTAB,
6941 + TCA_HTB_DIRECT_QLEN,
6942 + TCA_HTB_RATE64,
6943 + TCA_HTB_CEIL64,
6944 + TCA_HTB_PAD,
6945 + __TCA_HTB_MAX,
6946 +};
6947 +
6948 +#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
6949 +
6950 +struct tc_htb_xstats {
6951 + __u32 lends;
6952 + __u32 borrows;
6953 + __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */
6954 + __s32 tokens;
6955 + __s32 ctokens;
6956 +};
6957 +
6958 +/* HFSC section */
6959 +
6960 +struct tc_hfsc_qopt {
6961 + __u16 defcls; /* default class */
6962 +};
6963 +
6964 +struct tc_service_curve {
6965 + __u32 m1; /* slope of the first segment in bps */
6966 + __u32 d; /* x-projection of the first segment in us */
6967 + __u32 m2; /* slope of the second segment in bps */
6968 +};
6969 +
6970 +struct tc_hfsc_stats {
6971 + __u64 work; /* total work done */
6972 + __u64 rtwork; /* work done by real-time criteria */
6973 + __u32 period; /* current period */
6974 + __u32 level; /* class level in hierarchy */
6975 +};
6976 +
6977 +enum {
6978 + TCA_HFSC_UNSPEC,
6979 + TCA_HFSC_RSC,
6980 + TCA_HFSC_FSC,
6981 + TCA_HFSC_USC,
6982 + __TCA_HFSC_MAX,
6983 +};
6984 +
6985 +#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
6986 +
6987 +
6988 +/* CBQ section */
6989 +
6990 +#define TC_CBQ_MAXPRIO 8
6991 +#define TC_CBQ_MAXLEVEL 8
6992 +#define TC_CBQ_DEF_EWMA 5
6993 +
6994 +struct tc_cbq_lssopt {
6995 + unsigned char change;
6996 + unsigned char flags;
6997 +#define TCF_CBQ_LSS_BOUNDED 1
6998 +#define TCF_CBQ_LSS_ISOLATED 2
6999 + unsigned char ewma_log;
7000 + unsigned char level;
7001 +#define TCF_CBQ_LSS_FLAGS 1
7002 +#define TCF_CBQ_LSS_EWMA 2
7003 +#define TCF_CBQ_LSS_MAXIDLE 4
7004 +#define TCF_CBQ_LSS_MINIDLE 8
7005 +#define TCF_CBQ_LSS_OFFTIME 0x10
7006 +#define TCF_CBQ_LSS_AVPKT 0x20
7007 + __u32 maxidle;
7008 + __u32 minidle;
7009 + __u32 offtime;
7010 + __u32 avpkt;
7011 +};
7012 +
7013 +struct tc_cbq_wrropt {
7014 + unsigned char flags;
7015 + unsigned char priority;
7016 + unsigned char cpriority;
7017 + unsigned char __reserved;
7018 + __u32 allot;
7019 + __u32 weight;
7020 +};
7021 +
7022 +struct tc_cbq_ovl {
7023 + unsigned char strategy;
7024 +#define TC_CBQ_OVL_CLASSIC 0
7025 +#define TC_CBQ_OVL_DELAY 1
7026 +#define TC_CBQ_OVL_LOWPRIO 2
7027 +#define TC_CBQ_OVL_DROP 3
7028 +#define TC_CBQ_OVL_RCLASSIC 4
7029 + unsigned char priority2;
7030 + __u16 pad;
7031 + __u32 penalty;
7032 +};
7033 +
7034 +struct tc_cbq_police {
7035 + unsigned char police;
7036 + unsigned char __res1;
7037 + unsigned short __res2;
7038 +};
7039 +
7040 +struct tc_cbq_fopt {
7041 + __u32 split;
7042 + __u32 defmap;
7043 + __u32 defchange;
7044 +};
7045 +
7046 +struct tc_cbq_xstats {
7047 + __u32 borrows;
7048 + __u32 overactions;
7049 + __s32 avgidle;
7050 + __s32 undertime;
7051 +};
7052 +
7053 +enum {
7054 + TCA_CBQ_UNSPEC,
7055 + TCA_CBQ_LSSOPT,
7056 + TCA_CBQ_WRROPT,
7057 + TCA_CBQ_FOPT,
7058 + TCA_CBQ_OVL_STRATEGY,
7059 + TCA_CBQ_RATE,
7060 + TCA_CBQ_RTAB,
7061 + TCA_CBQ_POLICE,
7062 + __TCA_CBQ_MAX,
7063 +};
7064 +
7065 +#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
7066 +
7067 +/* dsmark section */
7068 +
7069 +enum {
7070 + TCA_DSMARK_UNSPEC,
7071 + TCA_DSMARK_INDICES,
7072 + TCA_DSMARK_DEFAULT_INDEX,
7073 + TCA_DSMARK_SET_TC_INDEX,
7074 + TCA_DSMARK_MASK,
7075 + TCA_DSMARK_VALUE,
7076 + __TCA_DSMARK_MAX,
7077 +};
7078 +
7079 +#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
7080 +
7081 +/* ATM section */
7082 +
7083 +enum {
7084 + TCA_ATM_UNSPEC,
7085 + TCA_ATM_FD, /* file/socket descriptor */
7086 + TCA_ATM_PTR, /* pointer to descriptor - later */
7087 + TCA_ATM_HDR, /* LL header */
7088 + TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
7089 + TCA_ATM_ADDR, /* PVC address (for output only) */
7090 + TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
7091 + __TCA_ATM_MAX,
7092 +};
7093 +
7094 +#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
7095 +
7096 +/* Network emulator */
7097 +
7098 +enum {
7099 + TCA_NETEM_UNSPEC,
7100 + TCA_NETEM_CORR,
7101 + TCA_NETEM_DELAY_DIST,
7102 + TCA_NETEM_REORDER,
7103 + TCA_NETEM_CORRUPT,
7104 + TCA_NETEM_LOSS,
7105 + TCA_NETEM_RATE,
7106 + TCA_NETEM_ECN,
7107 + TCA_NETEM_RATE64,
7108 + TCA_NETEM_PAD,
7109 + TCA_NETEM_LATENCY64,
7110 + TCA_NETEM_JITTER64,
7111 + TCA_NETEM_SLOT,
7112 + TCA_NETEM_SLOT_DIST,
7113 + __TCA_NETEM_MAX,
7114 +};
7115 +
7116 +#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
7117 +
7118 +struct tc_netem_qopt {
7119 + __u32 latency; /* added delay (us) */
7120 + __u32 limit; /* fifo limit (packets) */
7121 + __u32 loss; /* random packet loss (0=none ~0=100%) */
7122 + __u32 gap; /* re-ordering gap (0 for none) */
7123 + __u32 duplicate; /* random packet dup (0=none ~0=100%) */
7124 + __u32 jitter; /* random jitter in latency (us) */
7125 +};
7126 +
7127 +struct tc_netem_corr {
7128 + __u32 delay_corr; /* delay correlation */
7129 + __u32 loss_corr; /* packet loss correlation */
7130 + __u32 dup_corr; /* duplicate correlation */
7131 +};
7132 +
7133 +struct tc_netem_reorder {
7134 + __u32 probability;
7135 + __u32 correlation;
7136 +};
7137 +
7138 +struct tc_netem_corrupt {
7139 + __u32 probability;
7140 + __u32 correlation;
7141 +};
7142 +
7143 +struct tc_netem_rate {
7144 + __u32 rate; /* byte/s */
7145 + __s32 packet_overhead;
7146 + __u32 cell_size;
7147 + __s32 cell_overhead;
7148 +};
7149 +
7150 +struct tc_netem_slot {
7151 + __s64 min_delay; /* nsec */
7152 + __s64 max_delay;
7153 + __s32 max_packets;
7154 + __s32 max_bytes;
7155 + __s64 dist_delay; /* nsec */
7156 + __s64 dist_jitter; /* nsec */
7157 +};
7158 +
7159 +enum {
7160 + NETEM_LOSS_UNSPEC,
7161 + NETEM_LOSS_GI, /* General Intuitive - 4 state model */
7162 + NETEM_LOSS_GE, /* Gilbert Elliot models */
7163 + __NETEM_LOSS_MAX
7164 +};
7165 +#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
7166 +
7167 +/* State transition probabilities for 4 state model */
7168 +struct tc_netem_gimodel {
7169 + __u32 p13;
7170 + __u32 p31;
7171 + __u32 p32;
7172 + __u32 p14;
7173 + __u32 p23;
7174 +};
7175 +
7176 +/* Gilbert-Elliot models */
7177 +struct tc_netem_gemodel {
7178 + __u32 p;
7179 + __u32 r;
7180 + __u32 h;
7181 + __u32 k1;
7182 +};
7183 +
7184 +#define NETEM_DIST_SCALE 8192
7185 +#define NETEM_DIST_MAX 16384
7186 +
7187 +/* DRR */
7188 +
7189 +enum {
7190 + TCA_DRR_UNSPEC,
7191 + TCA_DRR_QUANTUM,
7192 + __TCA_DRR_MAX
7193 +};
7194 +
7195 +#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
7196 +
7197 +struct tc_drr_stats {
7198 + __u32 deficit;
7199 +};
7200 +
7201 +/* MQPRIO */
7202 +#define TC_QOPT_BITMASK 15
7203 +#define TC_QOPT_MAX_QUEUE 16
7204 +
7205 +enum {
7206 + TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */
7207 + TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */
7208 + __TC_MQPRIO_HW_OFFLOAD_MAX
7209 +};
7210 +
7211 +#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
7212 +
7213 +enum {
7214 + TC_MQPRIO_MODE_DCB,
7215 + TC_MQPRIO_MODE_CHANNEL,
7216 + __TC_MQPRIO_MODE_MAX
7217 +};
7218 +
7219 +#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
7220 +
7221 +enum {
7222 + TC_MQPRIO_SHAPER_DCB,
7223 + TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */
7224 + __TC_MQPRIO_SHAPER_MAX
7225 +};
7226 +
7227 +#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
7228 +
7229 +struct tc_mqprio_qopt {
7230 + __u8 num_tc;
7231 + __u8 prio_tc_map[TC_QOPT_BITMASK + 1];
7232 + __u8 hw;
7233 + __u16 count[TC_QOPT_MAX_QUEUE];
7234 + __u16 offset[TC_QOPT_MAX_QUEUE];
7235 +};
7236 +
7237 +#define TC_MQPRIO_F_MODE 0x1
7238 +#define TC_MQPRIO_F_SHAPER 0x2
7239 +#define TC_MQPRIO_F_MIN_RATE 0x4
7240 +#define TC_MQPRIO_F_MAX_RATE 0x8
7241 +
7242 +enum {
7243 + TCA_MQPRIO_UNSPEC,
7244 + TCA_MQPRIO_MODE,
7245 + TCA_MQPRIO_SHAPER,
7246 + TCA_MQPRIO_MIN_RATE64,
7247 + TCA_MQPRIO_MAX_RATE64,
7248 + __TCA_MQPRIO_MAX,
7249 +};
7250 +
7251 +#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
7252 +
7253 +/* SFB */
7254 +
7255 +enum {
7256 + TCA_SFB_UNSPEC,
7257 + TCA_SFB_PARMS,
7258 + __TCA_SFB_MAX,
7259 +};
7260 +
7261 +#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
7262 +
7263 +/*
7264 + * Note: increment, decrement are Q0.16 fixed-point values.
7265 + */
7266 +struct tc_sfb_qopt {
7267 + __u32 rehash_interval; /* delay between hash move, in ms */
7268 + __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */
7269 + __u32 max; /* max len of qlen_min */
7270 + __u32 bin_size; /* maximum queue length per bin */
7271 + __u32 increment; /* probability increment, (d1 in Blue) */
7272 + __u32 decrement; /* probability decrement, (d2 in Blue) */
7273 + __u32 limit; /* max SFB queue length */
7274 + __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */
7275 + __u32 penalty_burst;
7276 +};
7277 +
7278 +struct tc_sfb_xstats {
7279 + __u32 earlydrop;
7280 + __u32 penaltydrop;
7281 + __u32 bucketdrop;
7282 + __u32 queuedrop;
7283 + __u32 childdrop; /* drops in child qdisc */
7284 + __u32 marked;
7285 + __u32 maxqlen;
7286 + __u32 maxprob;
7287 + __u32 avgprob;
7288 +};
7289 +
7290 +#define SFB_MAX_PROB 0xFFFF
7291 +
7292 +/* QFQ */
7293 +enum {
7294 + TCA_QFQ_UNSPEC,
7295 + TCA_QFQ_WEIGHT,
7296 + TCA_QFQ_LMAX,
7297 + __TCA_QFQ_MAX
7298 +};
7299 +
7300 +#define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1)
7301 +
7302 +struct tc_qfq_stats {
7303 + __u32 weight;
7304 + __u32 lmax;
7305 +};
7306 +
7307 +/* CODEL */
7308 +
7309 +enum {
7310 + TCA_CODEL_UNSPEC,
7311 + TCA_CODEL_TARGET,
7312 + TCA_CODEL_LIMIT,
7313 + TCA_CODEL_INTERVAL,
7314 + TCA_CODEL_ECN,
7315 + TCA_CODEL_CE_THRESHOLD,
7316 + __TCA_CODEL_MAX
7317 +};
7318 +
7319 +#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
7320 +
7321 +struct tc_codel_xstats {
7322 + __u32 maxpacket; /* largest packet we've seen so far */
7323 + __u32 count; /* how many drops we've done since the last time we
7324 + * entered dropping state
7325 + */
7326 + __u32 lastcount; /* count at entry to dropping state */
7327 + __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
7328 + __s32 drop_next; /* time to drop next packet */
7329 + __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */
7330 + __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
7331 + __u32 dropping; /* are we in dropping state ? */
7332 + __u32 ce_mark; /* number of CE marked packets because of ce_threshold */
7333 +};
7334 +
7335 +/* FQ_CODEL */
7336 +
7337 +enum {
7338 + TCA_FQ_CODEL_UNSPEC,
7339 + TCA_FQ_CODEL_TARGET,
7340 + TCA_FQ_CODEL_LIMIT,
7341 + TCA_FQ_CODEL_INTERVAL,
7342 + TCA_FQ_CODEL_ECN,
7343 + TCA_FQ_CODEL_FLOWS,
7344 + TCA_FQ_CODEL_QUANTUM,
7345 + TCA_FQ_CODEL_CE_THRESHOLD,
7346 + TCA_FQ_CODEL_DROP_BATCH_SIZE,
7347 + TCA_FQ_CODEL_MEMORY_LIMIT,
7348 + __TCA_FQ_CODEL_MAX
7349 +};
7350 +
7351 +#define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1)
7352 +
7353 +enum {
7354 + TCA_FQ_CODEL_XSTATS_QDISC,
7355 + TCA_FQ_CODEL_XSTATS_CLASS,
7356 +};
7357 +
7358 +struct tc_fq_codel_qd_stats {
7359 + __u32 maxpacket; /* largest packet we've seen so far */
7360 + __u32 drop_overlimit; /* number of time max qdisc
7361 + * packet limit was hit
7362 + */
7363 + __u32 ecn_mark; /* number of packets we ECN marked
7364 + * instead of being dropped
7365 + */
7366 + __u32 new_flow_count; /* number of time packets
7367 + * created a 'new flow'
7368 + */
7369 + __u32 new_flows_len; /* count of flows in new list */
7370 + __u32 old_flows_len; /* count of flows in old list */
7371 + __u32 ce_mark; /* packets above ce_threshold */
7372 + __u32 memory_usage; /* in bytes */
7373 + __u32 drop_overmemory;
7374 +};
7375 +
7376 +struct tc_fq_codel_cl_stats {
7377 + __s32 deficit;
7378 + __u32 ldelay; /* in-queue delay seen by most recently
7379 + * dequeued packet
7380 + */
7381 + __u32 count;
7382 + __u32 lastcount;
7383 + __u32 dropping;
7384 + __s32 drop_next;
7385 +};
7386 +
7387 +struct tc_fq_codel_xstats {
7388 + __u32 type;
7389 + union {
7390 + struct tc_fq_codel_qd_stats qdisc_stats;
7391 + struct tc_fq_codel_cl_stats class_stats;
7392 + };
7393 +};
7394 +
7395 +/* FQ */
7396 +
7397 +enum {
7398 + TCA_FQ_UNSPEC,
7399 +
7400 + TCA_FQ_PLIMIT, /* limit of total number of packets in queue */
7401 +
7402 + TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */
7403 +
7404 + TCA_FQ_QUANTUM, /* RR quantum */
7405 +
7406 + TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */
7407 +
7408 + TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
7409 +
7410 + TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
7411 +
7412 + TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
7413 +
7414 + TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
7415 +
7416 + TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
7417 +
7418 + TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
7419 +
7420 + TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
7421 +
7422 + TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
7423 +
7424 + __TCA_FQ_MAX
7425 +};
7426 +
7427 +#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
7428 +
7429 +struct tc_fq_qd_stats {
7430 + __u64 gc_flows;
7431 + __u64 highprio_packets;
7432 + __u64 tcp_retrans;
7433 + __u64 throttled;
7434 + __u64 flows_plimit;
7435 + __u64 pkts_too_long;
7436 + __u64 allocation_errors;
7437 + __s64 time_next_delayed_flow;
7438 + __u32 flows;
7439 + __u32 inactive_flows;
7440 + __u32 throttled_flows;
7441 + __u32 unthrottle_latency_ns;
7442 + __u64 ce_mark; /* packets above ce_threshold */
7443 +};
7444 +
7445 +/* Heavy-Hitter Filter */
7446 +
7447 +enum {
7448 + TCA_HHF_UNSPEC,
7449 + TCA_HHF_BACKLOG_LIMIT,
7450 + TCA_HHF_QUANTUM,
7451 + TCA_HHF_HH_FLOWS_LIMIT,
7452 + TCA_HHF_RESET_TIMEOUT,
7453 + TCA_HHF_ADMIT_BYTES,
7454 + TCA_HHF_EVICT_TIMEOUT,
7455 + TCA_HHF_NON_HH_WEIGHT,
7456 + __TCA_HHF_MAX
7457 +};
7458 +
7459 +#define TCA_HHF_MAX (__TCA_HHF_MAX - 1)
7460 +
7461 +struct tc_hhf_xstats {
7462 + __u32 drop_overlimit; /* number of times max qdisc packet limit
7463 + * was hit
7464 + */
7465 + __u32 hh_overlimit; /* number of times max heavy-hitters was hit */
7466 + __u32 hh_tot_count; /* number of captured heavy-hitters so far */
7467 + __u32 hh_cur_count; /* number of current heavy-hitters */
7468 +};
7469 +
7470 +/* PIE */
7471 +enum {
7472 + TCA_PIE_UNSPEC,
7473 + TCA_PIE_TARGET,
7474 + TCA_PIE_LIMIT,
7475 + TCA_PIE_TUPDATE,
7476 + TCA_PIE_ALPHA,
7477 + TCA_PIE_BETA,
7478 + TCA_PIE_ECN,
7479 + TCA_PIE_BYTEMODE,
7480 + __TCA_PIE_MAX
7481 +};
7482 +#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
7483 +
7484 +struct tc_pie_xstats {
7485 + __u32 prob; /* current probability */
7486 + __u32 delay; /* current delay in ms */
7487 + __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
7488 + __u32 packets_in; /* total number of packets enqueued */
7489 + __u32 dropped; /* packets dropped due to pie_action */
7490 + __u32 overlimit; /* dropped due to lack of space in queue */
7491 + __u32 maxq; /* maximum queue size */
7492 + __u32 ecn_mark; /* packets marked with ecn*/
7493 +};
7494 +
7495 +/* CBS */
7496 +struct tc_cbs_qopt {
7497 + __u8 offload;
7498 + __u8 _pad[3];
7499 + __s32 hicredit;
7500 + __s32 locredit;
7501 + __s32 idleslope;
7502 + __s32 sendslope;
7503 +};
7504 +
7505 +enum {
7506 + TCA_CBS_UNSPEC,
7507 + TCA_CBS_PARMS,
7508 + __TCA_CBS_MAX,
7509 +};
7510 +
7511 +#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
7512 +
7513 +
7514 +/* ETF */
7515 +struct tc_etf_qopt {
7516 + __s32 delta;
7517 + __s32 clockid;
7518 + __u32 flags;
7519 +#define TC_ETF_DEADLINE_MODE_ON BIT(0)
7520 +#define TC_ETF_OFFLOAD_ON BIT(1)
7521 +};
7522 +
7523 +enum {
7524 + TCA_ETF_UNSPEC,
7525 + TCA_ETF_PARMS,
7526 + __TCA_ETF_MAX,
7527 +};
7528 +
7529 +#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
7530 +
7531 +
7532 +/* CAKE */
7533 +enum {
7534 + TCA_CAKE_UNSPEC,
7535 + TCA_CAKE_PAD,
7536 + TCA_CAKE_BASE_RATE64,
7537 + TCA_CAKE_DIFFSERV_MODE,
7538 + TCA_CAKE_ATM,
7539 + TCA_CAKE_FLOW_MODE,
7540 + TCA_CAKE_OVERHEAD,
7541 + TCA_CAKE_RTT,
7542 + TCA_CAKE_TARGET,
7543 + TCA_CAKE_AUTORATE,
7544 + TCA_CAKE_MEMORY,
7545 + TCA_CAKE_NAT,
7546 + TCA_CAKE_RAW,
7547 + TCA_CAKE_WASH,
7548 + TCA_CAKE_MPU,
7549 + TCA_CAKE_INGRESS,
7550 + TCA_CAKE_ACK_FILTER,
7551 + TCA_CAKE_SPLIT_GSO,
7552 + __TCA_CAKE_MAX
7553 +};
7554 +#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
7555 +
7556 +enum {
7557 + __TCA_CAKE_STATS_INVALID,
7558 + TCA_CAKE_STATS_PAD,
7559 + TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
7560 + TCA_CAKE_STATS_MEMORY_LIMIT,
7561 + TCA_CAKE_STATS_MEMORY_USED,
7562 + TCA_CAKE_STATS_AVG_NETOFF,
7563 + TCA_CAKE_STATS_MIN_NETLEN,
7564 + TCA_CAKE_STATS_MAX_NETLEN,
7565 + TCA_CAKE_STATS_MIN_ADJLEN,
7566 + TCA_CAKE_STATS_MAX_ADJLEN,
7567 + TCA_CAKE_STATS_TIN_STATS,
7568 + TCA_CAKE_STATS_DEFICIT,
7569 + TCA_CAKE_STATS_COBALT_COUNT,
7570 + TCA_CAKE_STATS_DROPPING,
7571 + TCA_CAKE_STATS_DROP_NEXT_US,
7572 + TCA_CAKE_STATS_P_DROP,
7573 + TCA_CAKE_STATS_BLUE_TIMER_US,
7574 + __TCA_CAKE_STATS_MAX
7575 +};
7576 +#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
7577 +
7578 +enum {
7579 + __TCA_CAKE_TIN_STATS_INVALID,
7580 + TCA_CAKE_TIN_STATS_PAD,
7581 + TCA_CAKE_TIN_STATS_SENT_PACKETS,
7582 + TCA_CAKE_TIN_STATS_SENT_BYTES64,
7583 + TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
7584 + TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
7585 + TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
7586 + TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
7587 + TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
7588 + TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
7589 + TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
7590 + TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
7591 + TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
7592 + TCA_CAKE_TIN_STATS_TARGET_US,
7593 + TCA_CAKE_TIN_STATS_INTERVAL_US,
7594 + TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
7595 + TCA_CAKE_TIN_STATS_WAY_MISSES,
7596 + TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
7597 + TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
7598 + TCA_CAKE_TIN_STATS_AVG_DELAY_US,
7599 + TCA_CAKE_TIN_STATS_BASE_DELAY_US,
7600 + TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
7601 + TCA_CAKE_TIN_STATS_BULK_FLOWS,
7602 + TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
7603 + TCA_CAKE_TIN_STATS_MAX_SKBLEN,
7604 + TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
7605 + __TCA_CAKE_TIN_STATS_MAX
7606 +};
7607 +#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
7608 +#define TC_CAKE_MAX_TINS (8)
7609 +
7610 +enum {
7611 + CAKE_FLOW_NONE = 0,
7612 + CAKE_FLOW_SRC_IP,
7613 + CAKE_FLOW_DST_IP,
7614 + CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
7615 + CAKE_FLOW_FLOWS,
7616 + CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
7617 + CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
7618 + CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */
7619 + CAKE_FLOW_MAX,
7620 +};
7621 +
7622 +enum {
7623 + CAKE_DIFFSERV_DIFFSERV3 = 0,
7624 + CAKE_DIFFSERV_DIFFSERV4,
7625 + CAKE_DIFFSERV_DIFFSERV8,
7626 + CAKE_DIFFSERV_BESTEFFORT,
7627 + CAKE_DIFFSERV_PRECEDENCE,
7628 + CAKE_DIFFSERV_MAX
7629 +};
7630 +
7631 +enum {
7632 + CAKE_ACK_NONE = 0,
7633 + CAKE_ACK_FILTER,
7634 + CAKE_ACK_AGGRESSIVE,
7635 + CAKE_ACK_MAX
7636 +};
7637 +
7638 +enum {
7639 + CAKE_ATM_NONE = 0,
7640 + CAKE_ATM_ATM,
7641 + CAKE_ATM_PTM,
7642 + CAKE_ATM_MAX
7643 +};
7644 +
7645 +
7646 +/* TAPRIO */
7647 +enum {
7648 + TC_TAPRIO_CMD_SET_GATES = 0x00,
7649 + TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
7650 + TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
7651 +};
7652 +
7653 +enum {
7654 + TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
7655 + TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
7656 + TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
7657 + TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
7658 + TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
7659 + __TCA_TAPRIO_SCHED_ENTRY_MAX,
7660 +};
7661 +#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
7662 +
7663 +/* The format for schedule entry list is:
7664 + * [TCA_TAPRIO_SCHED_ENTRY_LIST]
7665 + * [TCA_TAPRIO_SCHED_ENTRY]
7666 + * [TCA_TAPRIO_SCHED_ENTRY_CMD]
7667 + * [TCA_TAPRIO_SCHED_ENTRY_GATES]
7668 + * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
7669 + */
7670 +enum {
7671 + TCA_TAPRIO_SCHED_UNSPEC,
7672 + TCA_TAPRIO_SCHED_ENTRY,
7673 + __TCA_TAPRIO_SCHED_MAX,
7674 +};
7675 +
7676 +#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
7677 +
7678 +enum {
7679 + TCA_TAPRIO_ATTR_UNSPEC,
7680 + TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
7681 + TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
7682 + TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
7683 + TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
7684 + TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
7685 + TCA_TAPRIO_PAD,
7686 + __TCA_TAPRIO_ATTR_MAX,
7687 +};
7688 +
7689 +#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
7690 +
7691 +#endif
7692 diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
7693 index ecd79b7fb1073..d5e992f7c7dd5 100644
7694 --- a/tools/testing/selftests/bpf/Makefile
7695 +++ b/tools/testing/selftests/bpf/Makefile
7696 @@ -53,7 +53,10 @@ TEST_PROGS := test_kmod.sh \
7697 test_flow_dissector.sh \
7698 test_xdp_vlan.sh
7699
7700 -TEST_PROGS_EXTENDED := with_addr.sh
7701 +TEST_PROGS_EXTENDED := with_addr.sh \
7702 + with_tunnels.sh \
7703 + tcp_client.py \
7704 + tcp_server.py
7705
7706 # Compile but not part of 'make run_tests'
7707 TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
7708 diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
7709 index 6ac7232b0fdeb..3ec4ce156074c 100644
7710 --- a/tools/testing/selftests/bpf/test_progs.c
7711 +++ b/tools/testing/selftests/bpf/test_progs.c
7712 @@ -1136,7 +1136,9 @@ static void test_stacktrace_build_id(void)
7713 int i, j;
7714 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
7715 int build_id_matches = 0;
7716 + int retry = 1;
7717
7718 +retry:
7719 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
7720 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
7721 goto out;
7722 @@ -1249,6 +1251,19 @@ static void test_stacktrace_build_id(void)
7723 previous_key = key;
7724 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
7725
7726 + /* stack_map_get_build_id_offset() is racy and sometimes can return
7727 + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
7728 + * try it one more time.
7729 + */
7730 + if (build_id_matches < 1 && retry--) {
7731 + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
7732 + close(pmu_fd);
7733 + bpf_object__close(obj);
7734 + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
7735 + __func__);
7736 + goto retry;
7737 + }
7738 +
7739 if (CHECK(build_id_matches < 1, "build id match",
7740 "Didn't find expected build ID from the map\n"))
7741 goto disable_pmu;
7742 @@ -1289,7 +1304,9 @@ static void test_stacktrace_build_id_nmi(void)
7743 int i, j;
7744 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
7745 int build_id_matches = 0;
7746 + int retry = 1;
7747
7748 +retry:
7749 err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
7750 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
7751 return;
7752 @@ -1384,6 +1401,19 @@ static void test_stacktrace_build_id_nmi(void)
7753 previous_key = key;
7754 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
7755
7756 + /* stack_map_get_build_id_offset() is racy and sometimes can return
7757 + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
7758 + * try it one more time.
7759 + */
7760 + if (build_id_matches < 1 && retry--) {
7761 + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
7762 + close(pmu_fd);
7763 + bpf_object__close(obj);
7764 + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
7765 + __func__);
7766 + goto retry;
7767 + }
7768 +
7769 if (CHECK(build_id_matches < 1, "build id match",
7770 "Didn't find expected build ID from the map\n"))
7771 goto disable_pmu;
7772 diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
7773 index aeeb76a54d633..e38f1cb7089d3 100644
7774 --- a/tools/testing/selftests/bpf/test_sock_addr.c
7775 +++ b/tools/testing/selftests/bpf/test_sock_addr.c
7776 @@ -44,6 +44,7 @@
7777 #define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
7778 #define SRC6_IP "::1"
7779 #define SRC6_REWRITE_IP "::6"
7780 +#define WILDCARD6_IP "::"
7781 #define SERV6_PORT 6060
7782 #define SERV6_REWRITE_PORT 6666
7783
7784 @@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
7785 static int bind6_prog_load(const struct sock_addr_test *test);
7786 static int connect4_prog_load(const struct sock_addr_test *test);
7787 static int connect6_prog_load(const struct sock_addr_test *test);
7788 +static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
7789 static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
7790 static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
7791 static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
7792 static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
7793 static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
7794 static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
7795 +static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
7796
7797 static struct sock_addr_test tests[] = {
7798 /* bind */
7799 @@ -462,6 +465,34 @@ static struct sock_addr_test tests[] = {
7800 SRC6_REWRITE_IP,
7801 SYSCALL_ENOTSUPP,
7802 },
7803 + {
7804 + "sendmsg6: set dst IP = [::] (BSD'ism)",
7805 + sendmsg6_rw_wildcard_prog_load,
7806 + BPF_CGROUP_UDP6_SENDMSG,
7807 + BPF_CGROUP_UDP6_SENDMSG,
7808 + AF_INET6,
7809 + SOCK_DGRAM,
7810 + SERV6_IP,
7811 + SERV6_PORT,
7812 + SERV6_REWRITE_IP,
7813 + SERV6_REWRITE_PORT,
7814 + SRC6_REWRITE_IP,
7815 + SUCCESS,
7816 + },
7817 + {
7818 + "sendmsg6: preserve dst IP = [::] (BSD'ism)",
7819 + sendmsg_allow_prog_load,
7820 + BPF_CGROUP_UDP6_SENDMSG,
7821 + BPF_CGROUP_UDP6_SENDMSG,
7822 + AF_INET6,
7823 + SOCK_DGRAM,
7824 + WILDCARD6_IP,
7825 + SERV6_PORT,
7826 + SERV6_REWRITE_IP,
7827 + SERV6_PORT,
7828 + SRC6_IP,
7829 + SUCCESS,
7830 + },
7831 {
7832 "sendmsg6: deny call",
7833 sendmsg_deny_prog_load,
7834 @@ -714,16 +745,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
7835 return load_path(test, CONNECT6_PROG_PATH);
7836 }
7837
7838 -static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
7839 +static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
7840 + int32_t rc)
7841 {
7842 struct bpf_insn insns[] = {
7843 - /* return 0 */
7844 - BPF_MOV64_IMM(BPF_REG_0, 0),
7845 + /* return rc */
7846 + BPF_MOV64_IMM(BPF_REG_0, rc),
7847 BPF_EXIT_INSN(),
7848 };
7849 return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
7850 }
7851
7852 +static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
7853 +{
7854 + return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
7855 +}
7856 +
7857 +static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
7858 +{
7859 + return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
7860 +}
7861 +
7862 static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
7863 {
7864 struct sockaddr_in dst4_rw_addr;
7865 @@ -844,6 +886,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
7866 return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
7867 }
7868
7869 +static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
7870 +{
7871 + return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
7872 +}
7873 +
7874 static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
7875 {
7876 return load_path(test, SENDMSG6_PROG_PATH);
7877 diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
7878 index d8313d0438b74..b90dff8d3a94b 100755
7879 --- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
7880 +++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
7881 @@ -1,7 +1,7 @@
7882 #!/bin/bash
7883 # SPDX-License-Identifier: GPL-2.0
7884
7885 -ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
7886 +ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
7887 NUM_NETIFS=4
7888 CHECK_TC="yes"
7889 source lib.sh
7890 @@ -96,6 +96,51 @@ flooding()
7891 flood_test $swp2 $h1 $h2
7892 }
7893
7894 +vlan_deletion()
7895 +{
7896 + # Test that the deletion of a VLAN on a bridge port does not affect
7897 + # the PVID VLAN
7898 + log_info "Add and delete a VLAN on bridge port $swp1"
7899 +
7900 + bridge vlan add vid 10 dev $swp1
7901 + bridge vlan del vid 10 dev $swp1
7902 +
7903 + ping_ipv4
7904 + ping_ipv6
7905 +}
7906 +
7907 +extern_learn()
7908 +{
7909 + local mac=de:ad:be:ef:13:37
7910 + local ageing_time
7911 +
7912 + # Test that externally learned FDB entries can roam, but not age out
7913 + RET=0
7914 +
7915 + bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
7916 +
7917 + bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
7918 + check_err $? "Did not find FDB entry when should"
7919 +
7920 + # Wait for 10 seconds after the ageing time to make sure the FDB entry
7921 + # was not aged out
7922 + ageing_time=$(bridge_ageing_time_get br0)
7923 + sleep $((ageing_time + 10))
7924 +
7925 + bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
7926 + check_err $? "FDB entry was aged out when should not"
7927 +
7928 + $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
7929 +
7930 + bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
7931 + check_err $? "FDB entry did not roam when should"
7932 +
7933 + log_test "Externally learned FDB entry - ageing & roaming"
7934 +
7935 + bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
7936 + bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
7937 +}
7938 +
7939 trap cleanup EXIT
7940
7941 setup_prepare
7942 diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c
7943 index 61ae2782388e9..5d56cc0838f62 100644
7944 --- a/tools/testing/selftests/net/ip_defrag.c
7945 +++ b/tools/testing/selftests/net/ip_defrag.c
7946 @@ -203,6 +203,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
7947 {
7948 struct ip *iphdr = (struct ip *)ip_frame;
7949 struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
7950 + const bool ipv4 = !ipv6;
7951 int res;
7952 int offset;
7953 int frag_len;
7954 @@ -239,19 +240,53 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
7955 iphdr->ip_sum = 0;
7956 }
7957
7958 + /* Occasionally test in-order fragments. */
7959 + if (!cfg_overlap && (rand() % 100 < 15)) {
7960 + offset = 0;
7961 + while (offset < (UDP_HLEN + payload_len)) {
7962 + send_fragment(fd_raw, addr, alen, offset, ipv6);
7963 + offset += max_frag_len;
7964 + }
7965 + return;
7966 + }
7967 +
7968 + /* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */
7969 + if (ipv4 && !cfg_overlap && (rand() % 100 < 20) &&
7970 + (payload_len > 9 * max_frag_len)) {
7971 + offset = 6 * max_frag_len;
7972 + while (offset < (UDP_HLEN + payload_len)) {
7973 + send_fragment(fd_raw, addr, alen, offset, ipv6);
7974 + offset += max_frag_len;
7975 + }
7976 + offset = 3 * max_frag_len;
7977 + while (offset < 6 * max_frag_len) {
7978 + send_fragment(fd_raw, addr, alen, offset, ipv6);
7979 + offset += max_frag_len;
7980 + }
7981 + offset = 0;
7982 + while (offset < 3 * max_frag_len) {
7983 + send_fragment(fd_raw, addr, alen, offset, ipv6);
7984 + offset += max_frag_len;
7985 + }
7986 + return;
7987 + }
7988 +
7989 /* Odd fragments. */
7990 offset = max_frag_len;
7991 while (offset < (UDP_HLEN + payload_len)) {
7992 send_fragment(fd_raw, addr, alen, offset, ipv6);
7993 + /* IPv4 ignores duplicates, so randomly send a duplicate. */
7994 + if (ipv4 && (1 == rand() % 100))
7995 + send_fragment(fd_raw, addr, alen, offset, ipv6);
7996 offset += 2 * max_frag_len;
7997 }
7998
7999 if (cfg_overlap) {
8000 /* Send an extra random fragment. */
8001 - offset = rand() % (UDP_HLEN + payload_len - 1);
8002 - /* sendto() returns EINVAL if offset + frag_len is too small. */
8003 if (ipv6) {
8004 struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
8005 + /* sendto() returns EINVAL if offset + frag_len is too small. */
8006 + offset = rand() % (UDP_HLEN + payload_len - 1);
8007 frag_len = max_frag_len + rand() % 256;
8008 /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */
8009 frag_len &= ~0x7;
8010 @@ -259,13 +294,29 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
8011 ip6hdr->ip6_plen = htons(frag_len);
8012 frag_len += IP6_HLEN;
8013 } else {
8014 - frag_len = IP4_HLEN + UDP_HLEN + rand() % 256;
8015 + /* In IPv4, duplicates and some fragments completely inside
8016 + * previously sent fragments are dropped/ignored. So
8017 + * random offset and frag_len can result in a dropped
8018 + * fragment instead of a dropped queue/packet. So we
8019 + * hard-code offset and frag_len.
8020 + *
8021 + * See ade446403bfb ("net: ipv4: do not handle duplicate
8022 + * fragments as overlapping").
8023 + */
8024 + if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
8025 + /* not enough payload to play with random offset and frag_len. */
8026 + offset = 8;
8027 + frag_len = IP4_HLEN + UDP_HLEN + max_frag_len;
8028 + } else {
8029 + offset = rand() % (payload_len / 2);
8030 + frag_len = 2 * max_frag_len + 1 + rand() % 256;
8031 + }
8032 iphdr->ip_off = htons(offset / 8 | IP4_MF);
8033 iphdr->ip_len = htons(frag_len);
8034 }
8035 res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen);
8036 if (res < 0)
8037 - error(1, errno, "sendto overlap");
8038 + error(1, errno, "sendto overlap: %d", frag_len);
8039 if (res != frag_len)
8040 error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len);
8041 frag_counter++;
8042 @@ -275,6 +326,9 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
8043 offset = 0;
8044 while (offset < (UDP_HLEN + payload_len)) {
8045 send_fragment(fd_raw, addr, alen, offset, ipv6);
8046 + /* IPv4 ignores duplicates, so randomly send a duplicate. */
8047 + if (ipv4 && (1 == rand() % 100))
8048 + send_fragment(fd_raw, addr, alen, offset, ipv6);
8049 offset += 2 * max_frag_len;
8050 }
8051 }
8052 @@ -282,7 +336,11 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
8053 static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
8054 {
8055 int fd_tx_raw, fd_rx_udp;
8056 - struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 };
8057 + /* Frag queue timeout is set to one second in the calling script;
8058 + * socket timeout should be just a bit longer to avoid tests interfering
8059 + * with each other.
8060 + */
8061 + struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };
8062 int idx;
8063 int min_frag_len = ipv6 ? 1280 : 8;
8064
8065 @@ -308,12 +366,32 @@ static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
8066 payload_len += (rand() % 4096)) {
8067 if (cfg_verbose)
8068 printf("payload_len: %d\n", payload_len);
8069 - max_frag_len = min_frag_len;
8070 - do {
8071 +
8072 + if (cfg_overlap) {
8073 + /* With overlaps, one send/receive pair below takes
8074 + * at least one second (== timeout) to run, so there
8075 + * is not enough test time to run a nested loop:
8076 + * the full overlap test takes 20-30 seconds.
8077 + */
8078 + max_frag_len = min_frag_len +
8079 + rand() % (1500 - FRAG_HLEN - min_frag_len);
8080 send_udp_frags(fd_tx_raw, addr, alen, ipv6);
8081 recv_validate_udp(fd_rx_udp);
8082 - max_frag_len += 8 * (rand() % 8);
8083 - } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len);
8084 + } else {
8085 + /* Without overlaps, each packet reassembly (== one
8086 + * send/receive pair below) takes very little time to
8087 + * run, so we can easily afford more thourough testing
8088 + * with a nested loop: the full non-overlap test takes
8089 + * less than one second).
8090 + */
8091 + max_frag_len = min_frag_len;
8092 + do {
8093 + send_udp_frags(fd_tx_raw, addr, alen, ipv6);
8094 + recv_validate_udp(fd_rx_udp);
8095 + max_frag_len += 8 * (rand() % 8);
8096 + } while (max_frag_len < (1500 - FRAG_HLEN) &&
8097 + max_frag_len <= payload_len);
8098 + }
8099 }
8100
8101 /* Cleanup. */
8102 diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
8103 index f346727960449..7dd79a9efb177 100755
8104 --- a/tools/testing/selftests/net/ip_defrag.sh
8105 +++ b/tools/testing/selftests/net/ip_defrag.sh
8106 @@ -11,10 +11,17 @@ readonly NETNS="ns-$(mktemp -u XXXXXX)"
8107 setup() {
8108 ip netns add "${NETNS}"
8109 ip -netns "${NETNS}" link set lo up
8110 +
8111 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1
8112 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1
8113 + ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_time=1 >/dev/null 2>&1
8114 +
8115 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1
8116 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
8117 + ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1
8118 +
8119 + # DST cache can get full with a lot of frags, with GC not keeping up with the test.
8120 + ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1
8121 }
8122
8123 cleanup() {
8124 @@ -27,7 +34,6 @@ setup
8125 echo "ipv4 defrag"
8126 ip netns exec "${NETNS}" ./ip_defrag -4
8127
8128 -
8129 echo "ipv4 defrag with overlaps"
8130 ip netns exec "${NETNS}" ./ip_defrag -4o
8131
8132 @@ -37,3 +43,4 @@ ip netns exec "${NETNS}" ./ip_defrag -6
8133 echo "ipv6 defrag with overlaps"
8134 ip netns exec "${NETNS}" ./ip_defrag -6o
8135
8136 +echo "all tests done"
8137 diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
8138 index 637ea0219617f..0da3545cabdb6 100644
8139 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
8140 +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
8141 @@ -17,7 +17,7 @@
8142 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
8143 "expExitCode": "0",
8144 "verifyCmd": "$TC actions get action ife index 2",
8145 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2",
8146 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
8147 "matchCount": "1",
8148 "teardown": [
8149 "$TC actions flush action ife"
8150 @@ -41,7 +41,7 @@
8151 "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
8152 "expExitCode": "0",
8153 "verifyCmd": "$TC actions get action ife index 2",
8154 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2",
8155 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
8156 "matchCount": "1",
8157 "teardown": [
8158 "$TC actions flush action ife"
8159 @@ -65,7 +65,7 @@
8160 "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
8161 "expExitCode": "0",
8162 "verifyCmd": "$TC actions get action ife index 2",
8163 - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2",
8164 + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
8165 "matchCount": "1",
8166 "teardown": [
8167 "$TC actions flush action ife"
8168 @@ -89,7 +89,7 @@
8169 "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
8170 "expExitCode": "0",
8171 "verifyCmd": "$TC actions get action ife index 2",
8172 - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2",
8173 + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
8174 "matchCount": "1",
8175 "teardown": [
8176 "$TC actions flush action ife"
8177 @@ -113,7 +113,7 @@
8178 "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
8179 "expExitCode": "0",
8180 "verifyCmd": "$TC actions get action ife index 2",
8181 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2",
8182 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
8183 "matchCount": "1",
8184 "teardown": [
8185 "$TC actions flush action ife"
8186 @@ -137,7 +137,7 @@
8187 "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
8188 "expExitCode": "0",
8189 "verifyCmd": "$TC actions get action ife index 2",
8190 - "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2",
8191 + "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
8192 "matchCount": "1",
8193 "teardown": [
8194 "$TC actions flush action ife"
8195 @@ -161,7 +161,7 @@
8196 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
8197 "expExitCode": "0",
8198 "verifyCmd": "$TC actions get action ife index 90",
8199 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90",
8200 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
8201 "matchCount": "1",
8202 "teardown": [
8203 "$TC actions flush action ife"
8204 @@ -185,7 +185,7 @@
8205 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
8206 "expExitCode": "255",
8207 "verifyCmd": "$TC actions get action ife index 90",
8208 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90",
8209 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
8210 "matchCount": "0",
8211 "teardown": []
8212 },
8213 @@ -207,7 +207,7 @@
8214 "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
8215 "expExitCode": "0",
8216 "verifyCmd": "$TC actions get action ife index 9",
8217 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9",
8218 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
8219 "matchCount": "1",
8220 "teardown": [
8221 "$TC actions flush action ife"
8222 @@ -231,7 +231,7 @@
8223 "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
8224 "expExitCode": "0",
8225 "verifyCmd": "$TC actions get action ife index 9",
8226 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9",
8227 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
8228 "matchCount": "1",
8229 "teardown": [
8230 "$TC actions flush action ife"
8231 @@ -255,7 +255,7 @@
8232 "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
8233 "expExitCode": "0",
8234 "verifyCmd": "$TC actions get action ife index 9",
8235 - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9",
8236 + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
8237 "matchCount": "1",
8238 "teardown": [
8239 "$TC actions flush action ife"
8240 @@ -279,7 +279,7 @@
8241 "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
8242 "expExitCode": "0",
8243 "verifyCmd": "$TC actions get action ife index 9",
8244 - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9",
8245 + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
8246 "matchCount": "1",
8247 "teardown": [
8248 "$TC actions flush action ife"
8249 @@ -303,7 +303,7 @@
8250 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
8251 "expExitCode": "0",
8252 "verifyCmd": "$TC actions get action ife index 9",
8253 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9",
8254 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
8255 "matchCount": "1",
8256 "teardown": [
8257 "$TC actions flush action ife"
8258 @@ -327,7 +327,7 @@
8259 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
8260 "expExitCode": "0",
8261 "verifyCmd": "$TC actions get action ife index 9",
8262 - "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9",
8263 + "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
8264 "matchCount": "1",
8265 "teardown": [
8266 "$TC actions flush action ife"
8267 @@ -351,7 +351,7 @@
8268 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
8269 "expExitCode": "0",
8270 "verifyCmd": "$TC actions get action ife index 99",
8271 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99",
8272 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
8273 "matchCount": "1",
8274 "teardown": [
8275 "$TC actions flush action ife"
8276 @@ -375,7 +375,7 @@
8277 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
8278 "expExitCode": "255",
8279 "verifyCmd": "$TC actions get action ife index 99",
8280 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99",
8281 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
8282 "matchCount": "0",
8283 "teardown": []
8284 },
8285 @@ -397,7 +397,7 @@
8286 "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
8287 "expExitCode": "0",
8288 "verifyCmd": "$TC actions get action ife index 1",
8289 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1",
8290 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
8291 "matchCount": "1",
8292 "teardown": [
8293 "$TC actions flush action ife"
8294 @@ -421,7 +421,7 @@
8295 "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
8296 "expExitCode": "0",
8297 "verifyCmd": "$TC actions get action ife index 1",
8298 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1",
8299 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
8300 "matchCount": "1",
8301 "teardown": [
8302 "$TC actions flush action ife"
8303 @@ -445,7 +445,7 @@
8304 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
8305 "expExitCode": "0",
8306 "verifyCmd": "$TC actions get action ife index 1",
8307 - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
8308 + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
8309 "matchCount": "1",
8310 "teardown": [
8311 "$TC actions flush action ife"
8312 @@ -469,7 +469,7 @@
8313 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
8314 "expExitCode": "0",
8315 "verifyCmd": "$TC actions get action ife index 1",
8316 - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
8317 + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
8318 "matchCount": "1",
8319 "teardown": [
8320 "$TC actions flush action ife"
8321 @@ -493,7 +493,7 @@
8322 "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
8323 "expExitCode": "0",
8324 "verifyCmd": "$TC actions get action ife index 77",
8325 - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77",
8326 + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
8327 "matchCount": "1",
8328 "teardown": [
8329 "$TC actions flush action ife"
8330 @@ -517,7 +517,7 @@
8331 "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
8332 "expExitCode": "0",
8333 "verifyCmd": "$TC actions get action ife index 77",
8334 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77",
8335 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
8336 "matchCount": "1",
8337 "teardown": [
8338 "$TC actions flush action ife"
8339 @@ -541,7 +541,7 @@
8340 "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
8341 "expExitCode": "0",
8342 "verifyCmd": "$TC actions get action ife index 77",
8343 - "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77",
8344 + "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
8345 "matchCount": "1",
8346 "teardown": [
8347 "$TC actions flush action ife"
8348 @@ -565,7 +565,7 @@
8349 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
8350 "expExitCode": "0",
8351 "verifyCmd": "$TC actions get action ife index 1",
8352 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
8353 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
8354 "matchCount": "1",
8355 "teardown": [
8356 "$TC actions flush action ife"
8357 @@ -589,7 +589,7 @@
8358 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
8359 "expExitCode": "255",
8360 "verifyCmd": "$TC actions get action ife index 1",
8361 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1",
8362 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
8363 "matchCount": "0",
8364 "teardown": []
8365 },
8366 @@ -611,7 +611,7 @@
8367 "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
8368 "expExitCode": "0",
8369 "verifyCmd": "$TC actions get action ife index 1",
8370 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1",
8371 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
8372 "matchCount": "1",
8373 "teardown": [
8374 "$TC actions flush action ife"
8375 @@ -635,7 +635,7 @@
8376 "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
8377 "expExitCode": "0",
8378 "verifyCmd": "$TC actions get action ife index 1",
8379 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
8380 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
8381 "matchCount": "1",
8382 "teardown": [
8383 "$TC actions flush action ife"
8384 @@ -659,7 +659,7 @@
8385 "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
8386 "expExitCode": "0",
8387 "verifyCmd": "$TC actions get action ife index 11",
8388 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
8389 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
8390 "matchCount": "1",
8391 "teardown": [
8392 "$TC actions flush action ife"
8393 @@ -683,7 +683,7 @@
8394 "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
8395 "expExitCode": "0",
8396 "verifyCmd": "$TC actions get action ife index 1",
8397 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1",
8398 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
8399 "matchCount": "1",
8400 "teardown": [
8401 "$TC actions flush action ife"
8402 @@ -707,7 +707,7 @@
8403 "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
8404 "expExitCode": "0",
8405 "verifyCmd": "$TC actions get action ife index 21",
8406 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21",
8407 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
8408 "matchCount": "1",
8409 "teardown": [
8410 "$TC actions flush action ife"
8411 @@ -731,7 +731,7 @@
8412 "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
8413 "expExitCode": "0",
8414 "verifyCmd": "$TC actions get action ife index 21",
8415 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21",
8416 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
8417 "matchCount": "1",
8418 "teardown": [
8419 "$TC actions flush action ife"
8420 @@ -739,7 +739,7 @@
8421 },
8422 {
8423 "id": "fac3",
8424 - "name": "Create valid ife encode action with index at 32-bit maximnum",
8425 + "name": "Create valid ife encode action with index at 32-bit maximum",
8426 "category": [
8427 "actions",
8428 "ife"
8429 @@ -755,7 +755,7 @@
8430 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
8431 "expExitCode": "0",
8432 "verifyCmd": "$TC actions get action ife index 4294967295",
8433 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295",
8434 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
8435 "matchCount": "1",
8436 "teardown": [
8437 "$TC actions flush action ife"
8438 @@ -779,7 +779,7 @@
8439 "cmdUnderTest": "$TC actions add action ife decode pass index 1",
8440 "expExitCode": "0",
8441 "verifyCmd": "$TC actions get action ife index 1",
8442 - "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8443 + "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8444 "matchCount": "1",
8445 "teardown": [
8446 "$TC actions flush action ife"
8447 @@ -803,7 +803,7 @@
8448 "cmdUnderTest": "$TC actions add action ife decode pipe index 1",
8449 "expExitCode": "0",
8450 "verifyCmd": "$TC actions get action ife index 1",
8451 - "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8452 + "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8453 "matchCount": "1",
8454 "teardown": [
8455 "$TC actions flush action ife"
8456 @@ -827,7 +827,7 @@
8457 "cmdUnderTest": "$TC actions add action ife decode continue index 1",
8458 "expExitCode": "0",
8459 "verifyCmd": "$TC actions get action ife index 1",
8460 - "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8461 + "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8462 "matchCount": "1",
8463 "teardown": [
8464 "$TC actions flush action ife"
8465 @@ -851,7 +851,7 @@
8466 "cmdUnderTest": "$TC actions add action ife decode drop index 1",
8467 "expExitCode": "0",
8468 "verifyCmd": "$TC actions get action ife index 1",
8469 - "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8470 + "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8471 "matchCount": "1",
8472 "teardown": [
8473 "$TC actions flush action ife"
8474 @@ -875,7 +875,7 @@
8475 "cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
8476 "expExitCode": "0",
8477 "verifyCmd": "$TC actions get action ife index 1",
8478 - "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8479 + "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8480 "matchCount": "1",
8481 "teardown": [
8482 "$TC actions flush action ife"
8483 @@ -899,7 +899,7 @@
8484 "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
8485 "expExitCode": "0",
8486 "verifyCmd": "$TC actions get action ife index 1",
8487 - "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
8488 + "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
8489 "matchCount": "1",
8490 "teardown": [
8491 "$TC actions flush action ife"
8492 @@ -923,7 +923,7 @@
8493 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
8494 "expExitCode": "255",
8495 "verifyCmd": "$TC actions get action ife index 4294967295999",
8496 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999",
8497 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
8498 "matchCount": "0",
8499 "teardown": []
8500 },
8501 @@ -945,7 +945,7 @@
8502 "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
8503 "expExitCode": "255",
8504 "verifyCmd": "$TC actions get action ife index 4",
8505 - "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4",
8506 + "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
8507 "matchCount": "0",
8508 "teardown": []
8509 },
8510 @@ -967,7 +967,7 @@
8511 "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
8512 "expExitCode": "0",
8513 "verifyCmd": "$TC actions get action ife index 4",
8514 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
8515 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
8516 "matchCount": "1",
8517 "teardown": [
8518 "$TC actions flush action ife"
8519 @@ -991,7 +991,7 @@
8520 "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
8521 "expExitCode": "255",
8522 "verifyCmd": "$TC actions get action ife index 4",
8523 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4",
8524 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
8525 "matchCount": "0",
8526 "teardown": []
8527 },
8528 @@ -1013,7 +1013,7 @@
8529 "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
8530 "expExitCode": "255",
8531 "verifyCmd": "$TC actions get action ife index 4",
8532 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4",
8533 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
8534 "matchCount": "0",
8535 "teardown": []
8536 },
8537 diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
8538 index 10b2d894e4362..e7e15a7336b6d 100644
8539 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
8540 +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
8541 @@ -81,35 +81,6 @@
8542 ]
8543 ]
8544 },
8545 - {
8546 - "id": "ba4e",
8547 - "name": "Add tunnel_key set action with missing mandatory id parameter",
8548 - "category": [
8549 - "actions",
8550 - "tunnel_key"
8551 - ],
8552 - "setup": [
8553 - [
8554 - "$TC actions flush action tunnel_key",
8555 - 0,
8556 - 1,
8557 - 255
8558 - ]
8559 - ],
8560 - "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
8561 - "expExitCode": "255",
8562 - "verifyCmd": "$TC actions list action tunnel_key",
8563 - "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
8564 - "matchCount": "0",
8565 - "teardown": [
8566 - [
8567 - "$TC actions flush action tunnel_key",
8568 - 0,
8569 - 1,
8570 - 255
8571 - ]
8572 - ]
8573 - },
8574 {
8575 "id": "a5e0",
8576 "name": "Add tunnel_key set action with invalid src_ip parameter",
8577 @@ -634,7 +605,7 @@
8578 "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
8579 "expExitCode": "0",
8580 "verifyCmd": "$TC actions get action tunnel_key index 4",
8581 - "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
8582 + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
8583 "matchCount": "1",
8584 "teardown": [
8585 "$TC actions flush action tunnel_key"