Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0125-4.19.26-all-fixes.patch



Revision 3404
Fri Aug 2 11:47:37 2019 UTC by niro
File size: 226664 bytes
-linux-4.19.26
1 niro 3404 diff --git a/Makefile b/Makefile
2     index 2caa131ff306b..b71076cecba9c 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 19
9     -SUBLEVEL = 25
10     +SUBLEVEL = 26
11     EXTRAVERSION =
12     NAME = "People's Front"
13    
14     diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
15     index ff7d3232764a2..db681cf4959c8 100644
16     --- a/arch/arc/include/asm/cache.h
17     +++ b/arch/arc/include/asm/cache.h
18     @@ -52,6 +52,17 @@
19     #define cache_line_size() SMP_CACHE_BYTES
20     #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
21    
22     +/*
23     + * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
24     + * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit
25     + * alignment for any atomic64_t embedded in buffer.
26     + * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
27     + * value of 4 (and not 8) in ARC ABI.
28     + */
29     +#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
30     +#define ARCH_SLAB_MINALIGN 8
31     +#endif
32     +
33     extern void arc_cache_init(void);
34     extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
35     extern void read_decode_cache_bcr(void);
36     diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
37     index 8b90d25a15cca..1f945d0f40daa 100644
38     --- a/arch/arc/kernel/head.S
39     +++ b/arch/arc/kernel/head.S
40     @@ -17,6 +17,7 @@
41     #include <asm/entry.h>
42     #include <asm/arcregs.h>
43     #include <asm/cache.h>
44     +#include <asm/irqflags.h>
45    
46     .macro CPU_EARLY_SETUP
47    
48     @@ -47,6 +48,15 @@
49     sr r5, [ARC_REG_DC_CTRL]
50    
51     1:
52     +
53     +#ifdef CONFIG_ISA_ARCV2
54     + ; Unaligned access is disabled at reset, so re-enable early as
55     + ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
56     + ; by default
57     + lr r5, [status32]
58     + bset r5, r5, STATUS_AD_BIT
59     + kflag r5
60     +#endif
61     .endm
62    
63     .section .init.text, "ax",@progbits
64     @@ -93,9 +103,9 @@ ENTRY(stext)
65     #ifdef CONFIG_ARC_UBOOT_SUPPORT
66     ; Uboot - kernel ABI
67     ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
68     - ; r1 = magic number (board identity, unused as of now
69     + ; r1 = magic number (always zero as of now)
70     ; r2 = pointer to uboot provided cmdline or external DTB in mem
71     - ; These are handled later in setup_arch()
72     + ; These are handled later in handle_uboot_args()
73     st r0, [@uboot_tag]
74     st r2, [@uboot_arg]
75     #endif
76     diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
77     index b2cae79a25d71..62a30e58441c5 100644
78     --- a/arch/arc/kernel/setup.c
79     +++ b/arch/arc/kernel/setup.c
80     @@ -449,43 +449,80 @@ void setup_processor(void)
81     arc_chk_core_config();
82     }
83    
84     -static inline int is_kernel(unsigned long addr)
85     +static inline bool uboot_arg_invalid(unsigned long addr)
86     {
87     - if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
88     - return 1;
89     - return 0;
90     + /*
91     + * Check that it is a untranslated address (although MMU is not enabled
92     + * yet, it being a high address ensures this is not by fluke)
93     + */
94     + if (addr < PAGE_OFFSET)
95     + return true;
96     +
97     + /* Check that address doesn't clobber resident kernel image */
98     + return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
99     }
100    
101     -void __init setup_arch(char **cmdline_p)
102     +#define IGNORE_ARGS "Ignore U-boot args: "
103     +
104     +/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
105     +#define UBOOT_TAG_NONE 0
106     +#define UBOOT_TAG_CMDLINE 1
107     +#define UBOOT_TAG_DTB 2
108     +
109     +void __init handle_uboot_args(void)
110     {
111     + bool use_embedded_dtb = true;
112     + bool append_cmdline = false;
113     +
114     #ifdef CONFIG_ARC_UBOOT_SUPPORT
115     - /* make sure that uboot passed pointer to cmdline/dtb is valid */
116     - if (uboot_tag && is_kernel((unsigned long)uboot_arg))
117     - panic("Invalid uboot arg\n");
118     + /* check that we know this tag */
119     + if (uboot_tag != UBOOT_TAG_NONE &&
120     + uboot_tag != UBOOT_TAG_CMDLINE &&
121     + uboot_tag != UBOOT_TAG_DTB) {
122     + pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
123     + goto ignore_uboot_args;
124     + }
125     +
126     + if (uboot_tag != UBOOT_TAG_NONE &&
127     + uboot_arg_invalid((unsigned long)uboot_arg)) {
128     + pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
129     + goto ignore_uboot_args;
130     + }
131     +
132     + /* see if U-boot passed an external Device Tree blob */
133     + if (uboot_tag == UBOOT_TAG_DTB) {
134     + machine_desc = setup_machine_fdt((void *)uboot_arg);
135    
136     - /* See if u-boot passed an external Device Tree blob */
137     - machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
138     - if (!machine_desc)
139     + /* external Device Tree blob is invalid - use embedded one */
140     + use_embedded_dtb = !machine_desc;
141     + }
142     +
143     + if (uboot_tag == UBOOT_TAG_CMDLINE)
144     + append_cmdline = true;
145     +
146     +ignore_uboot_args:
147     #endif
148     - {
149     - /* No, so try the embedded one */
150     +
151     + if (use_embedded_dtb) {
152     machine_desc = setup_machine_fdt(__dtb_start);
153     if (!machine_desc)
154     panic("Embedded DT invalid\n");
155     + }
156    
157     - /*
158     - * If we are here, it is established that @uboot_arg didn't
159     - * point to DT blob. Instead if u-boot says it is cmdline,
160     - * append to embedded DT cmdline.
161     - * setup_machine_fdt() would have populated @boot_command_line
162     - */
163     - if (uboot_tag == 1) {
164     - /* Ensure a whitespace between the 2 cmdlines */
165     - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
166     - strlcat(boot_command_line, uboot_arg,
167     - COMMAND_LINE_SIZE);
168     - }
169     + /*
170     + * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
171     + * append processing can only happen after.
172     + */
173     + if (append_cmdline) {
174     + /* Ensure a whitespace between the 2 cmdlines */
175     + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
176     + strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
177     }
178     +}
179     +
180     +void __init setup_arch(char **cmdline_p)
181     +{
182     + handle_uboot_args();
183    
184     /* Save unparsed command line copy for /proc/cmdline */
185     *cmdline_p = boot_command_line;
186     diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
187     index 2c118a6ab3587..0dc23fc227ed2 100644
188     --- a/arch/arm/probes/kprobes/opt-arm.c
189     +++ b/arch/arm/probes/kprobes/opt-arm.c
190     @@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
191     }
192    
193     /* Copy arch-dep-instance from template. */
194     - memcpy(code, (unsigned char *)optprobe_template_entry,
195     + memcpy(code, (unsigned long *)&optprobe_template_entry,
196     TMPL_END_IDX * sizeof(kprobe_opcode_t));
197    
198     /* Adjust buffer according to instruction. */
199     diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
200     index 951c4231bdb85..4c47b3fd958b6 100644
201     --- a/arch/mips/configs/ath79_defconfig
202     +++ b/arch/mips/configs/ath79_defconfig
203     @@ -71,6 +71,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
204     # CONFIG_SERIAL_8250_PCI is not set
205     CONFIG_SERIAL_8250_NR_UARTS=1
206     CONFIG_SERIAL_8250_RUNTIME_UARTS=1
207     +CONFIG_SERIAL_OF_PLATFORM=y
208     CONFIG_SERIAL_AR933X=y
209     CONFIG_SERIAL_AR933X_CONSOLE=y
210     # CONFIG_HW_RANDOM is not set
211     diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
212     index d31bc2f012088..fb2b6d0b77c36 100644
213     --- a/arch/mips/jazz/jazzdma.c
214     +++ b/arch/mips/jazz/jazzdma.c
215     @@ -74,14 +74,15 @@ static int __init vdma_init(void)
216     get_order(VDMA_PGTBL_SIZE));
217     BUG_ON(!pgtbl);
218     dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
219     - pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
220     + pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
221    
222     /*
223     * Clear the R4030 translation table
224     */
225     vdma_pgtbl_init();
226    
227     - r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
228     + r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
229     + CPHYSADDR((unsigned long)pgtbl));
230     r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
231     r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
232    
233     diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
234     index aeb7b1b0f2024..252c00985c973 100644
235     --- a/arch/mips/net/ebpf_jit.c
236     +++ b/arch/mips/net/ebpf_jit.c
237     @@ -343,12 +343,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
238     const struct bpf_prog *prog = ctx->skf;
239     int stack_adjust = ctx->stack_size;
240     int store_offset = stack_adjust - 8;
241     + enum reg_val_type td;
242     int r0 = MIPS_R_V0;
243    
244     - if (dest_reg == MIPS_R_RA &&
245     - get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
246     + if (dest_reg == MIPS_R_RA) {
247     /* Don't let zero extended value escape. */
248     - emit_instr(ctx, sll, r0, r0, 0);
249     + td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
250     + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
251     + emit_instr(ctx, sll, r0, r0, 0);
252     + }
253    
254     if (ctx->flags & EBPF_SAVE_RA) {
255     emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
256     diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
257     index 2582df1c529bb..0964c236e3e5a 100644
258     --- a/arch/parisc/kernel/ptrace.c
259     +++ b/arch/parisc/kernel/ptrace.c
260     @@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
261    
262     long do_syscall_trace_enter(struct pt_regs *regs)
263     {
264     - if (test_thread_flag(TIF_SYSCALL_TRACE) &&
265     - tracehook_report_syscall_entry(regs)) {
266     + if (test_thread_flag(TIF_SYSCALL_TRACE)) {
267     + int rc = tracehook_report_syscall_entry(regs);
268     +
269     /*
270     - * Tracing decided this syscall should not happen or the
271     - * debugger stored an invalid system call number. Skip
272     - * the system call and the system call restart handling.
273     + * As tracesys_next does not set %r28 to -ENOSYS
274     + * when %r20 is set to -1, initialize it here.
275     */
276     - regs->gr[20] = -1UL;
277     - goto out;
278     + regs->gr[28] = -ENOSYS;
279     +
280     + if (rc) {
281     + /*
282     + * A nonzero return code from
283     + * tracehook_report_syscall_entry() tells us
284     + * to prevent the syscall execution. Skip
285     + * the syscall call and the syscall restart handling.
286     + *
287     + * Note that the tracer may also just change
288     + * regs->gr[20] to an invalid syscall number,
289     + * that is handled by tracesys_next.
290     + */
291     + regs->gr[20] = -1UL;
292     + return -1;
293     + }
294     }
295    
296     /* Do the secure computing check after ptrace. */
297     @@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
298     regs->gr[24] & 0xffffffff,
299     regs->gr[23] & 0xffffffff);
300    
301     -out:
302     /*
303     * Sign extend the syscall number to 64bit since it may have been
304     * modified by a compat ptrace call
305     diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
306     index 81d4574d1f377..9fd2ff28b8ff2 100644
307     --- a/arch/powerpc/kernel/head_8xx.S
308     +++ b/arch/powerpc/kernel/head_8xx.S
309     @@ -919,11 +919,12 @@ start_here:
310    
311     /* set up the PTE pointers for the Abatron bdiGDB.
312     */
313     - tovirt(r6,r6)
314     lis r5, abatron_pteptrs@h
315     ori r5, r5, abatron_pteptrs@l
316     stw r5, 0xf0(0) /* Must match your Abatron config file */
317     tophys(r5,r5)
318     + lis r6, swapper_pg_dir@h
319     + ori r6, r6, swapper_pg_dir@l
320     stw r6, 0(r5)
321    
322     /* Now turn on the MMU for real! */
323     diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
324     index 7bcfa61375c09..98d13c6a64be0 100644
325     --- a/arch/x86/kvm/cpuid.c
326     +++ b/arch/x86/kvm/cpuid.c
327     @@ -337,6 +337,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
328     unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
329     unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
330     unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
331     + unsigned f_la57 = 0;
332    
333     /* cpuid 1.edx */
334     const u32 kvm_cpuid_1_edx_x86_features =
335     @@ -491,7 +492,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
336     // TSC_ADJUST is emulated
337     entry->ebx |= F(TSC_ADJUST);
338     entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
339     + f_la57 = entry->ecx & F(LA57);
340     cpuid_mask(&entry->ecx, CPUID_7_ECX);
341     + /* Set LA57 based on hardware capability. */
342     + entry->ecx |= f_la57;
343     entry->ecx |= f_umip;
344     /* PKU is not yet implemented for shadow paging. */
345     if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
346     diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
347     index 52a7c3faee0cc..782f98b332f05 100644
348     --- a/arch/x86/xen/enlighten_pv.c
349     +++ b/arch/x86/xen/enlighten_pv.c
350     @@ -899,10 +899,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
351     val = native_read_msr_safe(msr, err);
352     switch (msr) {
353     case MSR_IA32_APICBASE:
354     -#ifdef CONFIG_X86_X2APIC
355     - if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
356     -#endif
357     - val &= ~X2APIC_ENABLE;
358     + val &= ~X2APIC_ENABLE;
359     break;
360     }
361     return val;
362     diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
363     index ea59c01ce8db0..f530d35412428 100644
364     --- a/drivers/acpi/nfit/core.c
365     +++ b/drivers/acpi/nfit/core.c
366     @@ -719,6 +719,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
367     struct acpi_nfit_memory_map *memdev;
368     struct acpi_nfit_desc *acpi_desc;
369     struct nfit_mem *nfit_mem;
370     + u16 physical_id;
371    
372     mutex_lock(&acpi_desc_lock);
373     list_for_each_entry(acpi_desc, &acpi_descs, list) {
374     @@ -726,10 +727,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
375     list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
376     memdev = __to_nfit_memdev(nfit_mem);
377     if (memdev->device_handle == device_handle) {
378     + *flags = memdev->flags;
379     + physical_id = memdev->physical_id;
380     mutex_unlock(&acpi_desc->init_mutex);
381     mutex_unlock(&acpi_desc_lock);
382     - *flags = memdev->flags;
383     - return memdev->physical_id;
384     + return physical_id;
385     }
386     }
387     mutex_unlock(&acpi_desc->init_mutex);
388     diff --git a/drivers/atm/he.c b/drivers/atm/he.c
389     index 29f102dcfec49..329ce9072ee9f 100644
390     --- a/drivers/atm/he.c
391     +++ b/drivers/atm/he.c
392     @@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
393     instead of '/ 512', use '>> 9' to prevent a call
394     to divdu3 on x86 platforms
395     */
396     - rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
397     + rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
398    
399     if (rate_cps < 10)
400     rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
401     diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
402     index 00e954f22bc92..74401e0adb29c 100644
403     --- a/drivers/gpio/gpio-mt7621.c
404     +++ b/drivers/gpio/gpio-mt7621.c
405     @@ -30,6 +30,7 @@
406     #define GPIO_REG_EDGE 0xA0
407    
408     struct mtk_gc {
409     + struct irq_chip irq_chip;
410     struct gpio_chip chip;
411     spinlock_t lock;
412     int bank;
413     @@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
414     return 0;
415     }
416    
417     -static struct irq_chip mediatek_gpio_irq_chip = {
418     - .irq_unmask = mediatek_gpio_irq_unmask,
419     - .irq_mask = mediatek_gpio_irq_mask,
420     - .irq_mask_ack = mediatek_gpio_irq_mask,
421     - .irq_set_type = mediatek_gpio_irq_type,
422     -};
423     -
424     static int
425     mediatek_gpio_xlate(struct gpio_chip *chip,
426     const struct of_phandle_args *spec, u32 *flags)
427     @@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
428     return ret;
429     }
430    
431     + rg->irq_chip.name = dev_name(dev);
432     + rg->irq_chip.parent_device = dev;
433     + rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
434     + rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
435     + rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
436     + rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
437     +
438     if (mtk->gpio_irq) {
439     /*
440     * Manually request the irq here instead of passing
441     @@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
442     return ret;
443     }
444    
445     - ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
446     + ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
447     0, handle_simple_irq, IRQ_TYPE_NONE);
448     if (ret) {
449     dev_err(dev, "failed to add gpiochip_irqchip\n");
450     return ret;
451     }
452    
453     - gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
454     + gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
455     mtk->gpio_irq, NULL);
456     }
457    
458     @@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
459     mtk->gpio_irq = irq_of_parse_and_map(np, 0);
460     mtk->dev = dev;
461     platform_set_drvdata(pdev, mtk);
462     - mediatek_gpio_irq_chip.name = dev_name(dev);
463    
464     for (i = 0; i < MTK_BANK_CNT; i++) {
465     ret = mediatek_gpio_bank_probe(dev, np, i);
466     diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
467     index 9f3f166f17608..eb27fa76e8fc7 100644
468     --- a/drivers/gpio/gpio-pxa.c
469     +++ b/drivers/gpio/gpio-pxa.c
470     @@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
471     {
472     switch (gpio_type) {
473     case PXA3XX_GPIO:
474     + case MMP2_GPIO:
475     return false;
476    
477     default:
478     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
479     index fd825d30edf13..c0396e83f3526 100644
480     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
481     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
482     @@ -159,6 +159,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
483     }
484    
485     if (amdgpu_device_is_px(dev)) {
486     + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
487     pm_runtime_use_autosuspend(dev->dev);
488     pm_runtime_set_autosuspend_delay(dev->dev, 5000);
489     pm_runtime_set_active(dev->dev);
490     diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
491     index 80f5db4ef75fd..0805c423a5ce0 100644
492     --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
493     +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
494     @@ -1072,8 +1072,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
495     * the GPU device is not already present in the topology device
496     * list then return NULL. This means a new topology device has to
497     * be created for this GPU.
498     - * TODO: Rather than assiging @gpu to first topology device withtout
499     - * gpu attached, it will better to have more stringent check.
500     */
501     static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
502     {
503     @@ -1081,12 +1079,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
504     struct kfd_topology_device *out_dev = NULL;
505    
506     down_write(&topology_lock);
507     - list_for_each_entry(dev, &topology_device_list, list)
508     + list_for_each_entry(dev, &topology_device_list, list) {
509     + /* Discrete GPUs need their own topology device list
510     + * entries. Don't assign them to CPU/APU nodes.
511     + */
512     + if (!gpu->device_info->needs_iommu_device &&
513     + dev->node_props.cpu_cores_count)
514     + continue;
515     +
516     if (!dev->gpu && (dev->node_props.simd_count > 0)) {
517     dev->gpu = gpu;
518     out_dev = dev;
519     break;
520     }
521     + }
522     up_write(&topology_lock);
523     return out_dev;
524     }
525     diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
526     index a851bb07443f0..c5ba9128b7361 100644
527     --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
528     +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
529     @@ -624,12 +624,13 @@ static int dm_suspend(void *handle)
530     struct amdgpu_display_manager *dm = &adev->dm;
531     int ret = 0;
532    
533     + WARN_ON(adev->dm.cached_state);
534     + adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
535     +
536     s3_handle_mst(adev->ddev, true);
537    
538     amdgpu_dm_irq_suspend(adev);
539    
540     - WARN_ON(adev->dm.cached_state);
541     - adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
542    
543     dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
544    
545     diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
546     index 580e7e82034fa..53ccacf99eca4 100644
547     --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
548     +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
549     @@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
550    
551     pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
552    
553     - if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
554     + if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
555     /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
556     pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
557     /* un-mute audio */
558     @@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
559     pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
560     pipe_ctx->stream_res.stream_enc, true);
561     if (pipe_ctx->stream_res.audio) {
562     + struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
563     +
564     if (option != KEEP_ACQUIRED_RESOURCE ||
565     !dc->debug.az_endpoint_mute_only) {
566     /*only disalbe az_endpoint if power down or free*/
567     @@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
568     update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
569     pipe_ctx->stream_res.audio = NULL;
570     }
571     + if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
572     + /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
573     + pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
574    
575     /* TODO: notify audio driver for if audio modes list changed
576     * add audio mode list change flag */
577     diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
578     index 2d6506c08bf72..6f91634880aa2 100644
579     --- a/drivers/gpu/drm/i915/intel_fbdev.c
580     +++ b/drivers/gpu/drm/i915/intel_fbdev.c
581     @@ -334,8 +334,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
582     bool *enabled, int width, int height)
583     {
584     struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
585     - unsigned long conn_configured, conn_seq, mask;
586     unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
587     + unsigned long conn_configured, conn_seq;
588     int i, j;
589     bool *save_enabled;
590     bool fallback = true, ret = true;
591     @@ -353,10 +353,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
592     drm_modeset_backoff(&ctx);
593    
594     memcpy(save_enabled, enabled, count);
595     - mask = GENMASK(count - 1, 0);
596     + conn_seq = GENMASK(count - 1, 0);
597     conn_configured = 0;
598     retry:
599     - conn_seq = conn_configured;
600     for (i = 0; i < count; i++) {
601     struct drm_fb_helper_connector *fb_conn;
602     struct drm_connector *connector;
603     @@ -369,7 +368,8 @@ retry:
604     if (conn_configured & BIT(i))
605     continue;
606    
607     - if (conn_seq == 0 && !connector->has_tile)
608     + /* First pass, only consider tiled connectors */
609     + if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
610     continue;
611    
612     if (connector->status == connector_status_connected)
613     @@ -473,8 +473,10 @@ retry:
614     conn_configured |= BIT(i);
615     }
616    
617     - if ((conn_configured & mask) != mask && conn_configured != conn_seq)
618     + if (conn_configured != conn_seq) { /* repeat until no more are found */
619     + conn_seq = conn_configured;
620     goto retry;
621     + }
622    
623     /*
624     * If the BIOS didn't enable everything it could, fall back to have the
625     diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
626     index bf5f294f172fa..611ac340fb289 100644
627     --- a/drivers/gpu/drm/meson/meson_drv.c
628     +++ b/drivers/gpu/drm/meson/meson_drv.c
629     @@ -368,8 +368,10 @@ static int meson_probe_remote(struct platform_device *pdev,
630     remote_node = of_graph_get_remote_port_parent(ep);
631     if (!remote_node ||
632     remote_node == parent || /* Ignore parent endpoint */
633     - !of_device_is_available(remote_node))
634     + !of_device_is_available(remote_node)) {
635     + of_node_put(remote_node);
636     continue;
637     + }
638    
639     count += meson_probe_remote(pdev, match, remote, remote_node);
640    
641     @@ -388,10 +390,13 @@ static int meson_drv_probe(struct platform_device *pdev)
642    
643     for_each_endpoint_of_node(np, ep) {
644     remote = of_graph_get_remote_port_parent(ep);
645     - if (!remote || !of_device_is_available(remote))
646     + if (!remote || !of_device_is_available(remote)) {
647     + of_node_put(remote);
648     continue;
649     + }
650    
651     count += meson_probe_remote(pdev, &match, np, remote);
652     + of_node_put(remote);
653     }
654    
655     if (count && !match)
656     diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
657     index dec1e081f5295..6a8fb6fd183c3 100644
658     --- a/drivers/gpu/drm/radeon/radeon_kms.c
659     +++ b/drivers/gpu/drm/radeon/radeon_kms.c
660     @@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
661     }
662    
663     if (radeon_is_px(dev)) {
664     + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
665     pm_runtime_use_autosuspend(dev->dev);
666     pm_runtime_set_autosuspend_delay(dev->dev, 5000);
667     pm_runtime_set_active(dev->dev);
668     diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
669     index d7950b52a1fd9..e30b1f5b9d91a 100644
670     --- a/drivers/gpu/drm/sun4i/sun4i_backend.c
671     +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
672     @@ -717,17 +717,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
673     remote = of_graph_get_remote_port_parent(ep);
674     if (!remote)
675     continue;
676     + of_node_put(remote);
677    
678     /* does this node match any registered engines? */
679     list_for_each_entry(frontend, &drv->frontend_list, list) {
680     if (remote == frontend->node) {
681     - of_node_put(remote);
682     of_node_put(port);
683     + of_node_put(ep);
684     return frontend;
685     }
686     }
687     }
688     -
689     + of_node_put(port);
690     return ERR_PTR(-EINVAL);
691     }
692    
693     diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
694     index e36399213324d..ceb3db6f3fdda 100644
695     --- a/drivers/hwmon/tmp421.c
696     +++ b/drivers/hwmon/tmp421.c
697     @@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
698     .data = (void *)2
699     },
700     {
701     - .compatible = "ti,tmp422",
702     + .compatible = "ti,tmp442",
703     .data = (void *)3
704     },
705     { },
706     diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
707     index 0d3473b4596e1..21f4239022c7a 100644
708     --- a/drivers/infiniband/hw/mthca/mthca_provider.c
709     +++ b/drivers/infiniband/hw/mthca/mthca_provider.c
710     @@ -533,7 +533,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
711     {
712     struct mthca_ucontext *context;
713    
714     - qp = kmalloc(sizeof *qp, GFP_KERNEL);
715     + qp = kzalloc(sizeof(*qp), GFP_KERNEL);
716     if (!qp)
717     return ERR_PTR(-ENOMEM);
718    
719     @@ -599,7 +599,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
720     if (pd->uobject)
721     return ERR_PTR(-EINVAL);
722    
723     - qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
724     + qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
725     if (!qp)
726     return ERR_PTR(-ENOMEM);
727    
728     diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
729     index 0b34e909505f5..2c1114ee0c6da 100644
730     --- a/drivers/infiniband/ulp/srp/ib_srp.c
731     +++ b/drivers/infiniband/ulp/srp/ib_srp.c
732     @@ -2951,7 +2951,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
733     {
734     struct srp_target_port *target = host_to_target(scmnd->device->host);
735     struct srp_rdma_ch *ch;
736     - int i, j;
737     u8 status;
738    
739     shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
740     @@ -2963,15 +2962,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
741     if (status)
742     return FAILED;
743    
744     - for (i = 0; i < target->ch_count; i++) {
745     - ch = &target->ch[i];
746     - for (j = 0; j < target->req_ring_size; ++j) {
747     - struct srp_request *req = &ch->req_ring[j];
748     -
749     - srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
750     - }
751     - }
752     -
753     return SUCCESS;
754     }
755    
756     diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
757     index 4ac378e489023..40ca1e8fa09fc 100644
758     --- a/drivers/isdn/hardware/avm/b1.c
759     +++ b/drivers/isdn/hardware/avm/b1.c
760     @@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
761     int i, j;
762    
763     for (j = 0; j < AVM_MAXVERSION; j++)
764     - cinfo->version[j] = "\0\0" + 1;
765     + cinfo->version[j] = "";
766     for (i = 0, j = 0;
767     j < AVM_MAXVERSION && i < cinfo->versionlen;
768     j++, i += cinfo->versionbuf[i] + 1)
769     diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
770     index b730037a0e2d3..9cff667b2d245 100644
771     --- a/drivers/isdn/i4l/isdn_tty.c
772     +++ b/drivers/isdn/i4l/isdn_tty.c
773     @@ -1456,15 +1456,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
774     {
775     modem_info *info = (modem_info *) tty->driver_data;
776    
777     + mutex_lock(&modem_info_mutex);
778     if (!old_termios)
779     isdn_tty_change_speed(info);
780     else {
781     if (tty->termios.c_cflag == old_termios->c_cflag &&
782     tty->termios.c_ispeed == old_termios->c_ispeed &&
783     - tty->termios.c_ospeed == old_termios->c_ospeed)
784     + tty->termios.c_ospeed == old_termios->c_ospeed) {
785     + mutex_unlock(&modem_info_mutex);
786     return;
787     + }
788     isdn_tty_change_speed(info);
789     }
790     + mutex_unlock(&modem_info_mutex);
791     }
792    
793     /*
794     diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
795     index a2e74feee2b2f..fd64df5a57a5e 100644
796     --- a/drivers/leds/leds-lp5523.c
797     +++ b/drivers/leds/leds-lp5523.c
798     @@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
799    
800     /* Let the programs run for couple of ms and check the engine status */
801     usleep_range(3000, 6000);
802     - lp55xx_read(chip, LP5523_REG_STATUS, &status);
803     + ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
804     + if (ret)
805     + return ret;
806     status &= LP5523_ENG_STATUS_MASK;
807    
808     if (status != LP5523_ENG_STATUS_MASK) {
809     diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
810     index 30d09d1771717..11ab17f64c649 100644
811     --- a/drivers/mfd/ab8500-core.c
812     +++ b/drivers/mfd/ab8500-core.c
813     @@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
814     mutex_unlock(&ab8500->lock);
815     dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
816    
817     - return ret;
818     + return (ret < 0) ? ret : 0;
819     }
820    
821     static int ab8500_get_register(struct device *dev, u8 bank,
822     diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
823     index 0be511dd93d01..f8e0fa97bb31e 100644
824     --- a/drivers/mfd/axp20x.c
825     +++ b/drivers/mfd/axp20x.c
826     @@ -640,9 +640,9 @@ static const struct mfd_cell axp221_cells[] = {
827    
828     static const struct mfd_cell axp223_cells[] = {
829     {
830     - .name = "axp221-pek",
831     - .num_resources = ARRAY_SIZE(axp22x_pek_resources),
832     - .resources = axp22x_pek_resources,
833     + .name = "axp221-pek",
834     + .num_resources = ARRAY_SIZE(axp22x_pek_resources),
835     + .resources = axp22x_pek_resources,
836     }, {
837     .name = "axp22x-adc",
838     .of_compatible = "x-powers,axp221-adc",
839     @@ -650,7 +650,7 @@ static const struct mfd_cell axp223_cells[] = {
840     .name = "axp20x-battery-power-supply",
841     .of_compatible = "x-powers,axp221-battery-power-supply",
842     }, {
843     - .name = "axp20x-regulator",
844     + .name = "axp20x-regulator",
845     }, {
846     .name = "axp20x-ac-power-supply",
847     .of_compatible = "x-powers,axp221-ac-power-supply",
848     @@ -666,9 +666,9 @@ static const struct mfd_cell axp223_cells[] = {
849    
850     static const struct mfd_cell axp152_cells[] = {
851     {
852     - .name = "axp20x-pek",
853     - .num_resources = ARRAY_SIZE(axp152_pek_resources),
854     - .resources = axp152_pek_resources,
855     + .name = "axp20x-pek",
856     + .num_resources = ARRAY_SIZE(axp152_pek_resources),
857     + .resources = axp152_pek_resources,
858     },
859     };
860    
861     @@ -697,87 +697,101 @@ static const struct resource axp288_charger_resources[] = {
862    
863     static const struct mfd_cell axp288_cells[] = {
864     {
865     - .name = "axp288_adc",
866     - .num_resources = ARRAY_SIZE(axp288_adc_resources),
867     - .resources = axp288_adc_resources,
868     - },
869     - {
870     - .name = "axp288_extcon",
871     - .num_resources = ARRAY_SIZE(axp288_extcon_resources),
872     - .resources = axp288_extcon_resources,
873     - },
874     - {
875     - .name = "axp288_charger",
876     - .num_resources = ARRAY_SIZE(axp288_charger_resources),
877     - .resources = axp288_charger_resources,
878     - },
879     - {
880     - .name = "axp288_fuel_gauge",
881     - .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
882     - .resources = axp288_fuel_gauge_resources,
883     - },
884     - {
885     - .name = "axp221-pek",
886     - .num_resources = ARRAY_SIZE(axp288_power_button_resources),
887     - .resources = axp288_power_button_resources,
888     - },
889     - {
890     - .name = "axp288_pmic_acpi",
891     + .name = "axp288_adc",
892     + .num_resources = ARRAY_SIZE(axp288_adc_resources),
893     + .resources = axp288_adc_resources,
894     + }, {
895     + .name = "axp288_extcon",
896     + .num_resources = ARRAY_SIZE(axp288_extcon_resources),
897     + .resources = axp288_extcon_resources,
898     + }, {
899     + .name = "axp288_charger",
900     + .num_resources = ARRAY_SIZE(axp288_charger_resources),
901     + .resources = axp288_charger_resources,
902     + }, {
903     + .name = "axp288_fuel_gauge",
904     + .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
905     + .resources = axp288_fuel_gauge_resources,
906     + }, {
907     + .name = "axp221-pek",
908     + .num_resources = ARRAY_SIZE(axp288_power_button_resources),
909     + .resources = axp288_power_button_resources,
910     + }, {
911     + .name = "axp288_pmic_acpi",
912     },
913     };
914    
915     static const struct mfd_cell axp803_cells[] = {
916     {
917     - .name = "axp221-pek",
918     - .num_resources = ARRAY_SIZE(axp803_pek_resources),
919     - .resources = axp803_pek_resources,
920     + .name = "axp221-pek",
921     + .num_resources = ARRAY_SIZE(axp803_pek_resources),
922     + .resources = axp803_pek_resources,
923     + }, {
924     + .name = "axp20x-gpio",
925     + .of_compatible = "x-powers,axp813-gpio",
926     + }, {
927     + .name = "axp813-adc",
928     + .of_compatible = "x-powers,axp813-adc",
929     + }, {
930     + .name = "axp20x-battery-power-supply",
931     + .of_compatible = "x-powers,axp813-battery-power-supply",
932     + }, {
933     + .name = "axp20x-ac-power-supply",
934     + .of_compatible = "x-powers,axp813-ac-power-supply",
935     + .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
936     + .resources = axp20x_ac_power_supply_resources,
937     },
938     - { .name = "axp20x-regulator" },
939     + { .name = "axp20x-regulator" },
940     };
941    
942     static const struct mfd_cell axp806_self_working_cells[] = {
943     {
944     - .name = "axp221-pek",
945     - .num_resources = ARRAY_SIZE(axp806_pek_resources),
946     - .resources = axp806_pek_resources,
947     + .name = "axp221-pek",
948     + .num_resources = ARRAY_SIZE(axp806_pek_resources),
949     + .resources = axp806_pek_resources,
950     },
951     - { .name = "axp20x-regulator" },
952     + { .name = "axp20x-regulator" },
953     };
954    
955     static const struct mfd_cell axp806_cells[] = {
956     {
957     - .id = 2,
958     - .name = "axp20x-regulator",
959     + .id = 2,
960     + .name = "axp20x-regulator",
961     },
962     };
963    
964     static const struct mfd_cell axp809_cells[] = {
965     {
966     - .name = "axp221-pek",
967     - .num_resources = ARRAY_SIZE(axp809_pek_resources),
968     - .resources = axp809_pek_resources,
969     + .name = "axp221-pek",
970     + .num_resources = ARRAY_SIZE(axp809_pek_resources),
971     + .resources = axp809_pek_resources,
972     }, {
973     - .id = 1,
974     - .name = "axp20x-regulator",
975     + .id = 1,
976     + .name = "axp20x-regulator",
977     },
978     };
979    
980     static const struct mfd_cell axp813_cells[] = {
981     {
982     - .name = "axp221-pek",
983     - .num_resources = ARRAY_SIZE(axp803_pek_resources),
984     - .resources = axp803_pek_resources,
985     + .name = "axp221-pek",
986     + .num_resources = ARRAY_SIZE(axp803_pek_resources),
987     + .resources = axp803_pek_resources,
988     }, {
989     - .name = "axp20x-regulator",
990     + .name = "axp20x-regulator",
991     }, {
992     - .name = "axp20x-gpio",
993     - .of_compatible = "x-powers,axp813-gpio",
994     + .name = "axp20x-gpio",
995     + .of_compatible = "x-powers,axp813-gpio",
996     }, {
997     - .name = "axp813-adc",
998     - .of_compatible = "x-powers,axp813-adc",
999     + .name = "axp813-adc",
1000     + .of_compatible = "x-powers,axp813-adc",
1001     }, {
1002     .name = "axp20x-battery-power-supply",
1003     .of_compatible = "x-powers,axp813-battery-power-supply",
1004     + }, {
1005     + .name = "axp20x-ac-power-supply",
1006     + .of_compatible = "x-powers,axp813-ac-power-supply",
1007     + .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
1008     + .resources = axp20x_ac_power_supply_resources,
1009     },
1010     };
1011    
1012     diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
1013     index 503979c81dae1..fab3cdc27ed64 100644
1014     --- a/drivers/mfd/bd9571mwv.c
1015     +++ b/drivers/mfd/bd9571mwv.c
1016     @@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
1017     };
1018    
1019     static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
1020     + regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
1021     regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
1022     regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
1023     regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
1024     diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
1025     index 6b22d54a540d1..bccde3eac92ca 100644
1026     --- a/drivers/mfd/cros_ec_dev.c
1027     +++ b/drivers/mfd/cros_ec_dev.c
1028     @@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
1029    
1030     cros_ec_debugfs_remove(ec);
1031    
1032     + mfd_remove_devices(ec->dev);
1033     cdev_del(&ec->cdev);
1034     device_unregister(&ec->class_dev);
1035     return 0;
1036     diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
1037     index 5970b8def5487..aec20e1c7d3d5 100644
1038     --- a/drivers/mfd/db8500-prcmu.c
1039     +++ b/drivers/mfd/db8500-prcmu.c
1040     @@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
1041     .irq_unmask = prcmu_irq_unmask,
1042     };
1043    
1044     -static __init char *fw_project_name(u32 project)
1045     +static char *fw_project_name(u32 project)
1046     {
1047     switch (project) {
1048     case PRCMU_FW_PROJECT_U8500:
1049     @@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
1050     INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
1051     }
1052    
1053     -static void __init init_prcm_registers(void)
1054     +static void init_prcm_registers(void)
1055     {
1056     u32 val;
1057    
1058     diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
1059     index c63e331738c17..234febfe6398b 100644
1060     --- a/drivers/mfd/mc13xxx-core.c
1061     +++ b/drivers/mfd/mc13xxx-core.c
1062     @@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
1063    
1064     mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
1065    
1066     - mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
1067     + ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
1068     + if (ret)
1069     + goto out;
1070    
1071     adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
1072     adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
1073     diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
1074     index 77b64bd64df36..ab24e176ef448 100644
1075     --- a/drivers/mfd/mt6397-core.c
1076     +++ b/drivers/mfd/mt6397-core.c
1077     @@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
1078    
1079     default:
1080     dev_err(&pdev->dev, "unsupported chip: %d\n", id);
1081     - ret = -ENODEV;
1082     - break;
1083     + return -ENODEV;
1084     }
1085    
1086     if (ret) {
1087     diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
1088     index 52fafea06067e..8d420c37b2a61 100644
1089     --- a/drivers/mfd/qcom_rpm.c
1090     +++ b/drivers/mfd/qcom_rpm.c
1091     @@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
1092     return -EFAULT;
1093     }
1094    
1095     + writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
1096     + writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
1097     + writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
1098     +
1099     dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
1100     fw_version[1],
1101     fw_version[2]);
1102     diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
1103     index 7a30546880a42..fe8d335a4d74d 100644
1104     --- a/drivers/mfd/ti_am335x_tscadc.c
1105     +++ b/drivers/mfd/ti_am335x_tscadc.c
1106     @@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
1107     cell->pdata_size = sizeof(tscadc);
1108     }
1109    
1110     - err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
1111     - tscadc->used_cells, NULL, 0, NULL);
1112     + err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
1113     + tscadc->cells, tscadc->used_cells, NULL,
1114     + 0, NULL);
1115     if (err < 0)
1116     goto err_disable_clk;
1117    
1118     diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
1119     index 910f569ff77c1..8bcdecf494d05 100644
1120     --- a/drivers/mfd/tps65218.c
1121     +++ b/drivers/mfd/tps65218.c
1122     @@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
1123    
1124     mutex_init(&tps->tps_lock);
1125    
1126     - ret = regmap_add_irq_chip(tps->regmap, tps->irq,
1127     - IRQF_ONESHOT, 0, &tps65218_irq_chip,
1128     - &tps->irq_data);
1129     + ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
1130     + IRQF_ONESHOT, 0, &tps65218_irq_chip,
1131     + &tps->irq_data);
1132     if (ret < 0)
1133     return ret;
1134    
1135     @@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
1136     ARRAY_SIZE(tps65218_cells), NULL, 0,
1137     regmap_irq_get_domain(tps->irq_data));
1138    
1139     - if (ret < 0)
1140     - goto err_irq;
1141     -
1142     - return 0;
1143     -
1144     -err_irq:
1145     - regmap_del_irq_chip(tps->irq, tps->irq_data);
1146     -
1147     return ret;
1148     }
1149    
1150     -static int tps65218_remove(struct i2c_client *client)
1151     -{
1152     - struct tps65218 *tps = i2c_get_clientdata(client);
1153     -
1154     - regmap_del_irq_chip(tps->irq, tps->irq_data);
1155     -
1156     - return 0;
1157     -}
1158     -
1159     static const struct i2c_device_id tps65218_id_table[] = {
1160     { "tps65218", TPS65218 },
1161     { },
1162     @@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
1163     .of_match_table = of_tps65218_match_table,
1164     },
1165     .probe = tps65218_probe,
1166     - .remove = tps65218_remove,
1167     .id_table = tps65218_id_table,
1168     };
1169    
1170     diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
1171     index 4be3d239da9ec..299016bc46d90 100644
1172     --- a/drivers/mfd/twl-core.c
1173     +++ b/drivers/mfd/twl-core.c
1174     @@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
1175     * letting it generate the right frequencies for USB, MADC, and
1176     * other purposes.
1177     */
1178     -static inline int __init protect_pm_master(void)
1179     +static inline int protect_pm_master(void)
1180     {
1181     int e = 0;
1182    
1183     @@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
1184     return e;
1185     }
1186    
1187     -static inline int __init unprotect_pm_master(void)
1188     +static inline int unprotect_pm_master(void)
1189     {
1190     int e = 0;
1191    
1192     diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
1193     index 1ee68bd440fbc..16c6e2accfaa5 100644
1194     --- a/drivers/mfd/wm5110-tables.c
1195     +++ b/drivers/mfd/wm5110-tables.c
1196     @@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
1197     { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
1198     { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
1199     { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
1200     + { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
1201     { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
1202     { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
1203     { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
1204     @@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
1205     case ARIZONA_ASRC_ENABLE:
1206     case ARIZONA_ASRC_STATUS:
1207     case ARIZONA_ASRC_RATE1:
1208     + case ARIZONA_ASRC_RATE2:
1209     case ARIZONA_ISRC_1_CTRL_1:
1210     case ARIZONA_ISRC_1_CTRL_2:
1211     case ARIZONA_ISRC_1_CTRL_3:
1212     diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1213     index 4b73131a0f206..1b5f591cf0a23 100644
1214     --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
1215     +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1216     @@ -2595,11 +2595,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
1217     goto err_device_destroy;
1218     }
1219    
1220     - clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1221     - /* Make sure we don't have a race with AENQ Links state handler */
1222     - if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1223     - netif_carrier_on(adapter->netdev);
1224     -
1225     rc = ena_enable_msix_and_set_admin_interrupts(adapter,
1226     adapter->num_queues);
1227     if (rc) {
1228     @@ -2616,6 +2611,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
1229     }
1230    
1231     set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
1232     +
1233     + clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1234     + if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1235     + netif_carrier_on(adapter->netdev);
1236     +
1237     mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1238     dev_err(&pdev->dev, "Device reset completed successfully\n");
1239    
1240     diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1241     index 65a22cd9aef26..029730bbe7db1 100644
1242     --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1243     +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1244     @@ -2052,6 +2052,7 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1245     bool nonlinear = skb_is_nonlinear(skb);
1246     struct rtnl_link_stats64 *percpu_stats;
1247     struct dpaa_percpu_priv *percpu_priv;
1248     + struct netdev_queue *txq;
1249     struct dpaa_priv *priv;
1250     struct qm_fd fd;
1251     int offset = 0;
1252     @@ -2101,6 +2102,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1253     if (unlikely(err < 0))
1254     goto skb_to_fd_failed;
1255    
1256     + txq = netdev_get_tx_queue(net_dev, queue_mapping);
1257     +
1258     + /* LLTX requires to do our own update of trans_start */
1259     + txq->trans_start = jiffies;
1260     +
1261     if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1262     fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
1263     skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1264     diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1265     index ad1779fc410e6..a78bfafd212c8 100644
1266     --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1267     +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1268     @@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
1269     struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
1270     int i;
1271    
1272     - vf_cb->mac_cb = NULL;
1273     -
1274     - kfree(vf_cb);
1275     -
1276     for (i = 0; i < handle->q_num; i++)
1277     hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
1278     +
1279     + kfree(vf_cb);
1280     }
1281    
1282     static int hns_ae_wait_flow_down(struct hnae_handle *handle)
1283     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1284     index a1aeeb8094c37..f5cd9539980f8 100644
1285     --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1286     +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1287     @@ -620,6 +620,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
1288     }
1289     #endif
1290    
1291     +#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
1292     +
1293     /* We reach this function only after checking that any of
1294     * the (IPv4 | IPv6) bits are set in cqe->status.
1295     */
1296     @@ -627,9 +629,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
1297     netdev_features_t dev_features)
1298     {
1299     __wsum hw_checksum = 0;
1300     + void *hdr;
1301     +
1302     + /* CQE csum doesn't cover padding octets in short ethernet
1303     + * frames. And the pad field is appended prior to calculating
1304     + * and appending the FCS field.
1305     + *
1306     + * Detecting these padded frames requires to verify and parse
1307     + * IP headers, so we simply force all those small frames to skip
1308     + * checksum complete.
1309     + */
1310     + if (short_frame(skb->len))
1311     + return -EINVAL;
1312    
1313     - void *hdr = (u8 *)va + sizeof(struct ethhdr);
1314     -
1315     + hdr = (u8 *)va + sizeof(struct ethhdr);
1316     hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
1317    
1318     if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
1319     @@ -822,6 +835,11 @@ xdp_drop_no_cnt:
1320     skb_record_rx_queue(skb, cq_ring);
1321    
1322     if (likely(dev->features & NETIF_F_RXCSUM)) {
1323     + /* TODO: For IP non TCP/UDP packets when csum complete is
1324     + * not an option (not supported or any other reason) we can
1325     + * actually check cqe IPOK status bit and report
1326     + * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
1327     + */
1328     if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
1329     MLX4_CQE_STATUS_UDP)) &&
1330     (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
1331     diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
1332     index 7262c6310650e..288fca826a55c 100644
1333     --- a/drivers/net/ethernet/mellanox/mlx4/icm.c
1334     +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
1335     @@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
1336     int i;
1337    
1338     if (chunk->nsg > 0)
1339     - pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
1340     + pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
1341     PCI_DMA_BIDIRECTIONAL);
1342    
1343     for (i = 0; i < chunk->npages; ++i)
1344     - __free_pages(sg_page(&chunk->mem[i]),
1345     - get_order(chunk->mem[i].length));
1346     + __free_pages(sg_page(&chunk->sg[i]),
1347     + get_order(chunk->sg[i].length));
1348     }
1349    
1350     static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
1351     @@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
1352    
1353     for (i = 0; i < chunk->npages; ++i)
1354     dma_free_coherent(&dev->persist->pdev->dev,
1355     - chunk->mem[i].length,
1356     - lowmem_page_address(sg_page(&chunk->mem[i])),
1357     - sg_dma_address(&chunk->mem[i]));
1358     + chunk->buf[i].size,
1359     + chunk->buf[i].addr,
1360     + chunk->buf[i].dma_addr);
1361     }
1362    
1363     void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
1364     @@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
1365     return 0;
1366     }
1367    
1368     -static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
1369     - int order, gfp_t gfp_mask)
1370     +static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
1371     + int order, gfp_t gfp_mask)
1372     {
1373     - void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
1374     - &sg_dma_address(mem), gfp_mask);
1375     - if (!buf)
1376     + buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
1377     + &buf->dma_addr, gfp_mask);
1378     + if (!buf->addr)
1379     return -ENOMEM;
1380    
1381     - if (offset_in_page(buf)) {
1382     - dma_free_coherent(dev, PAGE_SIZE << order,
1383     - buf, sg_dma_address(mem));
1384     + if (offset_in_page(buf->addr)) {
1385     + dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
1386     + buf->dma_addr);
1387     return -ENOMEM;
1388     }
1389    
1390     - sg_set_buf(mem, buf, PAGE_SIZE << order);
1391     - sg_dma_len(mem) = PAGE_SIZE << order;
1392     + buf->size = PAGE_SIZE << order;
1393     return 0;
1394     }
1395    
1396     @@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1397    
1398     while (npages > 0) {
1399     if (!chunk) {
1400     - chunk = kmalloc_node(sizeof(*chunk),
1401     + chunk = kzalloc_node(sizeof(*chunk),
1402     gfp_mask & ~(__GFP_HIGHMEM |
1403     __GFP_NOWARN),
1404     dev->numa_node);
1405     if (!chunk) {
1406     - chunk = kmalloc(sizeof(*chunk),
1407     + chunk = kzalloc(sizeof(*chunk),
1408     gfp_mask & ~(__GFP_HIGHMEM |
1409     __GFP_NOWARN));
1410     if (!chunk)
1411     goto fail;
1412     }
1413     + chunk->coherent = coherent;
1414    
1415     - sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
1416     - chunk->npages = 0;
1417     - chunk->nsg = 0;
1418     + if (!coherent)
1419     + sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
1420     list_add_tail(&chunk->list, &icm->chunk_list);
1421     }
1422    
1423     @@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1424    
1425     if (coherent)
1426     ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
1427     - &chunk->mem[chunk->npages],
1428     - cur_order, mask);
1429     + &chunk->buf[chunk->npages],
1430     + cur_order, mask);
1431     else
1432     - ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
1433     + ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
1434     cur_order, mask,
1435     dev->numa_node);
1436    
1437     @@ -205,7 +204,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1438     if (coherent)
1439     ++chunk->nsg;
1440     else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
1441     - chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
1442     + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
1443     chunk->npages,
1444     PCI_DMA_BIDIRECTIONAL);
1445    
1446     @@ -220,7 +219,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
1447     }
1448    
1449     if (!coherent && chunk) {
1450     - chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
1451     + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
1452     chunk->npages,
1453     PCI_DMA_BIDIRECTIONAL);
1454    
1455     @@ -320,7 +319,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
1456     u64 idx;
1457     struct mlx4_icm_chunk *chunk;
1458     struct mlx4_icm *icm;
1459     - struct page *page = NULL;
1460     + void *addr = NULL;
1461    
1462     if (!table->lowmem)
1463     return NULL;
1464     @@ -336,28 +335,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
1465    
1466     list_for_each_entry(chunk, &icm->chunk_list, list) {
1467     for (i = 0; i < chunk->npages; ++i) {
1468     + dma_addr_t dma_addr;
1469     + size_t len;
1470     +
1471     + if (table->coherent) {
1472     + len = chunk->buf[i].size;
1473     + dma_addr = chunk->buf[i].dma_addr;
1474     + addr = chunk->buf[i].addr;
1475     + } else {
1476     + struct page *page;
1477     +
1478     + len = sg_dma_len(&chunk->sg[i]);
1479     + dma_addr = sg_dma_address(&chunk->sg[i]);
1480     +
1481     + /* XXX: we should never do this for highmem
1482     + * allocation. This function either needs
1483     + * to be split, or the kernel virtual address
1484     + * return needs to be made optional.
1485     + */
1486     + page = sg_page(&chunk->sg[i]);
1487     + addr = lowmem_page_address(page);
1488     + }
1489     +
1490     if (dma_handle && dma_offset >= 0) {
1491     - if (sg_dma_len(&chunk->mem[i]) > dma_offset)
1492     - *dma_handle = sg_dma_address(&chunk->mem[i]) +
1493     - dma_offset;
1494     - dma_offset -= sg_dma_len(&chunk->mem[i]);
1495     + if (len > dma_offset)
1496     + *dma_handle = dma_addr + dma_offset;
1497     + dma_offset -= len;
1498     }
1499     +
1500     /*
1501     * DMA mapping can merge pages but not split them,
1502     * so if we found the page, dma_handle has already
1503     * been assigned to.
1504     */
1505     - if (chunk->mem[i].length > offset) {
1506     - page = sg_page(&chunk->mem[i]);
1507     + if (len > offset)
1508     goto out;
1509     - }
1510     - offset -= chunk->mem[i].length;
1511     + offset -= len;
1512     }
1513     }
1514    
1515     + addr = NULL;
1516     out:
1517     mutex_unlock(&table->mutex);
1518     - return page ? lowmem_page_address(page) + offset : NULL;
1519     + return addr ? addr + offset : NULL;
1520     }
1521    
1522     int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
1523     diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
1524     index c9169a490557c..d199874b1c074 100644
1525     --- a/drivers/net/ethernet/mellanox/mlx4/icm.h
1526     +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
1527     @@ -47,11 +47,21 @@ enum {
1528     MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
1529     };
1530    
1531     +struct mlx4_icm_buf {
1532     + void *addr;
1533     + size_t size;
1534     + dma_addr_t dma_addr;
1535     +};
1536     +
1537     struct mlx4_icm_chunk {
1538     struct list_head list;
1539     int npages;
1540     int nsg;
1541     - struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
1542     + bool coherent;
1543     + union {
1544     + struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
1545     + struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
1546     + };
1547     };
1548    
1549     struct mlx4_icm {
1550     @@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
1551    
1552     static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
1553     {
1554     - return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
1555     + if (iter->chunk->coherent)
1556     + return iter->chunk->buf[iter->page_idx].dma_addr;
1557     + else
1558     + return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
1559     }
1560    
1561     static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
1562     {
1563     - return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
1564     + if (iter->chunk->coherent)
1565     + return iter->chunk->buf[iter->page_idx].size;
1566     + else
1567     + return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
1568     }
1569    
1570     int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
1571     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1572     index 16ceeb1b2c9d8..da52e60d4437c 100644
1573     --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
1574     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1575     @@ -633,6 +633,7 @@ enum {
1576     MLX5E_STATE_ASYNC_EVENTS_ENABLED,
1577     MLX5E_STATE_OPENED,
1578     MLX5E_STATE_DESTROYING,
1579     + MLX5E_STATE_XDP_TX_ENABLED,
1580     };
1581    
1582     struct mlx5e_rqt {
1583     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1584     index ad6d471d00dd4..4a33c9a7cac7e 100644
1585     --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1586     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
1587     @@ -262,7 +262,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1588     int sq_num;
1589     int i;
1590    
1591     - if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
1592     + /* this flag is sufficient, no need to test internal sq state */
1593     + if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
1594     return -ENETDOWN;
1595    
1596     if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1597     @@ -275,9 +276,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1598    
1599     sq = &priv->channels.c[sq_num]->xdpsq;
1600    
1601     - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
1602     - return -ENETDOWN;
1603     -
1604     for (i = 0; i < n; i++) {
1605     struct xdp_frame *xdpf = frames[i];
1606     struct mlx5e_xdp_info xdpi;
1607     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1608     index 6dfab045925f0..4d096623178b9 100644
1609     --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1610     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
1611     @@ -49,6 +49,23 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
1612     int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1613     u32 flags);
1614    
1615     +static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
1616     +{
1617     + set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1618     +}
1619     +
1620     +static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
1621     +{
1622     + clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1623     + /* let other devices' napi(s) see our new state */
1624     + synchronize_rcu();
1625     +}
1626     +
1627     +static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
1628     +{
1629     + return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
1630     +}
1631     +
1632     static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
1633     {
1634     struct mlx5_wq_cyc *wq = &sq->wq;
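The helpers added above pair a priv-state bit with synchronize_rcu() so that, once mlx5e_xdp_tx_disable() returns, no NAPI poller can still be running past the enabled check. A generic flag-plus-grace-period sketch of that pattern (the global flag and helper names are illustrative, not mlx5e code):

/* Generic enable-flag + RCU grace period sketch following the pattern
 * above; the global flag and helper names are illustrative, not mlx5e's.
 */
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#define DEMO_TX_ENABLED	0

static unsigned long demo_state;

static void demo_tx_enable(void)
{
	set_bit(DEMO_TX_ENABLED, &demo_state);
}

static void demo_tx_disable(void)
{
	clear_bit(DEMO_TX_ENABLED, &demo_state);
	/* wait until every NAPI/softirq reader has observed the new state */
	synchronize_rcu();
}

/* reader side, e.g. an ndo_xdp_xmit-like path running in NAPI context */
static bool demo_tx_allowed(void)
{
	return test_bit(DEMO_TX_ENABLED, &demo_state);
}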
1635     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1636     index 944f21f99d437..637d59c01fe5c 100644
1637     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1638     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1639     @@ -2890,6 +2890,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
1640    
1641     mlx5e_build_tx2sq_maps(priv);
1642     mlx5e_activate_channels(&priv->channels);
1643     + mlx5e_xdp_tx_enable(priv);
1644     netif_tx_start_all_queues(priv->netdev);
1645    
1646     if (MLX5_ESWITCH_MANAGER(priv->mdev))
1647     @@ -2911,6 +2912,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
1648     */
1649     netif_tx_stop_all_queues(priv->netdev);
1650     netif_tx_disable(priv->netdev);
1651     + mlx5e_xdp_tx_disable(priv);
1652     mlx5e_deactivate_channels(&priv->channels);
1653     }
1654    
1655     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1656     index c9cc9747d21d1..701624a63d2f4 100644
1657     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1658     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1659     @@ -144,6 +144,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
1660    
1661     s->tx_packets += sq_stats->packets;
1662     s->tx_bytes += sq_stats->bytes;
1663     + s->tx_queue_dropped += sq_stats->dropped;
1664     }
1665     }
1666     }
1667     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1668     index 3092c59c0dc71..9f7f8425f6767 100644
1669     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1670     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1671     @@ -96,6 +96,7 @@ struct mlx5e_tc_flow_parse_attr {
1672     struct ip_tunnel_info tun_info;
1673     struct mlx5_flow_spec spec;
1674     int num_mod_hdr_actions;
1675     + int max_mod_hdr_actions;
1676     void *mod_hdr_actions;
1677     int mirred_ifindex;
1678     };
1679     @@ -1742,9 +1743,9 @@ static struct mlx5_fields fields[] = {
1680     OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
1681     };
1682    
1683     -/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
1684     - * max from the SW pedit action. On success, it says how many HW actions were
1685     - * actually parsed.
1686     +/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
1687     + * max from the SW pedit action. On success, attr->num_mod_hdr_actions
1688     + * says how many HW actions were actually parsed.
1689     */
1690     static int offload_pedit_fields(struct pedit_headers *masks,
1691     struct pedit_headers *vals,
1692     @@ -1767,9 +1768,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
1693     add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
1694    
1695     action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1696     - action = parse_attr->mod_hdr_actions;
1697     - max_actions = parse_attr->num_mod_hdr_actions;
1698     - nactions = 0;
1699     + action = parse_attr->mod_hdr_actions +
1700     + parse_attr->num_mod_hdr_actions * action_size;
1701     +
1702     + max_actions = parse_attr->max_mod_hdr_actions;
1703     + nactions = parse_attr->num_mod_hdr_actions;
1704    
1705     for (i = 0; i < ARRAY_SIZE(fields); i++) {
1706     f = &fields[i];
1707     @@ -1874,7 +1877,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
1708     if (!parse_attr->mod_hdr_actions)
1709     return -ENOMEM;
1710    
1711     - parse_attr->num_mod_hdr_actions = max_actions;
1712     + parse_attr->max_mod_hdr_actions = max_actions;
1713     return 0;
1714     }
1715    
1716     @@ -1918,9 +1921,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
1717     goto out_err;
1718     }
1719    
1720     - err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
1721     - if (err)
1722     - goto out_err;
1723     + if (!parse_attr->mod_hdr_actions) {
1724     + err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
1725     + if (err)
1726     + goto out_err;
1727     + }
1728    
1729     err = offload_pedit_fields(masks, vals, parse_attr);
1730     if (err < 0)
1731     diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
1732     index c7901a3f2a794..a903e97793f9a 100644
1733     --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
1734     +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
1735     @@ -1367,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
1736     u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
1737    
1738     if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
1739     - break;
1740     + return 0;
1741     cond_resched();
1742     } while (time_before(jiffies, end));
1743     - return 0;
1744     + return -EBUSY;
1745     }
1746    
1747     static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
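The mlxsw_pci_sw_reset() change above makes the poll loop report success from inside the loop and a real error once the deadline passes, instead of unconditionally returning 0. A simplified sketch of that poll-with-timeout shape (the callback and timeout value are placeholders, not mlxsw internals):

/* Simplified sketch of the poll-with-timeout shape used above; the
 * callback and the timeout value are placeholders, not mlxsw internals.
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

static int demo_wait_until_ready(bool (*ready)(void), unsigned int timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (ready())
			return 0;	/* success is reported from inside the loop */
		cond_resched();
	} while (time_before(jiffies, end));

	return -EBUSY;			/* falling out of the loop now means timeout */
}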
1748     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
1749     index e3c6fe8b1d406..1dcf152b28138 100644
1750     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
1751     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
1752     @@ -75,7 +75,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
1753     act_set = mlxsw_afa_block_first_set(rulei->act_block);
1754     mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
1755    
1756     - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
1757     + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
1758     + if (err)
1759     + goto err_ptce2_write;
1760     +
1761     + return 0;
1762     +
1763     +err_ptce2_write:
1764     + cregion->ops->entry_remove(cregion, centry);
1765     + return err;
1766     }
1767    
1768     static void
1769     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1770     index cdec48bcc6ad5..af673abdb4823 100644
1771     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1772     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1773     @@ -1209,7 +1209,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1774     static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1775     {
1776     return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1777     - MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1778     + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1779     }
1780    
1781     static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1782     @@ -1221,7 +1221,7 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1783     static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1784     const char *mac, u16 fid, bool adding,
1785     enum mlxsw_reg_sfd_rec_action action,
1786     - bool dynamic)
1787     + enum mlxsw_reg_sfd_rec_policy policy)
1788     {
1789     char *sfd_pl;
1790     u8 num_rec;
1791     @@ -1232,8 +1232,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1792     return -ENOMEM;
1793    
1794     mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1795     - mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1796     - mac, fid, action, local_port);
1797     + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1798     num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1799     err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1800     if (err)
1801     @@ -1252,7 +1251,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1802     bool dynamic)
1803     {
1804     return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1805     - MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
1806     + MLXSW_REG_SFD_REC_ACTION_NOP,
1807     + mlxsw_sp_sfd_rec_policy(dynamic));
1808     }
1809    
1810     int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1811     @@ -1260,7 +1260,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1812     {
1813     return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1814     MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1815     - false);
1816     + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1817     }
1818    
1819     static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1820     diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1821     index 2fa1c050a14b4..92cd8abeb41d7 100644
1822     --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1823     +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1824     @@ -1592,6 +1592,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
1825     cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1826     rx_prod.bd_prod = cpu_to_le16(bd_prod);
1827     rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1828     +
1829     + /* Make sure chain element is updated before ringing the doorbell */
1830     + dma_wmb();
1831     +
1832     DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1833     }
1834    
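The dma_wmb() added above enforces the usual producer-ring rule: data placed in DMA-coherent memory must be visible to the device before the update that tells it to look. A generic sketch of that ordering (the ring layout and field names are hypothetical, not qed's):

/* Generic sketch of the ordering rule behind the dma_wmb() above: make
 * descriptor contents visible before publishing the producer index.
 * The ring layout and field names here are hypothetical, not qed's.
 */
#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <linux/types.h>

struct demo_desc {
	__le64 addr;
	__le16 len;
};

struct demo_ring {
	struct demo_desc *desc;	/* descriptor array in DMA-coherent memory */
	__le16 *shared_prod;	/* producer index that the device polls */
	u16 prod;
};

static void demo_post_buffer(struct demo_ring *r, u64 dma, u16 len)
{
	struct demo_desc *d = &r->desc[r->prod];

	d->addr = cpu_to_le64(dma);
	d->len = cpu_to_le16(len);

	/* descriptor writes must be observable before the producer update */
	dma_wmb();

	*r->shared_prod = cpu_to_le16(++r->prod);
}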
1835     diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
1836     index 20909036e0028..1c39305274440 100644
1837     --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
1838     +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
1839     @@ -260,6 +260,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
1840     struct stmmac_extra_stats *x, u32 chan)
1841     {
1842     u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
1843     + u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
1844     int ret = 0;
1845    
1846     /* ABNORMAL interrupts */
1847     @@ -279,8 +280,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
1848     x->normal_irq_n++;
1849    
1850     if (likely(intr_status & XGMAC_RI)) {
1851     - u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
1852     - if (likely(value & XGMAC_RIE)) {
1853     + if (likely(intr_en & XGMAC_RIE)) {
1854     x->rx_normal_irq_n++;
1855     ret |= handle_rx;
1856     }
1857     @@ -292,7 +292,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
1858     }
1859    
1860     /* Clear interrupts */
1861     - writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
1862     + writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
1863    
1864     return ret;
1865     }
1866     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1867     index 2103b865726ac..123b74e25ed81 100644
1868     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1869     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1870     @@ -3522,27 +3522,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget)
1871     struct stmmac_channel *ch =
1872     container_of(napi, struct stmmac_channel, napi);
1873     struct stmmac_priv *priv = ch->priv_data;
1874     - int work_done = 0, work_rem = budget;
1875     + int work_done, rx_done = 0, tx_done = 0;
1876     u32 chan = ch->index;
1877    
1878     priv->xstats.napi_poll++;
1879    
1880     - if (ch->has_tx) {
1881     - int done = stmmac_tx_clean(priv, work_rem, chan);
1882     + if (ch->has_tx)
1883     + tx_done = stmmac_tx_clean(priv, budget, chan);
1884     + if (ch->has_rx)
1885     + rx_done = stmmac_rx(priv, budget, chan);
1886    
1887     - work_done += done;
1888     - work_rem -= done;
1889     - }
1890     -
1891     - if (ch->has_rx) {
1892     - int done = stmmac_rx(priv, work_rem, chan);
1893     + work_done = max(rx_done, tx_done);
1894     + work_done = min(work_done, budget);
1895    
1896     - work_done += done;
1897     - work_rem -= done;
1898     - }
1899     + if (work_done < budget && napi_complete_done(napi, work_done)) {
1900     + int stat;
1901    
1902     - if (work_done < budget && napi_complete_done(napi, work_done))
1903     stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
1904     + stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
1905     + &priv->xstats, chan);
1906     + if (stat && napi_reschedule(napi))
1907     + stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
1908     + }
1909    
1910     return work_done;
1911     }
1912     @@ -4191,6 +4192,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1913     return ret;
1914     }
1915    
1916     + /* Rx Watchdog is available in the COREs newer than 3.40.
1917     + * In some cases, for example on buggy HW, this feature
1918     + * has to be disabled, and this can be done by passing the
1919     + * riwt_off field from the platform.
1920     + */
1921     + if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
1922     + (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
1923     + priv->use_riwt = 1;
1924     + dev_info(priv->device,
1925     + "Enable RX Mitigation via HW Watchdog Timer\n");
1926     + }
1927     +
1928     return 0;
1929     }
1930    
1931     @@ -4323,18 +4336,6 @@ int stmmac_dvr_probe(struct device *device,
1932     if (flow_ctrl)
1933     priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
1934    
1935     - /* Rx Watchdog is available in the COREs newer than the 3.40.
1936     - * In some case, for example on bugged HW this feature
1937     - * has to be disable and this can be done by passing the
1938     - * riwt_off field from the platform.
1939     - */
1940     - if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
1941     - (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
1942     - priv->use_riwt = 1;
1943     - dev_info(priv->device,
1944     - "Enable RX Mitigation via HW Watchdog Timer\n");
1945     - }
1946     -
1947     /* Setup channels NAPI */
1948     maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
1949    
1950     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1951     index c54a50dbd5ac2..d819e8eaba122 100644
1952     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1953     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1954     @@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
1955     */
1956     static void stmmac_pci_remove(struct pci_dev *pdev)
1957     {
1958     + int i;
1959     +
1960     stmmac_dvr_remove(&pdev->dev);
1961     +
1962     + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
1963     + if (pci_resource_len(pdev, i) == 0)
1964     + continue;
1965     + pcim_iounmap_regions(pdev, BIT(i));
1966     + break;
1967     + }
1968     +
1969     pci_disable_device(pdev);
1970     }
1971    
1972     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
1973     index 531294f4978bc..58ea18af9813a 100644
1974     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
1975     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
1976     @@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
1977     /* Queue 0 is not AVB capable */
1978     if (queue <= 0 || queue >= tx_queues_count)
1979     return -EINVAL;
1980     + if (!priv->dma_cap.av)
1981     + return -EOPNOTSUPP;
1982     if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
1983     return -EOPNOTSUPP;
1984    
1985     diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
1986     index 493cd382b8aa0..01711e6e9a394 100644
1987     --- a/drivers/net/geneve.c
1988     +++ b/drivers/net/geneve.c
1989     @@ -1406,9 +1406,13 @@ static void geneve_link_config(struct net_device *dev,
1990     }
1991     #if IS_ENABLED(CONFIG_IPV6)
1992     case AF_INET6: {
1993     - struct rt6_info *rt = rt6_lookup(geneve->net,
1994     - &info->key.u.ipv6.dst, NULL, 0,
1995     - NULL, 0);
1996     + struct rt6_info *rt;
1997     +
1998     + if (!__in6_dev_get(dev))
1999     + break;
2000     +
2001     + rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
2002     + NULL, 0);
2003    
2004     if (rt && rt->dst.dev)
2005     ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
2006     diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
2007     index 70f3f90c2ed69..2787e8b1d668a 100644
2008     --- a/drivers/net/phy/phylink.c
2009     +++ b/drivers/net/phy/phylink.c
2010     @@ -502,6 +502,17 @@ static void phylink_run_resolve(struct phylink *pl)
2011     queue_work(system_power_efficient_wq, &pl->resolve);
2012     }
2013    
2014     +static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
2015     +{
2016     + unsigned long state = pl->phylink_disable_state;
2017     +
2018     + set_bit(bit, &pl->phylink_disable_state);
2019     + if (state == 0) {
2020     + queue_work(system_power_efficient_wq, &pl->resolve);
2021     + flush_work(&pl->resolve);
2022     + }
2023     +}
2024     +
2025     static void phylink_fixed_poll(struct timer_list *t)
2026     {
2027     struct phylink *pl = container_of(t, struct phylink, link_poll);
2028     @@ -955,9 +966,7 @@ void phylink_stop(struct phylink *pl)
2029     if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
2030     del_timer_sync(&pl->link_poll);
2031    
2032     - set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
2033     - queue_work(system_power_efficient_wq, &pl->resolve);
2034     - flush_work(&pl->resolve);
2035     + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
2036     }
2037     EXPORT_SYMBOL_GPL(phylink_stop);
2038    
2039     @@ -1664,9 +1673,7 @@ static void phylink_sfp_link_down(void *upstream)
2040    
2041     ASSERT_RTNL();
2042    
2043     - set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
2044     - queue_work(system_power_efficient_wq, &pl->resolve);
2045     - flush_work(&pl->resolve);
2046     + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
2047     }
2048    
2049     static void phylink_sfp_link_up(void *upstream)
2050     diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
2051     index ad9db652874dc..fef701bfad62e 100644
2052     --- a/drivers/net/phy/sfp-bus.c
2053     +++ b/drivers/net/phy/sfp-bus.c
2054     @@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
2055     return ret;
2056     }
2057     }
2058     + bus->socket_ops->attach(bus->sfp);
2059     if (bus->started)
2060     bus->socket_ops->start(bus->sfp);
2061     bus->netdev->sfp_bus = bus;
2062     @@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
2063     if (bus->registered) {
2064     if (bus->started)
2065     bus->socket_ops->stop(bus->sfp);
2066     + bus->socket_ops->detach(bus->sfp);
2067     if (bus->phydev && ops && ops->disconnect_phy)
2068     ops->disconnect_phy(bus->upstream);
2069     }
2070     diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
2071     index fd8bb998ae52d..68c8fbf099f87 100644
2072     --- a/drivers/net/phy/sfp.c
2073     +++ b/drivers/net/phy/sfp.c
2074     @@ -184,6 +184,7 @@ struct sfp {
2075    
2076     struct gpio_desc *gpio[GPIO_MAX];
2077    
2078     + bool attached;
2079     unsigned int state;
2080     struct delayed_work poll;
2081     struct delayed_work timeout;
2082     @@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
2083     */
2084     switch (sfp->sm_mod_state) {
2085     default:
2086     - if (event == SFP_E_INSERT) {
2087     + if (event == SFP_E_INSERT && sfp->attached) {
2088     sfp_module_tx_disable(sfp);
2089     sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
2090     }
2091     @@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
2092     mutex_unlock(&sfp->sm_mutex);
2093     }
2094    
2095     +static void sfp_attach(struct sfp *sfp)
2096     +{
2097     + sfp->attached = true;
2098     + if (sfp->state & SFP_F_PRESENT)
2099     + sfp_sm_event(sfp, SFP_E_INSERT);
2100     +}
2101     +
2102     +static void sfp_detach(struct sfp *sfp)
2103     +{
2104     + sfp->attached = false;
2105     + sfp_sm_event(sfp, SFP_E_REMOVE);
2106     +}
2107     +
2108     static void sfp_start(struct sfp *sfp)
2109     {
2110     sfp_sm_event(sfp, SFP_E_DEV_UP);
2111     @@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
2112     }
2113    
2114     static const struct sfp_socket_ops sfp_module_ops = {
2115     + .attach = sfp_attach,
2116     + .detach = sfp_detach,
2117     .start = sfp_start,
2118     .stop = sfp_stop,
2119     .module_info = sfp_module_info,
2120     @@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
2121     dev_info(sfp->dev, "Host maximum power %u.%uW\n",
2122     sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
2123    
2124     - sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
2125     - if (!sfp->sfp_bus)
2126     - return -ENOMEM;
2127     -
2128     /* Get the initial state, and always signal TX disable,
2129     * since the network interface will not be up.
2130     */
2131     @@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
2132     sfp->state |= SFP_F_RATE_SELECT;
2133     sfp_set_state(sfp, sfp->state);
2134     sfp_module_tx_disable(sfp);
2135     - rtnl_lock();
2136     - if (sfp->state & SFP_F_PRESENT)
2137     - sfp_sm_event(sfp, SFP_E_INSERT);
2138     - rtnl_unlock();
2139    
2140     for (i = 0; i < GPIO_MAX; i++) {
2141     if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
2142     @@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
2143     dev_warn(sfp->dev,
2144     "No tx_disable pin: SFP modules will always be emitting.\n");
2145    
2146     + sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
2147     + if (!sfp->sfp_bus)
2148     + return -ENOMEM;
2149     +
2150     return 0;
2151     }
2152    
2153     diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
2154     index 31b0acf337e27..64f54b0bbd8c4 100644
2155     --- a/drivers/net/phy/sfp.h
2156     +++ b/drivers/net/phy/sfp.h
2157     @@ -7,6 +7,8 @@
2158     struct sfp;
2159    
2160     struct sfp_socket_ops {
2161     + void (*attach)(struct sfp *sfp);
2162     + void (*detach)(struct sfp *sfp);
2163     void (*start)(struct sfp *sfp);
2164     void (*stop)(struct sfp *sfp);
2165     int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
2166     diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2167     index 4b6572f0188a7..723814d84b7d8 100644
2168     --- a/drivers/net/team/team.c
2169     +++ b/drivers/net/team/team.c
2170     @@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
2171     }
2172     }
2173    
2174     -static bool __team_option_inst_tmp_find(const struct list_head *opts,
2175     - const struct team_option_inst *needle)
2176     -{
2177     - struct team_option_inst *opt_inst;
2178     -
2179     - list_for_each_entry(opt_inst, opts, tmp_list)
2180     - if (opt_inst == needle)
2181     - return true;
2182     - return false;
2183     -}
2184     -
2185     static int __team_options_register(struct team *team,
2186     const struct team_option *option,
2187     size_t option_count)
2188     @@ -2463,7 +2452,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2189     int err = 0;
2190     int i;
2191     struct nlattr *nl_option;
2192     - LIST_HEAD(opt_inst_list);
2193    
2194     rtnl_lock();
2195    
2196     @@ -2483,6 +2471,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2197     struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2198     struct nlattr *attr;
2199     struct nlattr *attr_data;
2200     + LIST_HEAD(opt_inst_list);
2201     enum team_option_type opt_type;
2202     int opt_port_ifindex = 0; /* != 0 for per-port options */
2203     u32 opt_array_index = 0;
2204     @@ -2587,23 +2576,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2205     if (err)
2206     goto team_put;
2207     opt_inst->changed = true;
2208     -
2209     - /* dumb/evil user-space can send us duplicate opt,
2210     - * keep only the last one
2211     - */
2212     - if (__team_option_inst_tmp_find(&opt_inst_list,
2213     - opt_inst))
2214     - continue;
2215     -
2216     list_add(&opt_inst->tmp_list, &opt_inst_list);
2217     }
2218     if (!opt_found) {
2219     err = -ENOENT;
2220     goto team_put;
2221     }
2222     - }
2223    
2224     - err = team_nl_send_event_options_get(team, &opt_inst_list);
2225     + err = team_nl_send_event_options_get(team, &opt_inst_list);
2226     + if (err)
2227     + break;
2228     + }
2229    
2230     team_put:
2231     team_nl_team_put(team);
2232     diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
2233     index a7f37063518ec..3d05bc1937d40 100644
2234     --- a/drivers/pinctrl/pinctrl-max77620.c
2235     +++ b/drivers/pinctrl/pinctrl-max77620.c
2236     @@ -34,14 +34,12 @@ enum max77620_pin_ppdrv {
2237     MAX77620_PIN_PP_DRV,
2238     };
2239    
2240     -enum max77620_pinconf_param {
2241     - MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1,
2242     - MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
2243     - MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
2244     - MAX77620_SUSPEND_FPS_SOURCE,
2245     - MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
2246     - MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
2247     -};
2248     +#define MAX77620_ACTIVE_FPS_SOURCE (PIN_CONFIG_END + 1)
2249     +#define MAX77620_ACTIVE_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 2)
2250     +#define MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 3)
2251     +#define MAX77620_SUSPEND_FPS_SOURCE (PIN_CONFIG_END + 4)
2252     +#define MAX77620_SUSPEND_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 5)
2253     +#define MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 6)
2254    
2255     struct max77620_pin_function {
2256     const char *name;
2257     diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2258     index bf07735275a49..0fc382cb977bf 100644
2259     --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2260     +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
2261     @@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
2262     }
2263    
2264     static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2265     - unsigned int tid, int pg_idx, bool reply)
2266     + unsigned int tid, int pg_idx)
2267     {
2268     struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
2269     GFP_KERNEL);
2270     @@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2271     req = (struct cpl_set_tcb_field *)skb->head;
2272     req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
2273     OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2274     - req->reply = V_NO_REPLY(reply ? 0 : 1);
2275     + req->reply = V_NO_REPLY(1);
2276     req->cpu_idx = 0;
2277     req->word = htons(31);
2278     req->mask = cpu_to_be64(0xF0000000);
2279     @@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
2280     * @tid: connection id
2281     * @hcrc: header digest enabled
2282     * @dcrc: data digest enabled
2283     - * @reply: request reply from h/w
2284     * set up the iscsi digest settings for a connection identified by tid
2285     */
2286     static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2287     - int hcrc, int dcrc, int reply)
2288     + int hcrc, int dcrc)
2289     {
2290     struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
2291     GFP_KERNEL);
2292     @@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2293     req = (struct cpl_set_tcb_field *)skb->head;
2294     req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
2295     OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2296     - req->reply = V_NO_REPLY(reply ? 0 : 1);
2297     + req->reply = V_NO_REPLY(1);
2298     req->cpu_idx = 0;
2299     req->word = htons(31);
2300     req->mask = cpu_to_be64(0x0F000000);
2301     diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2302     index 211da1d5a8699..689d6c813a50d 100644
2303     --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2304     +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
2305     @@ -1517,16 +1517,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
2306     struct cxgbi_sock *csk;
2307    
2308     csk = lookup_tid(t, tid);
2309     - if (!csk)
2310     + if (!csk) {
2311     pr_err("can't find conn. for tid %u.\n", tid);
2312     + return;
2313     + }
2314    
2315     log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2316     "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
2317     csk, csk->state, csk->flags, csk->tid, rpl->status);
2318    
2319     - if (rpl->status != CPL_ERR_NONE)
2320     + if (rpl->status != CPL_ERR_NONE) {
2321     pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
2322     csk, tid, rpl->status);
2323     + csk->err = -EINVAL;
2324     + }
2325     +
2326     + complete(&csk->cmpl);
2327    
2328     __kfree_skb(skb);
2329     }
2330     @@ -1903,7 +1909,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2331     }
2332    
2333     static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2334     - int pg_idx, bool reply)
2335     + int pg_idx)
2336     {
2337     struct sk_buff *skb;
2338     struct cpl_set_tcb_field *req;
2339     @@ -1919,7 +1925,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2340     req = (struct cpl_set_tcb_field *)skb->head;
2341     INIT_TP_WR(req, csk->tid);
2342     OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
2343     - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
2344     + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2345     req->word_cookie = htons(0);
2346     req->mask = cpu_to_be64(0x3 << 8);
2347     req->val = cpu_to_be64(pg_idx << 8);
2348     @@ -1928,12 +1934,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2349     log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2350     "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
2351    
2352     + reinit_completion(&csk->cmpl);
2353     cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2354     - return 0;
2355     + wait_for_completion(&csk->cmpl);
2356     +
2357     + return csk->err;
2358     }
2359    
2360     static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2361     - int hcrc, int dcrc, int reply)
2362     + int hcrc, int dcrc)
2363     {
2364     struct sk_buff *skb;
2365     struct cpl_set_tcb_field *req;
2366     @@ -1951,7 +1960,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2367     req = (struct cpl_set_tcb_field *)skb->head;
2368     INIT_TP_WR(req, tid);
2369     OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2370     - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
2371     + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2372     req->word_cookie = htons(0);
2373     req->mask = cpu_to_be64(0x3 << 4);
2374     req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
2375     @@ -1961,8 +1970,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2376     log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2377     "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
2378    
2379     + reinit_completion(&csk->cmpl);
2380     cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2381     - return 0;
2382     + wait_for_completion(&csk->cmpl);
2383     +
2384     + return csk->err;
2385     }
2386    
2387     static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
2388     diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
2389     index 3f3af5e74a07d..f2c561ca731a3 100644
2390     --- a/drivers/scsi/cxgbi/libcxgbi.c
2391     +++ b/drivers/scsi/cxgbi/libcxgbi.c
2392     @@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
2393     skb_queue_head_init(&csk->receive_queue);
2394     skb_queue_head_init(&csk->write_queue);
2395     timer_setup(&csk->retry_timer, NULL, 0);
2396     + init_completion(&csk->cmpl);
2397     rwlock_init(&csk->callback_lock);
2398     csk->cdev = cdev;
2399     csk->flags = 0;
2400     @@ -2252,14 +2253,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2401     if (!err && conn->hdrdgst_en)
2402     err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2403     conn->hdrdgst_en,
2404     - conn->datadgst_en, 0);
2405     + conn->datadgst_en);
2406     break;
2407     case ISCSI_PARAM_DATADGST_EN:
2408     err = iscsi_set_param(cls_conn, param, buf, buflen);
2409     if (!err && conn->datadgst_en)
2410     err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2411     conn->hdrdgst_en,
2412     - conn->datadgst_en, 0);
2413     + conn->datadgst_en);
2414     break;
2415     case ISCSI_PARAM_MAX_R2T:
2416     return iscsi_tcp_set_max_r2t(conn, buf);
2417     @@ -2385,7 +2386,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
2418    
2419     ppm = csk->cdev->cdev2ppm(csk->cdev);
2420     err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
2421     - ppm->tformat.pgsz_idx_dflt, 0);
2422     + ppm->tformat.pgsz_idx_dflt);
2423     if (err < 0)
2424     return err;
2425    
2426     diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
2427     index dcb190e753434..3bf7414a75e5e 100644
2428     --- a/drivers/scsi/cxgbi/libcxgbi.h
2429     +++ b/drivers/scsi/cxgbi/libcxgbi.h
2430     @@ -146,6 +146,7 @@ struct cxgbi_sock {
2431     struct sk_buff_head receive_queue;
2432     struct sk_buff_head write_queue;
2433     struct timer_list retry_timer;
2434     + struct completion cmpl;
2435     int err;
2436     rwlock_t callback_lock;
2437     void *user_data;
2438     @@ -487,9 +488,9 @@ struct cxgbi_device {
2439     struct cxgbi_ppm *,
2440     struct cxgbi_task_tag_info *);
2441     int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
2442     - unsigned int, int, int, int);
2443     + unsigned int, int, int);
2444     int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
2445     - unsigned int, int, bool);
2446     + unsigned int, int);
2447    
2448     void (*csk_release_offload_resources)(struct cxgbi_sock *);
2449     int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
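Taken together, the cxgbi hunks above turn the SET_TCB_FIELD programming into a request/reply exchange: the request now asks the hardware for a reply, the caller blocks on a per-socket completion, and do_set_tcb_rpl() records the status and completes it. A generic request-then-wait sketch of that pattern (the demo_* names are illustrative only):

/* Generic request/reply-completion sketch mirroring the cxgbi change;
 * the demo_* names are illustrative, and demo_dev.cmpl is assumed to
 * have been init_completion()'d at setup time (as csk->cmpl is in
 * cxgbi_sock_create()).
 */
#include <linux/completion.h>
#include <linux/errno.h>

struct demo_dev {
	struct completion cmpl;
	int err;
};

static void demo_send(struct demo_dev *dev)
{
	/* queue the SET_TCB_FIELD-style request to the hardware */
}

/* caller: issue the request, then block until the reply handler runs */
static int demo_request(struct demo_dev *dev)
{
	dev->err = 0;
	reinit_completion(&dev->cmpl);
	demo_send(dev);
	wait_for_completion(&dev->cmpl);
	return dev->err;
}

/* reply path (e.g. a CPL/interrupt handler): record status, wake waiter */
static void demo_reply(struct demo_dev *dev, int status)
{
	if (status)
		dev->err = -EINVAL;
	complete(&dev->cmpl);
}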
2450     diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
2451     index 08c7b1e25fe48..dde84f7443136 100644
2452     --- a/drivers/scsi/isci/init.c
2453     +++ b/drivers/scsi/isci/init.c
2454     @@ -588,6 +588,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
2455     shost->max_lun = ~0;
2456     shost->max_cmd_len = MAX_COMMAND_SIZE;
2457    
2458     + /* turn on DIF support */
2459     + scsi_host_set_prot(shost,
2460     + SHOST_DIF_TYPE1_PROTECTION |
2461     + SHOST_DIF_TYPE2_PROTECTION |
2462     + SHOST_DIF_TYPE3_PROTECTION);
2463     + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
2464     +
2465     err = scsi_add_host(shost, &pdev->dev);
2466     if (err)
2467     goto err_shost;
2468     @@ -675,13 +682,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2469     goto err_host_alloc;
2470     }
2471     pci_info->hosts[i] = h;
2472     -
2473     - /* turn on DIF support */
2474     - scsi_host_set_prot(to_shost(h),
2475     - SHOST_DIF_TYPE1_PROTECTION |
2476     - SHOST_DIF_TYPE2_PROTECTION |
2477     - SHOST_DIF_TYPE3_PROTECTION);
2478     - scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
2479     }
2480    
2481     err = isci_setup_interrupts(pdev);
2482     diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
2483     index 2f0a4f2c5ff80..d4821b9dea45d 100644
2484     --- a/drivers/scsi/qedi/qedi_iscsi.c
2485     +++ b/drivers/scsi/qedi/qedi_iscsi.c
2486     @@ -954,6 +954,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
2487    
2488     qedi_ep = ep->dd_data;
2489     if (qedi_ep->state == EP_STATE_IDLE ||
2490     + qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
2491     qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
2492     return -1;
2493    
2494     @@ -1036,6 +1037,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
2495    
2496     switch (qedi_ep->state) {
2497     case EP_STATE_OFLDCONN_START:
2498     + case EP_STATE_OFLDCONN_NONE:
2499     goto ep_release_conn;
2500     case EP_STATE_OFLDCONN_FAILED:
2501     break;
2502     @@ -1226,6 +1228,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
2503    
2504     if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
2505     QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
2506     + qedi_ep->state = EP_STATE_OFLDCONN_NONE;
2507     ret = -EIO;
2508     goto set_path_exit;
2509     }
2510     diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
2511     index 11260776212fa..892d70d545537 100644
2512     --- a/drivers/scsi/qedi/qedi_iscsi.h
2513     +++ b/drivers/scsi/qedi/qedi_iscsi.h
2514     @@ -59,6 +59,7 @@ enum {
2515     EP_STATE_OFLDCONN_FAILED = 0x2000,
2516     EP_STATE_CONNECT_FAILED = 0x4000,
2517     EP_STATE_DISCONN_TIMEDOUT = 0x8000,
2518     + EP_STATE_OFLDCONN_NONE = 0x10000,
2519     };
2520    
2521     struct qedi_conn;
2522     diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
2523     index 0e13349dce570..575445c761b48 100644
2524     --- a/drivers/scsi/qla4xxx/ql4_os.c
2525     +++ b/drivers/scsi/qla4xxx/ql4_os.c
2526     @@ -7237,6 +7237,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
2527    
2528     rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
2529     fw_ddb_entry);
2530     + if (rc)
2531     + goto free_sess;
2532    
2533     ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
2534     __func__, fnode_sess->dev.kobj.name);
2535     diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
2536     index 14e5bf7af0bb1..c3bcaaec0fc5c 100644
2537     --- a/drivers/scsi/ufs/ufs.h
2538     +++ b/drivers/scsi/ufs/ufs.h
2539     @@ -195,7 +195,7 @@ enum ufs_desc_def_size {
2540     QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
2541     QUERY_DESC_UNIT_DEF_SIZE = 0x23,
2542     QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
2543     - QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
2544     + QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
2545     QUERY_DESC_POWER_DEF_SIZE = 0x62,
2546     QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
2547     };
2548     diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2549     index 12ddb5928a738..6e80dfe4fa979 100644
2550     --- a/drivers/scsi/ufs/ufshcd.c
2551     +++ b/drivers/scsi/ufs/ufshcd.c
2552     @@ -7768,6 +7768,8 @@ out:
2553     trace_ufshcd_system_resume(dev_name(hba->dev), ret,
2554     ktime_to_us(ktime_sub(ktime_get(), start)),
2555     hba->curr_dev_pwr_mode, hba->uic_link_state);
2556     + if (!ret)
2557     + hba->is_sys_suspended = false;
2558     return ret;
2559     }
2560     EXPORT_SYMBOL(ufshcd_system_resume);
2561     diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
2562     index ac263a180253e..894e60ecebe20 100644
2563     --- a/drivers/staging/erofs/data.c
2564     +++ b/drivers/staging/erofs/data.c
2565     @@ -25,7 +25,7 @@ static inline void read_endio(struct bio *bio)
2566     struct page *page = bvec->bv_page;
2567    
2568     /* page is already locked */
2569     - BUG_ON(PageUptodate(page));
2570     + DBG_BUGON(PageUptodate(page));
2571    
2572     if (unlikely(err))
2573     SetPageError(page);
2574     @@ -91,12 +91,12 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
2575     struct erofs_map_blocks *map,
2576     int flags)
2577     {
2578     + int err = 0;
2579     erofs_blk_t nblocks, lastblk;
2580     u64 offset = map->m_la;
2581     struct erofs_vnode *vi = EROFS_V(inode);
2582    
2583     trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
2584     - BUG_ON(is_inode_layout_compression(inode));
2585    
2586     nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
2587     lastblk = nblocks - is_inode_layout_inline(inode);
2588     @@ -123,18 +123,27 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
2589     map->m_plen = inode->i_size - offset;
2590    
2591     /* inline data should locate in one meta block */
2592     - BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE);
2593     + if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
2594     + DBG_BUGON(1);
2595     + err = -EIO;
2596     + goto err_out;
2597     + }
2598     +
2599     map->m_flags |= EROFS_MAP_META;
2600     } else {
2601     errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
2602     vi->nid, inode->i_size, map->m_la);
2603     - BUG();
2604     + DBG_BUGON(1);
2605     + err = -EIO;
2606     + goto err_out;
2607     }
2608    
2609     out:
2610     map->m_llen = map->m_plen;
2611     +
2612     +err_out:
2613     trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
2614     - return 0;
2615     + return err;
2616     }
2617    
2618     #ifdef CONFIG_EROFS_FS_ZIP
2619     @@ -190,7 +199,7 @@ static inline struct bio *erofs_read_raw_page(
2620     erofs_off_t current_block = (erofs_off_t)page->index;
2621     int err;
2622    
2623     - BUG_ON(!nblocks);
2624     + DBG_BUGON(!nblocks);
2625    
2626     if (PageUptodate(page)) {
2627     err = 0;
2628     @@ -233,7 +242,7 @@ submit_bio_retry:
2629     }
2630    
2631     /* for RAW access mode, m_plen must be equal to m_llen */
2632     - BUG_ON(map.m_plen != map.m_llen);
2633     + DBG_BUGON(map.m_plen != map.m_llen);
2634    
2635     blknr = erofs_blknr(map.m_pa);
2636     blkoff = erofs_blkoff(map.m_pa);
2637     @@ -243,7 +252,7 @@ submit_bio_retry:
2638     void *vsrc, *vto;
2639     struct page *ipage;
2640    
2641     - BUG_ON(map.m_plen > PAGE_SIZE);
2642     + DBG_BUGON(map.m_plen > PAGE_SIZE);
2643    
2644     ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
2645    
2646     @@ -270,7 +279,7 @@ submit_bio_retry:
2647     }
2648    
2649     /* pa must be block-aligned for raw reading */
2650     - BUG_ON(erofs_blkoff(map.m_pa) != 0);
2651     + DBG_BUGON(erofs_blkoff(map.m_pa));
2652    
2653     /* max # of continuous pages */
2654     if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
2655     @@ -331,7 +340,7 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page)
2656     if (IS_ERR(bio))
2657     return PTR_ERR(bio);
2658    
2659     - BUG_ON(bio != NULL); /* since we have only one bio -- must be NULL */
2660     + DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
2661     return 0;
2662     }
2663    
2664     @@ -369,7 +378,7 @@ static int erofs_raw_access_readpages(struct file *filp,
2665     /* pages could still be locked */
2666     put_page(page);
2667     }
2668     - BUG_ON(!list_empty(pages));
2669     + DBG_BUGON(!list_empty(pages));
2670    
2671     /* the rare case (end in gaps) */
2672     if (unlikely(bio != NULL))
2673     diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
2674     index be6ae3b1bdbe1..04b84ff31d036 100644
2675     --- a/drivers/staging/erofs/dir.c
2676     +++ b/drivers/staging/erofs/dir.c
2677     @@ -53,8 +53,11 @@ static int erofs_fill_dentries(struct dir_context *ctx,
2678     strnlen(de_name, maxsize - nameoff) :
2679     le16_to_cpu(de[1].nameoff) - nameoff;
2680    
2681     - /* the corrupted directory found */
2682     - BUG_ON(de_namelen < 0);
2683     + /* a corrupted entry is found */
2684     + if (unlikely(de_namelen < 0)) {
2685     + DBG_BUGON(1);
2686     + return -EIO;
2687     + }
2688    
2689     #ifdef CONFIG_EROFS_FS_DEBUG
2690     dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
2691     diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
2692     index fbf6ff25cd1bd..9e7815f55a17c 100644
2693     --- a/drivers/staging/erofs/inode.c
2694     +++ b/drivers/staging/erofs/inode.c
2695     @@ -132,7 +132,13 @@ static int fill_inline_data(struct inode *inode, void *data, unsigned m_pofs)
2696     return -ENOMEM;
2697    
2698     m_pofs += vi->inode_isize + vi->xattr_isize;
2699     - BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
2700     +
2701     + /* inline symlink data shouldn't cross a page boundary either */
2702     + if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
2703     + DBG_BUGON(1);
2704     + kfree(lnk);
2705     + return -EIO;
2706     + }
2707    
2708     /* get in-page inline data */
2709     memcpy(lnk, data + m_pofs, inode->i_size);
2710     @@ -170,7 +176,7 @@ static int fill_inode(struct inode *inode, int isdir)
2711     return PTR_ERR(page);
2712     }
2713    
2714     - BUG_ON(!PageUptodate(page));
2715     + DBG_BUGON(!PageUptodate(page));
2716     data = page_address(page);
2717    
2718     err = read_inode(inode, data + ofs);
2719     diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
2720     index e6313c54e3ad6..9f44ed8f00239 100644
2721     --- a/drivers/staging/erofs/internal.h
2722     +++ b/drivers/staging/erofs/internal.h
2723     @@ -184,50 +184,70 @@ struct erofs_workgroup {
2724    
2725     #define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
2726    
2727     -static inline bool erofs_workgroup_try_to_freeze(
2728     - struct erofs_workgroup *grp, int v)
2729     +#if defined(CONFIG_SMP)
2730     +static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
2731     + int val)
2732     {
2733     -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
2734     - if (v != atomic_cmpxchg(&grp->refcount,
2735     - v, EROFS_LOCKED_MAGIC))
2736     - return false;
2737     preempt_disable();
2738     + if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
2739     + preempt_enable();
2740     + return false;
2741     + }
2742     + return true;
2743     +}
2744     +
2745     +static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
2746     + int orig_val)
2747     +{
2748     + /*
2749     + * other observers should notice all modifications
2750     + * in the freezing period.
2751     + */
2752     + smp_mb();
2753     + atomic_set(&grp->refcount, orig_val);
2754     + preempt_enable();
2755     +}
2756     +
2757     +static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
2758     +{
2759     + return atomic_cond_read_relaxed(&grp->refcount,
2760     + VAL != EROFS_LOCKED_MAGIC);
2761     +}
2762     #else
2763     +static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
2764     + int val)
2765     +{
2766     preempt_disable();
2767     - if (atomic_read(&grp->refcount) != v) {
2768     + /* no need to spin on UP platforms, let's just disable preemption. */
2769     + if (val != atomic_read(&grp->refcount)) {
2770     preempt_enable();
2771     return false;
2772     }
2773     -#endif
2774     return true;
2775     }
2776    
2777     -static inline void erofs_workgroup_unfreeze(
2778     - struct erofs_workgroup *grp, int v)
2779     +static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
2780     + int orig_val)
2781     {
2782     -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
2783     - atomic_set(&grp->refcount, v);
2784     -#endif
2785     preempt_enable();
2786     }
2787    
2788     +static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
2789     +{
2790     + int v = atomic_read(&grp->refcount);
2791     +
2792     + /* workgroup is never frozen on uniprocessor systems */
2793     + DBG_BUGON(v == EROFS_LOCKED_MAGIC);
2794     + return v;
2795     +}
2796     +#endif
2797     +
2798     static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
2799     {
2800     - const int locked = (int)EROFS_LOCKED_MAGIC;
2801     int o;
2802    
2803     repeat:
2804     - o = atomic_read(&grp->refcount);
2805     -
2806     - /* spin if it is temporarily locked at the reclaim path */
2807     - if (unlikely(o == locked)) {
2808     -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
2809     - do
2810     - cpu_relax();
2811     - while (atomic_read(&grp->refcount) == locked);
2812     -#endif
2813     - goto repeat;
2814     - }
2815     + o = erofs_wait_on_workgroup_freezed(grp);
2816    
2817     if (unlikely(o <= 0))
2818     return -1;
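
For illustration (not part of the patch itself), a minimal C11 userspace sketch of the cmpxchg-based freeze pattern the hunk above introduces: park a workgroup by swapping a sentinel into its refcount, readers wait until the sentinel goes away, then the original count is restored. The sentinel value, names and single-threaded driver below are assumptions, not erofs code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LOCKED_MAGIC (-12345)	/* stand-in for EROFS_LOCKED_MAGIC */

/* succeeds only if the refcount still holds the expected value */
static bool try_to_freeze(atomic_int *rc, int expected)
{
	return atomic_compare_exchange_strong(rc, &expected, LOCKED_MAGIC);
}

/* restore the original count; the store publishes writes made while frozen */
static void unfreeze(atomic_int *rc, int orig)
{
	atomic_store(rc, orig);
}

int main(void)
{
	atomic_int refcount = 1;

	if (try_to_freeze(&refcount, 1)) {
		/* exclusive section: other CPUs would spin on LOCKED_MAGIC here */
		unfreeze(&refcount, 1);
		puts("frozen and unfrozen");
	}
	return 0;
}
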
2819     diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
2820     index 2df9768edac96..b0583cdb079ae 100644
2821     --- a/drivers/staging/erofs/super.c
2822     +++ b/drivers/staging/erofs/super.c
2823     @@ -40,7 +40,6 @@ static int erofs_init_inode_cache(void)
2824    
2825     static void erofs_exit_inode_cache(void)
2826     {
2827     - BUG_ON(erofs_inode_cachep == NULL);
2828     kmem_cache_destroy(erofs_inode_cachep);
2829     }
2830    
2831     @@ -265,8 +264,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
2832     int ret = 1; /* 0 - busy */
2833     struct address_space *const mapping = page->mapping;
2834    
2835     - BUG_ON(!PageLocked(page));
2836     - BUG_ON(mapping->a_ops != &managed_cache_aops);
2837     + DBG_BUGON(!PageLocked(page));
2838     + DBG_BUGON(mapping->a_ops != &managed_cache_aops);
2839    
2840     if (PagePrivate(page))
2841     ret = erofs_try_to_free_cached_page(mapping, page);
2842     @@ -279,10 +278,10 @@ static void managed_cache_invalidatepage(struct page *page,
2843     {
2844     const unsigned int stop = length + offset;
2845    
2846     - BUG_ON(!PageLocked(page));
2847     + DBG_BUGON(!PageLocked(page));
2848    
2849     - /* Check for overflow */
2850     - BUG_ON(stop > PAGE_SIZE || stop < length);
2851     + /* Check for potential overflow in debug mode */
2852     + DBG_BUGON(stop > PAGE_SIZE || stop < length);
2853    
2854     if (offset == 0 && stop == PAGE_SIZE)
2855     while (!managed_cache_releasepage(page, GFP_NOFS))
2856     @@ -404,12 +403,6 @@ static int erofs_read_super(struct super_block *sb,
2857    
2858     erofs_register_super(sb);
2859    
2860     - /*
2861     - * We already have a positive dentry, which was instantiated
2862     - * by d_make_root. Just need to d_rehash it.
2863     - */
2864     - d_rehash(sb->s_root);
2865     -
2866     if (!silent)
2867     infoln("mounted on %s with opts: %s.", dev_name,
2868     (char *)data);
2869     @@ -625,7 +618,7 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
2870    
2871     static int erofs_remount(struct super_block *sb, int *flags, char *data)
2872     {
2873     - BUG_ON(!sb_rdonly(sb));
2874     + DBG_BUGON(!sb_rdonly(sb));
2875    
2876     *flags |= SB_RDONLY;
2877     return 0;
2878     diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
2879     index 0956615b86f72..23856ba2742d8 100644
2880     --- a/drivers/staging/erofs/unzip_pagevec.h
2881     +++ b/drivers/staging/erofs/unzip_pagevec.h
2882     @@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
2883     erofs_vtptr_t t;
2884    
2885     if (unlikely(ctor->index >= ctor->nr)) {
2886     - BUG_ON(ctor->next == NULL);
2887     + DBG_BUGON(!ctor->next);
2888     z_erofs_pagevec_ctor_pagedown(ctor, true);
2889     }
2890    
2891     diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
2892     index 0346630b67c8c..1279241449f4b 100644
2893     --- a/drivers/staging/erofs/unzip_vle.c
2894     +++ b/drivers/staging/erofs/unzip_vle.c
2895     @@ -18,9 +18,6 @@ static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
2896    
2897     void z_erofs_exit_zip_subsystem(void)
2898     {
2899     - BUG_ON(z_erofs_workqueue == NULL);
2900     - BUG_ON(z_erofs_workgroup_cachep == NULL);
2901     -
2902     destroy_workqueue(z_erofs_workqueue);
2903     kmem_cache_destroy(z_erofs_workgroup_cachep);
2904     }
2905     @@ -293,12 +290,9 @@ z_erofs_vle_work_lookup(struct super_block *sb,
2906     *grp_ret = grp = container_of(egrp,
2907     struct z_erofs_vle_workgroup, obj);
2908    
2909     -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
2910     work = z_erofs_vle_grab_work(grp, pageofs);
2911     + /* if multiref is disabled, `primary' is always true */
2912     primary = true;
2913     -#else
2914     - BUG();
2915     -#endif
2916    
2917     DBG_BUGON(work->pageofs != pageofs);
2918    
2919     @@ -365,12 +359,12 @@ z_erofs_vle_work_register(struct super_block *sb,
2920     struct z_erofs_vle_workgroup *grp = *grp_ret;
2921     struct z_erofs_vle_work *work;
2922    
2923     -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
2924     - BUG_ON(grp != NULL);
2925     -#else
2926     - if (grp != NULL)
2927     - goto skip;
2928     -#endif
2929     + /* if multiref is disabled, grp must always be NULL at this point */
2930     + if (unlikely(grp)) {
2931     + DBG_BUGON(1);
2932     + return ERR_PTR(-EINVAL);
2933     + }
2934     +
2935     /* no available workgroup, let's allocate one */
2936     grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
2937     if (unlikely(grp == NULL))
2938     @@ -393,13 +387,7 @@ z_erofs_vle_work_register(struct super_block *sb,
2939     *hosted = true;
2940    
2941     newgrp = true;
2942     -#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
2943     -skip:
2944     - /* currently unimplemented */
2945     - BUG();
2946     -#else
2947     work = z_erofs_vle_grab_primary_work(grp);
2948     -#endif
2949     work->pageofs = pageofs;
2950    
2951     mutex_init(&work->lock);
2952     @@ -606,7 +594,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
2953    
2954     enum z_erofs_page_type page_type;
2955     unsigned cur, end, spiltted, index;
2956     - int err;
2957     + int err = 0;
2958    
2959     /* register locked file pages as online pages in pack */
2960     z_erofs_onlinepage_init(page);
2961     @@ -624,7 +612,7 @@ repeat:
2962     /* go ahead the next map_blocks */
2963     debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
2964    
2965     - if (!z_erofs_vle_work_iter_end(builder))
2966     + if (z_erofs_vle_work_iter_end(builder))
2967     fe->initial = false;
2968    
2969     map->m_la = offset + cur;
2970     @@ -633,12 +621,11 @@ repeat:
2971     if (unlikely(err))
2972     goto err_out;
2973    
2974     - /* deal with hole (FIXME! broken now) */
2975     if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
2976     goto hitted;
2977    
2978     DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
2979     - BUG_ON(erofs_blkoff(map->m_pa));
2980     + DBG_BUGON(erofs_blkoff(map->m_pa));
2981    
2982     err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
2983     if (unlikely(err))
2984     @@ -683,7 +670,7 @@ retry:
2985    
2986     err = z_erofs_vle_work_add_page(builder,
2987     newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
2988     - if (!err)
2989     + if (likely(!err))
2990     goto retry;
2991     }
2992    
2993     @@ -694,9 +681,10 @@ retry:
2994    
2995     /* FIXME! avoid the last relundant fixup & endio */
2996     z_erofs_onlinepage_fixup(page, index, true);
2997     - ++spiltted;
2998    
2999     - /* also update nr_pages and increase queued_pages */
3000     + /* bump up the number of spiltted parts of a page */
3001     + ++spiltted;
3002     + /* also update nr_pages */
3003     work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
3004     next_part:
3005     /* can be used for verification */
3006     @@ -706,16 +694,18 @@ next_part:
3007     if (end > 0)
3008     goto repeat;
3009    
3010     +out:
3011     /* FIXME! avoid the last relundant fixup & endio */
3012     z_erofs_onlinepage_endio(page);
3013    
3014     debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
3015     __func__, page, spiltted, map->m_llen);
3016     - return 0;
3017     + return err;
3018    
3019     + /* if some error occurred while processing this page */
3020     err_out:
3021     - /* TODO: the missing error handing cases */
3022     - return err;
3023     + SetPageError(page);
3024     + goto out;
3025     }
3026    
3027     static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
3028     @@ -752,7 +742,7 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
3029     bool cachemngd = false;
3030    
3031     DBG_BUGON(PageUptodate(page));
3032     - BUG_ON(page->mapping == NULL);
3033     + DBG_BUGON(!page->mapping);
3034    
3035     #ifdef EROFS_FS_HAS_MANAGED_CACHE
3036     if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
3037     @@ -796,10 +786,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
3038     const unsigned clusterpages = erofs_clusterpages(sbi);
3039    
3040     struct z_erofs_pagevec_ctor ctor;
3041     - unsigned nr_pages;
3042     -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3043     - unsigned sparsemem_pages = 0;
3044     -#endif
3045     + unsigned int nr_pages;
3046     + unsigned int sparsemem_pages = 0;
3047     struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
3048     struct page **pages, **compressed_pages, *page;
3049     unsigned i, llen;
3050     @@ -811,12 +799,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
3051     int err;
3052    
3053     might_sleep();
3054     -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3055     work = z_erofs_vle_grab_primary_work(grp);
3056     -#else
3057     - BUG();
3058     -#endif
3059     - BUG_ON(!READ_ONCE(work->nr_pages));
3060     + DBG_BUGON(!READ_ONCE(work->nr_pages));
3061    
3062     mutex_lock(&work->lock);
3063     nr_pages = work->nr_pages;
3064     @@ -865,14 +849,12 @@ repeat:
3065     else
3066     pagenr = z_erofs_onlinepage_index(page);
3067    
3068     - BUG_ON(pagenr >= nr_pages);
3069     + DBG_BUGON(pagenr >= nr_pages);
3070     + DBG_BUGON(pages[pagenr]);
3071    
3072     -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3073     - BUG_ON(pages[pagenr] != NULL);
3074     - ++sparsemem_pages;
3075     -#endif
3076     pages[pagenr] = page;
3077     }
3078     + sparsemem_pages = i;
3079    
3080     z_erofs_pagevec_ctor_exit(&ctor, true);
3081    
3082     @@ -891,9 +873,8 @@ repeat:
3083     if (z_erofs_is_stagingpage(page))
3084     continue;
3085     #ifdef EROFS_FS_HAS_MANAGED_CACHE
3086     - else if (page->mapping == mngda) {
3087     - BUG_ON(PageLocked(page));
3088     - BUG_ON(!PageUptodate(page));
3089     + if (page->mapping == mngda) {
3090     + DBG_BUGON(!PageUptodate(page));
3091     continue;
3092     }
3093     #endif
3094     @@ -901,11 +882,9 @@ repeat:
3095     /* only non-head page could be reused as a compressed page */
3096     pagenr = z_erofs_onlinepage_index(page);
3097    
3098     - BUG_ON(pagenr >= nr_pages);
3099     -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3100     - BUG_ON(pages[pagenr] != NULL);
3101     + DBG_BUGON(pagenr >= nr_pages);
3102     + DBG_BUGON(pages[pagenr]);
3103     ++sparsemem_pages;
3104     -#endif
3105     pages[pagenr] = page;
3106    
3107     overlapped = true;
3108     @@ -914,9 +893,6 @@ repeat:
3109     llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
3110    
3111     if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
3112     - /* FIXME! this should be fixed in the future */
3113     - BUG_ON(grp->llen != llen);
3114     -
3115     err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
3116     pages, nr_pages, work->pageofs);
3117     goto out;
3118     @@ -931,12 +907,8 @@ repeat:
3119     if (err != -ENOTSUPP)
3120     goto out_percpu;
3121    
3122     -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3123     - if (sparsemem_pages >= nr_pages) {
3124     - BUG_ON(sparsemem_pages > nr_pages);
3125     + if (sparsemem_pages >= nr_pages)
3126     goto skip_allocpage;
3127     - }
3128     -#endif
3129    
3130     for (i = 0; i < nr_pages; ++i) {
3131     if (pages[i] != NULL)
3132     @@ -945,9 +917,7 @@ repeat:
3133     pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
3134     }
3135    
3136     -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
3137     skip_allocpage:
3138     -#endif
3139     vout = erofs_vmap(pages, nr_pages);
3140    
3141     err = z_erofs_vle_unzip_vmap(compressed_pages,
3142     @@ -1031,7 +1001,7 @@ static void z_erofs_vle_unzip_wq(struct work_struct *work)
3143     struct z_erofs_vle_unzip_io_sb, io.u.work);
3144     LIST_HEAD(page_pool);
3145    
3146     - BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
3147     + DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
3148     z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
3149    
3150     put_pages_list(&page_pool);
3151     @@ -1360,7 +1330,6 @@ static inline int __z_erofs_vle_normalaccess_readpages(
3152     continue;
3153     }
3154    
3155     - BUG_ON(PagePrivate(page));
3156     set_page_private(page, (unsigned long)head);
3157     head = page;
3158     }
3159     diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
3160     index 3939985008652..3316bc36965d4 100644
3161     --- a/drivers/staging/erofs/unzip_vle.h
3162     +++ b/drivers/staging/erofs/unzip_vle.h
3163     @@ -47,13 +47,6 @@ static inline bool z_erofs_gather_if_stagingpage(struct list_head *page_pool,
3164     #define Z_EROFS_VLE_INLINE_PAGEVECS 3
3165    
3166     struct z_erofs_vle_work {
3167     - /* struct z_erofs_vle_work *left, *right; */
3168     -
3169     -#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
3170     - struct list_head list;
3171     -
3172     - atomic_t refcount;
3173     -#endif
3174     struct mutex lock;
3175    
3176     /* I: decompression offset in page */
3177     @@ -107,10 +100,8 @@ static inline void z_erofs_vle_set_workgrp_fmt(
3178     grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
3179     }
3180    
3181     -#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
3182     -#error multiref decompression is unimplemented yet
3183     -#else
3184    
3185     +/* definitions if multiref is disabled */
3186     #define z_erofs_vle_grab_primary_work(grp) (&(grp)->work)
3187     #define z_erofs_vle_grab_work(grp, pageofs) (&(grp)->work)
3188     #define z_erofs_vle_work_workgroup(wrk, primary) \
3189     @@ -118,7 +109,6 @@ static inline void z_erofs_vle_set_workgrp_fmt(
3190     struct z_erofs_vle_workgroup, work) : \
3191     ({ BUG(); (void *)NULL; }))
3192    
3193     -#endif
3194    
3195     #define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_vle_workgroup)
3196    
3197     diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
3198     index f5b665f15be52..9cb35cd33365a 100644
3199     --- a/drivers/staging/erofs/unzip_vle_lz4.c
3200     +++ b/drivers/staging/erofs/unzip_vle_lz4.c
3201     @@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
3202     if (compressed_pages[j] != page)
3203     continue;
3204    
3205     - BUG_ON(mirrored[j]);
3206     + DBG_BUGON(mirrored[j]);
3207     memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
3208     mirrored[j] = true;
3209     break;
3210     diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
3211     index 595cf90af9bb2..dd2ac9dbc4b47 100644
3212     --- a/drivers/staging/erofs/utils.c
3213     +++ b/drivers/staging/erofs/utils.c
3214     @@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
3215     list_del(&page->lru);
3216     } else {
3217     page = alloc_pages(gfp | __GFP_NOFAIL, 0);
3218     -
3219     - BUG_ON(page == NULL);
3220     - BUG_ON(page->mapping != NULL);
3221     }
3222     return page;
3223     }
3224     @@ -60,7 +57,7 @@ repeat:
3225     /* decrease refcount added by erofs_workgroup_put */
3226     if (unlikely(oldcount == 1))
3227     atomic_long_dec(&erofs_global_shrink_cnt);
3228     - BUG_ON(index != grp->index);
3229     + DBG_BUGON(index != grp->index);
3230     }
3231     rcu_read_unlock();
3232     return grp;
3233     @@ -73,8 +70,11 @@ int erofs_register_workgroup(struct super_block *sb,
3234     struct erofs_sb_info *sbi;
3235     int err;
3236    
3237     - /* grp->refcount should not < 1 */
3238     - BUG_ON(!atomic_read(&grp->refcount));
3239     + /* grp shouldn't be broken or used before */
3240     + if (unlikely(atomic_read(&grp->refcount) != 1)) {
3241     + DBG_BUGON(1);
3242     + return -EINVAL;
3243     + }
3244    
3245     err = radix_tree_preload(GFP_NOFS);
3246     if (err)
3247     diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
3248     index 9cd404acdb82b..ac7620120491b 100644
3249     --- a/drivers/target/target_core_user.c
3250     +++ b/drivers/target/target_core_user.c
3251     @@ -148,7 +148,7 @@ struct tcmu_dev {
3252     size_t ring_size;
3253    
3254     struct mutex cmdr_lock;
3255     - struct list_head cmdr_queue;
3256     + struct list_head qfull_queue;
3257    
3258     uint32_t dbi_max;
3259     uint32_t dbi_thresh;
3260     @@ -159,6 +159,7 @@ struct tcmu_dev {
3261    
3262     struct timer_list cmd_timer;
3263     unsigned int cmd_time_out;
3264     + struct list_head inflight_queue;
3265    
3266     struct timer_list qfull_timer;
3267     int qfull_time_out;
3268     @@ -179,7 +180,7 @@ struct tcmu_dev {
3269     struct tcmu_cmd {
3270     struct se_cmd *se_cmd;
3271     struct tcmu_dev *tcmu_dev;
3272     - struct list_head cmdr_queue_entry;
3273     + struct list_head queue_entry;
3274    
3275     uint16_t cmd_id;
3276    
3277     @@ -192,6 +193,7 @@ struct tcmu_cmd {
3278     unsigned long deadline;
3279    
3280     #define TCMU_CMD_BIT_EXPIRED 0
3281     +#define TCMU_CMD_BIT_INFLIGHT 1
3282     unsigned long flags;
3283     };
3284     /*
3285     @@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
3286     if (!tcmu_cmd)
3287     return NULL;
3288    
3289     - INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
3290     + INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
3291     tcmu_cmd->se_cmd = se_cmd;
3292     tcmu_cmd->tcmu_dev = udev;
3293    
3294     @@ -915,11 +917,13 @@ setup_timer:
3295     return 0;
3296    
3297     tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
3298     - mod_timer(timer, tcmu_cmd->deadline);
3299     + if (!timer_pending(timer))
3300     + mod_timer(timer, tcmu_cmd->deadline);
3301     +
3302     return 0;
3303     }
3304    
3305     -static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
3306     +static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
3307     {
3308     struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
3309     unsigned int tmo;
3310     @@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
3311     if (ret)
3312     return ret;
3313    
3314     - list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
3315     + list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
3316     pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
3317     tcmu_cmd->cmd_id, udev->name);
3318     return 0;
3319     @@ -999,7 +1003,7 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
3320     base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
3321     command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
3322    
3323     - if (!list_empty(&udev->cmdr_queue))
3324     + if (!list_empty(&udev->qfull_queue))
3325     goto queue;
3326    
3327     mb = udev->mb_addr;
3328     @@ -1096,13 +1100,16 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
3329     UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
3330     tcmu_flush_dcache_range(mb, sizeof(*mb));
3331    
3332     + list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
3333     + set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
3334     +
3335     /* TODO: only if FLUSH and FUA? */
3336     uio_event_notify(&udev->uio_info);
3337    
3338     return 0;
3339    
3340     queue:
3341     - if (add_to_cmdr_queue(tcmu_cmd)) {
3342     + if (add_to_qfull_queue(tcmu_cmd)) {
3343     *scsi_err = TCM_OUT_OF_RESOURCES;
3344     return -1;
3345     }
3346     @@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
3347     if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
3348     goto out;
3349    
3350     + list_del_init(&cmd->queue_entry);
3351     +
3352     tcmu_cmd_reset_dbi_cur(cmd);
3353    
3354     if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
3355     @@ -1194,9 +1203,29 @@ out:
3356     tcmu_free_cmd(cmd);
3357     }
3358    
3359     +static void tcmu_set_next_deadline(struct list_head *queue,
3360     + struct timer_list *timer)
3361     +{
3362     + struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
3363     + unsigned long deadline = 0;
3364     +
3365     + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
3366     + if (!time_after(jiffies, tcmu_cmd->deadline)) {
3367     + deadline = tcmu_cmd->deadline;
3368     + break;
3369     + }
3370     + }
3371     +
3372     + if (deadline)
3373     + mod_timer(timer, deadline);
3374     + else
3375     + del_timer(timer);
3376     +}
3377     +
3378     static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3379     {
3380     struct tcmu_mailbox *mb;
3381     + struct tcmu_cmd *cmd;
3382     int handled = 0;
3383    
3384     if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
3385     @@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3386     while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
3387    
3388     struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
3389     - struct tcmu_cmd *cmd;
3390    
3391     tcmu_flush_dcache_range(entry, sizeof(*entry));
3392    
3393     @@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3394     /* no more pending commands */
3395     del_timer(&udev->cmd_timer);
3396    
3397     - if (list_empty(&udev->cmdr_queue)) {
3398     + if (list_empty(&udev->qfull_queue)) {
3399     /*
3400     * no more pending or waiting commands so try to
3401     * reclaim blocks if needed.
3402     @@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3403     tcmu_global_max_blocks)
3404     schedule_delayed_work(&tcmu_unmap_work, 0);
3405     }
3406     + } else if (udev->cmd_time_out) {
3407     + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
3408     }
3409    
3410     return handled;
3411     @@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
3412     if (!time_after(jiffies, cmd->deadline))
3413     return 0;
3414    
3415     - is_running = list_empty(&cmd->cmdr_queue_entry);
3416     + is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
3417     se_cmd = cmd->se_cmd;
3418    
3419     if (is_running) {
3420     @@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
3421     */
3422     scsi_status = SAM_STAT_CHECK_CONDITION;
3423     } else {
3424     - list_del_init(&cmd->cmdr_queue_entry);
3425     -
3426     idr_remove(&udev->commands, id);
3427     tcmu_free_cmd(cmd);
3428     scsi_status = SAM_STAT_TASK_SET_FULL;
3429     }
3430     + list_del_init(&cmd->queue_entry);
3431    
3432     pr_debug("Timing out cmd %u on dev %s that is %s.\n",
3433     id, udev->name, is_running ? "inflight" : "queued");
3434     @@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
3435    
3436     INIT_LIST_HEAD(&udev->node);
3437     INIT_LIST_HEAD(&udev->timedout_entry);
3438     - INIT_LIST_HEAD(&udev->cmdr_queue);
3439     + INIT_LIST_HEAD(&udev->qfull_queue);
3440     + INIT_LIST_HEAD(&udev->inflight_queue);
3441     idr_init(&udev->commands);
3442    
3443     timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
3444     @@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
3445     return &udev->se_dev;
3446     }
3447    
3448     -static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3449     +static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
3450     {
3451     struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
3452     LIST_HEAD(cmds);
3453     @@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3454     sense_reason_t scsi_ret;
3455     int ret;
3456    
3457     - if (list_empty(&udev->cmdr_queue))
3458     + if (list_empty(&udev->qfull_queue))
3459     return true;
3460    
3461     pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
3462    
3463     - list_splice_init(&udev->cmdr_queue, &cmds);
3464     + list_splice_init(&udev->qfull_queue, &cmds);
3465    
3466     - list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
3467     - list_del_init(&tcmu_cmd->cmdr_queue_entry);
3468     + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
3469     + list_del_init(&tcmu_cmd->queue_entry);
3470    
3471     pr_debug("removing cmd %u on dev %s from queue\n",
3472     tcmu_cmd->cmd_id, udev->name);
3473     @@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
3474     * cmd was requeued, so just put all cmds back in
3475     * the queue
3476     */
3477     - list_splice_tail(&cmds, &udev->cmdr_queue);
3478     + list_splice_tail(&cmds, &udev->qfull_queue);
3479     drained = false;
3480     - goto done;
3481     + break;
3482     }
3483     }
3484     - if (list_empty(&udev->cmdr_queue))
3485     - del_timer(&udev->qfull_timer);
3486     -done:
3487     +
3488     + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
3489     return drained;
3490     }
3491    
3492     @@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
3493    
3494     mutex_lock(&udev->cmdr_lock);
3495     tcmu_handle_completions(udev);
3496     - run_cmdr_queue(udev, false);
3497     + run_qfull_queue(udev, false);
3498     mutex_unlock(&udev->cmdr_lock);
3499    
3500     return 0;
3501     @@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
3502     /* complete IO that has executed successfully */
3503     tcmu_handle_completions(udev);
3504     /* fail IO waiting to be queued */
3505     - run_cmdr_queue(udev, true);
3506     + run_qfull_queue(udev, true);
3507    
3508     unlock:
3509     mutex_unlock(&udev->cmdr_lock);
3510     @@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
3511     mutex_lock(&udev->cmdr_lock);
3512    
3513     idr_for_each_entry(&udev->commands, cmd, i) {
3514     - if (!list_empty(&cmd->cmdr_queue_entry))
3515     + if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
3516     continue;
3517    
3518     pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
3519     @@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
3520    
3521     idr_remove(&udev->commands, i);
3522     if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
3523     + list_del_init(&cmd->queue_entry);
3524     if (err_level == 1) {
3525     /*
3526     * Userspace was not able to start the
3527     @@ -2666,6 +2696,10 @@ static void check_timedout_devices(void)
3528    
3529     mutex_lock(&udev->cmdr_lock);
3530     idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
3531     +
3532     + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
3533     + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
3534     +
3535     mutex_unlock(&udev->cmdr_lock);
3536    
3537     spin_lock_bh(&timed_out_udevs_lock);
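
For illustration (not part of the patch itself), a userspace sketch of the tcmu_set_next_deadline() idea added above: walk the queue in order, take the first deadline that has not yet expired, and either re-arm or delete the single timer. The struct, the sample values and the printed stand-ins for mod_timer()/del_timer() are assumptions.

#include <stdio.h>

struct fake_cmd {
	unsigned long deadline;	/* absolute expiry time, e.g. in jiffies */
};

int main(void)
{
	unsigned long now = 1000;
	struct fake_cmd queue[] = { { 900 }, { 950 }, { 1100 }, { 1200 } };
	unsigned long next = 0;
	unsigned int i;

	/* commands sit in the list in submission order, so the first
	 * unexpired entry carries the earliest deadline still pending */
	for (i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
		if (queue[i].deadline > now) {
			next = queue[i].deadline;
			break;
		}
	}

	if (next)
		printf("mod_timer -> %lu\n", next);	/* re-arm */
	else
		printf("del_timer\n");			/* nothing pending */
	return 0;
}
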
3538     diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
3539     index 5eaeca805c95c..b214a72d5caad 100644
3540     --- a/drivers/vhost/vhost.c
3541     +++ b/drivers/vhost/vhost.c
3542     @@ -1035,8 +1035,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
3543     int type, ret;
3544    
3545     ret = copy_from_iter(&type, sizeof(type), from);
3546     - if (ret != sizeof(type))
3547     + if (ret != sizeof(type)) {
3548     + ret = -EINVAL;
3549     goto done;
3550     + }
3551    
3552     switch (type) {
3553     case VHOST_IOTLB_MSG:
3554     @@ -1055,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
3555    
3556     iov_iter_advance(from, offset);
3557     ret = copy_from_iter(&msg, sizeof(msg), from);
3558     - if (ret != sizeof(msg))
3559     + if (ret != sizeof(msg)) {
3560     + ret = -EINVAL;
3561     goto done;
3562     + }
3563     if (vhost_process_iotlb_msg(dev, &msg)) {
3564     ret = -EFAULT;
3565     goto done;
3566     diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
3567     index bdfcc0a71db14..6bde543452f25 100644
3568     --- a/drivers/video/backlight/pwm_bl.c
3569     +++ b/drivers/video/backlight/pwm_bl.c
3570     @@ -262,6 +262,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
3571    
3572     memset(data, 0, sizeof(*data));
3573    
3574     + /*
3575     + * These values are optional and set as 0 by default, the out values
3576     + * are modified only if a valid u32 value can be decoded.
3577     + */
3578     + of_property_read_u32(node, "post-pwm-on-delay-ms",
3579     + &data->post_pwm_on_delay);
3580     + of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
3581     +
3582     + data->enable_gpio = -EINVAL;
3583     +
3584     /*
3585     * Determine the number of brightness levels, if this property is not
3586     * set a default table of brightness levels will be used.
3587     @@ -374,15 +384,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
3588     data->max_brightness--;
3589     }
3590    
3591     - /*
3592     - * These values are optional and set as 0 by default, the out values
3593     - * are modified only if a valid u32 value can be decoded.
3594     - */
3595     - of_property_read_u32(node, "post-pwm-on-delay-ms",
3596     - &data->post_pwm_on_delay);
3597     - of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
3598     -
3599     - data->enable_gpio = -EINVAL;
3600     return 0;
3601     }
3602    
3603     diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
3604     index afbd6101c78eb..070026a7e55a5 100644
3605     --- a/drivers/video/fbdev/udlfb.c
3606     +++ b/drivers/video/fbdev/udlfb.c
3607     @@ -916,8 +916,6 @@ static int dlfb_ops_open(struct fb_info *info, int user)
3608    
3609     dlfb->fb_count++;
3610    
3611     - kref_get(&dlfb->kref);
3612     -
3613     if (fb_defio && (info->fbdefio == NULL)) {
3614     /* enable defio at last moment if not disabled by client */
3615    
3616     @@ -940,14 +938,17 @@ static int dlfb_ops_open(struct fb_info *info, int user)
3617     return 0;
3618     }
3619    
3620     -/*
3621     - * Called when all client interfaces to start transactions have been disabled,
3622     - * and all references to our device instance (dlfb_data) are released.
3623     - * Every transaction must have a reference, so we know are fully spun down
3624     - */
3625     -static void dlfb_free(struct kref *kref)
3626     +static void dlfb_ops_destroy(struct fb_info *info)
3627     {
3628     - struct dlfb_data *dlfb = container_of(kref, struct dlfb_data, kref);
3629     + struct dlfb_data *dlfb = info->par;
3630     +
3631     + if (info->cmap.len != 0)
3632     + fb_dealloc_cmap(&info->cmap);
3633     + if (info->monspecs.modedb)
3634     + fb_destroy_modedb(info->monspecs.modedb);
3635     + vfree(info->screen_base);
3636     +
3637     + fb_destroy_modelist(&info->modelist);
3638    
3639     while (!list_empty(&dlfb->deferred_free)) {
3640     struct dlfb_deferred_free *d = list_entry(dlfb->deferred_free.next, struct dlfb_deferred_free, list);
3641     @@ -957,40 +958,13 @@ static void dlfb_free(struct kref *kref)
3642     }
3643     vfree(dlfb->backing_buffer);
3644     kfree(dlfb->edid);
3645     + usb_put_dev(dlfb->udev);
3646     kfree(dlfb);
3647     -}
3648     -
3649     -static void dlfb_free_framebuffer(struct dlfb_data *dlfb)
3650     -{
3651     - struct fb_info *info = dlfb->info;
3652     -
3653     - if (info) {
3654     - unregister_framebuffer(info);
3655     -
3656     - if (info->cmap.len != 0)
3657     - fb_dealloc_cmap(&info->cmap);
3658     - if (info->monspecs.modedb)
3659     - fb_destroy_modedb(info->monspecs.modedb);
3660     - vfree(info->screen_base);
3661     -
3662     - fb_destroy_modelist(&info->modelist);
3663     -
3664     - dlfb->info = NULL;
3665     -
3666     - /* Assume info structure is freed after this point */
3667     - framebuffer_release(info);
3668     - }
3669    
3670     - /* ref taken in probe() as part of registering framebfufer */
3671     - kref_put(&dlfb->kref, dlfb_free);
3672     + /* Assume info structure is freed after this point */
3673     + framebuffer_release(info);
3674     }
3675    
3676     -static void dlfb_free_framebuffer_work(struct work_struct *work)
3677     -{
3678     - struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
3679     - free_framebuffer_work.work);
3680     - dlfb_free_framebuffer(dlfb);
3681     -}
3682     /*
3683     * Assumes caller is holding info->lock mutex (for open and release at least)
3684     */
3685     @@ -1000,10 +974,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
3686    
3687     dlfb->fb_count--;
3688    
3689     - /* We can't free fb_info here - fbmem will touch it when we return */
3690     - if (dlfb->virtualized && (dlfb->fb_count == 0))
3691     - schedule_delayed_work(&dlfb->free_framebuffer_work, HZ);
3692     -
3693     if ((dlfb->fb_count == 0) && (info->fbdefio)) {
3694     fb_deferred_io_cleanup(info);
3695     kfree(info->fbdefio);
3696     @@ -1013,8 +983,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
3697    
3698     dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);
3699    
3700     - kref_put(&dlfb->kref, dlfb_free);
3701     -
3702     return 0;
3703     }
3704    
3705     @@ -1172,6 +1140,7 @@ static struct fb_ops dlfb_ops = {
3706     .fb_blank = dlfb_ops_blank,
3707     .fb_check_var = dlfb_ops_check_var,
3708     .fb_set_par = dlfb_ops_set_par,
3709     + .fb_destroy = dlfb_ops_destroy,
3710     };
3711    
3712    
3713     @@ -1615,12 +1584,13 @@ success:
3714     return true;
3715     }
3716    
3717     -static void dlfb_init_framebuffer_work(struct work_struct *work);
3718     -
3719     static int dlfb_usb_probe(struct usb_interface *intf,
3720     const struct usb_device_id *id)
3721     {
3722     + int i;
3723     + const struct device_attribute *attr;
3724     struct dlfb_data *dlfb;
3725     + struct fb_info *info;
3726     int retval = -ENOMEM;
3727     struct usb_device *usbdev = interface_to_usbdev(intf);
3728    
3729     @@ -1631,10 +1601,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
3730     goto error;
3731     }
3732    
3733     - kref_init(&dlfb->kref); /* matching kref_put in usb .disconnect fn */
3734     INIT_LIST_HEAD(&dlfb->deferred_free);
3735    
3736     - dlfb->udev = usbdev;
3737     + dlfb->udev = usb_get_dev(usbdev);
3738     usb_set_intfdata(intf, dlfb);
3739    
3740     dev_dbg(&intf->dev, "console enable=%d\n", console);
3741     @@ -1657,42 +1626,6 @@ static int dlfb_usb_probe(struct usb_interface *intf,
3742     }
3743    
3744    
3745     - if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
3746     - retval = -ENOMEM;
3747     - dev_err(&intf->dev, "unable to allocate urb list\n");
3748     - goto error;
3749     - }
3750     -
3751     - kref_get(&dlfb->kref); /* matching kref_put in free_framebuffer_work */
3752     -
3753     - /* We don't register a new USB class. Our client interface is dlfbev */
3754     -
3755     - /* Workitem keep things fast & simple during USB enumeration */
3756     - INIT_DELAYED_WORK(&dlfb->init_framebuffer_work,
3757     - dlfb_init_framebuffer_work);
3758     - schedule_delayed_work(&dlfb->init_framebuffer_work, 0);
3759     -
3760     - return 0;
3761     -
3762     -error:
3763     - if (dlfb) {
3764     -
3765     - kref_put(&dlfb->kref, dlfb_free); /* last ref from kref_init */
3766     -
3767     - /* dev has been deallocated. Do not dereference */
3768     - }
3769     -
3770     - return retval;
3771     -}
3772     -
3773     -static void dlfb_init_framebuffer_work(struct work_struct *work)
3774     -{
3775     - int i, retval;
3776     - struct fb_info *info;
3777     - const struct device_attribute *attr;
3778     - struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
3779     - init_framebuffer_work.work);
3780     -
3781     /* allocates framebuffer driver structure, not framebuffer memory */
3782     info = framebuffer_alloc(0, &dlfb->udev->dev);
3783     if (!info) {
3784     @@ -1706,17 +1639,22 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
3785     dlfb->ops = dlfb_ops;
3786     info->fbops = &dlfb->ops;
3787    
3788     + INIT_LIST_HEAD(&info->modelist);
3789     +
3790     + if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
3791     + retval = -ENOMEM;
3792     + dev_err(&intf->dev, "unable to allocate urb list\n");
3793     + goto error;
3794     + }
3795     +
3796     + /* We don't register a new USB class. Our client interface is dlfbev */
3797     +
3798     retval = fb_alloc_cmap(&info->cmap, 256, 0);
3799     if (retval < 0) {
3800     dev_err(info->device, "cmap allocation failed: %d\n", retval);
3801     goto error;
3802     }
3803    
3804     - INIT_DELAYED_WORK(&dlfb->free_framebuffer_work,
3805     - dlfb_free_framebuffer_work);
3806     -
3807     - INIT_LIST_HEAD(&info->modelist);
3808     -
3809     retval = dlfb_setup_modes(dlfb, info, NULL, 0);
3810     if (retval != 0) {
3811     dev_err(info->device,
3812     @@ -1760,10 +1698,16 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
3813     dev_name(info->dev), info->var.xres, info->var.yres,
3814     ((dlfb->backing_buffer) ?
3815     info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
3816     - return;
3817     + return 0;
3818    
3819     error:
3820     - dlfb_free_framebuffer(dlfb);
3821     + if (dlfb->info) {
3822     + dlfb_ops_destroy(dlfb->info);
3823     + } else if (dlfb) {
3824     + usb_put_dev(dlfb->udev);
3825     + kfree(dlfb);
3826     + }
3827     + return retval;
3828     }
3829    
3830     static void dlfb_usb_disconnect(struct usb_interface *intf)
3831     @@ -1791,20 +1735,9 @@ static void dlfb_usb_disconnect(struct usb_interface *intf)
3832     for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
3833     device_remove_file(info->dev, &fb_device_attrs[i]);
3834     device_remove_bin_file(info->dev, &edid_attr);
3835     - unlink_framebuffer(info);
3836     }
3837    
3838     - usb_set_intfdata(intf, NULL);
3839     - dlfb->udev = NULL;
3840     -
3841     - /* if clients still have us open, will be freed on last close */
3842     - if (dlfb->fb_count == 0)
3843     - schedule_delayed_work(&dlfb->free_framebuffer_work, 0);
3844     -
3845     - /* release reference taken by kref_init in probe() */
3846     - kref_put(&dlfb->kref, dlfb_free);
3847     -
3848     - /* consider dlfb_data freed */
3849     + unregister_framebuffer(info);
3850     }
3851    
3852     static struct usb_driver dlfb_driver = {
3853     diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
3854     index 5c4a764717c4d..81208cd3f4ecb 100644
3855     --- a/drivers/watchdog/mt7621_wdt.c
3856     +++ b/drivers/watchdog/mt7621_wdt.c
3857     @@ -17,6 +17,7 @@
3858     #include <linux/watchdog.h>
3859     #include <linux/moduleparam.h>
3860     #include <linux/platform_device.h>
3861     +#include <linux/mod_devicetable.h>
3862    
3863     #include <asm/mach-ralink/ralink_regs.h>
3864    
3865     diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
3866     index 98967f0a7d10e..db7c57d82cfdc 100644
3867     --- a/drivers/watchdog/rt2880_wdt.c
3868     +++ b/drivers/watchdog/rt2880_wdt.c
3869     @@ -18,6 +18,7 @@
3870     #include <linux/watchdog.h>
3871     #include <linux/moduleparam.h>
3872     #include <linux/platform_device.h>
3873     +#include <linux/mod_devicetable.h>
3874    
3875     #include <asm/mach-ralink/ralink_regs.h>
3876    
3877     diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
3878     index b1092fbefa630..d4ea33581ac26 100644
3879     --- a/drivers/xen/pvcalls-back.c
3880     +++ b/drivers/xen/pvcalls-back.c
3881     @@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
3882    
3883     /* write the data, then modify the indexes */
3884     virt_wmb();
3885     - if (ret < 0)
3886     + if (ret < 0) {
3887     + atomic_set(&map->read, 0);
3888     intf->in_error = ret;
3889     - else
3890     + } else
3891     intf->in_prod = prod + ret;
3892     /* update the indexes, then notify the other end */
3893     virt_wmb();
3894     @@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
3895     static void pvcalls_sk_state_change(struct sock *sock)
3896     {
3897     struct sock_mapping *map = sock->sk_user_data;
3898     - struct pvcalls_data_intf *intf;
3899    
3900     if (map == NULL)
3901     return;
3902    
3903     - intf = map->ring;
3904     - intf->in_error = -ENOTCONN;
3905     + atomic_inc(&map->read);
3906     notify_remote_via_irq(map->irq);
3907     }
3908    
3909     diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
3910     index 77224d8f3e6fe..91da7e44d5d4f 100644
3911     --- a/drivers/xen/pvcalls-front.c
3912     +++ b/drivers/xen/pvcalls-front.c
3913     @@ -31,6 +31,12 @@
3914     #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
3915     #define PVCALLS_FRONT_MAX_SPIN 5000
3916    
3917     +static struct proto pvcalls_proto = {
3918     + .name = "PVCalls",
3919     + .owner = THIS_MODULE,
3920     + .obj_size = sizeof(struct sock),
3921     +};
3922     +
3923     struct pvcalls_bedata {
3924     struct xen_pvcalls_front_ring ring;
3925     grant_ref_t ref;
3926     @@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
3927     return ret;
3928     }
3929    
3930     +static void free_active_ring(struct sock_mapping *map)
3931     +{
3932     + if (!map->active.ring)
3933     + return;
3934     +
3935     + free_pages((unsigned long)map->active.data.in,
3936     + map->active.ring->ring_order);
3937     + free_page((unsigned long)map->active.ring);
3938     +}
3939     +
3940     +static int alloc_active_ring(struct sock_mapping *map)
3941     +{
3942     + void *bytes;
3943     +
3944     + map->active.ring = (struct pvcalls_data_intf *)
3945     + get_zeroed_page(GFP_KERNEL);
3946     + if (!map->active.ring)
3947     + goto out;
3948     +
3949     + map->active.ring->ring_order = PVCALLS_RING_ORDER;
3950     + bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
3951     + PVCALLS_RING_ORDER);
3952     + if (!bytes)
3953     + goto out;
3954     +
3955     + map->active.data.in = bytes;
3956     + map->active.data.out = bytes +
3957     + XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
3958     +
3959     + return 0;
3960     +
3961     +out:
3962     + free_active_ring(map);
3963     + return -ENOMEM;
3964     +}
3965     +
3966     static int create_active(struct sock_mapping *map, int *evtchn)
3967     {
3968     void *bytes;
3969     @@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
3970     *evtchn = -1;
3971     init_waitqueue_head(&map->active.inflight_conn_req);
3972    
3973     - map->active.ring = (struct pvcalls_data_intf *)
3974     - __get_free_page(GFP_KERNEL | __GFP_ZERO);
3975     - if (map->active.ring == NULL)
3976     - goto out_error;
3977     - map->active.ring->ring_order = PVCALLS_RING_ORDER;
3978     - bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
3979     - PVCALLS_RING_ORDER);
3980     - if (bytes == NULL)
3981     - goto out_error;
3982     + bytes = map->active.data.in;
3983     for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
3984     map->active.ring->ref[i] = gnttab_grant_foreign_access(
3985     pvcalls_front_dev->otherend_id,
3986     @@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
3987     pvcalls_front_dev->otherend_id,
3988     pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
3989    
3990     - map->active.data.in = bytes;
3991     - map->active.data.out = bytes +
3992     - XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
3993     -
3994     ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
3995     if (ret)
3996     goto out_error;
3997     @@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
3998     out_error:
3999     if (*evtchn >= 0)
4000     xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
4001     - free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
4002     - free_page((unsigned long)map->active.ring);
4003     return ret;
4004     }
4005    
4006     @@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
4007     return PTR_ERR(map);
4008    
4009     bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
4010     + ret = alloc_active_ring(map);
4011     + if (ret < 0) {
4012     + pvcalls_exit_sock(sock);
4013     + return ret;
4014     + }
4015    
4016     spin_lock(&bedata->socket_lock);
4017     ret = get_request(bedata, &req_id);
4018     if (ret < 0) {
4019     spin_unlock(&bedata->socket_lock);
4020     + free_active_ring(map);
4021     pvcalls_exit_sock(sock);
4022     return ret;
4023     }
4024     ret = create_active(map, &evtchn);
4025     if (ret < 0) {
4026     spin_unlock(&bedata->socket_lock);
4027     + free_active_ring(map);
4028     pvcalls_exit_sock(sock);
4029     return ret;
4030     }
4031     @@ -560,15 +595,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
4032     error = intf->in_error;
4033     /* get pointers before reading from the ring */
4034     virt_rmb();
4035     - if (error < 0)
4036     - return error;
4037    
4038     size = pvcalls_queued(prod, cons, array_size);
4039     masked_prod = pvcalls_mask(prod, array_size);
4040     masked_cons = pvcalls_mask(cons, array_size);
4041    
4042     if (size == 0)
4043     - return 0;
4044     + return error ?: size;
4045    
4046     if (len > size)
4047     len = size;
4048     @@ -780,25 +813,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
4049     }
4050     }
4051    
4052     - spin_lock(&bedata->socket_lock);
4053     - ret = get_request(bedata, &req_id);
4054     - if (ret < 0) {
4055     + map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
4056     + if (map2 == NULL) {
4057     clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4058     (void *)&map->passive.flags);
4059     - spin_unlock(&bedata->socket_lock);
4060     + pvcalls_exit_sock(sock);
4061     + return -ENOMEM;
4062     + }
4063     + ret = alloc_active_ring(map2);
4064     + if (ret < 0) {
4065     + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4066     + (void *)&map->passive.flags);
4067     + kfree(map2);
4068     pvcalls_exit_sock(sock);
4069     return ret;
4070     }
4071     - map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
4072     - if (map2 == NULL) {
4073     + spin_lock(&bedata->socket_lock);
4074     + ret = get_request(bedata, &req_id);
4075     + if (ret < 0) {
4076     clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4077     (void *)&map->passive.flags);
4078     spin_unlock(&bedata->socket_lock);
4079     + free_active_ring(map2);
4080     + kfree(map2);
4081     pvcalls_exit_sock(sock);
4082     - return -ENOMEM;
4083     + return ret;
4084     }
4085     +
4086     ret = create_active(map2, &evtchn);
4087     if (ret < 0) {
4088     + free_active_ring(map2);
4089     kfree(map2);
4090     clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
4091     (void *)&map->passive.flags);
4092     @@ -839,7 +883,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
4093    
4094     received:
4095     map2->sock = newsock;
4096     - newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
4097     + newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
4098     if (!newsock->sk) {
4099     bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
4100     map->passive.inflight_req_id = PVCALLS_INVALID_ID;
4101     @@ -1032,8 +1076,8 @@ int pvcalls_front_release(struct socket *sock)
4102     spin_lock(&bedata->socket_lock);
4103     list_del(&map->list);
4104     spin_unlock(&bedata->socket_lock);
4105     - if (READ_ONCE(map->passive.inflight_req_id) !=
4106     - PVCALLS_INVALID_ID) {
4107     + if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
4108     + READ_ONCE(map->passive.inflight_req_id) != 0) {
4109     pvcalls_front_free_map(bedata,
4110     map->passive.accept_map);
4111     }
4112     diff --git a/fs/afs/flock.c b/fs/afs/flock.c
4113     index dc62d15a964b8..1bb300ef362b0 100644
4114     --- a/fs/afs/flock.c
4115     +++ b/fs/afs/flock.c
4116     @@ -208,7 +208,7 @@ again:
4117     /* The new front of the queue now owns the state variables. */
4118     next = list_entry(vnode->pending_locks.next,
4119     struct file_lock, fl_u.afs.link);
4120     - vnode->lock_key = afs_file_key(next->fl_file);
4121     + vnode->lock_key = key_get(afs_file_key(next->fl_file));
4122     vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
4123     vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
4124     goto again;
4125     @@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
4126     /* The new front of the queue now owns the state variables. */
4127     next = list_entry(vnode->pending_locks.next,
4128     struct file_lock, fl_u.afs.link);
4129     - vnode->lock_key = afs_file_key(next->fl_file);
4130     + vnode->lock_key = key_get(afs_file_key(next->fl_file));
4131     vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
4132     vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
4133     afs_lock_may_be_available(vnode);
4134     diff --git a/fs/afs/inode.c b/fs/afs/inode.c
4135     index 071075d775a95..0726e40db0f8b 100644
4136     --- a/fs/afs/inode.c
4137     +++ b/fs/afs/inode.c
4138     @@ -411,7 +411,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
4139     } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
4140     valid = true;
4141     } else {
4142     - vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
4143     vnode->cb_v_break = vnode->volume->cb_v_break;
4144     valid = false;
4145     }
4146     @@ -543,6 +542,8 @@ void afs_evict_inode(struct inode *inode)
4147     #endif
4148    
4149     afs_put_permits(rcu_access_pointer(vnode->permit_cache));
4150     + key_put(vnode->lock_key);
4151     + vnode->lock_key = NULL;
4152     _leave("");
4153     }
4154    
4155     diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
4156     index 041c27ea8de15..f74193da0e092 100644
4157     --- a/fs/ceph/snap.c
4158     +++ b/fs/ceph/snap.c
4159     @@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
4160     capsnap->size);
4161    
4162     spin_lock(&mdsc->snap_flush_lock);
4163     - list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
4164     + if (list_empty(&ci->i_snap_flush_item))
4165     + list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
4166     spin_unlock(&mdsc->snap_flush_lock);
4167     return 1; /* caller may want to ceph_flush_snaps */
4168     }
4169     diff --git a/fs/proc/base.c b/fs/proc/base.c
4170     index 7e9f07bf260d2..81d77b15b3479 100644
4171     --- a/fs/proc/base.c
4172     +++ b/fs/proc/base.c
4173     @@ -1084,10 +1084,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
4174    
4175     task_lock(p);
4176     if (!p->vfork_done && process_shares_mm(p, mm)) {
4177     - pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
4178     - task_pid_nr(p), p->comm,
4179     - p->signal->oom_score_adj, oom_adj,
4180     - task_pid_nr(task), task->comm);
4181     p->signal->oom_score_adj = oom_adj;
4182     if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
4183     p->signal->oom_score_adj_min = (short)oom_adj;
4184     diff --git a/include/keys/user-type.h b/include/keys/user-type.h
4185     index e098cbe27db54..12babe9915944 100644
4186     --- a/include/keys/user-type.h
4187     +++ b/include/keys/user-type.h
4188     @@ -31,7 +31,7 @@
4189     struct user_key_payload {
4190     struct rcu_head rcu; /* RCU destructor */
4191     unsigned short datalen; /* length of this data */
4192     - char data[0]; /* actual data */
4193     + char data[0] __aligned(__alignof__(u64)); /* actual data */
4194     };
4195    
4196     extern struct key_type key_type_user;
4197     diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
4198     index b1ce500fe8b3d..d756f2318efe0 100644
4199     --- a/include/linux/compiler-clang.h
4200     +++ b/include/linux/compiler-clang.h
4201     @@ -3,9 +3,8 @@
4202     #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
4203     #endif
4204    
4205     -/* Some compiler specific definitions are overwritten here
4206     - * for Clang compiler
4207     - */
4208     +/* Compiler specific definitions for Clang compiler */
4209     +
4210     #define uninitialized_var(x) x = *(&(x))
4211    
4212     /* same as gcc, this was present in clang-2.6 so we can assume it works
4213     diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
4214     index 0242f6eec4eaf..a8ff0ca0c3213 100644
4215     --- a/include/linux/compiler-gcc.h
4216     +++ b/include/linux/compiler-gcc.h
4217     @@ -58,10 +58,6 @@
4218     (typeof(ptr)) (__ptr + (off)); \
4219     })
4220    
4221     -/* Make the optimizer believe the variable can be manipulated arbitrarily. */
4222     -#define OPTIMIZER_HIDE_VAR(var) \
4223     - __asm__ ("" : "=r" (var) : "0" (var))
4224     -
4225     /*
4226     * A trick to suppress uninitialized variable warning without generating any
4227     * code
4228     diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
4229     index 4c7f9befa9f6c..f1fc60f103176 100644
4230     --- a/include/linux/compiler-intel.h
4231     +++ b/include/linux/compiler-intel.h
4232     @@ -5,9 +5,7 @@
4233    
4234     #ifdef __ECC
4235    
4236     -/* Some compiler specific definitions are overwritten here
4237     - * for Intel ECC compiler
4238     - */
4239     +/* Compiler specific definitions for Intel ECC compiler */
4240    
4241     #include <asm/intrinsics.h>
4242    
4243     diff --git a/include/linux/compiler.h b/include/linux/compiler.h
4244     index 681d866efb1eb..269d376f5a119 100644
4245     --- a/include/linux/compiler.h
4246     +++ b/include/linux/compiler.h
4247     @@ -158,7 +158,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
4248     #endif
4249    
4250     #ifndef OPTIMIZER_HIDE_VAR
4251     -#define OPTIMIZER_HIDE_VAR(var) barrier()
4252     +/* Make the optimizer believe the variable can be manipulated arbitrarily. */
4253     +#define OPTIMIZER_HIDE_VAR(var) \
4254     + __asm__ ("" : "=r" (var) : "0" (var))
4255     #endif
4256    
4257     /* Not-quite-unique ID. */
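
The two compiler-header hunks above move the asm-based OPTIMIZER_HIDE_VAR() definition out of compiler-gcc.h and make it the shared default in compiler.h, because the previous generic fallback, barrier(), only clobbers memory and does not stop the compiler from constant-propagating a value held in a register. A standalone sketch of the construct (the macro name here is illustrative, not the kernel's):

#include <stdio.h>

/* Empty asm that routes the variable through a register in/out pair, so
 * the compiler must treat its value as unknown afterwards. */
#define HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))

int main(void)
{
        int x = 42;

        HIDE_VAR(x);            /* x can no longer be assumed to be 42 */
        if (x == 42)            /* so this comparison is really emitted */
                printf("x is still 42 at run time\n");
        return 0;
}
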
4258     diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
4259     index 59ddf9af909e4..2dd0a9ed5b361 100644
4260     --- a/include/linux/qed/qed_chain.h
4261     +++ b/include/linux/qed/qed_chain.h
4262     @@ -663,6 +663,37 @@ out:
4263     static inline void qed_chain_set_prod(struct qed_chain *p_chain,
4264     u32 prod_idx, void *p_prod_elem)
4265     {
4266     + if (p_chain->mode == QED_CHAIN_MODE_PBL) {
4267     + u32 cur_prod, page_mask, page_cnt, page_diff;
4268     +
4269     + cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
4270     + p_chain->u.chain32.prod_idx;
4271     +
4272     + /* Assume that the number of elements in a page is a power of 2 */
4273     + page_mask = ~p_chain->elem_per_page_mask;
4274     +
4275     + /* Use "cur_prod - 1" and "prod_idx - 1" since the producer index
4276     + * reaches the first element of the next page before the page index
4277     + * is incremented. See qed_chain_produce().
4278     + * Index wrap-around is not a problem because the difference
4279     + * between the current and the given producer indices is always
4280     + * positive and lower than the chain's capacity.
4281     + */
4282     + page_diff = (((cur_prod - 1) & page_mask) -
4283     + ((prod_idx - 1) & page_mask)) /
4284     + p_chain->elem_per_page;
4285     +
4286     + page_cnt = qed_chain_get_page_cnt(p_chain);
4287     + if (is_chain_u16(p_chain))
4288     + p_chain->pbl.c.u16.prod_page_idx =
4289     + (p_chain->pbl.c.u16.prod_page_idx -
4290     + page_diff + page_cnt) % page_cnt;
4291     + else
4292     + p_chain->pbl.c.u32.prod_page_idx =
4293     + (p_chain->pbl.c.u32.prod_page_idx -
4294     + page_diff + page_cnt) % page_cnt;
4295     + }
4296     +
4297     if (is_chain_u16(p_chain))
4298     p_chain->u.chain16.prod_idx = (u16) prod_idx;
4299     else
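
The qed_chain_set_prod() hunk above rewinds the PBL producer page index by the number of whole pages between the current and the requested producer index. Below is a small arithmetic sketch of that computation with made-up chain geometry (16 elements per page, 4 pages), assuming, as the driver does, that the elements-per-page count is a power of two:

#include <stdio.h>

#define ELEMS_PER_PAGE 16u              /* power of two, as the chain assumes */
#define PAGE_CNT        4u

static unsigned int rewind_page_idx(unsigned int cur_prod,
                                    unsigned int new_prod,
                                    unsigned int cur_page_idx)
{
        unsigned int page_mask = ~(ELEMS_PER_PAGE - 1);
        unsigned int page_diff = (((cur_prod - 1) & page_mask) -
                                  ((new_prod - 1) & page_mask)) / ELEMS_PER_PAGE;

        return (cur_page_idx - page_diff + PAGE_CNT) % PAGE_CNT;
}

int main(void)
{
        /* producer was at index 35 (page 2); rewind it to index 20 (page 1) */
        printf("new producer page index: %u\n", rewind_page_idx(35, 20, 2));
        return 0;
}
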
4300     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
4301     index 5d69e208e8d91..a404d475acee3 100644
4302     --- a/include/linux/skbuff.h
4303     +++ b/include/linux/skbuff.h
4304     @@ -2392,7 +2392,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
4305    
4306     if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
4307     skb_set_transport_header(skb, keys.control.thoff);
4308     - else
4309     + else if (offset_hint >= 0)
4310     skb_set_transport_header(skb, offset_hint);
4311     }
4312    
4313     diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
4314     index cb462f9ab7dd5..e0348cb0a1dd7 100644
4315     --- a/include/linux/virtio_net.h
4316     +++ b/include/linux/virtio_net.h
4317     @@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
4318    
4319     if (!skb_partial_csum_set(skb, start, off))
4320     return -EINVAL;
4321     + } else {
4322     + /* GSO packets without NEEDS_CSUM do not set transport_offset.
4323     + * Probe, and drop the packet if it does not match one of the above types.
4324     + */
4325     + if (gso_type && skb->network_header) {
4326     + if (!skb->protocol)
4327     + virtio_net_hdr_set_proto(skb, hdr);
4328     +retry:
4329     + skb_probe_transport_header(skb, -1);
4330     + if (!skb_transport_header_was_set(skb)) {
4331     + /* UFO does not specify IPv4 or IPv6: try both */
4332     + if (gso_type & SKB_GSO_UDP &&
4333     + skb->protocol == htons(ETH_P_IP)) {
4334     + skb->protocol = htons(ETH_P_IPV6);
4335     + goto retry;
4336     + }
4337     + return -EINVAL;
4338     + }
4339     + }
4340     }
4341    
4342     if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
4343     diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
4344     index 0e355f4a3d763..0a3de10c6dece 100644
4345     --- a/include/net/netfilter/nf_flow_table.h
4346     +++ b/include/net/netfilter/nf_flow_table.h
4347     @@ -84,7 +84,6 @@ struct flow_offload {
4348     struct nf_flow_route {
4349     struct {
4350     struct dst_entry *dst;
4351     - int ifindex;
4352     } tuple[FLOW_OFFLOAD_DIR_MAX];
4353     };
4354    
4355     diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
4356     index 14565d703291b..e8baca85bac6a 100644
4357     --- a/include/uapi/linux/inet_diag.h
4358     +++ b/include/uapi/linux/inet_diag.h
4359     @@ -137,15 +137,21 @@ enum {
4360     INET_DIAG_TCLASS,
4361     INET_DIAG_SKMEMINFO,
4362     INET_DIAG_SHUTDOWN,
4363     - INET_DIAG_DCTCPINFO,
4364     - INET_DIAG_PROTOCOL, /* response attribute only */
4365     +
4366     + /*
4367     + * The following extensions cannot be requested in struct inet_diag_req_v2:
4368     + * its field idiag_ext has only 8 bits.
4369     + */
4370     +
4371     + INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */
4372     + INET_DIAG_PROTOCOL, /* response attribute only */
4373     INET_DIAG_SKV6ONLY,
4374     INET_DIAG_LOCALS,
4375     INET_DIAG_PEERS,
4376     INET_DIAG_PAD,
4377     - INET_DIAG_MARK,
4378     - INET_DIAG_BBRINFO,
4379     - INET_DIAG_CLASS_ID,
4380     + INET_DIAG_MARK, /* only with CAP_NET_ADMIN */
4381     + INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */
4382     + INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
4383     INET_DIAG_MD5SIG,
4384     __INET_DIAG_MAX,
4385     };
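
The comment added above notes that idiag_ext in struct inet_diag_req_v2 is a single byte, so only the first eight extensions can be requested directly; the later ones must piggyback on an earlier attribute (e.g. INET_DIAG_CLASS_ID requested as INET_DIAG_TCLASS). A quick check of the bit arithmetic, using the numeric values from this enum (INET_DIAG_SHUTDOWN is 8, INET_DIAG_DCTCPINFO is 9):

#include <stdio.h>
#include <stdint.h>

#define INET_DIAG_SHUTDOWN   8          /* last extension whose bit fits */
#define INET_DIAG_DCTCPINFO  9          /* first one that no longer fits */

int main(void)
{
        uint8_t idiag_ext = 0;

        idiag_ext |= 1 << (INET_DIAG_SHUTDOWN - 1);     /* 0x80: still fits */
        printf("SHUTDOWN request bit:  0x%02x\n", (unsigned)idiag_ext);

        /* 1 << (9 - 1) is 0x100; stored into the u8 field it becomes 0,
         * which is why DCTCPINFO has to be requested as INET_DIAG_VEGASINFO. */
        printf("DCTCPINFO request bit: 0x%02x\n",
               (unsigned)(uint8_t)(1 << (INET_DIAG_DCTCPINFO - 1)));
        return 0;
}
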
4386     diff --git a/include/video/udlfb.h b/include/video/udlfb.h
4387     index 3abd327bada64..7d09e54ae54e0 100644
4388     --- a/include/video/udlfb.h
4389     +++ b/include/video/udlfb.h
4390     @@ -36,12 +36,9 @@ struct dlfb_data {
4391     struct usb_device *udev;
4392     struct fb_info *info;
4393     struct urb_list urbs;
4394     - struct kref kref;
4395     char *backing_buffer;
4396     int fb_count;
4397     bool virtualized; /* true when physical usb device not present */
4398     - struct delayed_work init_framebuffer_work;
4399     - struct delayed_work free_framebuffer_work;
4400     atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
4401     atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
4402     char *edid; /* null until we read edid from hw or get from sysfs */
4403     diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
4404     index 8061a439ef18c..6a32933cae4ff 100644
4405     --- a/kernel/bpf/stackmap.c
4406     +++ b/kernel/bpf/stackmap.c
4407     @@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
4408    
4409     if (nhdr->n_type == BPF_BUILD_ID &&
4410     nhdr->n_namesz == sizeof("GNU") &&
4411     - nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
4412     + nhdr->n_descsz > 0 &&
4413     + nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
4414     memcpy(build_id,
4415     note_start + note_offs +
4416     ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
4417     - BPF_BUILD_ID_SIZE);
4418     + nhdr->n_descsz);
4419     + memset(build_id + nhdr->n_descsz, 0,
4420     + BPF_BUILD_ID_SIZE - nhdr->n_descsz);
4421     return 0;
4422     }
4423     new_offs = note_offs + sizeof(Elf32_Nhdr) +
4424     @@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
4425     return -EFAULT; /* page not mapped */
4426    
4427     ret = -EINVAL;
4428     - page_addr = page_address(page);
4429     + page_addr = kmap_atomic(page);
4430     ehdr = (Elf32_Ehdr *)page_addr;
4431    
4432     /* compare magic x7f "ELF" */
4433     @@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
4434     else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
4435     ret = stack_map_get_build_id_64(page_addr, build_id);
4436     out:
4437     + kunmap_atomic(page_addr);
4438     put_page(page);
4439     return ret;
4440     }
4441     @@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
4442     for (i = 0; i < trace_nr; i++) {
4443     id_offs[i].status = BPF_STACK_BUILD_ID_IP;
4444     id_offs[i].ip = ips[i];
4445     + memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
4446     }
4447     return;
4448     }
4449     @@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
4450     /* per entry fall back to ips */
4451     id_offs[i].status = BPF_STACK_BUILD_ID_IP;
4452     id_offs[i].ip = ips[i];
4453     + memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
4454     continue;
4455     }
4456     id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
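
The stackmap hunks above stop requiring an exact 20-byte build-id descriptor: any note descriptor that fits in the buffer is copied and the remainder is zeroed, and unresolved fallback entries are zero-filled as well. A userspace sketch of the bounded copy, with the 20-byte buffer size mirroring the kernel's SHA-1-sized BPF_BUILD_ID_SIZE:

#include <stdio.h>
#include <string.h>

#define BUILD_ID_SIZE 20

static int copy_build_id(unsigned char *dst, const unsigned char *desc,
                         size_t descsz)
{
        if (descsz == 0 || descsz > BUILD_ID_SIZE)
                return -1;              /* reject empty or oversized notes */

        memcpy(dst, desc, descsz);      /* copy only what the note provides */
        memset(dst + descsz, 0, BUILD_ID_SIZE - descsz);   /* pad the tail */
        return 0;
}

int main(void)
{
        unsigned char id[BUILD_ID_SIZE];
        const unsigned char md5_style[16] = { 0xde, 0xad, 0xbe, 0xef };

        /* shorter (e.g. 16-byte MD5/uuid) build ids are now accepted and padded */
        if (!copy_build_id(id, md5_style, sizeof(md5_style)))
                printf("accepted %zu-byte build id\n", sizeof(md5_style));
        return 0;
}
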
4457     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4458     index bf6f1d70484dc..17bd0c0dfa98a 100644
4459     --- a/kernel/trace/trace.c
4460     +++ b/kernel/trace/trace.c
4461     @@ -3383,6 +3383,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
4462     const char tgid_space[] = " ";
4463     const char space[] = " ";
4464    
4465     + print_event_info(buf, m);
4466     +
4467     seq_printf(m, "# %s _-----=> irqs-off\n",
4468     tgid ? tgid_space : space);
4469     seq_printf(m, "# %s / _----=> need-resched\n",
4470     diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4471     index 149b6f4cf0233..89d4439516f6c 100644
4472     --- a/mm/mempolicy.c
4473     +++ b/mm/mempolicy.c
4474     @@ -1300,7 +1300,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
4475     nodemask_t *nodes)
4476     {
4477     unsigned long copy = ALIGN(maxnode-1, 64) / 8;
4478     - const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
4479     + unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
4480    
4481     if (copy > nbytes) {
4482     if (copy > PAGE_SIZE)
4483     @@ -1477,7 +1477,7 @@ static int kernel_get_mempolicy(int __user *policy,
4484     int uninitialized_var(pval);
4485     nodemask_t nodes;
4486    
4487     - if (nmask != NULL && maxnode < MAX_NUMNODES)
4488     + if (nmask != NULL && maxnode < nr_node_ids)
4489     return -EINVAL;
4490    
4491     err = do_get_mempolicy(&pval, &nodes, addr, flags);
4492     @@ -1513,7 +1513,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
4493     unsigned long nr_bits, alloc_size;
4494     DECLARE_BITMAP(bm, MAX_NUMNODES);
4495    
4496     - nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
4497     + nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
4498     alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
4499    
4500     if (nmask)
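
The mm/mempolicy.c hunks above switch get_mempolicy()'s buffer-size expectations from the compile-time MAX_NUMNODES to the runtime nr_node_ids, so callers that size their nodemask from the number of possible nodes are no longer rejected or over-copied. A rough illustration of the difference in required buffer size, with made-up values:

#include <stdio.h>

#define BITS_PER_LONG    (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
        unsigned long max_numnodes = 1024;   /* e.g. a large CONFIG_NODES_SHIFT */
        unsigned long nr_node_ids  = 4;      /* nodes actually possible here */

        printf("buffer implied by MAX_NUMNODES: %zu bytes\n",
               BITS_TO_LONGS(max_numnodes) * sizeof(long));
        printf("buffer implied by nr_node_ids:  %zu bytes\n",
               BITS_TO_LONGS(nr_node_ids) * sizeof(long));
        return 0;
}
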
4501     diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
4502     index 3899fa6e201dd..a2976adeeedce 100644
4503     --- a/net/batman-adv/soft-interface.c
4504     +++ b/net/batman-adv/soft-interface.c
4505     @@ -227,6 +227,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
4506    
4507     switch (ntohs(ethhdr->h_proto)) {
4508     case ETH_P_8021Q:
4509     + if (!pskb_may_pull(skb, sizeof(*vhdr)))
4510     + goto dropped;
4511     vhdr = vlan_eth_hdr(skb);
4512    
4513     /* drop batman-in-batman packets to prevent loops */
4514     diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
4515     index 502f663495308..4d4b9b5ea1c17 100644
4516     --- a/net/bridge/br_fdb.c
4517     +++ b/net/bridge/br_fdb.c
4518     @@ -1088,6 +1088,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
4519     err = -ENOMEM;
4520     goto err_unlock;
4521     }
4522     + if (swdev_notify)
4523     + fdb->added_by_user = 1;
4524     fdb->added_by_external_learn = 1;
4525     fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
4526     } else {
4527     @@ -1107,6 +1109,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
4528     modified = true;
4529     }
4530    
4531     + if (swdev_notify)
4532     + fdb->added_by_user = 1;
4533     +
4534     if (modified)
4535     fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
4536     }
4537     diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
4538     index 6dec8e9b34511..20ed7adcf1cc4 100644
4539     --- a/net/bridge/br_multicast.c
4540     +++ b/net/bridge/br_multicast.c
4541     @@ -1420,14 +1420,7 @@ static void br_multicast_query_received(struct net_bridge *br,
4542     return;
4543    
4544     br_multicast_update_query_timer(br, query, max_delay);
4545     -
4546     - /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
4547     - * the arrival port for IGMP Queries where the source address
4548     - * is 0.0.0.0 should not be added to router port list.
4549     - */
4550     - if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
4551     - saddr->proto == htons(ETH_P_IPV6))
4552     - br_multicast_mark_router(br, port);
4553     + br_multicast_mark_router(br, port);
4554     }
4555    
4556     static void br_ip4_multicast_query(struct net_bridge *br,
4557     diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
4558     index a127d14421164..f7d7f32ac673c 100644
4559     --- a/net/ceph/messenger.c
4560     +++ b/net/ceph/messenger.c
4561     @@ -2091,6 +2091,8 @@ static int process_connect(struct ceph_connection *con)
4562     dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
4563    
4564     if (con->auth) {
4565     + int len = le32_to_cpu(con->in_reply.authorizer_len);
4566     +
4567     /*
4568     * Any connection that defines ->get_authorizer()
4569     * should also define ->add_authorizer_challenge() and
4570     @@ -2100,8 +2102,7 @@ static int process_connect(struct ceph_connection *con)
4571     */
4572     if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
4573     ret = con->ops->add_authorizer_challenge(
4574     - con, con->auth->authorizer_reply_buf,
4575     - le32_to_cpu(con->in_reply.authorizer_len));
4576     + con, con->auth->authorizer_reply_buf, len);
4577     if (ret < 0)
4578     return ret;
4579    
4580     @@ -2111,10 +2112,12 @@ static int process_connect(struct ceph_connection *con)
4581     return 0;
4582     }
4583    
4584     - ret = con->ops->verify_authorizer_reply(con);
4585     - if (ret < 0) {
4586     - con->error_msg = "bad authorize reply";
4587     - return ret;
4588     + if (len) {
4589     + ret = con->ops->verify_authorizer_reply(con);
4590     + if (ret < 0) {
4591     + con->error_msg = "bad authorize reply";
4592     + return ret;
4593     + }
4594     }
4595     }
4596    
4597     diff --git a/net/core/filter.c b/net/core/filter.c
4598     index 8c2411fb25090..fb0080e84bd43 100644
4599     --- a/net/core/filter.c
4600     +++ b/net/core/filter.c
4601     @@ -3930,7 +3930,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4602     sk->sk_rcvlowat = val ? : 1;
4603     break;
4604     case SO_MARK:
4605     - sk->sk_mark = val;
4606     + if (sk->sk_mark != val) {
4607     + sk->sk_mark = val;
4608     + sk_dst_reset(sk);
4609     + }
4610     break;
4611     default:
4612     ret = -EINVAL;
4613     @@ -4001,7 +4004,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4614     /* Only some options are supported */
4615     switch (optname) {
4616     case TCP_BPF_IW:
4617     - if (val <= 0 || tp->data_segs_out > 0)
4618     + if (val <= 0 || tp->data_segs_out > tp->syn_data)
4619     ret = -EINVAL;
4620     else
4621     tp->snd_cwnd = val;
4622     diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
4623     index 1a4e9ff02762e..5731670c560b0 100644
4624     --- a/net/ipv4/inet_diag.c
4625     +++ b/net/ipv4/inet_diag.c
4626     @@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
4627     + nla_total_size(1) /* INET_DIAG_TOS */
4628     + nla_total_size(1) /* INET_DIAG_TCLASS */
4629     + nla_total_size(4) /* INET_DIAG_MARK */
4630     + + nla_total_size(4) /* INET_DIAG_CLASS_ID */
4631     + nla_total_size(sizeof(struct inet_diag_meminfo))
4632     + nla_total_size(sizeof(struct inet_diag_msg))
4633     + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
4634     @@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
4635     goto errout;
4636     }
4637    
4638     - if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) {
4639     + if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
4640     + ext & (1 << (INET_DIAG_TCLASS - 1))) {
4641     u32 classid = 0;
4642    
4643     #ifdef CONFIG_SOCK_CGROUP_DATA
4644     classid = sock_cgroup_classid(&sk->sk_cgrp_data);
4645     #endif
4646     + /* Fall back to the socket priority if the class id isn't set.
4647     + * Classful qdiscs use it as a direct reference to a class.
4648     + * For cgroup2 the classid is always zero.
4649     + */
4650     + if (!classid)
4651     + classid = sk->sk_priority;
4652    
4653     if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
4654     goto errout;
4655     diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
4656     index fb1e7f237f531..3cd237b42f446 100644
4657     --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
4658     +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
4659     @@ -56,7 +56,7 @@ struct clusterip_config {
4660     #endif
4661     enum clusterip_hashmode hash_mode; /* which hashing mode */
4662     u_int32_t hash_initval; /* hash initialization */
4663     - struct rcu_head rcu;
4664     + struct rcu_head rcu; /* for call_rcu_bh */
4665     struct net *net; /* netns for pernet list */
4666     char ifname[IFNAMSIZ]; /* device ifname */
4667     };
4668     @@ -72,6 +72,8 @@ struct clusterip_net {
4669    
4670     #ifdef CONFIG_PROC_FS
4671     struct proc_dir_entry *procdir;
4672     + /* mutex protects the config->pde */
4673     + struct mutex mutex;
4674     #endif
4675     };
4676    
4677     @@ -118,17 +120,18 @@ clusterip_config_entry_put(struct clusterip_config *c)
4678    
4679     local_bh_disable();
4680     if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
4681     + list_del_rcu(&c->list);
4682     + spin_unlock(&cn->lock);
4683     + local_bh_enable();
4684     /* In case anyone still accesses the file, the open/close
4685     * functions are also incrementing the refcount on their own,
4686     * so it's safe to remove the entry even if it's in use. */
4687     #ifdef CONFIG_PROC_FS
4688     + mutex_lock(&cn->mutex);
4689     if (cn->procdir)
4690     proc_remove(c->pde);
4691     + mutex_unlock(&cn->mutex);
4692     #endif
4693     - list_del_rcu(&c->list);
4694     - spin_unlock(&cn->lock);
4695     - local_bh_enable();
4696     -
4697     return;
4698     }
4699     local_bh_enable();
4700     @@ -278,9 +281,11 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
4701    
4702     /* create proc dir entry */
4703     sprintf(buffer, "%pI4", &ip);
4704     + mutex_lock(&cn->mutex);
4705     c->pde = proc_create_data(buffer, 0600,
4706     cn->procdir,
4707     &clusterip_proc_fops, c);
4708     + mutex_unlock(&cn->mutex);
4709     if (!c->pde) {
4710     err = -ENOMEM;
4711     goto err;
4712     @@ -833,6 +838,7 @@ static int clusterip_net_init(struct net *net)
4713     pr_err("Unable to proc dir entry\n");
4714     return -ENOMEM;
4715     }
4716     + mutex_init(&cn->mutex);
4717     #endif /* CONFIG_PROC_FS */
4718    
4719     return 0;
4720     @@ -841,9 +847,12 @@ static int clusterip_net_init(struct net *net)
4721     static void clusterip_net_exit(struct net *net)
4722     {
4723     struct clusterip_net *cn = clusterip_pernet(net);
4724     +
4725     #ifdef CONFIG_PROC_FS
4726     + mutex_lock(&cn->mutex);
4727     proc_remove(cn->procdir);
4728     cn->procdir = NULL;
4729     + mutex_unlock(&cn->mutex);
4730     #endif
4731     nf_unregister_net_hook(net, &cip_arp_ops);
4732     }
4733     diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
4734     index 8b075f0bc3516..6d0b1f3e927bd 100644
4735     --- a/net/ipv6/netfilter.c
4736     +++ b/net/ipv6/netfilter.c
4737     @@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
4738     struct sock *sk = sk_to_full_sk(skb->sk);
4739     unsigned int hh_len;
4740     struct dst_entry *dst;
4741     + int strict = (ipv6_addr_type(&iph->daddr) &
4742     + (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
4743     struct flowi6 fl6 = {
4744     .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
4745     - rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
4746     + strict ? skb_dst(skb)->dev->ifindex : 0,
4747     .flowi6_mark = skb->mark,
4748     .flowi6_uid = sock_net_uid(net, sk),
4749     .daddr = iph->daddr,
4750     diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
4751     index 8d0ba757a46ce..9b2f272ca1649 100644
4752     --- a/net/ipv6/seg6.c
4753     +++ b/net/ipv6/seg6.c
4754     @@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
4755     rcu_read_unlock();
4756    
4757     genlmsg_end(msg, hdr);
4758     - genlmsg_reply(msg, info);
4759     -
4760     - return 0;
4761     + return genlmsg_reply(msg, info);
4762    
4763     nla_put_failure:
4764     rcu_read_unlock();
4765     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
4766     index eb162bd0e0419..da6d5a3f53995 100644
4767     --- a/net/ipv6/sit.c
4768     +++ b/net/ipv6/sit.c
4769     @@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
4770     }
4771    
4772     err = 0;
4773     - if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
4774     + if (__in6_dev_get(skb->dev) &&
4775     + !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
4776     goto out;
4777    
4778     if (t->parms.iph.daddr == 0)
4779     diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
4780     index 76ba2f34ef6b1..cab8b2b647f96 100644
4781     --- a/net/ipv6/udp.c
4782     +++ b/net/ipv6/udp.c
4783     @@ -1322,10 +1322,7 @@ do_udp_sendmsg:
4784     ipc6.opt = opt;
4785    
4786     fl6.flowi6_proto = sk->sk_protocol;
4787     - if (!ipv6_addr_any(daddr))
4788     - fl6.daddr = *daddr;
4789     - else
4790     - fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
4791     + fl6.daddr = *daddr;
4792     if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
4793     fl6.saddr = np->saddr;
4794     fl6.fl6_sport = inet->inet_sport;
4795     @@ -1353,6 +1350,9 @@ do_udp_sendmsg:
4796     }
4797     }
4798    
4799     + if (ipv6_addr_any(&fl6.daddr))
4800     + fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
4801     +
4802     final_p = fl6_update_dst(&fl6, opt, &final);
4803     if (final_p)
4804     connected = false;
4805     diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
4806     index 5d22eda8a6b1e..c2abe9db1ea24 100644
4807     --- a/net/mac80211/cfg.c
4808     +++ b/net/mac80211/cfg.c
4809     @@ -887,6 +887,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
4810     BSS_CHANGED_P2P_PS |
4811     BSS_CHANGED_TXPOWER;
4812     int err;
4813     + int prev_beacon_int;
4814    
4815     old = sdata_dereference(sdata->u.ap.beacon, sdata);
4816     if (old)
4817     @@ -909,6 +910,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
4818    
4819     sdata->needed_rx_chains = sdata->local->rx_chains;
4820    
4821     + prev_beacon_int = sdata->vif.bss_conf.beacon_int;
4822     sdata->vif.bss_conf.beacon_int = params->beacon_interval;
4823    
4824     mutex_lock(&local->mtx);
4825     @@ -917,8 +919,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
4826     if (!err)
4827     ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
4828     mutex_unlock(&local->mtx);
4829     - if (err)
4830     + if (err) {
4831     + sdata->vif.bss_conf.beacon_int = prev_beacon_int;
4832     return err;
4833     + }
4834    
4835     /*
4836     * Apply control port protocol, this allows us to
4837     diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
4838     index 21526630bf655..e84103b405341 100644
4839     --- a/net/mac80211/mesh.h
4840     +++ b/net/mac80211/mesh.h
4841     @@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
4842     * @dst: mesh path destination mac address
4843     * @mpp: mesh proxy mac address
4844     * @rhash: rhashtable list pointer
4845     + * @walk_list: linked list containing all mesh_path objects.
4846     * @gate_list: list pointer for known gates list
4847     * @sdata: mesh subif
4848     * @next_hop: mesh neighbor to which frames for this destination will be
4849     @@ -105,6 +106,7 @@ struct mesh_path {
4850     u8 dst[ETH_ALEN];
4851     u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
4852     struct rhash_head rhash;
4853     + struct hlist_node walk_list;
4854     struct hlist_node gate_list;
4855     struct ieee80211_sub_if_data *sdata;
4856     struct sta_info __rcu *next_hop;
4857     @@ -133,12 +135,16 @@ struct mesh_path {
4858     * gate's mpath may or may not be resolved and active.
4859     * @gates_lock: protects updates to known_gates
4860     * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
4861     + * @walk_head: linked list containing all mesh_path objects
4862     + * @walk_lock: lock protecting walk_head
4863     * @entries: number of entries in the table
4864     */
4865     struct mesh_table {
4866     struct hlist_head known_gates;
4867     spinlock_t gates_lock;
4868     struct rhashtable rhead;
4869     + struct hlist_head walk_head;
4870     + spinlock_t walk_lock;
4871     atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
4872     };
4873    
4874     diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
4875     index a5125624a76dc..c3a7396fb9556 100644
4876     --- a/net/mac80211/mesh_pathtbl.c
4877     +++ b/net/mac80211/mesh_pathtbl.c
4878     @@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
4879     return NULL;
4880    
4881     INIT_HLIST_HEAD(&newtbl->known_gates);
4882     + INIT_HLIST_HEAD(&newtbl->walk_head);
4883     atomic_set(&newtbl->entries, 0);
4884     spin_lock_init(&newtbl->gates_lock);
4885     + spin_lock_init(&newtbl->walk_lock);
4886    
4887     return newtbl;
4888     }
4889     @@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
4890     static struct mesh_path *
4891     __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
4892     {
4893     - int i = 0, ret;
4894     - struct mesh_path *mpath = NULL;
4895     - struct rhashtable_iter iter;
4896     -
4897     - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
4898     - if (ret)
4899     - return NULL;
4900     -
4901     - rhashtable_walk_start(&iter);
4902     + int i = 0;
4903     + struct mesh_path *mpath;
4904    
4905     - while ((mpath = rhashtable_walk_next(&iter))) {
4906     - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
4907     - continue;
4908     - if (IS_ERR(mpath))
4909     - break;
4910     + hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
4911     if (i++ == idx)
4912     break;
4913     }
4914     - rhashtable_walk_stop(&iter);
4915     - rhashtable_walk_exit(&iter);
4916    
4917     - if (IS_ERR(mpath) || !mpath)
4918     + if (!mpath)
4919     return NULL;
4920    
4921     if (mpath_expired(mpath)) {
4922     @@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
4923     return ERR_PTR(-ENOMEM);
4924    
4925     tbl = sdata->u.mesh.mesh_paths;
4926     + spin_lock_bh(&tbl->walk_lock);
4927     do {
4928     ret = rhashtable_lookup_insert_fast(&tbl->rhead,
4929     &new_mpath->rhash,
4930     @@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
4931     mpath = rhashtable_lookup_fast(&tbl->rhead,
4932     dst,
4933     mesh_rht_params);
4934     -
4935     + else if (!ret)
4936     + hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
4937     } while (unlikely(ret == -EEXIST && !mpath));
4938     + spin_unlock_bh(&tbl->walk_lock);
4939    
4940     - if (ret && ret != -EEXIST)
4941     - return ERR_PTR(ret);
4942     -
4943     - /* At this point either new_mpath was added, or we found a
4944     - * matching entry already in the table; in the latter case
4945     - * free the unnecessary new entry.
4946     - */
4947     - if (ret == -EEXIST) {
4948     + if (ret) {
4949     kfree(new_mpath);
4950     +
4951     + if (ret != -EEXIST)
4952     + return ERR_PTR(ret);
4953     +
4954     new_mpath = mpath;
4955     }
4956     +
4957     sdata->u.mesh.mesh_paths_generation++;
4958     return new_mpath;
4959     }
4960     @@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
4961    
4962     memcpy(new_mpath->mpp, mpp, ETH_ALEN);
4963     tbl = sdata->u.mesh.mpp_paths;
4964     +
4965     + spin_lock_bh(&tbl->walk_lock);
4966     ret = rhashtable_lookup_insert_fast(&tbl->rhead,
4967     &new_mpath->rhash,
4968     mesh_rht_params);
4969     + if (!ret)
4970     + hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
4971     + spin_unlock_bh(&tbl->walk_lock);
4972     +
4973     + if (ret)
4974     + kfree(new_mpath);
4975    
4976     sdata->u.mesh.mpp_paths_generation++;
4977     return ret;
4978     @@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
4979     struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
4980     static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
4981     struct mesh_path *mpath;
4982     - struct rhashtable_iter iter;
4983     - int ret;
4984     -
4985     - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
4986     - if (ret)
4987     - return;
4988    
4989     - rhashtable_walk_start(&iter);
4990     -
4991     - while ((mpath = rhashtable_walk_next(&iter))) {
4992     - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
4993     - continue;
4994     - if (IS_ERR(mpath))
4995     - break;
4996     + rcu_read_lock();
4997     + hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
4998     if (rcu_access_pointer(mpath->next_hop) == sta &&
4999     mpath->flags & MESH_PATH_ACTIVE &&
5000     !(mpath->flags & MESH_PATH_FIXED)) {
5001     @@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
5002     WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
5003     }
5004     }
5005     - rhashtable_walk_stop(&iter);
5006     - rhashtable_walk_exit(&iter);
5007     + rcu_read_unlock();
5008     }
5009    
5010     static void mesh_path_free_rcu(struct mesh_table *tbl,
5011     @@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
5012    
5013     static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
5014     {
5015     + hlist_del_rcu(&mpath->walk_list);
5016     rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
5017     mesh_path_free_rcu(tbl, mpath);
5018     }
5019     @@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
5020     struct ieee80211_sub_if_data *sdata = sta->sdata;
5021     struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
5022     struct mesh_path *mpath;
5023     - struct rhashtable_iter iter;
5024     - int ret;
5025     -
5026     - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5027     - if (ret)
5028     - return;
5029     -
5030     - rhashtable_walk_start(&iter);
5031     -
5032     - while ((mpath = rhashtable_walk_next(&iter))) {
5033     - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5034     - continue;
5035     - if (IS_ERR(mpath))
5036     - break;
5037     + struct hlist_node *n;
5038    
5039     + spin_lock_bh(&tbl->walk_lock);
5040     + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5041     if (rcu_access_pointer(mpath->next_hop) == sta)
5042     __mesh_path_del(tbl, mpath);
5043     }
5044     -
5045     - rhashtable_walk_stop(&iter);
5046     - rhashtable_walk_exit(&iter);
5047     + spin_unlock_bh(&tbl->walk_lock);
5048     }
5049    
5050     static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
5051     @@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
5052     {
5053     struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
5054     struct mesh_path *mpath;
5055     - struct rhashtable_iter iter;
5056     - int ret;
5057     -
5058     - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5059     - if (ret)
5060     - return;
5061     -
5062     - rhashtable_walk_start(&iter);
5063     -
5064     - while ((mpath = rhashtable_walk_next(&iter))) {
5065     - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5066     - continue;
5067     - if (IS_ERR(mpath))
5068     - break;
5069     + struct hlist_node *n;
5070    
5071     + spin_lock_bh(&tbl->walk_lock);
5072     + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5073     if (ether_addr_equal(mpath->mpp, proxy))
5074     __mesh_path_del(tbl, mpath);
5075     }
5076     -
5077     - rhashtable_walk_stop(&iter);
5078     - rhashtable_walk_exit(&iter);
5079     + spin_unlock_bh(&tbl->walk_lock);
5080     }
5081    
5082     static void table_flush_by_iface(struct mesh_table *tbl)
5083     {
5084     struct mesh_path *mpath;
5085     - struct rhashtable_iter iter;
5086     - int ret;
5087     -
5088     - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
5089     - if (ret)
5090     - return;
5091     -
5092     - rhashtable_walk_start(&iter);
5093     + struct hlist_node *n;
5094    
5095     - while ((mpath = rhashtable_walk_next(&iter))) {
5096     - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5097     - continue;
5098     - if (IS_ERR(mpath))
5099     - break;
5100     + spin_lock_bh(&tbl->walk_lock);
5101     + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5102     __mesh_path_del(tbl, mpath);
5103     }
5104     -
5105     - rhashtable_walk_stop(&iter);
5106     - rhashtable_walk_exit(&iter);
5107     + spin_unlock_bh(&tbl->walk_lock);
5108     }
5109    
5110     /**
5111     @@ -675,7 +624,7 @@ static int table_path_del(struct mesh_table *tbl,
5112     {
5113     struct mesh_path *mpath;
5114    
5115     - rcu_read_lock();
5116     + spin_lock_bh(&tbl->walk_lock);
5117     mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
5118     if (!mpath) {
5119     rcu_read_unlock();
5120     @@ -683,7 +632,7 @@ static int table_path_del(struct mesh_table *tbl,
5121     }
5122    
5123     __mesh_path_del(tbl, mpath);
5124     - rcu_read_unlock();
5125     + spin_unlock_bh(&tbl->walk_lock);
5126     return 0;
5127     }
5128    
5129     @@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
5130     struct mesh_table *tbl)
5131     {
5132     struct mesh_path *mpath;
5133     - struct rhashtable_iter iter;
5134     - int ret;
5135     + struct hlist_node *n;
5136    
5137     - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
5138     - if (ret)
5139     - return;
5140     -
5141     - rhashtable_walk_start(&iter);
5142     -
5143     - while ((mpath = rhashtable_walk_next(&iter))) {
5144     - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
5145     - continue;
5146     - if (IS_ERR(mpath))
5147     - break;
5148     + spin_lock_bh(&tbl->walk_lock);
5149     + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
5150     if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
5151     (!(mpath->flags & MESH_PATH_FIXED)) &&
5152     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
5153     __mesh_path_del(tbl, mpath);
5154     }
5155     -
5156     - rhashtable_walk_stop(&iter);
5157     - rhashtable_walk_exit(&iter);
5158     + spin_unlock_bh(&tbl->walk_lock);
5159     }
5160    
5161     void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
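
The mesh_pathtbl.c rework above keeps every mesh_path on a plain linked walk list (walk_head/walk_lock) next to the rhashtable, so flushing and index-based lookup become simple list walks under one lock instead of rhashtable walkers that can return -EAGAIN. A simplified userspace sketch of that shape, using a pthread mutex as a stand-in for the spinlock and illustrative types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

struct path {
        char dst[6];
        struct path *walk_next;         /* mirrors the new walk_list member */
};

struct table {
        /* the real table also has an rhashtable keyed by dst; omitted here */
        struct path *walk_head;
        pthread_mutex_t walk_lock;      /* stand-in for walk_lock */
};

static void table_add(struct table *t, struct path *p)
{
        pthread_mutex_lock(&t->walk_lock);
        p->walk_next = t->walk_head;    /* hlist_add_head() equivalent */
        t->walk_head = p;
        pthread_mutex_unlock(&t->walk_lock);
}

static void table_flush(struct table *t)
{
        struct path *p, *n;

        pthread_mutex_lock(&t->walk_lock);
        for (p = t->walk_head; p; p = n) {   /* hlist_for_each_entry_safe() */
                n = p->walk_next;
                free(p);                     /* __mesh_path_del() equivalent */
        }
        t->walk_head = NULL;
        pthread_mutex_unlock(&t->walk_lock);
}

int main(void)
{
        struct table t = { .walk_head = NULL,
                           .walk_lock = PTHREAD_MUTEX_INITIALIZER };
        struct path *p = calloc(1, sizeof(*p));

        memcpy(p->dst, "\xaa\xbb\xcc\xdd\xee\xff", 6);
        table_add(&t, p);
        table_flush(&t);
        printf("flushed\n");
        return 0;
}
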
5162     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
5163     index 51ad330bf8e83..828348b2a504d 100644
5164     --- a/net/mac80211/rx.c
5165     +++ b/net/mac80211/rx.c
5166     @@ -2598,6 +2598,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
5167     struct ieee80211_sub_if_data *sdata = rx->sdata;
5168     struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
5169     u16 ac, q, hdrlen;
5170     + int tailroom = 0;
5171    
5172     hdr = (struct ieee80211_hdr *) skb->data;
5173     hdrlen = ieee80211_hdrlen(hdr->frame_control);
5174     @@ -2684,8 +2685,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
5175     if (!ifmsh->mshcfg.dot11MeshForwarding)
5176     goto out;
5177    
5178     + if (sdata->crypto_tx_tailroom_needed_cnt)
5179     + tailroom = IEEE80211_ENCRYPT_TAILROOM;
5180     +
5181     fwd_skb = skb_copy_expand(skb, local->tx_headroom +
5182     - sdata->encrypt_headroom, 0, GFP_ATOMIC);
5183     + sdata->encrypt_headroom,
5184     + tailroom, GFP_ATOMIC);
5185     if (!fwd_skb)
5186     goto out;
5187    
5188     diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
5189     index d8125616edc79..e1537ace2b90c 100644
5190     --- a/net/netfilter/nf_flow_table_core.c
5191     +++ b/net/netfilter/nf_flow_table_core.c
5192     @@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
5193     {
5194     struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
5195     struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
5196     + struct dst_entry *other_dst = route->tuple[!dir].dst;
5197     struct dst_entry *dst = route->tuple[dir].dst;
5198    
5199     ft->dir = dir;
5200     @@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
5201     ft->src_port = ctt->src.u.tcp.port;
5202     ft->dst_port = ctt->dst.u.tcp.port;
5203    
5204     - ft->iifidx = route->tuple[dir].ifindex;
5205     - ft->oifidx = route->tuple[!dir].ifindex;
5206     + ft->iifidx = other_dst->dev->ifindex;
5207     + ft->oifidx = dst->dev->ifindex;
5208     ft->dst_cache = dst;
5209     }
5210    
5211     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5212     index ed9af46720e14..7d424fd270255 100644
5213     --- a/net/netfilter/nf_tables_api.c
5214     +++ b/net/netfilter/nf_tables_api.c
5215     @@ -291,6 +291,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
5216     int err;
5217    
5218     list_for_each_entry(rule, &ctx->chain->rules, list) {
5219     + if (!nft_is_active_next(ctx->net, rule))
5220     + continue;
5221     +
5222     err = nft_delrule(ctx, rule);
5223     if (err < 0)
5224     return err;
5225     @@ -4439,6 +4442,8 @@ err6:
5226     err5:
5227     kfree(trans);
5228     err4:
5229     + if (obj)
5230     + obj->use--;
5231     kfree(elem.priv);
5232     err3:
5233     if (nla[NFTA_SET_ELEM_DATA] != NULL)
5234     diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
5235     index 00db27dfd2ff7..b0bc130947c94 100644
5236     --- a/net/netfilter/nfnetlink_osf.c
5237     +++ b/net/netfilter/nfnetlink_osf.c
5238     @@ -71,6 +71,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
5239     int ttl_check,
5240     struct nf_osf_hdr_ctx *ctx)
5241     {
5242     + const __u8 *optpinit = ctx->optp;
5243     unsigned int check_WSS = 0;
5244     int fmatch = FMATCH_WRONG;
5245     int foptsize, optnum;
5246     @@ -160,6 +161,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
5247     }
5248     }
5249    
5250     + if (fmatch != FMATCH_OK)
5251     + ctx->optp = optpinit;
5252     +
5253     return fmatch == FMATCH_OK;
5254     }
5255    
5256     diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
5257     index 29d6fc73caf99..38da1f5436b48 100644
5258     --- a/net/netfilter/nft_compat.c
5259     +++ b/net/netfilter/nft_compat.c
5260     @@ -282,6 +282,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
5261     {
5262     struct xt_target *target = expr->ops->data;
5263     void *info = nft_expr_priv(expr);
5264     + struct module *me = target->me;
5265     struct xt_tgdtor_param par;
5266    
5267     par.net = ctx->net;
5268     @@ -292,7 +293,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
5269     par.target->destroy(&par);
5270    
5271     if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
5272     - module_put(target->me);
5273     + module_put(me);
5274     }
5275    
5276     static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
5277     diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
5278     index 5fd4c57c79cc9..436cc14cfc59b 100644
5279     --- a/net/netfilter/nft_flow_offload.c
5280     +++ b/net/netfilter/nft_flow_offload.c
5281     @@ -12,6 +12,7 @@
5282     #include <net/netfilter/nf_conntrack_core.h>
5283     #include <linux/netfilter/nf_conntrack_common.h>
5284     #include <net/netfilter/nf_flow_table.h>
5285     +#include <net/netfilter/nf_conntrack_helper.h>
5286    
5287     struct nft_flow_offload {
5288     struct nft_flowtable *flowtable;
5289     @@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
5290     memset(&fl, 0, sizeof(fl));
5291     switch (nft_pf(pkt)) {
5292     case NFPROTO_IPV4:
5293     - fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
5294     + fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
5295     + fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
5296     break;
5297     case NFPROTO_IPV6:
5298     - fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
5299     + fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
5300     + fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
5301     break;
5302     }
5303    
5304     @@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
5305     return -ENOENT;
5306    
5307     route->tuple[dir].dst = this_dst;
5308     - route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
5309     route->tuple[!dir].dst = other_dst;
5310     - route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
5311    
5312     return 0;
5313     }
5314     @@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
5315     {
5316     struct nft_flow_offload *priv = nft_expr_priv(expr);
5317     struct nf_flowtable *flowtable = &priv->flowtable->data;
5318     + const struct nf_conn_help *help;
5319     enum ip_conntrack_info ctinfo;
5320     struct nf_flow_route route;
5321     struct flow_offload *flow;
5322     @@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
5323     goto out;
5324     }
5325    
5326     - if (test_bit(IPS_HELPER_BIT, &ct->status))
5327     + help = nfct_help(ct);
5328     + if (help)
5329     goto out;
5330    
5331     if (ctinfo == IP_CT_NEW ||
5332     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5333     index c76c21604ffd9..fd16fb836df28 100644
5334     --- a/net/packet/af_packet.c
5335     +++ b/net/packet/af_packet.c
5336     @@ -4275,7 +4275,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5337     rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
5338     if (unlikely(rb->frames_per_block == 0))
5339     goto out;
5340     - if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
5341     + if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
5342     goto out;
5343     if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
5344     req->tp_frame_nr))
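
The af_packet.c change above makes the overflow check guard the product that is actually computed, frames_per_block * tp_block_nr, instead of tp_block_size * tp_block_nr. A condensed sketch of the validation, with illustrative parameter values:

#include <stdio.h>
#include <limits.h>

static int ring_params_ok(unsigned int block_size, unsigned int block_nr,
                          unsigned int frame_size, unsigned int frame_nr)
{
        unsigned int frames_per_block;

        if (!block_nr || !frame_size)
                return 0;
        frames_per_block = block_size / frame_size;
        if (!frames_per_block)
                return 0;
        if (frames_per_block > UINT_MAX / block_nr)   /* the product would overflow */
                return 0;
        return frames_per_block * block_nr == frame_nr;
}

int main(void)
{
        /* 64 blocks of 4 KiB holding 2 KiB frames -> 128 frames total */
        printf("valid ring:  %d\n", ring_params_ok(4096, 64, 2048, 128));
        /* a block_nr large enough to overflow the frame count is rejected */
        printf("overflowing: %d\n", ring_params_ok(4096, 1U << 31, 2048, 0));
        return 0;
}
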
5345     diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
5346     index 9ccc93f257db0..38bb882bb9587 100644
5347     --- a/net/sched/cls_tcindex.c
5348     +++ b/net/sched/cls_tcindex.c
5349     @@ -48,7 +48,7 @@ struct tcindex_data {
5350     u32 hash; /* hash table size; 0 if undefined */
5351     u32 alloc_hash; /* allocated size */
5352     u32 fall_through; /* 0: only classify if explicit match */
5353     - struct rcu_head rcu;
5354     + struct rcu_work rwork;
5355     };
5356    
5357     static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
5358     @@ -221,17 +221,11 @@ found:
5359     return 0;
5360     }
5361    
5362     -static int tcindex_destroy_element(struct tcf_proto *tp,
5363     - void *arg, struct tcf_walker *walker)
5364     -{
5365     - bool last;
5366     -
5367     - return tcindex_delete(tp, arg, &last, NULL);
5368     -}
5369     -
5370     -static void __tcindex_destroy(struct rcu_head *head)
5371     +static void tcindex_destroy_work(struct work_struct *work)
5372     {
5373     - struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
5374     + struct tcindex_data *p = container_of(to_rcu_work(work),
5375     + struct tcindex_data,
5376     + rwork);
5377    
5378     kfree(p->perfect);
5379     kfree(p->h);
5380     @@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
5381     return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
5382     }
5383    
5384     -static void __tcindex_partial_destroy(struct rcu_head *head)
5385     +static void tcindex_partial_destroy_work(struct work_struct *work)
5386     {
5387     - struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
5388     + struct tcindex_data *p = container_of(to_rcu_work(work),
5389     + struct tcindex_data,
5390     + rwork);
5391    
5392     kfree(p->perfect);
5393     kfree(p);
5394     @@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
5395     kfree(cp->perfect);
5396     }
5397    
5398     -static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
5399     +static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
5400     {
5401     int i, err = 0;
5402    
5403     @@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
5404     TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
5405     if (err < 0)
5406     goto errout;
5407     +#ifdef CONFIG_NET_CLS_ACT
5408     + cp->perfect[i].exts.net = net;
5409     +#endif
5410     }
5411    
5412     return 0;
5413     @@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5414     struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
5415     {
5416     struct tcindex_filter_result new_filter_result, *old_r = r;
5417     - struct tcindex_filter_result cr;
5418     struct tcindex_data *cp = NULL, *oldp;
5419     struct tcindex_filter *f = NULL; /* make gcc behave */
5420     + struct tcf_result cr = {};
5421     int err, balloc = 0;
5422     struct tcf_exts e;
5423    
5424     @@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5425     if (p->perfect) {
5426     int i;
5427    
5428     - if (tcindex_alloc_perfect_hash(cp) < 0)
5429     + if (tcindex_alloc_perfect_hash(net, cp) < 0)
5430     goto errout;
5431     for (i = 0; i < cp->hash; i++)
5432     cp->perfect[i].res = p->perfect[i].res;
5433     @@ -346,13 +345,10 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5434     cp->h = p->h;
5435    
5436     err = tcindex_filter_result_init(&new_filter_result);
5437     - if (err < 0)
5438     - goto errout1;
5439     - err = tcindex_filter_result_init(&cr);
5440     if (err < 0)
5441     goto errout1;
5442     if (old_r)
5443     - cr.res = r->res;
5444     + cr = r->res;
5445    
5446     if (tb[TCA_TCINDEX_HASH])
5447     cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
5448     @@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5449     err = -ENOMEM;
5450     if (!cp->perfect && !cp->h) {
5451     if (valid_perfect_hash(cp)) {
5452     - if (tcindex_alloc_perfect_hash(cp) < 0)
5453     + if (tcindex_alloc_perfect_hash(net, cp) < 0)
5454     goto errout_alloc;
5455     balloc = 1;
5456     } else {
5457     @@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5458     }
5459    
5460     if (tb[TCA_TCINDEX_CLASSID]) {
5461     - cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
5462     - tcf_bind_filter(tp, &cr.res, base);
5463     + cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
5464     + tcf_bind_filter(tp, &cr, base);
5465     }
5466    
5467     if (old_r && old_r != r) {
5468     @@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5469     }
5470    
5471     oldp = p;
5472     - r->res = cr.res;
5473     + r->res = cr;
5474     tcf_exts_change(&r->exts, &e);
5475    
5476     rcu_assign_pointer(tp->root, cp);
5477     @@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5478     ; /* nothing */
5479    
5480     rcu_assign_pointer(*fp, f);
5481     + } else {
5482     + tcf_exts_destroy(&new_filter_result.exts);
5483     }
5484    
5485     if (oldp)
5486     - call_rcu(&oldp->rcu, __tcindex_partial_destroy);
5487     + tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
5488     return 0;
5489    
5490     errout_alloc:
5491     @@ -487,7 +485,6 @@ errout_alloc:
5492     else if (balloc == 2)
5493     kfree(cp->h);
5494     errout1:
5495     - tcf_exts_destroy(&cr.exts);
5496     tcf_exts_destroy(&new_filter_result.exts);
5497     errout:
5498     kfree(cp);
5499     @@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp,
5500     struct netlink_ext_ack *extack)
5501     {
5502     struct tcindex_data *p = rtnl_dereference(tp->root);
5503     - struct tcf_walker walker;
5504     + int i;
5505    
5506     pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
5507     - walker.count = 0;
5508     - walker.skip = 0;
5509     - walker.fn = tcindex_destroy_element;
5510     - tcindex_walk(tp, &walker);
5511    
5512     - call_rcu(&p->rcu, __tcindex_destroy);
5513     + if (p->perfect) {
5514     + for (i = 0; i < p->hash; i++) {
5515     + struct tcindex_filter_result *r = p->perfect + i;
5516     +
5517     + tcf_unbind_filter(tp, &r->res);
5518     + if (tcf_exts_get_net(&r->exts))
5519     + tcf_queue_work(&r->rwork,
5520     + tcindex_destroy_rexts_work);
5521     + else
5522     + __tcindex_destroy_rexts(r);
5523     + }
5524     + }
5525     +
5526     + for (i = 0; p->h && i < p->hash; i++) {
5527     + struct tcindex_filter *f, *next;
5528     + bool last;
5529     +
5530     + for (f = rtnl_dereference(p->h[i]); f; f = next) {
5531     + next = rtnl_dereference(f->next);
5532     + tcindex_delete(tp, &f->result, &last, NULL);
5533     + }
5534     + }
5535     +
5536     + tcf_queue_work(&p->rwork, tcindex_destroy_work);
5537     }
5538    
5539    
5540     diff --git a/net/sctp/diag.c b/net/sctp/diag.c
5541     index 078f01a8d582a..435847d98b51c 100644
5542     --- a/net/sctp/diag.c
5543     +++ b/net/sctp/diag.c
5544     @@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
5545     + nla_total_size(1) /* INET_DIAG_TOS */
5546     + nla_total_size(1) /* INET_DIAG_TCLASS */
5547     + nla_total_size(4) /* INET_DIAG_MARK */
5548     + + nla_total_size(4) /* INET_DIAG_CLASS_ID */
5549     + nla_total_size(addrlen * asoc->peer.transport_count)
5550     + nla_total_size(addrlen * addrcnt)
5551     + nla_total_size(sizeof(struct inet_diag_meminfo))
5552     diff --git a/net/sctp/offload.c b/net/sctp/offload.c
5553     index 123e9f2dc2265..edfcf16e704c4 100644
5554     --- a/net/sctp/offload.c
5555     +++ b/net/sctp/offload.c
5556     @@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
5557     {
5558     skb->ip_summed = CHECKSUM_NONE;
5559     skb->csum_not_inet = 0;
5560     + gso_reset_checksum(skb, ~0);
5561     return sctp_compute_cksum(skb, skb_transport_offset(skb));
5562     }
5563    
5564     diff --git a/net/sctp/stream.c b/net/sctp/stream.c
5565     index f24633114dfdf..2936ed17bf9ef 100644
5566     --- a/net/sctp/stream.c
5567     +++ b/net/sctp/stream.c
5568     @@ -144,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
5569     }
5570     }
5571    
5572     - for (i = outcnt; i < stream->outcnt; i++)
5573     + for (i = outcnt; i < stream->outcnt; i++) {
5574     kfree(SCTP_SO(stream, i)->ext);
5575     + SCTP_SO(stream, i)->ext = NULL;
5576     + }
5577     }
5578    
5579     static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
5580     diff --git a/net/socket.c b/net/socket.c
5581     index 390a8ecef4bf4..5c820212ba815 100644
5582     --- a/net/socket.c
5583     +++ b/net/socket.c
5584     @@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
5585     EXPORT_SYMBOL(dlci_ioctl_set);
5586    
5587     static long sock_do_ioctl(struct net *net, struct socket *sock,
5588     - unsigned int cmd, unsigned long arg,
5589     - unsigned int ifreq_size)
5590     + unsigned int cmd, unsigned long arg)
5591     {
5592     int err;
5593     void __user *argp = (void __user *)arg;
5594     @@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
5595     } else {
5596     struct ifreq ifr;
5597     bool need_copyout;
5598     - if (copy_from_user(&ifr, argp, ifreq_size))
5599     + if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
5600     return -EFAULT;
5601     err = dev_ioctl(net, cmd, &ifr, &need_copyout);
5602     if (!err && need_copyout)
5603     - if (copy_to_user(argp, &ifr, ifreq_size))
5604     + if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
5605     return -EFAULT;
5606     }
5607     return err;
5608     @@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
5609     err = open_related_ns(&net->ns, get_net_ns);
5610     break;
5611     default:
5612     - err = sock_do_ioctl(net, sock, cmd, arg,
5613     - sizeof(struct ifreq));
5614     + err = sock_do_ioctl(net, sock, cmd, arg);
5615     break;
5616     }
5617     return err;
5618     @@ -2752,8 +2750,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
5619     int err;
5620    
5621     set_fs(KERNEL_DS);
5622     - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
5623     - sizeof(struct compat_ifreq));
5624     + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
5625     set_fs(old_fs);
5626     if (!err)
5627     err = compat_put_timeval(&ktv, up);
5628     @@ -2769,8 +2766,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
5629     int err;
5630    
5631     set_fs(KERNEL_DS);
5632     - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
5633     - sizeof(struct compat_ifreq));
5634     + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
5635     set_fs(old_fs);
5636     if (!err)
5637     err = compat_put_timespec(&kts, up);
5638     @@ -2966,6 +2962,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
5639     return dev_ioctl(net, cmd, &ifreq, NULL);
5640     }
5641    
5642     +static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
5643     + unsigned int cmd,
5644     + struct compat_ifreq __user *uifr32)
5645     +{
5646     + struct ifreq __user *uifr;
5647     + int err;
5648     +
5649     + /* Handle the fact that while struct ifreq has the same *layout* on
5650     + * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
5651     + * which are handled elsewhere, it still has a different *size* due to
5652     + * ifreq::ifru_ifmap (which is 16 bytes on 32-bit, 24 bytes on 64-bit,
5653     + * resulting in struct ifreq being 32 and 40 bytes respectively).
5654     + * As a result, if the struct happens to be at the end of a page and
5655     + * the next page isn't readable/writable, we get a fault. To prevent
5656     + * that, copy back and forth to the full size.
5657     + */
5658     +
5659     + uifr = compat_alloc_user_space(sizeof(*uifr));
5660     + if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
5661     + return -EFAULT;
5662     +
5663     + err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
5664     +
5665     + if (!err) {
5666     + switch (cmd) {
5667     + case SIOCGIFFLAGS:
5668     + case SIOCGIFMETRIC:
5669     + case SIOCGIFMTU:
5670     + case SIOCGIFMEM:
5671     + case SIOCGIFHWADDR:
5672     + case SIOCGIFINDEX:
5673     + case SIOCGIFADDR:
5674     + case SIOCGIFBRDADDR:
5675     + case SIOCGIFDSTADDR:
5676     + case SIOCGIFNETMASK:
5677     + case SIOCGIFPFLAGS:
5678     + case SIOCGIFTXQLEN:
5679     + case SIOCGMIIPHY:
5680     + case SIOCGMIIREG:
5681     + case SIOCGIFNAME:
5682     + if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
5683     + err = -EFAULT;
5684     + break;
5685     + }
5686     + }
5687     + return err;
5688     +}
5689     +
5690     static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
5691     struct compat_ifreq __user *uifr32)
5692     {
5693     @@ -3081,8 +3125,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
5694     }
5695    
5696     set_fs(KERNEL_DS);
5697     - ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
5698     - sizeof(struct compat_ifreq));
5699     + ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
5700     set_fs(old_fs);
5701    
5702     out:
5703     @@ -3182,21 +3225,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
5704     case SIOCSIFTXQLEN:
5705     case SIOCBRADDIF:
5706     case SIOCBRDELIF:
5707     + case SIOCGIFNAME:
5708     case SIOCSIFNAME:
5709     case SIOCGMIIPHY:
5710     case SIOCGMIIREG:
5711     case SIOCSMIIREG:
5712     - case SIOCSARP:
5713     - case SIOCGARP:
5714     - case SIOCDARP:
5715     - case SIOCATMARK:
5716     case SIOCBONDENSLAVE:
5717     case SIOCBONDRELEASE:
5718     case SIOCBONDSETHWADDR:
5719     case SIOCBONDCHANGEACTIVE:
5720     - case SIOCGIFNAME:
5721     - return sock_do_ioctl(net, sock, cmd, arg,
5722     - sizeof(struct compat_ifreq));
5723     + return compat_ifreq_ioctl(net, sock, cmd, argp);
5724     +
5725     + case SIOCSARP:
5726     + case SIOCGARP:
5727     + case SIOCDARP:
5728     + case SIOCATMARK:
5729     + return sock_do_ioctl(net, sock, cmd, arg);
5730     }
5731    
5732     return -ENOIOCTLCMD;
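
The compat_ifreq_ioctl() helper added above copies a full-sized struct ifreq back and forth precisely because the 32-bit and 64-bit layouts differ only in total size (32 vs. 40 bytes, per the comment in the hunk). The sketch below is not part of the patch; it is a minimal userspace C probe, assuming ordinary glibc headers and a loopback interface named "lo", that issues one of the requests now routed through the new helper and prints the native sizeof(struct ifreq), so the 32-/40-byte difference can be observed by building the same file for 32-bit and for 64-bit.

/* Hedged illustration only: exercises SIOCGIFFLAGS, one of the requests the
 * hunk above handles via compat_ifreq_ioctl() when issued by a compat task.
 */
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "lo", IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0) {
                perror("SIOCGIFFLAGS");
                close(fd);
                return 1;
        }

        /* 32 bytes on a 32-bit build, 40 bytes on a 64-bit build */
        printf("lo flags: 0x%x, sizeof(struct ifreq) = %zu\n",
               ifr.ifr_flags, sizeof(struct ifreq));
        close(fd);
        return 0;
}
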
5733     diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
5734     index 956a5ea47b58e..3d6bf790cf1fb 100644
5735     --- a/net/sunrpc/xprtrdma/verbs.c
5736     +++ b/net/sunrpc/xprtrdma/verbs.c
5737     @@ -872,7 +872,7 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
5738     for (i = 0; i <= buf->rb_sc_last; i++) {
5739     sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
5740     if (!sc)
5741     - goto out_destroy;
5742     + return -ENOMEM;
5743    
5744     sc->sc_xprt = r_xprt;
5745     buf->rb_sc_ctxs[i] = sc;
5746     @@ -880,10 +880,6 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
5747     buf->rb_flags = 0;
5748    
5749     return 0;
5750     -
5751     -out_destroy:
5752     - rpcrdma_sendctxs_destroy(buf);
5753     - return -ENOMEM;
5754     }
5755    
5756     /* The sendctx queue is not guaranteed to have a size that is a
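
The verbs.c change above drops the local out_destroy unwind and simply returns -ENOMEM when a send context fails to allocate, leaving cleanup of the partially filled rb_sc_ctxs array to the caller's existing teardown. A reduced userspace sketch of that shape follows; every name in it (ctx_table_init/ctx_table_destroy) is invented for illustration and is not kernel API. The point is that one destroy routine which tolerates NULL slots makes a mid-loop failure safe to report with a bare return.

/* Illustrative only: constructor fails part-way, caller runs the destroy. */
#include <stdlib.h>

struct ctx_table {
        void **slots;
        unsigned int count;
};

static int ctx_table_init(struct ctx_table *t, unsigned int n)
{
        t->slots = calloc(n, sizeof(*t->slots));
        if (!t->slots)
                return -1;
        t->count = n;

        for (unsigned int i = 0; i < n; i++) {
                t->slots[i] = malloc(64);
                if (!t->slots[i])
                        return -1;      /* no local unwind; caller cleans up */
        }
        return 0;
}

static void ctx_table_destroy(struct ctx_table *t)
{
        for (unsigned int i = 0; i < t->count; i++)
                free(t->slots[i]);      /* free(NULL) is a no-op */
        free(t->slots);
        t->slots = NULL;
        t->count = 0;
}

int main(void)
{
        struct ctx_table t = { 0 };
        int err = ctx_table_init(&t, 8);

        ctx_table_destroy(&t);          /* safe after full or partial init */
        return err ? 1 : 0;
}
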
5757     diff --git a/security/keys/key.c b/security/keys/key.c
5758     index d97c9394b5dd4..249a6da4d2770 100644
5759     --- a/security/keys/key.c
5760     +++ b/security/keys/key.c
5761     @@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
5762    
5763     spin_lock(&user->lock);
5764     if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
5765     - if (user->qnkeys + 1 >= maxkeys ||
5766     - user->qnbytes + quotalen >= maxbytes ||
5767     + if (user->qnkeys + 1 > maxkeys ||
5768     + user->qnbytes + quotalen > maxbytes ||
5769     user->qnbytes + quotalen < user->qnbytes)
5770     goto no_quota;
5771     }
5772     diff --git a/security/keys/keyring.c b/security/keys/keyring.c
5773     index 41bcf57e96f21..99a55145ddcd2 100644
5774     --- a/security/keys/keyring.c
5775     +++ b/security/keys/keyring.c
5776     @@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring,
5777     BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
5778     (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
5779    
5780     - if (ctx->index_key.description)
5781     - ctx->index_key.desc_len = strlen(ctx->index_key.description);
5782     -
5783     /* Check to see if this top-level keyring is what we are looking for
5784     * and whether it is valid or not.
5785     */
5786     @@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring,
5787     struct keyring_search_context ctx = {
5788     .index_key.type = type,
5789     .index_key.description = description,
5790     + .index_key.desc_len = strlen(description),
5791     .cred = current_cred(),
5792     .match_data.cmp = key_default_cmp,
5793     .match_data.raw_data = description,
5794     diff --git a/security/keys/proc.c b/security/keys/proc.c
5795     index 5af2934965d80..d38be9db2cc07 100644
5796     --- a/security/keys/proc.c
5797     +++ b/security/keys/proc.c
5798     @@ -166,8 +166,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
5799     int rc;
5800    
5801     struct keyring_search_context ctx = {
5802     - .index_key.type = key->type,
5803     - .index_key.description = key->description,
5804     + .index_key = key->index_key,
5805     .cred = m->file->f_cred,
5806     .match_data.cmp = lookup_user_key_possessed,
5807     .match_data.raw_data = key,
5808     diff --git a/security/keys/request_key.c b/security/keys/request_key.c
5809     index 114f7408feee6..7385536986497 100644
5810     --- a/security/keys/request_key.c
5811     +++ b/security/keys/request_key.c
5812     @@ -545,6 +545,7 @@ struct key *request_key_and_link(struct key_type *type,
5813     struct keyring_search_context ctx = {
5814     .index_key.type = type,
5815     .index_key.description = description,
5816     + .index_key.desc_len = strlen(description),
5817     .cred = current_cred(),
5818     .match_data.cmp = key_default_cmp,
5819     .match_data.raw_data = description,
5820     diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
5821     index 424e1d90412ea..6797843154f03 100644
5822     --- a/security/keys/request_key_auth.c
5823     +++ b/security/keys/request_key_auth.c
5824     @@ -246,7 +246,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
5825     struct key *authkey;
5826     key_ref_t authkey_ref;
5827    
5828     - sprintf(description, "%x", target_id);
5829     + ctx.index_key.desc_len = sprintf(description, "%x", target_id);
5830    
5831     authkey_ref = search_process_keyrings(&ctx);
5832    
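
The security/keys hunks above all enforce one invariant: whenever a description string is placed into a keyring_search_context's index_key, its desc_len is filled in at the same moment (strlen() for literal descriptions, the sprintf() return value for the generated one, or the whole index_key copied in proc.c), rather than being computed later inside search_nested_keyrings(). A purely illustrative userspace reduction of that pattern follows; the struct and helper here are made up and are not the kernel's types.

/* Keep the cached length in lockstep with the string it describes. */
#include <stdio.h>
#include <string.h>

struct index_key {
        const char *description;
        unsigned short desc_len;
};

static void index_key_set(struct index_key *key, const char *desc)
{
        key->description = desc;
        key->desc_len = (unsigned short)strlen(desc);   /* set together */
}

int main(void)
{
        struct index_key key;

        index_key_set(&key, "user;example");
        printf("desc=%s len=%u\n", key.description, (unsigned)key.desc_len);
        return 0;
}
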
5833     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5834     index 9199d91d0a594..bf1ffcaab23fe 100644
5835     --- a/sound/pci/hda/patch_realtek.c
5836     +++ b/sound/pci/hda/patch_realtek.c
5837     @@ -1855,6 +1855,8 @@ enum {
5838     ALC887_FIXUP_BASS_CHMAP,
5839     ALC1220_FIXUP_GB_DUAL_CODECS,
5840     ALC1220_FIXUP_CLEVO_P950,
5841     + ALC1220_FIXUP_SYSTEM76_ORYP5,
5842     + ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
5843     };
5844    
5845     static void alc889_fixup_coef(struct hda_codec *codec,
5846     @@ -2056,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
5847     snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
5848     }
5849    
5850     +static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
5851     + const struct hda_fixup *fix, int action);
5852     +
5853     +static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
5854     + const struct hda_fixup *fix,
5855     + int action)
5856     +{
5857     + alc1220_fixup_clevo_p950(codec, fix, action);
5858     + alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
5859     +}
5860     +
5861     static const struct hda_fixup alc882_fixups[] = {
5862     [ALC882_FIXUP_ABIT_AW9D_MAX] = {
5863     .type = HDA_FIXUP_PINS,
5864     @@ -2300,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = {
5865     .type = HDA_FIXUP_FUNC,
5866     .v.func = alc1220_fixup_clevo_p950,
5867     },
5868     + [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
5869     + .type = HDA_FIXUP_FUNC,
5870     + .v.func = alc1220_fixup_system76_oryp5,
5871     + },
5872     + [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
5873     + .type = HDA_FIXUP_PINS,
5874     + .v.pins = (const struct hda_pintbl[]) {
5875     + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
5876     + {}
5877     + },
5878     + .chained = true,
5879     + .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
5880     + },
5881     };
5882    
5883     static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5884     @@ -2376,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5885     SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
5886     SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
5887     SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
5888     + SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
5889     + SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
5890     SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
5891     SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
5892     SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
5893     @@ -5573,6 +5601,7 @@ enum {
5894     ALC294_FIXUP_ASUS_HEADSET_MIC,
5895     ALC294_FIXUP_ASUS_SPK,
5896     ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
5897     + ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
5898     };
5899    
5900     static const struct hda_fixup alc269_fixups[] = {
5901     @@ -6506,6 +6535,17 @@ static const struct hda_fixup alc269_fixups[] = {
5902     .chained = true,
5903     .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
5904     },
5905     + [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
5906     + .type = HDA_FIXUP_VERBS,
5907     + .v.verbs = (const struct hda_verb[]) {
5908     + /* Disable PCBEEP-IN passthrough */
5909     + { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
5910     + { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
5911     + { }
5912     + },
5913     + .chained = true,
5914     + .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
5915     + },
5916     };
5917    
5918     static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5919     @@ -7187,7 +7227,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5920     {0x12, 0x90a60130},
5921     {0x19, 0x03a11020},
5922     {0x21, 0x0321101f}),
5923     - SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
5924     + SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
5925     {0x12, 0x90a60130},
5926     {0x14, 0x90170110},
5927     {0x19, 0x04a11040},
5928     diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
5929     index d029cad08cbd8..89f8b0dae7ef0 100644
5930     --- a/tools/testing/selftests/bpf/test_progs.c
5931     +++ b/tools/testing/selftests/bpf/test_progs.c
5932     @@ -1136,7 +1136,9 @@ static void test_stacktrace_build_id(void)
5933     int i, j;
5934     struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
5935     int build_id_matches = 0;
5936     + int retry = 1;
5937    
5938     +retry:
5939     err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
5940     if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
5941     goto out;
5942     @@ -1249,6 +1251,19 @@ static void test_stacktrace_build_id(void)
5943     previous_key = key;
5944     } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
5945    
5946     + /* stack_map_get_build_id_offset() is racy and sometimes can return
5947     + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
5948     + * try it one more time.
5949     + */
5950     + if (build_id_matches < 1 && retry--) {
5951     + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
5952     + close(pmu_fd);
5953     + bpf_object__close(obj);
5954     + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
5955     + __func__);
5956     + goto retry;
5957     + }
5958     +
5959     if (CHECK(build_id_matches < 1, "build id match",
5960     "Didn't find expected build ID from the map\n"))
5961     goto disable_pmu;
5962     @@ -1289,7 +1304,9 @@ static void test_stacktrace_build_id_nmi(void)
5963     int i, j;
5964     struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
5965     int build_id_matches = 0;
5966     + int retry = 1;
5967    
5968     +retry:
5969     err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
5970     if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
5971     return;
5972     @@ -1384,6 +1401,19 @@ static void test_stacktrace_build_id_nmi(void)
5973     previous_key = key;
5974     } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
5975    
5976     + /* stack_map_get_build_id_offset() is racy and sometimes can return
5977     + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
5978     + * try it one more time.
5979     + */
5980     + if (build_id_matches < 1 && retry--) {
5981     + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
5982     + close(pmu_fd);
5983     + bpf_object__close(obj);
5984     + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
5985     + __func__);
5986     + goto retry;
5987     + }
5988     +
5989     if (CHECK(build_id_matches < 1, "build id match",
5990     "Didn't find expected build ID from the map\n"))
5991     goto disable_pmu;
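
Both test_progs.c hunks follow the same one-shot retry shape: a counter initialised to 1, a retry: label before the setup, and a guarded goto that first disables the perf event and closes the BPF object before re-running the racy build-ID check. A standalone sketch of that control flow is below; do_flaky_check() is a made-up stand-in for the racy lookup and is not part of the selftest.

/* One-shot retry: warn and re-run once, then fail for real. */
#include <stdio.h>

static int attempts;

static int do_flaky_check(void)
{
        /* pretend the first attempt loses the race and the second wins */
        return ++attempts > 1;
}

int main(void)
{
        int retry = 1;

retry:
        if (!do_flaky_check()) {
                if (retry--) {
                        fprintf(stderr, "WARN: check failed, retrying\n");
                        goto retry;
                }
                fprintf(stderr, "FAIL: check failed after retry\n");
                return 1;
        }
        printf("PASS after %d attempt(s)\n", attempts);
        return 0;
}
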
5992     diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
5993     index aeeb76a54d633..e38f1cb7089d3 100644
5994     --- a/tools/testing/selftests/bpf/test_sock_addr.c
5995     +++ b/tools/testing/selftests/bpf/test_sock_addr.c
5996     @@ -44,6 +44,7 @@
5997     #define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
5998     #define SRC6_IP "::1"
5999     #define SRC6_REWRITE_IP "::6"
6000     +#define WILDCARD6_IP "::"
6001     #define SERV6_PORT 6060
6002     #define SERV6_REWRITE_PORT 6666
6003    
6004     @@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
6005     static int bind6_prog_load(const struct sock_addr_test *test);
6006     static int connect4_prog_load(const struct sock_addr_test *test);
6007     static int connect6_prog_load(const struct sock_addr_test *test);
6008     +static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
6009     static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
6010     static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
6011     static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
6012     static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
6013     static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
6014     static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
6015     +static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
6016    
6017     static struct sock_addr_test tests[] = {
6018     /* bind */
6019     @@ -462,6 +465,34 @@ static struct sock_addr_test tests[] = {
6020     SRC6_REWRITE_IP,
6021     SYSCALL_ENOTSUPP,
6022     },
6023     + {
6024     + "sendmsg6: set dst IP = [::] (BSD'ism)",
6025     + sendmsg6_rw_wildcard_prog_load,
6026     + BPF_CGROUP_UDP6_SENDMSG,
6027     + BPF_CGROUP_UDP6_SENDMSG,
6028     + AF_INET6,
6029     + SOCK_DGRAM,
6030     + SERV6_IP,
6031     + SERV6_PORT,
6032     + SERV6_REWRITE_IP,
6033     + SERV6_REWRITE_PORT,
6034     + SRC6_REWRITE_IP,
6035     + SUCCESS,
6036     + },
6037     + {
6038     + "sendmsg6: preserve dst IP = [::] (BSD'ism)",
6039     + sendmsg_allow_prog_load,
6040     + BPF_CGROUP_UDP6_SENDMSG,
6041     + BPF_CGROUP_UDP6_SENDMSG,
6042     + AF_INET6,
6043     + SOCK_DGRAM,
6044     + WILDCARD6_IP,
6045     + SERV6_PORT,
6046     + SERV6_REWRITE_IP,
6047     + SERV6_PORT,
6048     + SRC6_IP,
6049     + SUCCESS,
6050     + },
6051     {
6052     "sendmsg6: deny call",
6053     sendmsg_deny_prog_load,
6054     @@ -714,16 +745,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
6055     return load_path(test, CONNECT6_PROG_PATH);
6056     }
6057    
6058     -static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
6059     +static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
6060     + int32_t rc)
6061     {
6062     struct bpf_insn insns[] = {
6063     - /* return 0 */
6064     - BPF_MOV64_IMM(BPF_REG_0, 0),
6065     + /* return rc */
6066     + BPF_MOV64_IMM(BPF_REG_0, rc),
6067     BPF_EXIT_INSN(),
6068     };
6069     return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
6070     }
6071    
6072     +static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
6073     +{
6074     + return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
6075     +}
6076     +
6077     +static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
6078     +{
6079     + return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
6080     +}
6081     +
6082     static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
6083     {
6084     struct sockaddr_in dst4_rw_addr;
6085     @@ -844,6 +886,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
6086     return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
6087     }
6088    
6089     +static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
6090     +{
6091     + return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
6092     +}
6093     +
6094     static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
6095     {
6096     return load_path(test, SENDMSG6_PROG_PATH);
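
The two new sendmsg6 cases exercise the "BSD'ism" they name: a UDP datagram whose destination ends up as the unspecified address [::] (either rewritten to it by the BPF program, or sent to it directly under an allow-only program) is still accepted by the kernel and handled as a send to the local host. The probe below is not part of the selftest; it is a hedged userspace check using only the ordinary sockets API (port 6060 mirrors SERV6_PORT, and nothing has to be listening for the sendto() call itself to succeed).

/* Illustrative only: send one datagram to [::] and report the result. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in6 dst;
        int fd = socket(AF_INET6, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&dst, 0, sizeof(dst));
        dst.sin6_family = AF_INET6;
        dst.sin6_port = htons(6060);
        inet_pton(AF_INET6, "::", &dst.sin6_addr);      /* wildcard destination */

        if (sendto(fd, "ping", 4, 0, (struct sockaddr *)&dst, sizeof(dst)) < 0)
                perror("sendto [::]");
        else
                printf("sendto [::] accepted\n");

        close(fd);
        return 0;
}
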
6097     diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
6098     index d8313d0438b74..b90dff8d3a94b 100755
6099     --- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
6100     +++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
6101     @@ -1,7 +1,7 @@
6102     #!/bin/bash
6103     # SPDX-License-Identifier: GPL-2.0
6104    
6105     -ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
6106     +ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
6107     NUM_NETIFS=4
6108     CHECK_TC="yes"
6109     source lib.sh
6110     @@ -96,6 +96,51 @@ flooding()
6111     flood_test $swp2 $h1 $h2
6112     }
6113    
6114     +vlan_deletion()
6115     +{
6116     + # Test that the deletion of a VLAN on a bridge port does not affect
6117     + # the PVID VLAN
6118     + log_info "Add and delete a VLAN on bridge port $swp1"
6119     +
6120     + bridge vlan add vid 10 dev $swp1
6121     + bridge vlan del vid 10 dev $swp1
6122     +
6123     + ping_ipv4
6124     + ping_ipv6
6125     +}
6126     +
6127     +extern_learn()
6128     +{
6129     + local mac=de:ad:be:ef:13:37
6130     + local ageing_time
6131     +
6132     + # Test that externally learned FDB entries can roam, but not age out
6133     + RET=0
6134     +
6135     + bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
6136     +
6137     + bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
6138     + check_err $? "Did not find FDB entry when should"
6139     +
6140     + # Wait for 10 seconds after the ageing time to make sure the FDB entry
6141     + # was not aged out
6142     + ageing_time=$(bridge_ageing_time_get br0)
6143     + sleep $((ageing_time + 10))
6144     +
6145     + bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
6146     + check_err $? "FDB entry was aged out when should not"
6147     +
6148     + $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
6149     +
6150     + bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
6151     + check_err $? "FDB entry did not roam when should"
6152     +
6153     + log_test "Externally learned FDB entry - ageing & roaming"
6154     +
6155     + bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
6156     + bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
6157     +}
6158     +
6159     trap cleanup EXIT
6160    
6161     setup_prepare
6162     diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
6163     index 637ea0219617f..0da3545cabdb6 100644
6164     --- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
6165     +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
6166     @@ -17,7 +17,7 @@
6167     "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
6168     "expExitCode": "0",
6169     "verifyCmd": "$TC actions get action ife index 2",
6170     - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2",
6171     + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
6172     "matchCount": "1",
6173     "teardown": [
6174     "$TC actions flush action ife"
6175     @@ -41,7 +41,7 @@
6176     "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
6177     "expExitCode": "0",
6178     "verifyCmd": "$TC actions get action ife index 2",
6179     - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2",
6180     + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
6181     "matchCount": "1",
6182     "teardown": [
6183     "$TC actions flush action ife"
6184     @@ -65,7 +65,7 @@
6185     "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
6186     "expExitCode": "0",
6187     "verifyCmd": "$TC actions get action ife index 2",
6188     - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2",
6189     + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
6190     "matchCount": "1",
6191     "teardown": [
6192     "$TC actions flush action ife"
6193     @@ -89,7 +89,7 @@
6194     "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
6195     "expExitCode": "0",
6196     "verifyCmd": "$TC actions get action ife index 2",
6197     - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2",
6198     + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
6199     "matchCount": "1",
6200     "teardown": [
6201     "$TC actions flush action ife"
6202     @@ -113,7 +113,7 @@
6203     "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
6204     "expExitCode": "0",
6205     "verifyCmd": "$TC actions get action ife index 2",
6206     - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2",
6207     + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
6208     "matchCount": "1",
6209     "teardown": [
6210     "$TC actions flush action ife"
6211     @@ -137,7 +137,7 @@
6212     "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
6213     "expExitCode": "0",
6214     "verifyCmd": "$TC actions get action ife index 2",
6215     - "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2",
6216     + "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
6217     "matchCount": "1",
6218     "teardown": [
6219     "$TC actions flush action ife"
6220     @@ -161,7 +161,7 @@
6221     "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
6222     "expExitCode": "0",
6223     "verifyCmd": "$TC actions get action ife index 90",
6224     - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90",
6225     + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
6226     "matchCount": "1",
6227     "teardown": [
6228     "$TC actions flush action ife"
6229     @@ -185,7 +185,7 @@
6230     "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
6231     "expExitCode": "255",
6232     "verifyCmd": "$TC actions get action ife index 90",
6233     - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90",
6234     + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
6235     "matchCount": "0",
6236     "teardown": []
6237     },
6238     @@ -207,7 +207,7 @@
6239     "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
6240     "expExitCode": "0",
6241     "verifyCmd": "$TC actions get action ife index 9",
6242     - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9",
6243     + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
6244     "matchCount": "1",
6245     "teardown": [
6246     "$TC actions flush action ife"
6247     @@ -231,7 +231,7 @@
6248     "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
6249     "expExitCode": "0",
6250     "verifyCmd": "$TC actions get action ife index 9",
6251     - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9",
6252     + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
6253     "matchCount": "1",
6254     "teardown": [
6255     "$TC actions flush action ife"
6256     @@ -255,7 +255,7 @@
6257     "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
6258     "expExitCode": "0",
6259     "verifyCmd": "$TC actions get action ife index 9",
6260     - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9",
6261     + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
6262     "matchCount": "1",
6263     "teardown": [
6264     "$TC actions flush action ife"
6265     @@ -279,7 +279,7 @@
6266     "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
6267     "expExitCode": "0",
6268     "verifyCmd": "$TC actions get action ife index 9",
6269     - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9",
6270     + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
6271     "matchCount": "1",
6272     "teardown": [
6273     "$TC actions flush action ife"
6274     @@ -303,7 +303,7 @@
6275     "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
6276     "expExitCode": "0",
6277     "verifyCmd": "$TC actions get action ife index 9",
6278     - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9",
6279     + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
6280     "matchCount": "1",
6281     "teardown": [
6282     "$TC actions flush action ife"
6283     @@ -327,7 +327,7 @@
6284     "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
6285     "expExitCode": "0",
6286     "verifyCmd": "$TC actions get action ife index 9",
6287     - "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9",
6288     + "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
6289     "matchCount": "1",
6290     "teardown": [
6291     "$TC actions flush action ife"
6292     @@ -351,7 +351,7 @@
6293     "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
6294     "expExitCode": "0",
6295     "verifyCmd": "$TC actions get action ife index 99",
6296     - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99",
6297     + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
6298     "matchCount": "1",
6299     "teardown": [
6300     "$TC actions flush action ife"
6301     @@ -375,7 +375,7 @@
6302     "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
6303     "expExitCode": "255",
6304     "verifyCmd": "$TC actions get action ife index 99",
6305     - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99",
6306     + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
6307     "matchCount": "0",
6308     "teardown": []
6309     },
6310     @@ -397,7 +397,7 @@
6311     "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
6312     "expExitCode": "0",
6313     "verifyCmd": "$TC actions get action ife index 1",
6314     - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1",
6315     + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
6316     "matchCount": "1",
6317     "teardown": [
6318     "$TC actions flush action ife"
6319     @@ -421,7 +421,7 @@
6320     "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
6321     "expExitCode": "0",
6322     "verifyCmd": "$TC actions get action ife index 1",
6323     - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1",
6324     + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
6325     "matchCount": "1",
6326     "teardown": [
6327     "$TC actions flush action ife"
6328     @@ -445,7 +445,7 @@
6329     "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
6330     "expExitCode": "0",
6331     "verifyCmd": "$TC actions get action ife index 1",
6332     - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
6333     + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
6334     "matchCount": "1",
6335     "teardown": [
6336     "$TC actions flush action ife"
6337     @@ -469,7 +469,7 @@
6338     "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
6339     "expExitCode": "0",
6340     "verifyCmd": "$TC actions get action ife index 1",
6341     - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
6342     + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
6343     "matchCount": "1",
6344     "teardown": [
6345     "$TC actions flush action ife"
6346     @@ -493,7 +493,7 @@
6347     "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
6348     "expExitCode": "0",
6349     "verifyCmd": "$TC actions get action ife index 77",
6350     - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77",
6351     + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
6352     "matchCount": "1",
6353     "teardown": [
6354     "$TC actions flush action ife"
6355     @@ -517,7 +517,7 @@
6356     "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
6357     "expExitCode": "0",
6358     "verifyCmd": "$TC actions get action ife index 77",
6359     - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77",
6360     + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
6361     "matchCount": "1",
6362     "teardown": [
6363     "$TC actions flush action ife"
6364     @@ -541,7 +541,7 @@
6365     "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
6366     "expExitCode": "0",
6367     "verifyCmd": "$TC actions get action ife index 77",
6368     - "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77",
6369     + "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
6370     "matchCount": "1",
6371     "teardown": [
6372     "$TC actions flush action ife"
6373     @@ -565,7 +565,7 @@
6374     "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
6375     "expExitCode": "0",
6376     "verifyCmd": "$TC actions get action ife index 1",
6377     - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
6378     + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
6379     "matchCount": "1",
6380     "teardown": [
6381     "$TC actions flush action ife"
6382     @@ -589,7 +589,7 @@
6383     "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
6384     "expExitCode": "255",
6385     "verifyCmd": "$TC actions get action ife index 1",
6386     - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1",
6387     + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
6388     "matchCount": "0",
6389     "teardown": []
6390     },
6391     @@ -611,7 +611,7 @@
6392     "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
6393     "expExitCode": "0",
6394     "verifyCmd": "$TC actions get action ife index 1",
6395     - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1",
6396     + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
6397     "matchCount": "1",
6398     "teardown": [
6399     "$TC actions flush action ife"
6400     @@ -635,7 +635,7 @@
6401     "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
6402     "expExitCode": "0",
6403     "verifyCmd": "$TC actions get action ife index 1",
6404     - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
6405     + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
6406     "matchCount": "1",
6407     "teardown": [
6408     "$TC actions flush action ife"
6409     @@ -659,7 +659,7 @@
6410     "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
6411     "expExitCode": "0",
6412     "verifyCmd": "$TC actions get action ife index 11",
6413     - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
6414     + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
6415     "matchCount": "1",
6416     "teardown": [
6417     "$TC actions flush action ife"
6418     @@ -683,7 +683,7 @@
6419     "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
6420     "expExitCode": "0",
6421     "verifyCmd": "$TC actions get action ife index 1",
6422     - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1",
6423     + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
6424     "matchCount": "1",
6425     "teardown": [
6426     "$TC actions flush action ife"
6427     @@ -707,7 +707,7 @@
6428     "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
6429     "expExitCode": "0",
6430     "verifyCmd": "$TC actions get action ife index 21",
6431     - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21",
6432     + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
6433     "matchCount": "1",
6434     "teardown": [
6435     "$TC actions flush action ife"
6436     @@ -731,7 +731,7 @@
6437     "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
6438     "expExitCode": "0",
6439     "verifyCmd": "$TC actions get action ife index 21",
6440     - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21",
6441     + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
6442     "matchCount": "1",
6443     "teardown": [
6444     "$TC actions flush action ife"
6445     @@ -739,7 +739,7 @@
6446     },
6447     {
6448     "id": "fac3",
6449     - "name": "Create valid ife encode action with index at 32-bit maximnum",
6450     + "name": "Create valid ife encode action with index at 32-bit maximum",
6451     "category": [
6452     "actions",
6453     "ife"
6454     @@ -755,7 +755,7 @@
6455     "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
6456     "expExitCode": "0",
6457     "verifyCmd": "$TC actions get action ife index 4294967295",
6458     - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295",
6459     + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
6460     "matchCount": "1",
6461     "teardown": [
6462     "$TC actions flush action ife"
6463     @@ -779,7 +779,7 @@
6464     "cmdUnderTest": "$TC actions add action ife decode pass index 1",
6465     "expExitCode": "0",
6466     "verifyCmd": "$TC actions get action ife index 1",
6467     - "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6468     + "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6469     "matchCount": "1",
6470     "teardown": [
6471     "$TC actions flush action ife"
6472     @@ -803,7 +803,7 @@
6473     "cmdUnderTest": "$TC actions add action ife decode pipe index 1",
6474     "expExitCode": "0",
6475     "verifyCmd": "$TC actions get action ife index 1",
6476     - "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6477     + "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6478     "matchCount": "1",
6479     "teardown": [
6480     "$TC actions flush action ife"
6481     @@ -827,7 +827,7 @@
6482     "cmdUnderTest": "$TC actions add action ife decode continue index 1",
6483     "expExitCode": "0",
6484     "verifyCmd": "$TC actions get action ife index 1",
6485     - "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6486     + "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6487     "matchCount": "1",
6488     "teardown": [
6489     "$TC actions flush action ife"
6490     @@ -851,7 +851,7 @@
6491     "cmdUnderTest": "$TC actions add action ife decode drop index 1",
6492     "expExitCode": "0",
6493     "verifyCmd": "$TC actions get action ife index 1",
6494     - "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6495     + "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6496     "matchCount": "1",
6497     "teardown": [
6498     "$TC actions flush action ife"
6499     @@ -875,7 +875,7 @@
6500     "cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
6501     "expExitCode": "0",
6502     "verifyCmd": "$TC actions get action ife index 1",
6503     - "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6504     + "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6505     "matchCount": "1",
6506     "teardown": [
6507     "$TC actions flush action ife"
6508     @@ -899,7 +899,7 @@
6509     "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
6510     "expExitCode": "0",
6511     "verifyCmd": "$TC actions get action ife index 1",
6512     - "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
6513     + "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
6514     "matchCount": "1",
6515     "teardown": [
6516     "$TC actions flush action ife"
6517     @@ -923,7 +923,7 @@
6518     "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
6519     "expExitCode": "255",
6520     "verifyCmd": "$TC actions get action ife index 4294967295999",
6521     - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999",
6522     + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
6523     "matchCount": "0",
6524     "teardown": []
6525     },
6526     @@ -945,7 +945,7 @@
6527     "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
6528     "expExitCode": "255",
6529     "verifyCmd": "$TC actions get action ife index 4",
6530     - "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4",
6531     + "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
6532     "matchCount": "0",
6533     "teardown": []
6534     },
6535     @@ -967,7 +967,7 @@
6536     "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
6537     "expExitCode": "0",
6538     "verifyCmd": "$TC actions get action ife index 4",
6539     - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
6540     + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
6541     "matchCount": "1",
6542     "teardown": [
6543     "$TC actions flush action ife"
6544     @@ -991,7 +991,7 @@
6545     "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
6546     "expExitCode": "255",
6547     "verifyCmd": "$TC actions get action ife index 4",
6548     - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4",
6549     + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
6550     "matchCount": "0",
6551     "teardown": []
6552     },
6553     @@ -1013,7 +1013,7 @@
6554     "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
6555     "expExitCode": "255",
6556     "verifyCmd": "$TC actions get action ife index 4",
6557     - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4",
6558     + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
6559     "matchCount": "0",
6560     "teardown": []
6561     },
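
Every matchPattern change in ife.json above is the same relaxation: the literal hex type (0xED3E, 0xFEFE, ...) becomes a character class such as 0[xX]ED3E, and the decode cases accept 0(x0)?, so the tests pass whether the tool prints the prefix in lower or upper case (or, for decode, omits the 0x0 entirely). The harness applies these patterns itself, but the effect of the character class can be checked with a plain POSIX ERE sketch like the one below (illustrative only, not part of the test suite).

/* Compare the old literal pattern with the relaxed one against both spellings. */
#include <regex.h>
#include <stdio.h>

static int matches(const char *pattern, const char *text)
{
        regex_t re;
        int ok;

        if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB))
                return 0;
        ok = regexec(&re, text, 0, NULL, 0) == 0;
        regfree(&re);
        return ok;
}

int main(void)
{
        const char *samples[] = { "type 0xED3E", "type 0XED3E" };

        for (int i = 0; i < 2; i++)
                printf("%-12s old:%d new:%d\n", samples[i],
                       matches("0xED3E", samples[i]),
                       matches("0[xX]ED3E", samples[i]));
        return 0;
}
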
6562     diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
6563     index 10b2d894e4362..e7e15a7336b6d 100644
6564     --- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
6565     +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
6566     @@ -81,35 +81,6 @@
6567     ]
6568     ]
6569     },
6570     - {
6571     - "id": "ba4e",
6572     - "name": "Add tunnel_key set action with missing mandatory id parameter",
6573     - "category": [
6574     - "actions",
6575     - "tunnel_key"
6576     - ],
6577     - "setup": [
6578     - [
6579     - "$TC actions flush action tunnel_key",
6580     - 0,
6581     - 1,
6582     - 255
6583     - ]
6584     - ],
6585     - "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
6586     - "expExitCode": "255",
6587     - "verifyCmd": "$TC actions list action tunnel_key",
6588     - "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
6589     - "matchCount": "0",
6590     - "teardown": [
6591     - [
6592     - "$TC actions flush action tunnel_key",
6593     - 0,
6594     - 1,
6595     - 255
6596     - ]
6597     - ]
6598     - },
6599     {
6600     "id": "a5e0",
6601     "name": "Add tunnel_key set action with invalid src_ip parameter",
6602     @@ -634,7 +605,7 @@
6603     "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
6604     "expExitCode": "0",
6605     "verifyCmd": "$TC actions get action tunnel_key index 4",
6606     - "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
6607     + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
6608     "matchCount": "1",
6609     "teardown": [
6610     "$TC actions flush action tunnel_key"