Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0260-4.9.161-all-fixes.patch



Revision 3313
Tue Mar 12 10:43:17 2019 UTC by niro
File size: 94946 byte(s)
-linux-4.9.161
1 niro 3313 diff --git a/Makefile b/Makefile
2     index af70503df3f46..239b74a7147b5 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 160
9     +SUBLEVEL = 161
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     @@ -306,11 +306,6 @@ HOSTCXX = g++
14     HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
15     HOSTCXXFLAGS = -O2
16    
17     -ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
18     -HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
19     - -Wno-missing-field-initializers -fno-delete-null-pointer-checks
20     -endif
21     -
22     # Decide whether to build built-in, modular, or both.
23     # Normally, just do built-in.
24    
25     @@ -511,36 +506,17 @@ endif
26    
27     ifeq ($(cc-name),clang)
28     ifneq ($(CROSS_COMPILE),)
29     -CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%))
30     +CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
31     GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
32     -CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR)
33     +CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
34     GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
35     endif
36     ifneq ($(GCC_TOOLCHAIN),)
37     -CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
38     +CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
39     endif
40     -KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
41     -KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
42     -KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
43     -KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
44     -KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
45     -KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
46     -KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
47     -# Quiet clang warning: comparison of unsigned expression < 0 is always false
48     -KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
49     -# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
50     -# source of a reference will be _MergedGlobals and not on of the whitelisted names.
51     -# See modpost pattern 2
52     -KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
53     -KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
54     -KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
55     -KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
56     -else
57     -
58     -# These warnings generated too much noise in a regular build.
59     -# Use make W=1 to enable them (see scripts/Makefile.build)
60     -KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
61     -KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
62     +CLANG_FLAGS += -no-integrated-as
63     +KBUILD_CFLAGS += $(CLANG_FLAGS)
64     +KBUILD_AFLAGS += $(CLANG_FLAGS)
65     endif
66    
67    
68     @@ -739,6 +715,26 @@ ifdef CONFIG_CC_STACKPROTECTOR
69     endif
70     KBUILD_CFLAGS += $(stackp-flag)
71    
72     +ifeq ($(cc-name),clang)
73     +KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
74     +KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
75     +KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
76     +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
77     +# Quiet clang warning: comparison of unsigned expression < 0 is always false
78     +KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
79     +# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
80     +# source of a reference will be _MergedGlobals and not on of the whitelisted names.
81     +# See modpost pattern 2
82     +KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
83     +KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
84     +else
85     +
86     +# These warnings generated too much noise in a regular build.
87     +# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
88     +KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
89     +endif
90     +
91     +KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
92     ifdef CONFIG_FRAME_POINTER
93     KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
94     else
95     diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
96     index 4fd6272e6c01b..c5816a224571e 100644
97     --- a/arch/arc/include/asm/cache.h
98     +++ b/arch/arc/include/asm/cache.h
99     @@ -49,6 +49,17 @@
100    
101     #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
102    
103     +/*
104     + * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
105     + * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit
106     + * alignment for any atomic64_t embedded in buffer.
107     + * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
108     + * value of 4 (and not 8) in ARC ABI.
109     + */
110     +#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
111     +#define ARCH_SLAB_MINALIGN 8
112     +#endif
113     +
114     extern void arc_cache_init(void);
115     extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
116     extern void read_decode_cache_bcr(void);
117     diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
118     index 8b90d25a15cca..1f945d0f40daa 100644
119     --- a/arch/arc/kernel/head.S
120     +++ b/arch/arc/kernel/head.S
121     @@ -17,6 +17,7 @@
122     #include <asm/entry.h>
123     #include <asm/arcregs.h>
124     #include <asm/cache.h>
125     +#include <asm/irqflags.h>
126    
127     .macro CPU_EARLY_SETUP
128    
129     @@ -47,6 +48,15 @@
130     sr r5, [ARC_REG_DC_CTRL]
131    
132     1:
133     +
134     +#ifdef CONFIG_ISA_ARCV2
135     + ; Unaligned access is disabled at reset, so re-enable early as
136     + ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
137     + ; by default
138     + lr r5, [status32]
139     + bset r5, r5, STATUS_AD_BIT
140     + kflag r5
141     +#endif
142     .endm
143    
144     .section .init.text, "ax",@progbits
145     @@ -93,9 +103,9 @@ ENTRY(stext)
146     #ifdef CONFIG_ARC_UBOOT_SUPPORT
147     ; Uboot - kernel ABI
148     ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
149     - ; r1 = magic number (board identity, unused as of now
150     + ; r1 = magic number (always zero as of now)
151     ; r2 = pointer to uboot provided cmdline or external DTB in mem
152     - ; These are handled later in setup_arch()
153     + ; These are handled later in handle_uboot_args()
154     st r0, [@uboot_tag]
155     st r2, [@uboot_arg]
156     #endif
157     diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
158     index 0385df77a6973..9119bea503a7c 100644
159     --- a/arch/arc/kernel/setup.c
160     +++ b/arch/arc/kernel/setup.c
161     @@ -381,43 +381,80 @@ void setup_processor(void)
162     arc_chk_core_config();
163     }
164    
165     -static inline int is_kernel(unsigned long addr)
166     +static inline bool uboot_arg_invalid(unsigned long addr)
167     {
168     - if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
169     - return 1;
170     - return 0;
171     + /*
172     + * Check that it is a untranslated address (although MMU is not enabled
173     + * yet, it being a high address ensures this is not by fluke)
174     + */
175     + if (addr < PAGE_OFFSET)
176     + return true;
177     +
178     + /* Check that address doesn't clobber resident kernel image */
179     + return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
180     }
181    
182     -void __init setup_arch(char **cmdline_p)
183     +#define IGNORE_ARGS "Ignore U-boot args: "
184     +
185     +/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
186     +#define UBOOT_TAG_NONE 0
187     +#define UBOOT_TAG_CMDLINE 1
188     +#define UBOOT_TAG_DTB 2
189     +
190     +void __init handle_uboot_args(void)
191     {
192     + bool use_embedded_dtb = true;
193     + bool append_cmdline = false;
194     +
195     #ifdef CONFIG_ARC_UBOOT_SUPPORT
196     - /* make sure that uboot passed pointer to cmdline/dtb is valid */
197     - if (uboot_tag && is_kernel((unsigned long)uboot_arg))
198     - panic("Invalid uboot arg\n");
199     + /* check that we know this tag */
200     + if (uboot_tag != UBOOT_TAG_NONE &&
201     + uboot_tag != UBOOT_TAG_CMDLINE &&
202     + uboot_tag != UBOOT_TAG_DTB) {
203     + pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
204     + goto ignore_uboot_args;
205     + }
206     +
207     + if (uboot_tag != UBOOT_TAG_NONE &&
208     + uboot_arg_invalid((unsigned long)uboot_arg)) {
209     + pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
210     + goto ignore_uboot_args;
211     + }
212     +
213     + /* see if U-boot passed an external Device Tree blob */
214     + if (uboot_tag == UBOOT_TAG_DTB) {
215     + machine_desc = setup_machine_fdt((void *)uboot_arg);
216    
217     - /* See if u-boot passed an external Device Tree blob */
218     - machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
219     - if (!machine_desc)
220     + /* external Device Tree blob is invalid - use embedded one */
221     + use_embedded_dtb = !machine_desc;
222     + }
223     +
224     + if (uboot_tag == UBOOT_TAG_CMDLINE)
225     + append_cmdline = true;
226     +
227     +ignore_uboot_args:
228     #endif
229     - {
230     - /* No, so try the embedded one */
231     +
232     + if (use_embedded_dtb) {
233     machine_desc = setup_machine_fdt(__dtb_start);
234     if (!machine_desc)
235     panic("Embedded DT invalid\n");
236     + }
237    
238     - /*
239     - * If we are here, it is established that @uboot_arg didn't
240     - * point to DT blob. Instead if u-boot says it is cmdline,
241     - * append to embedded DT cmdline.
242     - * setup_machine_fdt() would have populated @boot_command_line
243     - */
244     - if (uboot_tag == 1) {
245     - /* Ensure a whitespace between the 2 cmdlines */
246     - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
247     - strlcat(boot_command_line, uboot_arg,
248     - COMMAND_LINE_SIZE);
249     - }
250     + /*
251     + * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
252     + * append processing can only happen after.
253     + */
254     + if (append_cmdline) {
255     + /* Ensure a whitespace between the 2 cmdlines */
256     + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
257     + strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
258     }
259     +}
260     +
261     +void __init setup_arch(char **cmdline_p)
262     +{
263     + handle_uboot_args();
264    
265     /* Save unparsed command line copy for /proc/cmdline */
266     *cmdline_p = boot_command_line;
267     diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
268     index f8ae6d6e4767e..85a15b38b6d8c 100644
269     --- a/arch/arm64/include/asm/arch_gicv3.h
270     +++ b/arch/arm64/include/asm/arch_gicv3.h
271     @@ -80,18 +80,8 @@
272     #include <linux/stringify.h>
273     #include <asm/barrier.h>
274    
275     -#define read_gicreg(r) \
276     - ({ \
277     - u64 reg; \
278     - asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
279     - reg; \
280     - })
281     -
282     -#define write_gicreg(v,r) \
283     - do { \
284     - u64 __val = (v); \
285     - asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
286     - } while (0)
287     +#define read_gicreg read_sysreg_s
288     +#define write_gicreg write_sysreg_s
289    
290     /*
291     * Low-level accessors
292     @@ -102,13 +92,13 @@
293    
294     static inline void gic_write_eoir(u32 irq)
295     {
296     - asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" ((u64)irq));
297     + write_sysreg_s(irq, ICC_EOIR1_EL1);
298     isb();
299     }
300    
301     static inline void gic_write_dir(u32 irq)
302     {
303     - asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" ((u64)irq));
304     + write_sysreg_s(irq, ICC_DIR_EL1);
305     isb();
306     }
307    
308     @@ -116,7 +106,7 @@ static inline u64 gic_read_iar_common(void)
309     {
310     u64 irqstat;
311    
312     - asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
313     + irqstat = read_sysreg_s(ICC_IAR1_EL1);
314     dsb(sy);
315     return irqstat;
316     }
317     @@ -134,10 +124,12 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
318    
319     asm volatile(
320     "nop;nop;nop;nop\n\t"
321     - "nop;nop;nop;nop\n\t"
322     - "mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t"
323     - "nop;nop;nop;nop"
324     - : "=r" (irqstat));
325     + "nop;nop;nop;nop");
326     +
327     + irqstat = read_sysreg_s(ICC_IAR1_EL1);
328     +
329     + asm volatile(
330     + "nop;nop;nop;nop");
331     mb();
332    
333     return irqstat;
334     @@ -145,43 +137,40 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
335    
336     static inline void gic_write_pmr(u32 val)
337     {
338     - asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val));
339     + write_sysreg_s(val, ICC_PMR_EL1);
340     }
341    
342     static inline void gic_write_ctlr(u32 val)
343     {
344     - asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" ((u64)val));
345     + write_sysreg_s(val, ICC_CTLR_EL1);
346     isb();
347     }
348    
349     static inline void gic_write_grpen1(u32 val)
350     {
351     - asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" ((u64)val));
352     + write_sysreg_s(val, ICC_GRPEN1_EL1);
353     isb();
354     }
355    
356     static inline void gic_write_sgi1r(u64 val)
357     {
358     - asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
359     + write_sysreg_s(val, ICC_SGI1R_EL1);
360     }
361    
362     static inline u32 gic_read_sre(void)
363     {
364     - u64 val;
365     -
366     - asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
367     - return val;
368     + return read_sysreg_s(ICC_SRE_EL1);
369     }
370    
371     static inline void gic_write_sre(u32 val)
372     {
373     - asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" ((u64)val));
374     + write_sysreg_s(val, ICC_SRE_EL1);
375     isb();
376     }
377    
378     static inline void gic_write_bpr1(u32 val)
379     {
380     - asm volatile("msr_s " __stringify(ICC_BPR1_EL1) ", %0" : : "r" (val));
381     + write_sysreg_s(val, ICC_BPR1_EL1);
382     }
383    
384     #define gic_read_typer(c) readq_relaxed(c)
385     diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
386     index 134879c1310a0..4ed369c0ec6a1 100644
387     --- a/arch/mips/configs/ath79_defconfig
388     +++ b/arch/mips/configs/ath79_defconfig
389     @@ -74,6 +74,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
390     # CONFIG_SERIAL_8250_PCI is not set
391     CONFIG_SERIAL_8250_NR_UARTS=1
392     CONFIG_SERIAL_8250_RUNTIME_UARTS=1
393     +CONFIG_SERIAL_OF_PLATFORM=y
394     CONFIG_SERIAL_AR933X=y
395     CONFIG_SERIAL_AR933X_CONSOLE=y
396     # CONFIG_HW_RANDOM is not set
397     diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
398     index db6f5afff4ff1..ea897912bc712 100644
399     --- a/arch/mips/jazz/jazzdma.c
400     +++ b/arch/mips/jazz/jazzdma.c
401     @@ -71,14 +71,15 @@ static int __init vdma_init(void)
402     get_order(VDMA_PGTBL_SIZE));
403     BUG_ON(!pgtbl);
404     dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
405     - pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
406     + pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
407    
408     /*
409     * Clear the R4030 translation table
410     */
411     vdma_pgtbl_init();
412    
413     - r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
414     + r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
415     + CPHYSADDR((unsigned long)pgtbl));
416     r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
417     r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
418    
419     diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
420     index e02d7b4d2b693..0780c375fe2e2 100644
421     --- a/arch/parisc/kernel/ptrace.c
422     +++ b/arch/parisc/kernel/ptrace.c
423     @@ -311,15 +311,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
424    
425     long do_syscall_trace_enter(struct pt_regs *regs)
426     {
427     - if (test_thread_flag(TIF_SYSCALL_TRACE) &&
428     - tracehook_report_syscall_entry(regs)) {
429     + if (test_thread_flag(TIF_SYSCALL_TRACE)) {
430     + int rc = tracehook_report_syscall_entry(regs);
431     +
432     /*
433     - * Tracing decided this syscall should not happen or the
434     - * debugger stored an invalid system call number. Skip
435     - * the system call and the system call restart handling.
436     + * As tracesys_next does not set %r28 to -ENOSYS
437     + * when %r20 is set to -1, initialize it here.
438     */
439     - regs->gr[20] = -1UL;
440     - goto out;
441     + regs->gr[28] = -ENOSYS;
442     +
443     + if (rc) {
444     + /*
445     + * A nonzero return code from
446     + * tracehook_report_syscall_entry() tells us
447     + * to prevent the syscall execution. Skip
448     + * the syscall call and the syscall restart handling.
449     + *
450     + * Note that the tracer may also just change
451     + * regs->gr[20] to an invalid syscall number,
452     + * that is handled by tracesys_next.
453     + */
454     + regs->gr[20] = -1UL;
455     + return -1;
456     + }
457     }
458    
459     /* Do the secure computing check after ptrace. */
460     @@ -343,7 +357,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
461     regs->gr[24] & 0xffffffff,
462     regs->gr[23] & 0xffffffff);
463    
464     -out:
465     /*
466     * Sign extend the syscall number to 64bit since it may have been
467     * modified by a compat ptrace call
468     diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
469     index cda8e14bd72a3..89b163351e642 100644
470     --- a/arch/x86/boot/compressed/Makefile
471     +++ b/arch/x86/boot/compressed/Makefile
472     @@ -34,6 +34,7 @@ KBUILD_CFLAGS += $(cflags-y)
473     KBUILD_CFLAGS += -mno-mmx -mno-sse
474     KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
475     KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
476     +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
477    
478     KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
479     GCOV_PROFILE := n
480     diff --git a/drivers/atm/he.c b/drivers/atm/he.c
481     index 31b513a23ae0c..985a5800a6376 100644
482     --- a/drivers/atm/he.c
483     +++ b/drivers/atm/he.c
484     @@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
485     instead of '/ 512', use '>> 9' to prevent a call
486     to divdu3 on x86 platforms
487     */
488     - rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
489     + rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
490    
491     if (rate_cps < 10)
492     rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
493     diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
494     index be54e5331a451..50272fe81f267 100644
495     --- a/drivers/char/hpet.c
496     +++ b/drivers/char/hpet.c
497     @@ -574,7 +574,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
498     }
499    
500     static int
501     -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
502     +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
503     struct hpet_info *info)
504     {
505     struct hpet_timer __iomem *timer;
506     diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
507     index 279d1e021421b..685247c3d489f 100644
508     --- a/drivers/gpu/drm/i915/i915_irq.c
509     +++ b/drivers/gpu/drm/i915/i915_irq.c
510     @@ -1985,10 +1985,10 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
511     DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
512    
513     if (pch_iir & SDE_TRANSA_FIFO_UNDER)
514     - intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
515     + intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
516    
517     if (pch_iir & SDE_TRANSB_FIFO_UNDER)
518     - intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
519     + intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
520     }
521    
522     static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
523     @@ -2022,13 +2022,13 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
524     DRM_ERROR("PCH poison interrupt\n");
525    
526     if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
527     - intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
528     + intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
529    
530     if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
531     - intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
532     + intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
533    
534     if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
535     - intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
536     + intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_C);
537    
538     I915_WRITE(SERR_INT, serr_int);
539     }
540     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
541     index c185625d67f20..d915877b6ecbc 100644
542     --- a/drivers/gpu/drm/i915/intel_display.c
543     +++ b/drivers/gpu/drm/i915/intel_display.c
544     @@ -1849,7 +1849,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
545    
546     /* FDI must be feeding us bits for PCH ports */
547     assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
548     - assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
549     + assert_fdi_rx_enabled(dev_priv, PIPE_A);
550    
551     /* Workaround: set timing override bit. */
552     val = I915_READ(TRANS_CHICKEN2(PIPE_A));
553     @@ -1950,7 +1950,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
554     assert_sprites_disabled(dev_priv, pipe);
555    
556     if (HAS_PCH_LPT(dev_priv))
557     - pch_transcoder = TRANSCODER_A;
558     + pch_transcoder = PIPE_A;
559     else
560     pch_transcoder = pipe;
561    
562     @@ -4636,7 +4636,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
563     struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
564     enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
565    
566     - assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
567     + assert_pch_transcoder_disabled(dev_priv, PIPE_A);
568    
569     lpt_program_iclkip(crtc);
570    
571     @@ -5410,7 +5410,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
572     return;
573    
574     if (intel_crtc->config->has_pch_encoder)
575     - intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
576     + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A,
577     false);
578    
579     intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
580     @@ -5498,7 +5498,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
581     intel_wait_for_vblank(dev, pipe);
582     intel_wait_for_vblank(dev, pipe);
583     intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
584     - intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
585     + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A,
586     true);
587     }
588    
589     @@ -5597,7 +5597,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
590     enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
591    
592     if (intel_crtc->config->has_pch_encoder)
593     - intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
594     + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A,
595     false);
596    
597     intel_encoders_disable(crtc, old_crtc_state, old_state);
598     @@ -5626,7 +5626,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
599     intel_encoders_post_disable(crtc, old_crtc_state, old_state);
600    
601     if (old_crtc_state->has_pch_encoder)
602     - intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
603     + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A,
604     true);
605     }
606    
607     diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
608     index 6a9860df208f5..8aafb96015402 100644
609     --- a/drivers/gpu/drm/i915/intel_drv.h
610     +++ b/drivers/gpu/drm/i915/intel_drv.h
611     @@ -1095,12 +1095,12 @@ static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
612     bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
613     enum pipe pipe, bool enable);
614     bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
615     - enum transcoder pch_transcoder,
616     + enum pipe pch_transcoder,
617     bool enable);
618     void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
619     enum pipe pipe);
620     void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
621     - enum transcoder pch_transcoder);
622     + enum pipe pch_transcoder);
623     void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
624     void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
625    
626     diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
627     index 2aa744081f090..b6b64a2d4b71c 100644
628     --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
629     +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
630     @@ -185,11 +185,11 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
631     }
632    
633     static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
634     - enum transcoder pch_transcoder,
635     + enum pipe pch_transcoder,
636     bool enable)
637     {
638     struct drm_i915_private *dev_priv = to_i915(dev);
639     - uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
640     + uint32_t bit = (pch_transcoder == PIPE_A) ?
641     SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
642    
643     if (enable)
644     @@ -201,7 +201,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
645     static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
646     {
647     struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
648     - enum transcoder pch_transcoder = (enum transcoder) crtc->pipe;
649     + enum pipe pch_transcoder = crtc->pipe;
650     uint32_t serr_int = I915_READ(SERR_INT);
651    
652     assert_spin_locked(&dev_priv->irq_lock);
653     @@ -212,12 +212,12 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
654     I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
655     POSTING_READ(SERR_INT);
656    
657     - DRM_ERROR("pch fifo underrun on pch transcoder %s\n",
658     - transcoder_name(pch_transcoder));
659     + DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
660     + pipe_name(pch_transcoder));
661     }
662    
663     static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
664     - enum transcoder pch_transcoder,
665     + enum pipe pch_transcoder,
666     bool enable, bool old)
667     {
668     struct drm_i915_private *dev_priv = to_i915(dev);
669     @@ -235,8 +235,8 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
670    
671     if (old && I915_READ(SERR_INT) &
672     SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
673     - DRM_ERROR("uncleared pch fifo underrun on pch transcoder %s\n",
674     - transcoder_name(pch_transcoder));
675     + DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
676     + pipe_name(pch_transcoder));
677     }
678     }
679     }
680     @@ -311,7 +311,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
681     * Returns the previous state of underrun reporting.
682     */
683     bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
684     - enum transcoder pch_transcoder,
685     + enum pipe pch_transcoder,
686     bool enable)
687     {
688     struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
689     @@ -384,12 +384,12 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
690     * interrupt to avoid an irq storm.
691     */
692     void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
693     - enum transcoder pch_transcoder)
694     + enum pipe pch_transcoder)
695     {
696     if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
697     false))
698     - DRM_ERROR("PCH transcoder %s FIFO underrun\n",
699     - transcoder_name(pch_transcoder));
700     + DRM_ERROR("PCH transcoder %c FIFO underrun\n",
701     + pipe_name(pch_transcoder));
702     }
703    
704     /**
705     diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
706     index 646359025574a..74de1ae48d4f7 100644
707     --- a/drivers/infiniband/ulp/srp/ib_srp.c
708     +++ b/drivers/infiniband/ulp/srp/ib_srp.c
709     @@ -2639,7 +2639,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
710     {
711     struct srp_target_port *target = host_to_target(scmnd->device->host);
712     struct srp_rdma_ch *ch;
713     - int i, j;
714     u8 status;
715    
716     shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
717     @@ -2651,15 +2650,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
718     if (status)
719     return FAILED;
720    
721     - for (i = 0; i < target->ch_count; i++) {
722     - ch = &target->ch[i];
723     - for (j = 0; j < target->req_ring_size; ++j) {
724     - struct srp_request *req = &ch->req_ring[j];
725     -
726     - srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
727     - }
728     - }
729     -
730     return SUCCESS;
731     }
732    
733     diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
734     index 4d9b195547c5c..df2a10157720a 100644
735     --- a/drivers/isdn/hardware/avm/b1.c
736     +++ b/drivers/isdn/hardware/avm/b1.c
737     @@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
738     int i, j;
739    
740     for (j = 0; j < AVM_MAXVERSION; j++)
741     - cinfo->version[j] = "\0\0" + 1;
742     + cinfo->version[j] = "";
743     for (i = 0, j = 0;
744     j < AVM_MAXVERSION && i < cinfo->versionlen;
745     j++, i += cinfo->versionbuf[i] + 1)
746     diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
747     index 63eaa0a9f8a18..d4e0d1602c80f 100644
748     --- a/drivers/isdn/i4l/isdn_tty.c
749     +++ b/drivers/isdn/i4l/isdn_tty.c
750     @@ -1455,15 +1455,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
751     {
752     modem_info *info = (modem_info *) tty->driver_data;
753    
754     + mutex_lock(&modem_info_mutex);
755     if (!old_termios)
756     isdn_tty_change_speed(info);
757     else {
758     if (tty->termios.c_cflag == old_termios->c_cflag &&
759     tty->termios.c_ispeed == old_termios->c_ispeed &&
760     - tty->termios.c_ospeed == old_termios->c_ospeed)
761     + tty->termios.c_ospeed == old_termios->c_ospeed) {
762     + mutex_unlock(&modem_info_mutex);
763     return;
764     + }
765     isdn_tty_change_speed(info);
766     }
767     + mutex_unlock(&modem_info_mutex);
768     }
769    
770     /*
771     diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
772     index c5b30f06218a3..44ceed7ac3c5b 100644
773     --- a/drivers/leds/leds-lp5523.c
774     +++ b/drivers/leds/leds-lp5523.c
775     @@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
776    
777     /* Let the programs run for couple of ms and check the engine status */
778     usleep_range(3000, 6000);
779     - lp55xx_read(chip, LP5523_REG_STATUS, &status);
780     + ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
781     + if (ret)
782     + return ret;
783     status &= LP5523_ENG_STATUS_MASK;
784    
785     if (status != LP5523_ENG_STATUS_MASK) {
786     diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
787     index 589eebfc13df9..2f212bdc187a4 100644
788     --- a/drivers/mfd/ab8500-core.c
789     +++ b/drivers/mfd/ab8500-core.c
790     @@ -257,7 +257,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
791     mutex_unlock(&ab8500->lock);
792     dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
793    
794     - return ret;
795     + return (ret < 0) ? ret : 0;
796     }
797    
798     static int ab8500_get_register(struct device *dev, u8 bank,
799     diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
800     index ca38a6a141100..26ccf3f4ade9c 100644
801     --- a/drivers/mfd/db8500-prcmu.c
802     +++ b/drivers/mfd/db8500-prcmu.c
803     @@ -2588,7 +2588,7 @@ static struct irq_chip prcmu_irq_chip = {
804     .irq_unmask = prcmu_irq_unmask,
805     };
806    
807     -static __init char *fw_project_name(u32 project)
808     +static char *fw_project_name(u32 project)
809     {
810     switch (project) {
811     case PRCMU_FW_PROJECT_U8500:
812     @@ -2736,7 +2736,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
813     INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
814     }
815    
816     -static void __init init_prcm_registers(void)
817     +static void init_prcm_registers(void)
818     {
819     u32 val;
820    
821     diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
822     index d7f54e492aa61..6c16f170529f5 100644
823     --- a/drivers/mfd/mc13xxx-core.c
824     +++ b/drivers/mfd/mc13xxx-core.c
825     @@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
826    
827     mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
828    
829     - mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
830     + ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
831     + if (ret)
832     + goto out;
833    
834     adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
835     adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
836     diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
837     index e14d8b058f0c2..5d4c10f05450a 100644
838     --- a/drivers/mfd/mt6397-core.c
839     +++ b/drivers/mfd/mt6397-core.c
840     @@ -306,8 +306,7 @@ static int mt6397_probe(struct platform_device *pdev)
841    
842     default:
843     dev_err(&pdev->dev, "unsupported chip: %d\n", id);
844     - ret = -ENODEV;
845     - break;
846     + return -ENODEV;
847     }
848    
849     if (ret) {
850     diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
851     index 52fafea06067e..8d420c37b2a61 100644
852     --- a/drivers/mfd/qcom_rpm.c
853     +++ b/drivers/mfd/qcom_rpm.c
854     @@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
855     return -EFAULT;
856     }
857    
858     + writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
859     + writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
860     + writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
861     +
862     dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
863     fw_version[1],
864     fw_version[2]);
865     diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
866     index 798f0a829637f..60286adbd6a1c 100644
867     --- a/drivers/mfd/ti_am335x_tscadc.c
868     +++ b/drivers/mfd/ti_am335x_tscadc.c
869     @@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
870     cell->pdata_size = sizeof(tscadc);
871     }
872    
873     - err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
874     - tscadc->used_cells, NULL, 0, NULL);
875     + err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
876     + tscadc->cells, tscadc->used_cells, NULL,
877     + 0, NULL);
878     if (err < 0)
879     goto err_disable_clk;
880    
881     diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
882     index c64615dca2bd3..1d58df8565488 100644
883     --- a/drivers/mfd/twl-core.c
884     +++ b/drivers/mfd/twl-core.c
885     @@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
886     * letting it generate the right frequencies for USB, MADC, and
887     * other purposes.
888     */
889     -static inline int __init protect_pm_master(void)
890     +static inline int protect_pm_master(void)
891     {
892     int e = 0;
893    
894     @@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
895     return e;
896     }
897    
898     -static inline int __init unprotect_pm_master(void)
899     +static inline int unprotect_pm_master(void)
900     {
901     int e = 0;
902    
903     diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
904     index 1ee68bd440fbc..16c6e2accfaa5 100644
905     --- a/drivers/mfd/wm5110-tables.c
906     +++ b/drivers/mfd/wm5110-tables.c
907     @@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
908     { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
909     { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
910     { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
911     + { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
912     { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
913     { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
914     { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
915     @@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
916     case ARIZONA_ASRC_ENABLE:
917     case ARIZONA_ASRC_STATUS:
918     case ARIZONA_ASRC_RATE1:
919     + case ARIZONA_ASRC_RATE2:
920     case ARIZONA_ISRC_1_CTRL_1:
921     case ARIZONA_ISRC_1_CTRL_2:
922     case ARIZONA_ISRC_1_CTRL_3:
923     diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
924     index 0b4d90ceea7a6..864f107ed48fa 100644
925     --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
926     +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
927     @@ -149,12 +149,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
928     struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
929     int i;
930    
931     - vf_cb->mac_cb = NULL;
932     -
933     - kfree(vf_cb);
934     -
935     for (i = 0; i < handle->q_num; i++)
936     hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
937     +
938     + kfree(vf_cb);
939     }
940    
941     static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
942     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
943     index 1a92cd719e19d..ab2259c5808aa 100644
944     --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
945     +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
946     @@ -777,13 +777,27 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
947     return 0;
948     }
949     #endif
950     +
951     +#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
952     +
953     static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
954     netdev_features_t dev_features)
955     {
956     __wsum hw_checksum = 0;
957     + void *hdr;
958     +
959     + /* CQE csum doesn't cover padding octets in short ethernet
960     + * frames. And the pad field is appended prior to calculating
961     + * and appending the FCS field.
962     + *
963     + * Detecting these padded frames requires to verify and parse
964     + * IP headers, so we simply force all those small frames to skip
965     + * checksum complete.
966     + */
967     + if (short_frame(skb->len))
968     + return -EINVAL;
969    
970     - void *hdr = (u8 *)va + sizeof(struct ethhdr);
971     -
972     + hdr = (u8 *)va + sizeof(struct ethhdr);
973     hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
974    
975     if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
976     @@ -945,6 +959,11 @@ xdp_drop:
977     }
978    
979     if (likely(dev->features & NETIF_F_RXCSUM)) {
980     + /* TODO: For IP non TCP/UDP packets when csum complete is
981     + * not an option (not supported or any other reason) we can
982     + * actually check cqe IPOK status bit and report
983     + * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
984     + */
985     if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
986     MLX4_CQE_STATUS_UDP)) {
987     if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
988     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
989     index bf1c09ca73c03..b210c171a3806 100644
990     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
991     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
992     @@ -91,6 +91,7 @@ static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
993    
994     s->tx_packets += sq_stats->packets;
995     s->tx_bytes += sq_stats->bytes;
996     + s->tx_queue_dropped += sq_stats->dropped;
997     }
998     }
999     }
1000     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1001     index 60e1edcbe5734..7ca1ab5c19366 100644
1002     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1003     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1004     @@ -794,7 +794,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1005     static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1006     {
1007     return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1008     - MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1009     + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1010     }
1011    
1012     static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1013     @@ -806,7 +806,7 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1014     static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1015     const char *mac, u16 fid, bool adding,
1016     enum mlxsw_reg_sfd_rec_action action,
1017     - bool dynamic)
1018     + enum mlxsw_reg_sfd_rec_policy policy)
1019     {
1020     char *sfd_pl;
1021     u8 num_rec;
1022     @@ -817,8 +817,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1023     return -ENOMEM;
1024    
1025     mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1026     - mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1027     - mac, fid, action, local_port);
1028     + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1029     num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1030     err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1031     if (err)
1032     @@ -837,7 +836,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1033     bool dynamic)
1034     {
1035     return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1036     - MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
1037     + MLXSW_REG_SFD_REC_ACTION_NOP,
1038     + mlxsw_sp_sfd_rec_policy(dynamic));
1039     }
1040    
1041     int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1042     @@ -845,7 +845,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1043     {
1044     return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1045     MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1046     - false);
1047     + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1048     }
1049    
1050     static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1051     diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1052     index a3360cbdb30bd..5b968e6a0a7fb 100644
1053     --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1054     +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
1055     @@ -1013,6 +1013,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
1056     cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1057     rx_prod.bd_prod = cpu_to_le16(bd_prod);
1058     rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1059     +
1060     + /* Make sure chain element is updated before ringing the doorbell */
1061     + dma_wmb();
1062     +
1063     DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1064     }
1065    
1066     diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
1067     index b3e669af30055..026e8e9cb9429 100644
1068     --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
1069     +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
1070     @@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
1071     unsigned int entry = priv->cur_tx;
1072     struct dma_desc *desc = priv->dma_tx + entry;
1073     unsigned int nopaged_len = skb_headlen(skb);
1074     - unsigned int bmax;
1075     + unsigned int bmax, des2;
1076     unsigned int i = 1, len;
1077    
1078     if (priv->plat->enh_desc)
1079     @@ -44,11 +44,12 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
1080    
1081     len = nopaged_len - bmax;
1082    
1083     - desc->des2 = dma_map_single(priv->device, skb->data,
1084     - bmax, DMA_TO_DEVICE);
1085     - if (dma_mapping_error(priv->device, desc->des2))
1086     + des2 = dma_map_single(priv->device, skb->data,
1087     + bmax, DMA_TO_DEVICE);
1088     + desc->des2 = cpu_to_le32(des2);
1089     + if (dma_mapping_error(priv->device, des2))
1090     return -1;
1091     - priv->tx_skbuff_dma[entry].buf = desc->des2;
1092     + priv->tx_skbuff_dma[entry].buf = des2;
1093     priv->tx_skbuff_dma[entry].len = bmax;
1094     /* do not close the descriptor and do not set own bit */
1095     priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
1096     @@ -60,12 +61,13 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
1097     desc = priv->dma_tx + entry;
1098    
1099     if (len > bmax) {
1100     - desc->des2 = dma_map_single(priv->device,
1101     - (skb->data + bmax * i),
1102     - bmax, DMA_TO_DEVICE);
1103     - if (dma_mapping_error(priv->device, desc->des2))
1104     + des2 = dma_map_single(priv->device,
1105     + (skb->data + bmax * i),
1106     + bmax, DMA_TO_DEVICE);
1107     + desc->des2 = cpu_to_le32(des2);
1108     + if (dma_mapping_error(priv->device, des2))
1109     return -1;
1110     - priv->tx_skbuff_dma[entry].buf = desc->des2;
1111     + priv->tx_skbuff_dma[entry].buf = des2;
1112     priv->tx_skbuff_dma[entry].len = bmax;
1113     priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
1114     STMMAC_CHAIN_MODE, 1,
1115     @@ -73,12 +75,13 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
1116     len -= bmax;
1117     i++;
1118     } else {
1119     - desc->des2 = dma_map_single(priv->device,
1120     - (skb->data + bmax * i), len,
1121     - DMA_TO_DEVICE);
1122     - if (dma_mapping_error(priv->device, desc->des2))
1123     + des2 = dma_map_single(priv->device,
1124     + (skb->data + bmax * i), len,
1125     + DMA_TO_DEVICE);
1126     + desc->des2 = cpu_to_le32(des2);
1127     + if (dma_mapping_error(priv->device, des2))
1128     return -1;
1129     - priv->tx_skbuff_dma[entry].buf = desc->des2;
1130     + priv->tx_skbuff_dma[entry].buf = des2;
1131     priv->tx_skbuff_dma[entry].len = len;
1132     /* last descriptor can be set now */
1133     priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
1134     @@ -119,19 +122,19 @@ static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
1135     struct dma_extended_desc *p = (struct dma_extended_desc *)des;
1136     for (i = 0; i < (size - 1); i++) {
1137     dma_phy += sizeof(struct dma_extended_desc);
1138     - p->basic.des3 = (unsigned int)dma_phy;
1139     + p->basic.des3 = cpu_to_le32((unsigned int)dma_phy);
1140     p++;
1141     }
1142     - p->basic.des3 = (unsigned int)phy_addr;
1143     + p->basic.des3 = cpu_to_le32((unsigned int)phy_addr);
1144    
1145     } else {
1146     struct dma_desc *p = (struct dma_desc *)des;
1147     for (i = 0; i < (size - 1); i++) {
1148     dma_phy += sizeof(struct dma_desc);
1149     - p->des3 = (unsigned int)dma_phy;
1150     + p->des3 = cpu_to_le32((unsigned int)dma_phy);
1151     p++;
1152     }
1153     - p->des3 = (unsigned int)phy_addr;
1154     + p->des3 = cpu_to_le32((unsigned int)phy_addr);
1155     }
1156     }
1157    
1158     @@ -144,10 +147,10 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
1159     * 1588-2002 time stamping is enabled, hence reinitialize it
1160     * to keep explicit chaining in the descriptor.
1161     */
1162     - p->des3 = (unsigned int)(priv->dma_rx_phy +
1163     - (((priv->dirty_rx) + 1) %
1164     - DMA_RX_SIZE) *
1165     - sizeof(struct dma_desc));
1166     + p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
1167     + (((priv->dirty_rx) + 1) %
1168     + DMA_RX_SIZE) *
1169     + sizeof(struct dma_desc)));
1170     }
1171    
1172     static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
1173     @@ -161,9 +164,9 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
1174     * 1588-2002 time stamping is enabled, hence reinitialize it
1175     * to keep explicit chaining in the descriptor.
1176     */
1177     - p->des3 = (unsigned int)((priv->dma_tx_phy +
1178     - ((priv->dirty_tx + 1) % DMA_TX_SIZE))
1179     - * sizeof(struct dma_desc));
1180     + p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
1181     + ((priv->dirty_tx + 1) % DMA_TX_SIZE))
1182     + * sizeof(struct dma_desc)));
1183     }
1184    
1185     const struct stmmac_mode_ops chain_mode_ops = {
1186     diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
1187     index e3c86d4221095..faeeef75d7f17 100644
1188     --- a/drivers/net/ethernet/stmicro/stmmac/descs.h
1189     +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
1190     @@ -87,7 +87,7 @@
1191     #define TDES0_ERROR_SUMMARY BIT(15)
1192     #define TDES0_IP_HEADER_ERROR BIT(16)
1193     #define TDES0_TIME_STAMP_STATUS BIT(17)
1194     -#define TDES0_OWN BIT(31)
1195     +#define TDES0_OWN ((u32)BIT(31)) /* silence sparse */
1196     /* TDES1 */
1197     #define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
1198     #define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
1199     @@ -130,7 +130,7 @@
1200     #define ETDES0_FIRST_SEGMENT BIT(28)
1201     #define ETDES0_LAST_SEGMENT BIT(29)
1202     #define ETDES0_INTERRUPT BIT(30)
1203     -#define ETDES0_OWN BIT(31)
1204     +#define ETDES0_OWN ((u32)BIT(31)) /* silence sparse */
1205     /* TDES1 */
1206     #define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
1207     #define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
1208     @@ -170,19 +170,19 @@
1209    
1210     /* Basic descriptor structure for normal and alternate descriptors */
1211     struct dma_desc {
1212     - unsigned int des0;
1213     - unsigned int des1;
1214     - unsigned int des2;
1215     - unsigned int des3;
1216     + __le32 des0;
1217     + __le32 des1;
1218     + __le32 des2;
1219     + __le32 des3;
1220     };
1221    
1222     /* Extended descriptor structure (e.g. >= databook 3.50a) */
1223     struct dma_extended_desc {
1224     struct dma_desc basic; /* Basic descriptors */
1225     - unsigned int des4; /* Extended Status */
1226     - unsigned int des5; /* Reserved */
1227     - unsigned int des6; /* Tx/Rx Timestamp Low */
1228     - unsigned int des7; /* Tx/Rx Timestamp High */
1229     + __le32 des4; /* Extended Status */
1230     + __le32 des5; /* Reserved */
1231     + __le32 des6; /* Tx/Rx Timestamp Low */
1232     + __le32 des7; /* Tx/Rx Timestamp High */
1233     };
1234    
1235     /* Transmit checksum insertion control */
1236     diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
1237     index 7635a464ce41c..1d181e205d6ec 100644
1238     --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
1239     +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
1240     @@ -35,47 +35,50 @@
1241     /* Enhanced descriptors */
1242     static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
1243     {
1244     - p->des1 |= ((BUF_SIZE_8KiB - 1) << ERDES1_BUFFER2_SIZE_SHIFT)
1245     - & ERDES1_BUFFER2_SIZE_MASK;
1246     + p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
1247     + << ERDES1_BUFFER2_SIZE_SHIFT)
1248     + & ERDES1_BUFFER2_SIZE_MASK);
1249    
1250     if (end)
1251     - p->des1 |= ERDES1_END_RING;
1252     + p->des1 |= cpu_to_le32(ERDES1_END_RING);
1253     }
1254    
1255     static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
1256     {
1257     if (end)
1258     - p->des0 |= ETDES0_END_RING;
1259     + p->des0 |= cpu_to_le32(ETDES0_END_RING);
1260     else
1261     - p->des0 &= ~ETDES0_END_RING;
1262     + p->des0 &= cpu_to_le32(~ETDES0_END_RING);
1263     }
1264    
1265     static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
1266     {
1267     if (unlikely(len > BUF_SIZE_4KiB)) {
1268     - p->des1 |= (((len - BUF_SIZE_4KiB) << ETDES1_BUFFER2_SIZE_SHIFT)
1269     + p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB)
1270     + << ETDES1_BUFFER2_SIZE_SHIFT)
1271     & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
1272     - & ETDES1_BUFFER1_SIZE_MASK);
1273     + & ETDES1_BUFFER1_SIZE_MASK));
1274     } else
1275     - p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
1276     + p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK));
1277     }
1278    
1279     /* Normal descriptors */
1280     static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
1281     {
1282     - p->des1 |= ((BUF_SIZE_2KiB - 1) << RDES1_BUFFER2_SIZE_SHIFT)
1283     - & RDES1_BUFFER2_SIZE_MASK;
1284     + p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
1285     + << RDES1_BUFFER2_SIZE_SHIFT)
1286     + & RDES1_BUFFER2_SIZE_MASK);
1287    
1288     if (end)
1289     - p->des1 |= RDES1_END_RING;
1290     + p->des1 |= cpu_to_le32(RDES1_END_RING);
1291     }
1292    
1293     static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
1294     {
1295     if (end)
1296     - p->des1 |= TDES1_END_RING;
1297     + p->des1 |= cpu_to_le32(TDES1_END_RING);
1298     else
1299     - p->des1 &= ~TDES1_END_RING;
1300     + p->des1 &= cpu_to_le32(~TDES1_END_RING);
1301     }
1302    
1303     static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
1304     @@ -83,10 +86,11 @@ static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
1305     if (unlikely(len > BUF_SIZE_2KiB)) {
1306     unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
1307     & TDES1_BUFFER1_SIZE_MASK;
1308     - p->des1 |= ((((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT)
1309     - & TDES1_BUFFER2_SIZE_MASK) | buffer1);
1310     + p->des1 |= cpu_to_le32((((len - buffer1)
1311     + << TDES1_BUFFER2_SIZE_SHIFT)
1312     + & TDES1_BUFFER2_SIZE_MASK) | buffer1);
1313     } else
1314     - p->des1 |= (len & TDES1_BUFFER1_SIZE_MASK);
1315     + p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK));
1316     }
1317    
1318     /* Specific functions used for Chain mode */
1319     @@ -94,32 +98,32 @@ static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
1320     /* Enhanced descriptors */
1321     static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
1322     {
1323     - p->des1 |= ERDES1_SECOND_ADDRESS_CHAINED;
1324     + p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED);
1325     }
1326    
1327     static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
1328     {
1329     - p->des0 |= ETDES0_SECOND_ADDRESS_CHAINED;
1330     + p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED);
1331     }
1332    
1333     static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
1334     {
1335     - p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
1336     + p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK);
1337     }
1338    
1339     /* Normal descriptors */
1340     static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
1341     {
1342     - p->des1 |= RDES1_SECOND_ADDRESS_CHAINED;
1343     + p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED);
1344     }
1345    
1346     static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
1347     {
1348     - p->des1 |= TDES1_SECOND_ADDRESS_CHAINED;
1349     + p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED);
1350     }
1351    
1352     static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
1353     {
1354     - p->des1 |= len & TDES1_BUFFER1_SIZE_MASK;
1355     + p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK);
1356     }
1357     #endif /* __DESC_COM_H__ */
1358     diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
1359     index f988c7573ba59..3f5056858535a 100644
1360     --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
1361     +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
1362     @@ -23,7 +23,7 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
1363     unsigned int tdes3;
1364     int ret = tx_done;
1365    
1366     - tdes3 = p->des3;
1367     + tdes3 = le32_to_cpu(p->des3);
1368    
1369     /* Get tx owner first */
1370     if (unlikely(tdes3 & TDES3_OWN))
1371     @@ -77,9 +77,9 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
1372     struct dma_desc *p)
1373     {
1374     struct net_device_stats *stats = (struct net_device_stats *)data;
1375     - unsigned int rdes1 = p->des1;
1376     - unsigned int rdes2 = p->des2;
1377     - unsigned int rdes3 = p->des3;
1378     + unsigned int rdes1 = le32_to_cpu(p->des1);
1379     + unsigned int rdes2 = le32_to_cpu(p->des2);
1380     + unsigned int rdes3 = le32_to_cpu(p->des3);
1381     int message_type;
1382     int ret = good_frame;
1383    
1384     @@ -176,47 +176,48 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
1385    
1386     static int dwmac4_rd_get_tx_len(struct dma_desc *p)
1387     {
1388     - return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
1389     + return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
1390     }
1391    
1392     static int dwmac4_get_tx_owner(struct dma_desc *p)
1393     {
1394     - return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
1395     + return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
1396     }
1397    
1398     static void dwmac4_set_tx_owner(struct dma_desc *p)
1399     {
1400     - p->des3 |= TDES3_OWN;
1401     + p->des3 |= cpu_to_le32(TDES3_OWN);
1402     }
1403    
1404     static void dwmac4_set_rx_owner(struct dma_desc *p)
1405     {
1406     - p->des3 |= RDES3_OWN;
1407     + p->des3 |= cpu_to_le32(RDES3_OWN);
1408     }
1409    
1410     static int dwmac4_get_tx_ls(struct dma_desc *p)
1411     {
1412     - return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
1413     + return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
1414     + >> TDES3_LAST_DESCRIPTOR_SHIFT;
1415     }
1416    
1417     static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
1418     {
1419     - return (p->des3 & RDES3_PACKET_SIZE_MASK);
1420     + return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
1421     }
1422    
1423     static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
1424     {
1425     - p->des2 |= TDES2_TIMESTAMP_ENABLE;
1426     + p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
1427     }
1428    
1429     static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
1430     {
1431     /* Context type from W/B descriptor must be zero */
1432     - if (p->des3 & TDES3_CONTEXT_TYPE)
1433     + if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
1434     return -EINVAL;
1435    
1436     /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
1437     - if (p->des3 & TDES3_TIMESTAMP_STATUS)
1438     + if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
1439     return 0;
1440    
1441     return 1;
1442     @@ -227,9 +228,9 @@ static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
1443     struct dma_desc *p = (struct dma_desc *)desc;
1444     u64 ns;
1445    
1446     - ns = p->des0;
1447     + ns = le32_to_cpu(p->des0);
1448     /* convert high/sec time stamp value to nanosecond */
1449     - ns += p->des1 * 1000000000ULL;
1450     + ns += le32_to_cpu(p->des1) * 1000000000ULL;
1451    
1452     return ns;
1453     }
1454     @@ -267,7 +268,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
1455    
1456     /* Get the status from normal w/b descriptor */
1457     if (likely(p->des3 & TDES3_RS1V)) {
1458     - if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
1459     + if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
1460     int i = 0;
1461    
1462     /* Check if timestamp is OK from context descriptor */
1463     @@ -290,10 +291,10 @@ exit:
1464     static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
1465     int mode, int end)
1466     {
1467     - p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
1468     + p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
1469    
1470     if (!disable_rx_ic)
1471     - p->des3 |= RDES3_INT_ON_COMPLETION_EN;
1472     + p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
1473     }
1474    
1475     static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
1476     @@ -308,9 +309,9 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
1477     bool csum_flag, int mode, bool tx_own,
1478     bool ls)
1479     {
1480     - unsigned int tdes3 = p->des3;
1481     + unsigned int tdes3 = le32_to_cpu(p->des3);
1482    
1483     - p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
1484     + p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
1485    
1486     if (is_fs)
1487     tdes3 |= TDES3_FIRST_DESCRIPTOR;
1488     @@ -338,7 +339,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
1489     */
1490     wmb();
1491    
1492     - p->des3 = tdes3;
1493     + p->des3 = cpu_to_le32(tdes3);
1494     }
1495    
1496     static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
1497     @@ -346,14 +347,14 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
1498     bool ls, unsigned int tcphdrlen,
1499     unsigned int tcppayloadlen)
1500     {
1501     - unsigned int tdes3 = p->des3;
1502     + unsigned int tdes3 = le32_to_cpu(p->des3);
1503    
1504     if (len1)
1505     - p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
1506     + p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));
1507    
1508     if (len2)
1509     - p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
1510     - & TDES2_BUFFER2_SIZE_MASK;
1511     + p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
1512     + & TDES2_BUFFER2_SIZE_MASK);
1513    
1514     if (is_fs) {
1515     tdes3 |= TDES3_FIRST_DESCRIPTOR |
1516     @@ -381,7 +382,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
1517     */
1518     wmb();
1519    
1520     - p->des3 = tdes3;
1521     + p->des3 = cpu_to_le32(tdes3);
1522     }
1523    
1524     static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
1525     @@ -392,7 +393,7 @@ static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
1526    
1527     static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
1528     {
1529     - p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
1530     + p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
1531     }
1532    
1533     static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
1534     @@ -405,7 +406,8 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
1535     for (i = 0; i < size; i++) {
1536     pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
1537     i, (unsigned int)virt_to_phys(p),
1538     - p->des0, p->des1, p->des2, p->des3);
1539     + le32_to_cpu(p->des0), le32_to_cpu(p->des1),
1540     + le32_to_cpu(p->des2), le32_to_cpu(p->des3));
1541     p++;
1542     }
1543     }
1544     @@ -414,8 +416,8 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
1545     {
1546     p->des0 = 0;
1547     p->des1 = 0;
1548     - p->des2 = mss;
1549     - p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
1550     + p->des2 = cpu_to_le32(mss);
1551     + p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
1552     }
1553    
1554     const struct stmmac_desc_ops dwmac4_desc_ops = {
1555     diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
1556     index e75549327c345..ce97e522566a8 100644
1557     --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
1558     +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
1559     @@ -30,7 +30,7 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
1560     struct dma_desc *p, void __iomem *ioaddr)
1561     {
1562     struct net_device_stats *stats = (struct net_device_stats *)data;
1563     - unsigned int tdes0 = p->des0;
1564     + unsigned int tdes0 = le32_to_cpu(p->des0);
1565     int ret = tx_done;
1566    
1567     /* Get tx owner first */
1568     @@ -95,7 +95,7 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
1569    
1570     static int enh_desc_get_tx_len(struct dma_desc *p)
1571     {
1572     - return (p->des1 & ETDES1_BUFFER1_SIZE_MASK);
1573     + return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
1574     }
1575    
1576     static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
1577     @@ -134,8 +134,8 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
1578     static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
1579     struct dma_extended_desc *p)
1580     {
1581     - unsigned int rdes0 = p->basic.des0;
1582     - unsigned int rdes4 = p->des4;
1583     + unsigned int rdes0 = le32_to_cpu(p->basic.des0);
1584     + unsigned int rdes4 = le32_to_cpu(p->des4);
1585    
1586     if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
1587     int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;
1588     @@ -199,7 +199,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
1589     struct dma_desc *p)
1590     {
1591     struct net_device_stats *stats = (struct net_device_stats *)data;
1592     - unsigned int rdes0 = p->des0;
1593     + unsigned int rdes0 = le32_to_cpu(p->des0);
1594     int ret = good_frame;
1595    
1596     if (unlikely(rdes0 & RDES0_OWN))
1597     @@ -265,8 +265,8 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
1598     static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
1599     int mode, int end)
1600     {
1601     - p->des0 |= RDES0_OWN;
1602     - p->des1 |= ((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
1603     + p->des0 |= cpu_to_le32(RDES0_OWN);
1604     + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
1605    
1606     if (mode == STMMAC_CHAIN_MODE)
1607     ehn_desc_rx_set_on_chain(p);
1608     @@ -274,12 +274,12 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
1609     ehn_desc_rx_set_on_ring(p, end);
1610    
1611     if (disable_rx_ic)
1612     - p->des1 |= ERDES1_DISABLE_IC;
1613     + p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
1614     }
1615    
1616     static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
1617     {
1618     - p->des0 &= ~ETDES0_OWN;
1619     + p->des0 &= cpu_to_le32(~ETDES0_OWN);
1620     if (mode == STMMAC_CHAIN_MODE)
1621     enh_desc_end_tx_desc_on_chain(p);
1622     else
1623     @@ -288,27 +288,27 @@ static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
1624    
1625     static int enh_desc_get_tx_owner(struct dma_desc *p)
1626     {
1627     - return (p->des0 & ETDES0_OWN) >> 31;
1628     + return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
1629     }
1630    
1631     static void enh_desc_set_tx_owner(struct dma_desc *p)
1632     {
1633     - p->des0 |= ETDES0_OWN;
1634     + p->des0 |= cpu_to_le32(ETDES0_OWN);
1635     }
1636    
1637     static void enh_desc_set_rx_owner(struct dma_desc *p)
1638     {
1639     - p->des0 |= RDES0_OWN;
1640     + p->des0 |= cpu_to_le32(RDES0_OWN);
1641     }
1642    
1643     static int enh_desc_get_tx_ls(struct dma_desc *p)
1644     {
1645     - return (p->des0 & ETDES0_LAST_SEGMENT) >> 29;
1646     + return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
1647     }
1648    
1649     static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
1650     {
1651     - int ter = (p->des0 & ETDES0_END_RING) >> 21;
1652     + int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;
1653    
1654     memset(p, 0, offsetof(struct dma_desc, des2));
1655     if (mode == STMMAC_CHAIN_MODE)
1656     @@ -321,7 +321,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
1657     bool csum_flag, int mode, bool tx_own,
1658     bool ls)
1659     {
1660     - unsigned int tdes0 = p->des0;
1661     + unsigned int tdes0 = le32_to_cpu(p->des0);
1662    
1663     if (mode == STMMAC_CHAIN_MODE)
1664     enh_set_tx_desc_len_on_chain(p, len);
1665     @@ -352,12 +352,12 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
1666     */
1667     wmb();
1668    
1669     - p->des0 = tdes0;
1670     + p->des0 = cpu_to_le32(tdes0);
1671     }
1672    
1673     static void enh_desc_set_tx_ic(struct dma_desc *p)
1674     {
1675     - p->des0 |= ETDES0_INTERRUPT;
1676     + p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
1677     }
1678    
1679     static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
1680     @@ -372,18 +372,18 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
1681     if (rx_coe_type == STMMAC_RX_COE_TYPE1)
1682     csum = 2;
1683    
1684     - return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
1685     - csum);
1686     + return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
1687     + >> RDES0_FRAME_LEN_SHIFT) - csum);
1688     }
1689    
1690     static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
1691     {
1692     - p->des0 |= ETDES0_TIME_STAMP_ENABLE;
1693     + p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
1694     }
1695    
1696     static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
1697     {
1698     - return (p->des0 & ETDES0_TIME_STAMP_STATUS) >> 17;
1699     + return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
1700     }
1701    
1702     static u64 enh_desc_get_timestamp(void *desc, u32 ats)
1703     @@ -392,13 +392,13 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
1704    
1705     if (ats) {
1706     struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
1707     - ns = p->des6;
1708     + ns = le32_to_cpu(p->des6);
1709     /* convert high/sec time stamp value to nanosecond */
1710     - ns += p->des7 * 1000000000ULL;
1711     + ns += le32_to_cpu(p->des7) * 1000000000ULL;
1712     } else {
1713     struct dma_desc *p = (struct dma_desc *)desc;
1714     - ns = p->des2;
1715     - ns += p->des3 * 1000000000ULL;
1716     + ns = le32_to_cpu(p->des2);
1717     + ns += le32_to_cpu(p->des3) * 1000000000ULL;
1718     }
1719    
1720     return ns;
1721     @@ -408,10 +408,11 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
1722     {
1723     if (ats) {
1724     struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
1725     - return (p->basic.des0 & RDES0_IPC_CSUM_ERROR) >> 7;
1726     + return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
1727     } else {
1728     struct dma_desc *p = (struct dma_desc *)desc;
1729     - if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
1730     + if ((le32_to_cpu(p->des2) == 0xffffffff) &&
1731     + (le32_to_cpu(p->des3) == 0xffffffff))
1732     /* timestamp is corrupted, hence don't store it */
1733     return 0;
1734     else
1735     diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
1736     index 2beacd0d3043a..fd78406e2e9af 100644
1737     --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
1738     +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
1739     @@ -30,8 +30,8 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
1740     struct dma_desc *p, void __iomem *ioaddr)
1741     {
1742     struct net_device_stats *stats = (struct net_device_stats *)data;
1743     - unsigned int tdes0 = p->des0;
1744     - unsigned int tdes1 = p->des1;
1745     + unsigned int tdes0 = le32_to_cpu(p->des0);
1746     + unsigned int tdes1 = le32_to_cpu(p->des1);
1747     int ret = tx_done;
1748    
1749     /* Get tx owner first */
1750     @@ -77,7 +77,7 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
1751    
1752     static int ndesc_get_tx_len(struct dma_desc *p)
1753     {
1754     - return (p->des1 & RDES1_BUFFER1_SIZE_MASK);
1755     + return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
1756     }
1757    
1758     /* This function verifies if each incoming frame has some errors
1759     @@ -88,7 +88,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
1760     struct dma_desc *p)
1761     {
1762     int ret = good_frame;
1763     - unsigned int rdes0 = p->des0;
1764     + unsigned int rdes0 = le32_to_cpu(p->des0);
1765     struct net_device_stats *stats = (struct net_device_stats *)data;
1766    
1767     if (unlikely(rdes0 & RDES0_OWN))
1768     @@ -141,8 +141,8 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
1769     static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
1770     int end)
1771     {
1772     - p->des0 |= RDES0_OWN;
1773     - p->des1 |= (BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK;
1774     + p->des0 |= cpu_to_le32(RDES0_OWN);
1775     + p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
1776    
1777     if (mode == STMMAC_CHAIN_MODE)
1778     ndesc_rx_set_on_chain(p, end);
1779     @@ -150,12 +150,12 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
1780     ndesc_rx_set_on_ring(p, end);
1781    
1782     if (disable_rx_ic)
1783     - p->des1 |= RDES1_DISABLE_IC;
1784     + p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
1785     }
1786    
1787     static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
1788     {
1789     - p->des0 &= ~TDES0_OWN;
1790     + p->des0 &= cpu_to_le32(~TDES0_OWN);
1791     if (mode == STMMAC_CHAIN_MODE)
1792     ndesc_tx_set_on_chain(p);
1793     else
1794     @@ -164,27 +164,27 @@ static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
1795    
1796     static int ndesc_get_tx_owner(struct dma_desc *p)
1797     {
1798     - return (p->des0 & TDES0_OWN) >> 31;
1799     + return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
1800     }
1801    
1802     static void ndesc_set_tx_owner(struct dma_desc *p)
1803     {
1804     - p->des0 |= TDES0_OWN;
1805     + p->des0 |= cpu_to_le32(TDES0_OWN);
1806     }
1807    
1808     static void ndesc_set_rx_owner(struct dma_desc *p)
1809     {
1810     - p->des0 |= RDES0_OWN;
1811     + p->des0 |= cpu_to_le32(RDES0_OWN);
1812     }
1813    
1814     static int ndesc_get_tx_ls(struct dma_desc *p)
1815     {
1816     - return (p->des1 & TDES1_LAST_SEGMENT) >> 30;
1817     + return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
1818     }
1819    
1820     static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
1821     {
1822     - int ter = (p->des1 & TDES1_END_RING) >> 25;
1823     + int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;
1824    
1825     memset(p, 0, offsetof(struct dma_desc, des2));
1826     if (mode == STMMAC_CHAIN_MODE)
1827     @@ -197,7 +197,7 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
1828     bool csum_flag, int mode, bool tx_own,
1829     bool ls)
1830     {
1831     - unsigned int tdes1 = p->des1;
1832     + unsigned int tdes1 = le32_to_cpu(p->des1);
1833    
1834     if (is_fs)
1835     tdes1 |= TDES1_FIRST_SEGMENT;
1836     @@ -212,7 +212,7 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
1837     if (ls)
1838     tdes1 |= TDES1_LAST_SEGMENT;
1839    
1840     - p->des1 = tdes1;
1841     + p->des1 = cpu_to_le32(tdes1);
1842    
1843     if (mode == STMMAC_CHAIN_MODE)
1844     norm_set_tx_desc_len_on_chain(p, len);
1845     @@ -220,12 +220,12 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
1846     norm_set_tx_desc_len_on_ring(p, len);
1847    
1848     if (tx_own)
1849     - p->des0 |= TDES0_OWN;
1850     + p->des0 |= cpu_to_le32(TDES0_OWN);
1851     }
1852    
1853     static void ndesc_set_tx_ic(struct dma_desc *p)
1854     {
1855     - p->des1 |= TDES1_INTERRUPT;
1856     + p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
1857     }
1858    
1859     static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
1860     @@ -241,19 +241,20 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
1861     if (rx_coe_type == STMMAC_RX_COE_TYPE1)
1862     csum = 2;
1863    
1864     - return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
1865     + return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
1866     + >> RDES0_FRAME_LEN_SHIFT) -
1867     csum);
1868    
1869     }
1870    
1871     static void ndesc_enable_tx_timestamp(struct dma_desc *p)
1872     {
1873     - p->des1 |= TDES1_TIME_STAMP_ENABLE;
1874     + p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
1875     }
1876    
1877     static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
1878     {
1879     - return (p->des0 & TDES0_TIME_STAMP_STATUS) >> 17;
1880     + return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
1881     }
1882    
1883     static u64 ndesc_get_timestamp(void *desc, u32 ats)
1884     @@ -261,9 +262,9 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
1885     struct dma_desc *p = (struct dma_desc *)desc;
1886     u64 ns;
1887    
1888     - ns = p->des2;
1889     + ns = le32_to_cpu(p->des2);
1890     /* convert high/sec time stamp value to nanosecond */
1891     - ns += p->des3 * 1000000000ULL;
1892     + ns += le32_to_cpu(p->des3) * 1000000000ULL;
1893    
1894     return ns;
1895     }
1896     @@ -272,7 +273,8 @@ static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
1897     {
1898     struct dma_desc *p = (struct dma_desc *)desc;
1899    
1900     - if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
1901     + if ((le32_to_cpu(p->des2) == 0xffffffff) &&
1902     + (le32_to_cpu(p->des3) == 0xffffffff))
1903     /* timestamp is corrupted, hence don't store it */
1904     return 0;
1905     else
1906     diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1907     index 7723b5d2499a1..9983ce9bd90de 100644
1908     --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1909     +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1910     @@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
1911     unsigned int entry = priv->cur_tx;
1912     struct dma_desc *desc;
1913     unsigned int nopaged_len = skb_headlen(skb);
1914     - unsigned int bmax, len;
1915     + unsigned int bmax, len, des2;
1916    
1917     if (priv->extend_desc)
1918     desc = (struct dma_desc *)(priv->dma_etx + entry);
1919     @@ -50,16 +50,17 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
1920    
1921     if (nopaged_len > BUF_SIZE_8KiB) {
1922    
1923     - desc->des2 = dma_map_single(priv->device, skb->data,
1924     - bmax, DMA_TO_DEVICE);
1925     - if (dma_mapping_error(priv->device, desc->des2))
1926     + des2 = dma_map_single(priv->device, skb->data, bmax,
1927     + DMA_TO_DEVICE);
1928     + desc->des2 = cpu_to_le32(des2);
1929     + if (dma_mapping_error(priv->device, des2))
1930     return -1;
1931    
1932     - priv->tx_skbuff_dma[entry].buf = desc->des2;
1933     + priv->tx_skbuff_dma[entry].buf = des2;
1934     priv->tx_skbuff_dma[entry].len = bmax;
1935     priv->tx_skbuff_dma[entry].is_jumbo = true;
1936    
1937     - desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1938     + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
1939     priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
1940     STMMAC_RING_MODE, 0, false);
1941     priv->tx_skbuff[entry] = NULL;
1942     @@ -70,26 +71,28 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
1943     else
1944     desc = priv->dma_tx + entry;
1945    
1946     - desc->des2 = dma_map_single(priv->device, skb->data + bmax,
1947     - len, DMA_TO_DEVICE);
1948     - if (dma_mapping_error(priv->device, desc->des2))
1949     + des2 = dma_map_single(priv->device, skb->data + bmax, len,
1950     + DMA_TO_DEVICE);
1951     + desc->des2 = cpu_to_le32(des2);
1952     + if (dma_mapping_error(priv->device, des2))
1953     return -1;
1954     - priv->tx_skbuff_dma[entry].buf = desc->des2;
1955     + priv->tx_skbuff_dma[entry].buf = des2;
1956     priv->tx_skbuff_dma[entry].len = len;
1957     priv->tx_skbuff_dma[entry].is_jumbo = true;
1958    
1959     - desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1960     + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
1961     priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
1962     STMMAC_RING_MODE, 1, true);
1963     } else {
1964     - desc->des2 = dma_map_single(priv->device, skb->data,
1965     - nopaged_len, DMA_TO_DEVICE);
1966     - if (dma_mapping_error(priv->device, desc->des2))
1967     + des2 = dma_map_single(priv->device, skb->data,
1968     + nopaged_len, DMA_TO_DEVICE);
1969     + desc->des2 = cpu_to_le32(des2);
1970     + if (dma_mapping_error(priv->device, des2))
1971     return -1;
1972     - priv->tx_skbuff_dma[entry].buf = desc->des2;
1973     + priv->tx_skbuff_dma[entry].buf = des2;
1974     priv->tx_skbuff_dma[entry].len = nopaged_len;
1975     priv->tx_skbuff_dma[entry].is_jumbo = true;
1976     - desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1977     + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
1978     priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
1979     STMMAC_RING_MODE, 0, true);
1980     }
1981     @@ -115,13 +118,13 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
1982    
1983     /* Fill DES3 in case of RING mode */
1984     if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
1985     - p->des3 = p->des2 + BUF_SIZE_8KiB;
1986     + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
1987     }
1988    
1989     /* In ring mode we need to fill the desc3 because it is used as buffer */
1990     static void stmmac_init_desc3(struct dma_desc *p)
1991     {
1992     - p->des3 = p->des2 + BUF_SIZE_8KiB;
1993     + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
1994     }
1995    
1996     static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
1997     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1998     index 65ed02bc3ea34..20a2b01b392c1 100644
1999     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2000     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2001     @@ -1002,9 +1002,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
2002     }
2003    
2004     if (priv->synopsys_id >= DWMAC_CORE_4_00)
2005     - p->des0 = priv->rx_skbuff_dma[i];
2006     + p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
2007     else
2008     - p->des2 = priv->rx_skbuff_dma[i];
2009     + p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
2010    
2011     if ((priv->hw->mode->init_desc3) &&
2012     (priv->dma_buf_sz == BUF_SIZE_16KiB))
2013     @@ -1968,7 +1968,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2014     priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2015     desc = priv->dma_tx + priv->cur_tx;
2016    
2017     - desc->des0 = des + (total_len - tmp_len);
2018     + desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2019     buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2020     TSO_MAX_BUFF_SIZE : tmp_len;
2021    
2022     @@ -2070,11 +2070,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2023     priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2024     priv->tx_skbuff[first_entry] = skb;
2025    
2026     - first->des0 = des;
2027     + first->des0 = cpu_to_le32(des);
2028    
2029     /* Fill start of payload in buff2 of first descriptor */
2030     if (pay_len)
2031     - first->des1 = des + proto_hdr_len;
2032     + first->des1 = cpu_to_le32(des + proto_hdr_len);
2033    
2034     /* If needed take extra descriptors to fill the remaining payload */
2035     tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2036     @@ -2271,13 +2271,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2037    
2038     priv->tx_skbuff[entry] = NULL;
2039    
2040     - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2041     - desc->des0 = des;
2042     - priv->tx_skbuff_dma[entry].buf = desc->des0;
2043     - } else {
2044     - desc->des2 = des;
2045     - priv->tx_skbuff_dma[entry].buf = desc->des2;
2046     - }
2047     + priv->tx_skbuff_dma[entry].buf = des;
2048     + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2049     + desc->des0 = cpu_to_le32(des);
2050     + else
2051     + desc->des2 = cpu_to_le32(des);
2052    
2053     priv->tx_skbuff_dma[entry].map_as_page = true;
2054     priv->tx_skbuff_dma[entry].len = len;
2055     @@ -2348,13 +2346,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2056     if (dma_mapping_error(priv->device, des))
2057     goto dma_map_err;
2058    
2059     - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2060     - first->des0 = des;
2061     - priv->tx_skbuff_dma[first_entry].buf = first->des0;
2062     - } else {
2063     - first->des2 = des;
2064     - priv->tx_skbuff_dma[first_entry].buf = first->des2;
2065     - }
2066     + priv->tx_skbuff_dma[first_entry].buf = des;
2067     + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2068     + first->des0 = cpu_to_le32(des);
2069     + else
2070     + first->des2 = cpu_to_le32(des);
2071    
2072     priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2073     priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2074     @@ -2468,10 +2464,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2075     }
2076    
2077     if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2078     - p->des0 = priv->rx_skbuff_dma[entry];
2079     + p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2080     p->des1 = 0;
2081     } else {
2082     - p->des2 = priv->rx_skbuff_dma[entry];
2083     + p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2084     }
2085     if (priv->hw->mode->refill_desc3)
2086     priv->hw->mode->refill_desc3(priv, p);
2087     @@ -2575,9 +2571,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2088     unsigned int des;
2089    
2090     if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2091     - des = p->des0;
2092     + des = le32_to_cpu(p->des0);
2093     else
2094     - des = p->des2;
2095     + des = le32_to_cpu(p->des2);
2096    
2097     frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2098    
2099     @@ -2951,14 +2947,17 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
2100     x = *(u64 *) ep;
2101     seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2102     i, (unsigned int)virt_to_phys(ep),
2103     - ep->basic.des0, ep->basic.des1,
2104     - ep->basic.des2, ep->basic.des3);
2105     + le32_to_cpu(ep->basic.des0),
2106     + le32_to_cpu(ep->basic.des1),
2107     + le32_to_cpu(ep->basic.des2),
2108     + le32_to_cpu(ep->basic.des3));
2109     ep++;
2110     } else {
2111     x = *(u64 *) p;
2112     seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2113     i, (unsigned int)virt_to_phys(ep),
2114     - p->des0, p->des1, p->des2, p->des3);
2115     + le32_to_cpu(p->des0), le32_to_cpu(p->des1),
2116     + le32_to_cpu(p->des2), le32_to_cpu(p->des3));
2117     p++;
2118     }
2119     seq_printf(seq, "\n");
2120     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
2121     index eafc28142cd21..49eaede34eea6 100644
2122     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
2123     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
2124     @@ -231,7 +231,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
2125     */
2126     static void stmmac_pci_remove(struct pci_dev *pdev)
2127     {
2128     + int i;
2129     +
2130     stmmac_dvr_remove(&pdev->dev);
2131     +
2132     + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
2133     + if (pci_resource_len(pdev, i) == 0)
2134     + continue;
2135     + pcim_iounmap_regions(pdev, BIT(i));
2136     + break;
2137     + }
2138     +
2139     pci_disable_device(pdev);
2140     }
2141    
2142     diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2143     index 94b05dd827af6..375b6810bf461 100644
2144     --- a/drivers/net/team/team.c
2145     +++ b/drivers/net/team/team.c
2146     @@ -261,17 +261,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
2147     }
2148     }
2149    
2150     -static bool __team_option_inst_tmp_find(const struct list_head *opts,
2151     - const struct team_option_inst *needle)
2152     -{
2153     - struct team_option_inst *opt_inst;
2154     -
2155     - list_for_each_entry(opt_inst, opts, tmp_list)
2156     - if (opt_inst == needle)
2157     - return true;
2158     - return false;
2159     -}
2160     -
2161     static int __team_options_register(struct team *team,
2162     const struct team_option *option,
2163     size_t option_count)
2164     @@ -2466,7 +2455,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2165     int err = 0;
2166     int i;
2167     struct nlattr *nl_option;
2168     - LIST_HEAD(opt_inst_list);
2169    
2170     rtnl_lock();
2171    
2172     @@ -2486,6 +2474,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2173     struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2174     struct nlattr *attr;
2175     struct nlattr *attr_data;
2176     + LIST_HEAD(opt_inst_list);
2177     enum team_option_type opt_type;
2178     int opt_port_ifindex = 0; /* != 0 for per-port options */
2179     u32 opt_array_index = 0;
2180     @@ -2589,23 +2578,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2181     if (err)
2182     goto team_put;
2183     opt_inst->changed = true;
2184     -
2185     - /* dumb/evil user-space can send us duplicate opt,
2186     - * keep only the last one
2187     - */
2188     - if (__team_option_inst_tmp_find(&opt_inst_list,
2189     - opt_inst))
2190     - continue;
2191     -
2192     list_add(&opt_inst->tmp_list, &opt_inst_list);
2193     }
2194     if (!opt_found) {
2195     err = -ENOENT;
2196     goto team_put;
2197     }
2198     - }
2199    
2200     - err = team_nl_send_event_options_get(team, &opt_inst_list);
2201     + err = team_nl_send_event_options_get(team, &opt_inst_list);
2202     + if (err)
2203     + break;
2204     + }
2205    
2206     team_put:
2207     team_nl_team_put(team);
2208     diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
2209     index 873424ab0e328..bd0e659002161 100644
2210     --- a/drivers/phy/tegra/xusb.c
2211     +++ b/drivers/phy/tegra/xusb.c
2212     @@ -418,7 +418,7 @@ tegra_xusb_port_find_lane(struct tegra_xusb_port *port,
2213     {
2214     struct tegra_xusb_lane *lane, *match = ERR_PTR(-ENODEV);
2215    
2216     - for (map = map; map->type; map++) {
2217     + for (; map->type; map++) {
2218     if (port->index != map->port)
2219     continue;
2220    
2221     diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
2222     index d9ff53e8f715a..a7c4e32d31c36 100644
2223     --- a/drivers/pinctrl/pinctrl-max77620.c
2224     +++ b/drivers/pinctrl/pinctrl-max77620.c
2225     @@ -34,14 +34,12 @@ enum max77620_pin_ppdrv {
2226     MAX77620_PIN_PP_DRV,
2227     };
2228    
2229     -enum max77620_pinconf_param {
2230     - MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1,
2231     - MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
2232     - MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
2233     - MAX77620_SUSPEND_FPS_SOURCE,
2234     - MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
2235     - MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
2236     -};
2237     +#define MAX77620_ACTIVE_FPS_SOURCE (PIN_CONFIG_END + 1)
2238     +#define MAX77620_ACTIVE_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 2)
2239     +#define MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 3)
2240     +#define MAX77620_SUSPEND_FPS_SOURCE (PIN_CONFIG_END + 4)
2241     +#define MAX77620_SUSPEND_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 5)
2242     +#define MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 6)
2243    
2244     struct max77620_pin_function {
2245     const char *name;
2246     diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
2247     index 77128d680e3bc..6f38fa1f468a7 100644
2248     --- a/drivers/scsi/isci/init.c
2249     +++ b/drivers/scsi/isci/init.c
2250     @@ -595,6 +595,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
2251     shost->max_lun = ~0;
2252     shost->max_cmd_len = MAX_COMMAND_SIZE;
2253    
2254     + /* turn on DIF support */
2255     + scsi_host_set_prot(shost,
2256     + SHOST_DIF_TYPE1_PROTECTION |
2257     + SHOST_DIF_TYPE2_PROTECTION |
2258     + SHOST_DIF_TYPE3_PROTECTION);
2259     + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
2260     +
2261     err = scsi_add_host(shost, &pdev->dev);
2262     if (err)
2263     goto err_shost;
2264     @@ -682,13 +689,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2265     goto err_host_alloc;
2266     }
2267     pci_info->hosts[i] = h;
2268     -
2269     - /* turn on DIF support */
2270     - scsi_host_set_prot(to_shost(h),
2271     - SHOST_DIF_TYPE1_PROTECTION |
2272     - SHOST_DIF_TYPE2_PROTECTION |
2273     - SHOST_DIF_TYPE3_PROTECTION);
2274     - scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
2275     }
2276    
2277     err = isci_setup_interrupts(pdev);
2278     diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
2279     index d8c03431d0aa8..f9f899ec94270 100644
2280     --- a/drivers/scsi/qla4xxx/ql4_os.c
2281     +++ b/drivers/scsi/qla4xxx/ql4_os.c
2282     @@ -7245,6 +7245,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
2283    
2284     rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
2285     fw_ddb_entry);
2286     + if (rc)
2287     + goto free_sess;
2288    
2289     ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
2290     __func__, fnode_sess->dev.kobj.name);
2291     diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
2292     index 9ff5219d849e9..411e9df0d40e9 100644
2293     --- a/fs/ceph/snap.c
2294     +++ b/fs/ceph/snap.c
2295     @@ -609,7 +609,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
2296     capsnap->size);
2297    
2298     spin_lock(&mdsc->snap_flush_lock);
2299     - list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
2300     + if (list_empty(&ci->i_snap_flush_item))
2301     + list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
2302     spin_unlock(&mdsc->snap_flush_lock);
2303     return 1; /* caller may want to ceph_flush_snaps */
2304     }
2305     diff --git a/fs/proc/base.c b/fs/proc/base.c
2306     index 79702d405ba72..b9e41832315a6 100644
2307     --- a/fs/proc/base.c
2308     +++ b/fs/proc/base.c
2309     @@ -1134,10 +1134,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
2310    
2311     task_lock(p);
2312     if (!p->vfork_done && process_shares_mm(p, mm)) {
2313     - pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
2314     - task_pid_nr(p), p->comm,
2315     - p->signal->oom_score_adj, oom_adj,
2316     - task_pid_nr(task), task->comm);
2317     p->signal->oom_score_adj = oom_adj;
2318     if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
2319     p->signal->oom_score_adj_min = (short)oom_adj;
2320     diff --git a/include/keys/user-type.h b/include/keys/user-type.h
2321     index c56fef40f53ef..5d744ec8f644a 100644
2322     --- a/include/keys/user-type.h
2323     +++ b/include/keys/user-type.h
2324     @@ -31,7 +31,7 @@
2325     struct user_key_payload {
2326     struct rcu_head rcu; /* RCU destructor */
2327     unsigned short datalen; /* length of this data */
2328     - char data[0]; /* actual data */
2329     + char data[0] __aligned(__alignof__(u64)); /* actual data */
2330     };
2331    
2332     extern struct key_type key_type_user;
2333     diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
2334     index 08398182f56ec..36dc52067377b 100644
2335     --- a/include/linux/clocksource.h
2336     +++ b/include/linux/clocksource.h
2337     @@ -117,7 +117,7 @@ struct clocksource {
2338     #define CLOCK_SOURCE_RESELECT 0x100
2339    
2340     /* simplify initialization of mask field */
2341     -#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
2342     +#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
2343    
2344     static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
2345     {
2346     diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
2347     index 22db1e63707ec..05e8b6e4edcb6 100644
2348     --- a/include/linux/sched/sysctl.h
2349     +++ b/include/linux/sched/sysctl.h
2350     @@ -33,9 +33,9 @@ extern unsigned int sysctl_numa_balancing_scan_period_max;
2351     extern unsigned int sysctl_numa_balancing_scan_size;
2352    
2353     #ifdef CONFIG_SCHED_DEBUG
2354     -extern unsigned int sysctl_sched_migration_cost;
2355     -extern unsigned int sysctl_sched_nr_migrate;
2356     -extern unsigned int sysctl_sched_time_avg;
2357     +extern __read_mostly unsigned int sysctl_sched_migration_cost;
2358     +extern __read_mostly unsigned int sysctl_sched_nr_migrate;
2359     +extern __read_mostly unsigned int sysctl_sched_time_avg;
2360     extern unsigned int sysctl_sched_shares_window;
2361    
2362     int sched_proc_update_handler(struct ctl_table *table, int write,
2363     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2364     index a47339b156ce7..6786c507f1f98 100644
2365     --- a/kernel/trace/trace.c
2366     +++ b/kernel/trace/trace.c
2367     @@ -3022,13 +3022,14 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
2368     if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2369     return;
2370    
2371     - if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
2372     + if (cpumask_available(iter->started) &&
2373     + cpumask_test_cpu(iter->cpu, iter->started))
2374     return;
2375    
2376     if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2377     return;
2378    
2379     - if (iter->started)
2380     + if (cpumask_available(iter->started))
2381     cpumask_set_cpu(iter->cpu, iter->started);
2382    
2383     /* Don't print started cpu buffer for the first entry of the trace */
2384     diff --git a/mm/mempolicy.c b/mm/mempolicy.c
2385     index e21d9b44247bc..593b74bed59b8 100644
2386     --- a/mm/mempolicy.c
2387     +++ b/mm/mempolicy.c
2388     @@ -1327,7 +1327,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
2389     nodemask_t *nodes)
2390     {
2391     unsigned long copy = ALIGN(maxnode-1, 64) / 8;
2392     - const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
2393     + unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
2394    
2395     if (copy > nbytes) {
2396     if (copy > PAGE_SIZE)
2397     @@ -1488,7 +1488,7 @@ SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
2398     int uninitialized_var(pval);
2399     nodemask_t nodes;
2400    
2401     - if (nmask != NULL && maxnode < MAX_NUMNODES)
2402     + if (nmask != NULL && maxnode < nr_node_ids)
2403     return -EINVAL;
2404    
2405     err = do_get_mempolicy(&pval, &nodes, addr, flags);
2406     @@ -1517,7 +1517,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
2407     unsigned long nr_bits, alloc_size;
2408     DECLARE_BITMAP(bm, MAX_NUMNODES);
2409    
2410     - nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
2411     + nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
2412     alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
2413    
2414     if (nmask)
2415     diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
2416     index d3548c48369f0..cf15851a7d2fb 100644
2417     --- a/mm/zsmalloc.c
2418     +++ b/mm/zsmalloc.c
2419     @@ -473,7 +473,7 @@ static bool is_zspage_isolated(struct zspage *zspage)
2420     return zspage->isolated;
2421     }
2422    
2423     -static int is_first_page(struct page *page)
2424     +static __maybe_unused int is_first_page(struct page *page)
2425     {
2426     return PagePrivate(page);
2427     }
2428     @@ -558,20 +558,23 @@ static int get_size_class_index(int size)
2429     return min(zs_size_classes - 1, idx);
2430     }
2431    
2432     +/* type can be of enum type zs_stat_type or fullness_group */
2433     static inline void zs_stat_inc(struct size_class *class,
2434     - enum zs_stat_type type, unsigned long cnt)
2435     + int type, unsigned long cnt)
2436     {
2437     class->stats.objs[type] += cnt;
2438     }
2439    
2440     +/* type can be of enum type zs_stat_type or fullness_group */
2441     static inline void zs_stat_dec(struct size_class *class,
2442     - enum zs_stat_type type, unsigned long cnt)
2443     + int type, unsigned long cnt)
2444     {
2445     class->stats.objs[type] -= cnt;
2446     }
2447    
2448     +/* type can be of enum type zs_stat_type or fullness_group */
2449     static inline unsigned long zs_stat_get(struct size_class *class,
2450     - enum zs_stat_type type)
2451     + int type)
2452     {
2453     return class->stats.objs[type];
2454     }
2455     diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
2456     index 835af771a9fd1..a92512a46e91e 100644
2457     --- a/net/batman-adv/soft-interface.c
2458     +++ b/net/batman-adv/soft-interface.c
2459     @@ -217,6 +217,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
2460    
2461     switch (ntohs(ethhdr->h_proto)) {
2462     case ETH_P_8021Q:
2463     + if (!pskb_may_pull(skb, sizeof(*vhdr)))
2464     + goto dropped;
2465     vhdr = vlan_eth_hdr(skb);
2466    
2467     /* drop batman-in-batman packets to prevent loops */
2468     diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
2469     index 4bd57507b9a45..2136e45f52777 100644
2470     --- a/net/bridge/br_multicast.c
2471     +++ b/net/bridge/br_multicast.c
2472     @@ -1287,14 +1287,7 @@ static void br_multicast_query_received(struct net_bridge *br,
2473     return;
2474    
2475     br_multicast_update_query_timer(br, query, max_delay);
2476     -
2477     - /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
2478     - * the arrival port for IGMP Queries where the source address
2479     - * is 0.0.0.0 should not be added to router port list.
2480     - */
2481     - if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
2482     - saddr->proto == htons(ETH_P_IPV6))
2483     - br_multicast_mark_router(br, port);
2484     + br_multicast_mark_router(br, port);
2485     }
2486    
2487     static int br_ip4_multicast_query(struct net_bridge *br,
2488     diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
2489     index 93eb606f76282..7e27cabb04ef9 100644
2490     --- a/net/ceph/messenger.c
2491     +++ b/net/ceph/messenger.c
2492     @@ -2042,6 +2042,8 @@ static int process_connect(struct ceph_connection *con)
2493     dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
2494    
2495     if (con->auth) {
2496     + int len = le32_to_cpu(con->in_reply.authorizer_len);
2497     +
2498     /*
2499     * Any connection that defines ->get_authorizer()
2500     * should also define ->add_authorizer_challenge() and
2501     @@ -2051,8 +2053,7 @@ static int process_connect(struct ceph_connection *con)
2502     */
2503     if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
2504     ret = con->ops->add_authorizer_challenge(
2505     - con, con->auth->authorizer_reply_buf,
2506     - le32_to_cpu(con->in_reply.authorizer_len));
2507     + con, con->auth->authorizer_reply_buf, len);
2508     if (ret < 0)
2509     return ret;
2510    
2511     @@ -2062,10 +2063,12 @@ static int process_connect(struct ceph_connection *con)
2512     return 0;
2513     }
2514    
2515     - ret = con->ops->verify_authorizer_reply(con);
2516     - if (ret < 0) {
2517     - con->error_msg = "bad authorize reply";
2518     - return ret;
2519     + if (len) {
2520     + ret = con->ops->verify_authorizer_reply(con);
2521     + if (ret < 0) {
2522     + con->error_msg = "bad authorize reply";
2523     + return ret;
2524     + }
2525     }
2526     }
2527    
2528     diff --git a/net/core/netpoll.c b/net/core/netpoll.c
2529     index 457f882b0f7ba..9b2d61120c0d7 100644
2530     --- a/net/core/netpoll.c
2531     +++ b/net/core/netpoll.c
2532     @@ -666,7 +666,7 @@ int netpoll_setup(struct netpoll *np)
2533     int err;
2534    
2535     rtnl_lock();
2536     - if (np->dev_name) {
2537     + if (np->dev_name[0]) {
2538     struct net *net = current->nsproxy->net_ns;
2539     ndev = __dev_get_by_name(net, np->dev_name);
2540     }
2541     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
2542     index fc7ca1e469081..4381ea53fa91d 100644
2543     --- a/net/ipv6/sit.c
2544     +++ b/net/ipv6/sit.c
2545     @@ -540,7 +540,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
2546     }
2547    
2548     err = 0;
2549     - if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
2550     + if (__in6_dev_get(skb->dev) &&
2551     + !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
2552     goto out;
2553    
2554     if (t->parms.iph.daddr == 0)
2555     diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
2556     index f0e6175a9821f..197753ad50b4e 100644
2557     --- a/net/mac80211/mesh_pathtbl.c
2558     +++ b/net/mac80211/mesh_pathtbl.c
2559     @@ -449,17 +449,15 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
2560    
2561     } while (unlikely(ret == -EEXIST && !mpath));
2562    
2563     - if (ret && ret != -EEXIST)
2564     - return ERR_PTR(ret);
2565     -
2566     - /* At this point either new_mpath was added, or we found a
2567     - * matching entry already in the table; in the latter case
2568     - * free the unnecessary new entry.
2569     - */
2570     - if (ret == -EEXIST) {
2571     + if (ret) {
2572     kfree(new_mpath);
2573     +
2574     + if (ret != -EEXIST)
2575     + return ERR_PTR(ret);
2576     +
2577     new_mpath = mpath;
2578     }
2579     +
2580     sdata->u.mesh.mesh_paths_generation++;
2581     return new_mpath;
2582     }
2583     @@ -489,6 +487,9 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
2584     &new_mpath->rhash,
2585     mesh_rht_params);
2586    
2587     + if (ret)
2588     + kfree(new_mpath);
2589     +
2590     sdata->u.mesh.mpp_paths_generation++;
2591     return ret;
2592     }
2593     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
2594     index a3fb30f5a1a95..2fa1c4f2e94e0 100644
2595     --- a/net/netfilter/nf_tables_api.c
2596     +++ b/net/netfilter/nf_tables_api.c
2597     @@ -263,6 +263,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
2598     int err;
2599    
2600     list_for_each_entry(rule, &ctx->chain->rules, list) {
2601     + if (!nft_is_active_next(ctx->net, rule))
2602     + continue;
2603     +
2604     err = nft_delrule(ctx, rule);
2605     if (err < 0)
2606     return err;
2607     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2608     index 82e222cd48454..14df2fcf61384 100644
2609     --- a/net/packet/af_packet.c
2610     +++ b/net/packet/af_packet.c
2611     @@ -4316,7 +4316,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2612     rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
2613     if (unlikely(rb->frames_per_block == 0))
2614     goto out;
2615     - if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
2616     + if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
2617     goto out;
2618     if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
2619     req->tp_frame_nr))
2620     diff --git a/net/sctp/offload.c b/net/sctp/offload.c
2621     index 6300f28c95888..31b9a12fc35a1 100644
2622     --- a/net/sctp/offload.c
2623     +++ b/net/sctp/offload.c
2624     @@ -35,6 +35,7 @@
2625     static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
2626     {
2627     skb->ip_summed = CHECKSUM_NONE;
2628     + gso_reset_checksum(skb, ~0);
2629     return sctp_compute_cksum(skb, skb_transport_offset(skb));
2630     }
2631    
2632     diff --git a/security/keys/key.c b/security/keys/key.c
2633     index 7dc59069e8c76..7276d1a009d49 100644
2634     --- a/security/keys/key.c
2635     +++ b/security/keys/key.c
2636     @@ -264,8 +264,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
2637    
2638     spin_lock(&user->lock);
2639     if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
2640     - if (user->qnkeys + 1 >= maxkeys ||
2641     - user->qnbytes + quotalen >= maxbytes ||
2642     + if (user->qnkeys + 1 > maxkeys ||
2643     + user->qnbytes + quotalen > maxbytes ||
2644     user->qnbytes + quotalen < user->qnbytes)
2645     goto no_quota;
2646     }
2647     diff --git a/security/keys/keyring.c b/security/keys/keyring.c
2648     index 4e9b4d23e20ef..7308067dcc5d8 100644
2649     --- a/security/keys/keyring.c
2650     +++ b/security/keys/keyring.c
2651     @@ -652,9 +652,6 @@ static bool search_nested_keyrings(struct key *keyring,
2652     BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
2653     (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
2654    
2655     - if (ctx->index_key.description)
2656     - ctx->index_key.desc_len = strlen(ctx->index_key.description);
2657     -
2658     /* Check to see if this top-level keyring is what we are looking for
2659     * and whether it is valid or not.
2660     */
2661     @@ -912,6 +909,7 @@ key_ref_t keyring_search(key_ref_t keyring,
2662     struct keyring_search_context ctx = {
2663     .index_key.type = type,
2664     .index_key.description = description,
2665     + .index_key.desc_len = strlen(description),
2666     .cred = current_cred(),
2667     .match_data.cmp = key_default_cmp,
2668     .match_data.raw_data = description,
2669     diff --git a/security/keys/proc.c b/security/keys/proc.c
2670     index 0361286824638..ec493ddadd111 100644
2671     --- a/security/keys/proc.c
2672     +++ b/security/keys/proc.c
2673     @@ -186,8 +186,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
2674     int rc;
2675    
2676     struct keyring_search_context ctx = {
2677     - .index_key.type = key->type,
2678     - .index_key.description = key->description,
2679     + .index_key = key->index_key,
2680     .cred = current_cred(),
2681     .match_data.cmp = lookup_user_key_possessed,
2682     .match_data.raw_data = key,
2683     diff --git a/security/keys/request_key.c b/security/keys/request_key.c
2684     index cb7f8f730c6dd..aa292e01c5621 100644
2685     --- a/security/keys/request_key.c
2686     +++ b/security/keys/request_key.c
2687     @@ -544,6 +544,7 @@ struct key *request_key_and_link(struct key_type *type,
2688     struct keyring_search_context ctx = {
2689     .index_key.type = type,
2690     .index_key.description = description,
2691     + .index_key.desc_len = strlen(description),
2692     .cred = current_cred(),
2693     .match_data.cmp = key_default_cmp,
2694     .match_data.raw_data = description,
2695     diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
2696     index ba74a0b4d1cb6..f60baeb338e5f 100644
2697     --- a/security/keys/request_key_auth.c
2698     +++ b/security/keys/request_key_auth.c
2699     @@ -254,7 +254,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
2700     struct key *authkey;
2701     key_ref_t authkey_ref;
2702    
2703     - sprintf(description, "%x", target_id);
2704     + ctx.index_key.desc_len = sprintf(description, "%x", target_id);
2705    
2706     authkey_ref = search_process_keyrings(&ctx);
2707