Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0114-4.19.15-all-fixes.patch



Revision 3393
Fri Aug 2 11:47:28 2019 UTC by niro
File size: 190531 byte(s)
-linux-4.19.15
1 niro 3393 diff --git a/Makefile b/Makefile
2     index 3324dd0e11a3..0e30d48274fa 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 19
9     -SUBLEVEL = 14
10     +SUBLEVEL = 15
11     EXTRAVERSION =
12     NAME = "People's Front"
13    
14     @@ -482,18 +482,18 @@ endif
15    
16     ifeq ($(cc-name),clang)
17     ifneq ($(CROSS_COMPILE),)
18     -CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
19     +CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
20     GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
21     -CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR)
22     +CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
23     GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
24     endif
25     ifneq ($(GCC_TOOLCHAIN),)
26     -CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
27     +CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
28     endif
29     -KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
30     -KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
31     -KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
32     -KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
33     +CLANG_FLAGS += -no-integrated-as
34     +KBUILD_CFLAGS += $(CLANG_FLAGS)
35     +KBUILD_AFLAGS += $(CLANG_FLAGS)
36     +export CLANG_FLAGS
37     endif
38    
39     RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
40     @@ -954,11 +954,6 @@ ifdef CONFIG_STACK_VALIDATION
41     ifeq ($(has_libelf),1)
42     objtool_target := tools/objtool FORCE
43     else
44     - ifdef CONFIG_UNWINDER_ORC
45     - $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
46     - else
47     - $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
48     - endif
49     SKIP_STACK_VALIDATION := 1
50     export SKIP_STACK_VALIDATION
51     endif
52     @@ -1115,6 +1110,14 @@ uapi-asm-generic:
53    
54     PHONY += prepare-objtool
55     prepare-objtool: $(objtool_target)
56     +ifeq ($(SKIP_STACK_VALIDATION),1)
57     +ifdef CONFIG_UNWINDER_ORC
58     + @echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
59     + @false
60     +else
61     + @echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
62     +endif
63     +endif
64    
65     # Generate some files
66     # ---------------------------------------------------------------------------
67     diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
68     index d8aac4a2d02a..177d21fdeb28 100644
69     --- a/arch/arm/boot/dts/imx7d-nitrogen7.dts
70     +++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
71     @@ -86,13 +86,17 @@
72     compatible = "regulator-fixed";
73     regulator-min-microvolt = <3300000>;
74     regulator-max-microvolt = <3300000>;
75     - clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
76     - clock-names = "slow";
77     regulator-name = "reg_wlan";
78     startup-delay-us = <70000>;
79     gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
80     enable-active-high;
81     };
82     +
83     + usdhc2_pwrseq: usdhc2_pwrseq {
84     + compatible = "mmc-pwrseq-simple";
85     + clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
86     + clock-names = "ext_clock";
87     + };
88     };
89    
90     &adc1 {
91     @@ -375,6 +379,7 @@
92     bus-width = <4>;
93     non-removable;
94     vmmc-supply = <&reg_wlan>;
95     + mmc-pwrseq = <&usdhc2_pwrseq>;
96     cap-power-off-card;
97     keep-power-in-suspend;
98     status = "okay";
99     diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
100     index 21973eb55671..f27b3849d3ff 100644
101     --- a/arch/arm/boot/dts/imx7d-pico.dtsi
102     +++ b/arch/arm/boot/dts/imx7d-pico.dtsi
103     @@ -100,6 +100,19 @@
104     regulator-min-microvolt = <1800000>;
105     regulator-max-microvolt = <1800000>;
106     };
107     +
108     + usdhc2_pwrseq: usdhc2_pwrseq {
109     + compatible = "mmc-pwrseq-simple";
110     + clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
111     + clock-names = "ext_clock";
112     + };
113     +};
114     +
115     +&clks {
116     + assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>,
117     + <&clks IMX7D_CLKO2_ROOT_DIV>;
118     + assigned-clock-parents = <&clks IMX7D_CKIL>;
119     + assigned-clock-rates = <0>, <32768>;
120     };
121    
122     &i2c4 {
123     @@ -199,12 +212,13 @@
124    
125     &usdhc2 { /* Wifi SDIO */
126     pinctrl-names = "default";
127     - pinctrl-0 = <&pinctrl_usdhc2>;
128     + pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>;
129     no-1-8-v;
130     non-removable;
131     keep-power-in-suspend;
132     wakeup-source;
133     vmmc-supply = <&reg_ap6212>;
134     + mmc-pwrseq = <&usdhc2_pwrseq>;
135     status = "okay";
136     };
137    
138     @@ -301,6 +315,12 @@
139     };
140    
141     &iomuxc_lpsr {
142     + pinctrl_wifi_clk: wificlkgrp {
143     + fsl,pins = <
144     + MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2 0x7d
145     + >;
146     + };
147     +
148     pinctrl_wdog: wdoggrp {
149     fsl,pins = <
150     MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B 0x74
151     diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
152     index c7ce4158d6c8..f250b20af493 100644
153     --- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
154     +++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
155     @@ -309,8 +309,8 @@
156    
157     &reg_dldo3 {
158     regulator-always-on;
159     - regulator-min-microvolt = <2500000>;
160     - regulator-max-microvolt = <2500000>;
161     + regulator-min-microvolt = <3300000>;
162     + regulator-max-microvolt = <3300000>;
163     regulator-name = "vcc-pd";
164     };
165    
166     diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
167     index 243a108a940b..fd0053e47a15 100644
168     --- a/arch/arm/mach-imx/cpuidle-imx6sx.c
169     +++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
170     @@ -110,7 +110,7 @@ int __init imx6sx_cpuidle_init(void)
171     * except for power up sw2iso which need to be
172     * larger than LDO ramp up time.
173     */
174     - imx_gpc_set_arm_power_up_timing(2, 1);
175     + imx_gpc_set_arm_power_up_timing(0xf, 1);
176     imx_gpc_set_arm_power_down_timing(1, 1);
177    
178     return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
179     diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
180     index 106039d25e2f..35649ee8ad56 100644
181     --- a/arch/arm64/Makefile
182     +++ b/arch/arm64/Makefile
183     @@ -18,7 +18,7 @@ ifeq ($(CONFIG_RELOCATABLE), y)
184     # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
185     # for relative relocs, since this leads to better Image compression
186     # with the relocation offsets always being zero.
187     -LDFLAGS_vmlinux += -pie -shared -Bsymbolic \
188     +LDFLAGS_vmlinux += -shared -Bsymbolic -z notext -z norelro \
189     $(call ld-option, --no-apply-dynamic-relocs)
190     endif
191    
192     diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
193     index a747b7bf132d..387be39d40cd 100644
194     --- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
195     +++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
196     @@ -17,8 +17,13 @@
197     model = "MediaTek MT7622 RFB1 board";
198     compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
199    
200     + aliases {
201     + serial0 = &uart0;
202     + };
203     +
204     chosen {
205     - bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
206     + stdout-path = "serial0:115200n8";
207     + bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
208     };
209    
210     cpus {
211     diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
212     index a820ed07fb80..8da289dc843a 100644
213     --- a/arch/arm64/kernel/image.h
214     +++ b/arch/arm64/kernel/image.h
215     @@ -75,16 +75,6 @@
216    
217     __efistub_stext_offset = stext - _text;
218    
219     -/*
220     - * Prevent the symbol aliases below from being emitted into the kallsyms
221     - * table, by forcing them to be absolute symbols (which are conveniently
222     - * ignored by scripts/kallsyms) rather than section relative symbols.
223     - * The distinction is only relevant for partial linking, and only for symbols
224     - * that are defined within a section declaration (which is not the case for
225     - * the definitions below) so the resulting values will be identical.
226     - */
227     -#define KALLSYMS_HIDE(sym) ABSOLUTE(sym)
228     -
229     /*
230     * The EFI stub has its own symbol namespace prefixed by __efistub_, to
231     * isolate it from the kernel proper. The following symbols are legally
232     @@ -94,28 +84,28 @@ __efistub_stext_offset = stext - _text;
233     * linked at. The routines below are all implemented in assembler in a
234     * position independent manner
235     */
236     -__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp);
237     -__efistub_memchr = KALLSYMS_HIDE(__pi_memchr);
238     -__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
239     -__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
240     -__efistub_memset = KALLSYMS_HIDE(__pi_memset);
241     -__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
242     -__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen);
243     -__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
244     -__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
245     -__efistub_strrchr = KALLSYMS_HIDE(__pi_strrchr);
246     -__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
247     +__efistub_memcmp = __pi_memcmp;
248     +__efistub_memchr = __pi_memchr;
249     +__efistub_memcpy = __pi_memcpy;
250     +__efistub_memmove = __pi_memmove;
251     +__efistub_memset = __pi_memset;
252     +__efistub_strlen = __pi_strlen;
253     +__efistub_strnlen = __pi_strnlen;
254     +__efistub_strcmp = __pi_strcmp;
255     +__efistub_strncmp = __pi_strncmp;
256     +__efistub_strrchr = __pi_strrchr;
257     +__efistub___flush_dcache_area = __pi___flush_dcache_area;
258    
259     #ifdef CONFIG_KASAN
260     -__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy);
261     -__efistub___memmove = KALLSYMS_HIDE(__pi_memmove);
262     -__efistub___memset = KALLSYMS_HIDE(__pi_memset);
263     +__efistub___memcpy = __pi_memcpy;
264     +__efistub___memmove = __pi_memmove;
265     +__efistub___memset = __pi_memset;
266     #endif
267    
268     -__efistub__text = KALLSYMS_HIDE(_text);
269     -__efistub__end = KALLSYMS_HIDE(_end);
270     -__efistub__edata = KALLSYMS_HIDE(_edata);
271     -__efistub_screen_info = KALLSYMS_HIDE(screen_info);
272     +__efistub__text = _text;
273     +__efistub__end = _end;
274     +__efistub__edata = _edata;
275     +__efistub_screen_info = screen_info;
276    
277     #endif
278    
279     diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
280     index 605d1b60469c..74e469f8a850 100644
281     --- a/arch/arm64/kernel/vmlinux.lds.S
282     +++ b/arch/arm64/kernel/vmlinux.lds.S
283     @@ -99,7 +99,8 @@ SECTIONS
284     *(.discard)
285     *(.discard.*)
286     *(.interp .dynamic)
287     - *(.dynsym .dynstr .hash)
288     + *(.dynsym .dynstr .hash .gnu.hash)
289     + *(.eh_frame)
290     }
291    
292     . = KIMAGE_VADDR + TEXT_OFFSET;
293     @@ -176,12 +177,12 @@ SECTIONS
294    
295     PERCPU_SECTION(L1_CACHE_BYTES)
296    
297     - .rela : ALIGN(8) {
298     + .rela.dyn : ALIGN(8) {
299     *(.rela .rela*)
300     }
301    
302     - __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
303     - __rela_size = SIZEOF(.rela);
304     + __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
305     + __rela_size = SIZEOF(.rela.dyn);
306    
307     . = ALIGN(SEGMENT_ALIGN);
308     __initdata_end = .;
309     diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
310     index 5d3596c180f9..de44899c0e61 100644
311     --- a/arch/m68k/kernel/setup_mm.c
312     +++ b/arch/m68k/kernel/setup_mm.c
313     @@ -165,8 +165,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
314     be32_to_cpu(m->addr);
315     m68k_memory[m68k_num_memory].size =
316     be32_to_cpu(m->size);
317     - memblock_add(m68k_memory[m68k_num_memory].addr,
318     - m68k_memory[m68k_num_memory].size);
319     m68k_num_memory++;
320     } else
321     pr_warn("%s: too many memory chunks\n",
322     diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
323     index 4e17ecb5928a..2eb2b31fb16a 100644
324     --- a/arch/m68k/mm/motorola.c
325     +++ b/arch/m68k/mm/motorola.c
326     @@ -228,6 +228,7 @@ void __init paging_init(void)
327    
328     min_addr = m68k_memory[0].addr;
329     max_addr = min_addr + m68k_memory[0].size;
330     + memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
331     for (i = 1; i < m68k_num_memory;) {
332     if (m68k_memory[i].addr < min_addr) {
333     printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
334     @@ -238,6 +239,7 @@ void __init paging_init(void)
335     (m68k_num_memory - i) * sizeof(struct m68k_mem_info));
336     continue;
337     }
338     + memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
339     addr = m68k_memory[i].addr + m68k_memory[i].size;
340     if (addr > max_addr)
341     max_addr = addr;
342     diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
343     index d2824b0cc142..c4c03992ee82 100644
344     --- a/arch/powerpc/Makefile
345     +++ b/arch/powerpc/Makefile
346     @@ -160,8 +160,17 @@ else
347     CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
348     endif
349    
350     +ifdef CONFIG_FUNCTION_TRACER
351     +CC_FLAGS_FTRACE := -pg
352     ifdef CONFIG_MPROFILE_KERNEL
353     - CC_FLAGS_FTRACE := -pg -mprofile-kernel
354     +CC_FLAGS_FTRACE += -mprofile-kernel
355     +endif
356     +# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8
357     +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199
358     +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828
359     +ifneq ($(cc-name),clang)
360     +CC_FLAGS_FTRACE += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog)
361     +endif
362     endif
363    
364     CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
365     @@ -229,11 +238,6 @@ ifdef CONFIG_6xx
366     KBUILD_CFLAGS += -mcpu=powerpc
367     endif
368    
369     -# Work around a gcc code-gen bug with -fno-omit-frame-pointer.
370     -ifdef CONFIG_FUNCTION_TRACER
371     -KBUILD_CFLAGS += -mno-sched-epilog
372     -endif
373     -
374     cpu-as-$(CONFIG_4xx) += -Wa,-m405
375     cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
376     cpu-as-$(CONFIG_E200) += -Wa,-me200
377     @@ -408,36 +412,9 @@ archprepare: checkbin
378     # to stdout and these checks are run even on install targets.
379     TOUT := .tmp_gas_check
380    
381     -# Check gcc and binutils versions:
382     -# - gcc-3.4 and binutils-2.14 are a fatal combination
383     -# - Require gcc 4.0 or above on 64-bit
384     -# - gcc-4.2.0 has issues compiling modules on 64-bit
385     +# Check toolchain versions:
386     +# - gcc-4.6 is the minimum kernel-wide version so nothing required.
387     checkbin:
388     - @if test "$(cc-name)" != "clang" \
389     - && test "$(cc-version)" = "0304" ; then \
390     - if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
391     - echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
392     - echo 'correctly with gcc-3.4 and your version of binutils.'; \
393     - echo '*** Please upgrade your binutils or downgrade your gcc'; \
394     - false; \
395     - fi ; \
396     - fi
397     - @if test "$(cc-name)" != "clang" \
398     - && test "$(cc-version)" -lt "0400" \
399     - && test "x${CONFIG_PPC64}" = "xy" ; then \
400     - echo -n "Sorry, GCC v4.0 or above is required to build " ; \
401     - echo "the 64-bit powerpc kernel." ; \
402     - false ; \
403     - fi
404     - @if test "$(cc-name)" != "clang" \
405     - && test "$(cc-fullversion)" = "040200" \
406     - && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \
407     - echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
408     - echo 'kernel with modules enabled.' ; \
409     - echo -n '*** Please use a different GCC version or ' ; \
410     - echo 'disable kernel modules' ; \
411     - false ; \
412     - fi
413     @if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \
414     && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \
415     echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \
416     diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
417     index 0fb96c26136f..25e3184f11f7 100644
418     --- a/arch/powerpc/boot/Makefile
419     +++ b/arch/powerpc/boot/Makefile
420     @@ -55,6 +55,11 @@ BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
421    
422     BOOTARFLAGS := -cr$(KBUILD_ARFLAGS)
423    
424     +ifdef CONFIG_CC_IS_CLANG
425     +BOOTCFLAGS += $(CLANG_FLAGS)
426     +BOOTAFLAGS += $(CLANG_FLAGS)
427     +endif
428     +
429     ifdef CONFIG_DEBUG_INFO
430     BOOTCFLAGS += -g
431     endif
432     diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
433     index 32dfe6d083f3..9b9d17437373 100644
434     --- a/arch/powerpc/boot/crt0.S
435     +++ b/arch/powerpc/boot/crt0.S
436     @@ -15,7 +15,7 @@
437     RELA = 7
438     RELACOUNT = 0x6ffffff9
439    
440     - .text
441     + .data
442     /* A procedure descriptor used when booting this as a COFF file.
443     * When making COFF, this comes first in the link and we're
444     * linked at 0x500000.
445     @@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9
446     .globl _zimage_start_opd
447     _zimage_start_opd:
448     .long 0x500000, 0, 0, 0
449     + .text
450     + b _zimage_start
451    
452     #ifdef __powerpc64__
453     .balign 8
454     diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
455     index 3b66f2c19c84..eac18790d1b1 100644
456     --- a/arch/powerpc/kernel/Makefile
457     +++ b/arch/powerpc/kernel/Makefile
458     @@ -5,6 +5,9 @@
459    
460     CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
461    
462     +# Disable clang warning for using setjmp without setjmp.h header
463     +CFLAGS_crash.o += $(call cc-disable-warning, builtin-requires-header)
464     +
465     subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
466    
467     ifdef CONFIG_PPC64
468     @@ -22,10 +25,10 @@ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
469    
470     ifdef CONFIG_FUNCTION_TRACER
471     # Do not trace early boot code
472     -CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
473     -CFLAGS_REMOVE_prom_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
474     -CFLAGS_REMOVE_btext.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
475     -CFLAGS_REMOVE_prom.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
476     +CFLAGS_REMOVE_cputable.o = $(CC_FLAGS_FTRACE)
477     +CFLAGS_REMOVE_prom_init.o = $(CC_FLAGS_FTRACE)
478     +CFLAGS_REMOVE_btext.o = $(CC_FLAGS_FTRACE)
479     +CFLAGS_REMOVE_prom.o = $(CC_FLAGS_FTRACE)
480     endif
481    
482     obj-y := cputable.o ptrace.o syscalls.o \
483     diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
484     index 6327fd79b0fb..fd59fef9931b 100644
485     --- a/arch/powerpc/kernel/signal_32.c
486     +++ b/arch/powerpc/kernel/signal_32.c
487     @@ -848,7 +848,23 @@ static long restore_tm_user_regs(struct pt_regs *regs,
488     /* If TM bits are set to the reserved value, it's an invalid context */
489     if (MSR_TM_RESV(msr_hi))
490     return 1;
491     - /* Pull in the MSR TM bits from the user context */
492     +
493     + /*
494     + * Disabling preemption, since it is unsafe to be preempted
495     + * with MSR[TS] set without recheckpointing.
496     + */
497     + preempt_disable();
498     +
499     + /*
500     + * CAUTION:
501     + * After regs->MSR[TS] being updated, make sure that get_user(),
502     + * put_user() or similar functions are *not* called. These
503     + * functions can generate page faults which will cause the process
504     + * to be de-scheduled with MSR[TS] set but without calling
505     + * tm_recheckpoint(). This can cause a bug.
506     + *
507     + * Pull in the MSR TM bits from the user context
508     + */
509     regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
510     /* Now, recheckpoint. This loads up all of the checkpointed (older)
511     * registers, including FP and V[S]Rs. After recheckpointing, the
512     @@ -873,6 +889,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
513     }
514     #endif
515    
516     + preempt_enable();
517     +
518     return 0;
519     }
520     #endif
521     @@ -1140,11 +1158,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
522     {
523     struct rt_sigframe __user *rt_sf;
524     struct pt_regs *regs = current_pt_regs();
525     - int tm_restore = 0;
526     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
527     struct ucontext __user *uc_transact;
528     unsigned long msr_hi;
529     unsigned long tmp;
530     + int tm_restore = 0;
531     #endif
532     /* Always make any pending restarted system calls return -EINTR */
533     current->restart_block.fn = do_no_restart_syscall;
534     @@ -1192,19 +1210,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
535     goto bad;
536     }
537     }
538     - if (!tm_restore) {
539     - /*
540     - * Unset regs->msr because ucontext MSR TS is not
541     - * set, and recheckpoint was not called. This avoid
542     - * hitting a TM Bad thing at RFID
543     - */
544     - regs->msr &= ~MSR_TS_MASK;
545     - }
546     - /* Fall through, for non-TM restore */
547     -#endif
548     if (!tm_restore)
549     - if (do_setcontext(&rt_sf->uc, regs, 1))
550     - goto bad;
551     + /* Fall through, for non-TM restore */
552     +#endif
553     + if (do_setcontext(&rt_sf->uc, regs, 1))
554     + goto bad;
555    
556     /*
557     * It's not clear whether or why it is desirable to save the
558     diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
559     index daa28cb72272..bbd1c73243d7 100644
560     --- a/arch/powerpc/kernel/signal_64.c
561     +++ b/arch/powerpc/kernel/signal_64.c
562     @@ -467,20 +467,6 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
563     if (MSR_TM_RESV(msr))
564     return -EINVAL;
565    
566     - /* pull in MSR TS bits from user context */
567     - regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
568     -
569     - /*
570     - * Ensure that TM is enabled in regs->msr before we leave the signal
571     - * handler. It could be the case that (a) user disabled the TM bit
572     - * through the manipulation of the MSR bits in uc_mcontext or (b) the
573     - * TM bit was disabled because a sufficient number of context switches
574     - * happened whilst in the signal handler and load_tm overflowed,
575     - * disabling the TM bit. In either case we can end up with an illegal
576     - * TM state leading to a TM Bad Thing when we return to userspace.
577     - */
578     - regs->msr |= MSR_TM;
579     -
580     /* pull in MSR LE from user context */
581     regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
582    
583     @@ -572,6 +558,34 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
584     tm_enable();
585     /* Make sure the transaction is marked as failed */
586     tsk->thread.tm_texasr |= TEXASR_FS;
587     +
588     + /*
589     + * Disabling preemption, since it is unsafe to be preempted
590     + * with MSR[TS] set without recheckpointing.
591     + */
592     + preempt_disable();
593     +
594     + /* pull in MSR TS bits from user context */
595     + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
596     +
597     + /*
598     + * Ensure that TM is enabled in regs->msr before we leave the signal
599     + * handler. It could be the case that (a) user disabled the TM bit
600     + * through the manipulation of the MSR bits in uc_mcontext or (b) the
601     + * TM bit was disabled because a sufficient number of context switches
602     + * happened whilst in the signal handler and load_tm overflowed,
603     + * disabling the TM bit. In either case we can end up with an illegal
604     + * TM state leading to a TM Bad Thing when we return to userspace.
605     + *
606     + * CAUTION:
607     + * After regs->MSR[TS] being updated, make sure that get_user(),
608     + * put_user() or similar functions are *not* called. These
609     + * functions can generate page faults which will cause the process
610     + * to be de-scheduled with MSR[TS] set but without calling
611     + * tm_recheckpoint(). This can cause a bug.
612     + */
613     + regs->msr |= MSR_TM;
614     +
615     /* This loads the checkpointed FP/VEC state, if used */
616     tm_recheckpoint(&tsk->thread);
617    
618     @@ -585,6 +599,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
619     regs->msr |= MSR_VEC;
620     }
621    
622     + preempt_enable();
623     +
624     return err;
625     }
626     #endif
627     @@ -740,23 +756,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
628     &uc_transact->uc_mcontext))
629     goto badframe;
630     }
631     -#endif
632     + else
633     /* Fall through, for non-TM restore */
634     - if (!MSR_TM_ACTIVE(msr)) {
635     - /*
636     - * Unset MSR[TS] on the thread regs since MSR from user
637     - * context does not have MSR active, and recheckpoint was
638     - * not called since restore_tm_sigcontexts() was not called
639     - * also.
640     - *
641     - * If not unsetting it, the code can RFID to userspace with
642     - * MSR[TS] set, but without CPU in the proper state,
643     - * causing a TM bad thing.
644     - */
645     - current->thread.regs->msr &= ~MSR_TS_MASK;
646     - if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
647     - goto badframe;
648     - }
649     +#endif
650     + if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
651     + goto badframe;
652    
653     if (restore_altstack(&uc->uc_stack))
654     goto badframe;
655     diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile
656     index d22d8bafb643..d868ba42032f 100644
657     --- a/arch/powerpc/kernel/trace/Makefile
658     +++ b/arch/powerpc/kernel/trace/Makefile
659     @@ -7,7 +7,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
660    
661     ifdef CONFIG_FUNCTION_TRACER
662     # do not trace tracer code
663     -CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
664     +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
665     endif
666    
667     obj32-$(CONFIG_FUNCTION_TRACER) += ftrace_32.o
668     diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
669     index bdf33b989f98..8464c2c01c0c 100644
670     --- a/arch/powerpc/mm/dump_linuxpagetables.c
671     +++ b/arch/powerpc/mm/dump_linuxpagetables.c
672     @@ -19,6 +19,7 @@
673     #include <linux/hugetlb.h>
674     #include <linux/io.h>
675     #include <linux/mm.h>
676     +#include <linux/highmem.h>
677     #include <linux/sched.h>
678     #include <linux/seq_file.h>
679     #include <asm/fixmap.h>
680     diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
681     index 7a9886f98b0c..a5091c034747 100644
682     --- a/arch/powerpc/mm/init_64.c
683     +++ b/arch/powerpc/mm/init_64.c
684     @@ -188,15 +188,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
685     pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
686    
687     for (; start < end; start += page_size) {
688     - void *p;
689     + void *p = NULL;
690     int rc;
691    
692     if (vmemmap_populated(start, page_size))
693     continue;
694    
695     + /*
696     + * Allocate from the altmap first if we have one. This may
697     + * fail due to alignment issues when using 16MB hugepages, so
698     + * fall back to system memory if the altmap allocation fail.
699     + */
700     if (altmap)
701     p = altmap_alloc_block_buf(page_size, altmap);
702     - else
703     + if (!p)
704     p = vmemmap_alloc_block_buf(page_size, node);
705     if (!p)
706     return -ENOMEM;
707     @@ -255,8 +260,15 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
708     {
709     unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
710     unsigned long page_order = get_order(page_size);
711     + unsigned long alt_start = ~0, alt_end = ~0;
712     + unsigned long base_pfn;
713    
714     start = _ALIGN_DOWN(start, page_size);
715     + if (altmap) {
716     + alt_start = altmap->base_pfn;
717     + alt_end = altmap->base_pfn + altmap->reserve +
718     + altmap->free + altmap->alloc + altmap->align;
719     + }
720    
721     pr_debug("vmemmap_free %lx...%lx\n", start, end);
722    
723     @@ -280,8 +292,9 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
724     page = pfn_to_page(addr >> PAGE_SHIFT);
725     section_base = pfn_to_page(vmemmap_section_start(start));
726     nr_pages = 1 << page_order;
727     + base_pfn = PHYS_PFN(addr);
728    
729     - if (altmap) {
730     + if (base_pfn >= alt_start && base_pfn < alt_end) {
731     vmem_altmap_free(altmap, nr_pages);
732     } else if (PageReserved(page)) {
733     /* allocated from bootmem */
734     diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
735     index f2839eed0f89..561a67d65e4d 100644
736     --- a/arch/powerpc/platforms/powermac/Makefile
737     +++ b/arch/powerpc/platforms/powermac/Makefile
738     @@ -3,7 +3,7 @@ CFLAGS_bootx_init.o += -fPIC
739    
740     ifdef CONFIG_FUNCTION_TRACER
741     # Do not trace early boot code
742     -CFLAGS_REMOVE_bootx_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
743     +CFLAGS_REMOVE_bootx_init.o = $(CC_FLAGS_FTRACE)
744     endif
745    
746     obj-y += pic.o setup.o time.o feature.o pci.o \
747     diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
748     index 1bc3abb237cd..9d7d8e6d705c 100644
749     --- a/arch/powerpc/xmon/Makefile
750     +++ b/arch/powerpc/xmon/Makefile
751     @@ -1,14 +1,17 @@
752     # SPDX-License-Identifier: GPL-2.0
753     # Makefile for xmon
754    
755     -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
756     +# Disable clang warning for using setjmp without setjmp.h header
757     +subdir-ccflags-y := $(call cc-disable-warning, builtin-requires-header)
758     +
759     +subdir-ccflags-$(CONFIG_PPC_WERROR) += -Werror
760    
761     GCOV_PROFILE := n
762     UBSAN_SANITIZE := n
763    
764     # Disable ftrace for the entire directory
765     ORIG_CFLAGS := $(KBUILD_CFLAGS)
766     -KBUILD_CFLAGS = $(subst -mno-sched-epilog,,$(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)))
767     +KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
768    
769     ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
770    
771     diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
772     index 84bd9bdc1987..88bca456da99 100644
773     --- a/arch/x86/include/asm/pgtable_64_types.h
774     +++ b/arch/x86/include/asm/pgtable_64_types.h
775     @@ -111,6 +111,11 @@ extern unsigned int ptrs_per_p4d;
776     */
777     #define MAXMEM (1UL << MAX_PHYSMEM_BITS)
778    
779     +#define GUARD_HOLE_PGD_ENTRY -256UL
780     +#define GUARD_HOLE_SIZE (16UL << PGDIR_SHIFT)
781     +#define GUARD_HOLE_BASE_ADDR (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
782     +#define GUARD_HOLE_END_ADDR (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
783     +
784     #define LDT_PGD_ENTRY -240UL
785     #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
786     #define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)
787     diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
788     index a12afff146d1..c05a818224bb 100644
789     --- a/arch/x86/mm/dump_pagetables.c
790     +++ b/arch/x86/mm/dump_pagetables.c
791     @@ -53,10 +53,10 @@ struct addr_marker {
792     enum address_markers_idx {
793     USER_SPACE_NR = 0,
794     KERNEL_SPACE_NR,
795     - LOW_KERNEL_NR,
796     -#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
797     +#ifdef CONFIG_MODIFY_LDT_SYSCALL
798     LDT_NR,
799     #endif
800     + LOW_KERNEL_NR,
801     VMALLOC_START_NR,
802     VMEMMAP_START_NR,
803     #ifdef CONFIG_KASAN
804     @@ -64,9 +64,6 @@ enum address_markers_idx {
805     KASAN_SHADOW_END_NR,
806     #endif
807     CPU_ENTRY_AREA_NR,
808     -#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
809     - LDT_NR,
810     -#endif
811     #ifdef CONFIG_X86_ESPFIX64
812     ESPFIX_START_NR,
813     #endif
814     @@ -493,11 +490,11 @@ static inline bool is_hypervisor_range(int idx)
815     {
816     #ifdef CONFIG_X86_64
817     /*
818     - * ffff800000000000 - ffff87ffffffffff is reserved for
819     - * the hypervisor.
820     + * A hole in the beginning of kernel address space reserved
821     + * for a hypervisor.
822     */
823     - return (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
824     - (idx < pgd_index(__PAGE_OFFSET));
825     + return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
826     + (idx < pgd_index(GUARD_HOLE_END_ADDR));
827     #else
828     return false;
829     #endif
830     diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
831     index 2c84c6ad8b50..c8f011e07a15 100644
832     --- a/arch/x86/xen/mmu_pv.c
833     +++ b/arch/x86/xen/mmu_pv.c
834     @@ -640,19 +640,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
835     unsigned long limit)
836     {
837     int i, nr, flush = 0;
838     - unsigned hole_low, hole_high;
839     + unsigned hole_low = 0, hole_high = 0;
840    
841     /* The limit is the last byte to be touched */
842     limit--;
843     BUG_ON(limit >= FIXADDR_TOP);
844    
845     +#ifdef CONFIG_X86_64
846     /*
847     * 64-bit has a great big hole in the middle of the address
848     - * space, which contains the Xen mappings. On 32-bit these
849     - * will end up making a zero-sized hole and so is a no-op.
850     + * space, which contains the Xen mappings.
851     */
852     - hole_low = pgd_index(USER_LIMIT);
853     - hole_high = pgd_index(PAGE_OFFSET);
854     + hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
855     + hole_high = pgd_index(GUARD_HOLE_END_ADDR);
856     +#endif
857    
858     nr = pgd_index(limit) + 1;
859     for (i = 0; i < nr; i++) {
860     diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
861     index 29bfe8017a2d..da1de190a3b1 100644
862     --- a/block/blk-mq-sched.c
863     +++ b/block/blk-mq-sched.c
864     @@ -54,13 +54,14 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
865     * Mark a hardware queue as needing a restart. For shared queues, maintain
866     * a count of how many hardware queues are marked for restart.
867     */
868     -static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
869     +void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
870     {
871     if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
872     return;
873    
874     set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
875     }
876     +EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
877    
878     void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
879     {
880     diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
881     index 4e028ee42430..fe660764b8d1 100644
882     --- a/block/blk-mq-sched.h
883     +++ b/block/blk-mq-sched.h
884     @@ -15,6 +15,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
885     struct request **merged_request);
886     bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
887     bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
888     +void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
889     void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
890    
891     void blk_mq_sched_insert_request(struct request *rq, bool at_head,
892     diff --git a/block/blk-stat.h b/block/blk-stat.h
893     index f4a1568e81a4..17b47a86eefb 100644
894     --- a/block/blk-stat.h
895     +++ b/block/blk-stat.h
896     @@ -145,6 +145,11 @@ static inline void blk_stat_activate_nsecs(struct blk_stat_callback *cb,
897     mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs));
898     }
899    
900     +static inline void blk_stat_deactivate(struct blk_stat_callback *cb)
901     +{
902     + del_timer_sync(&cb->timer);
903     +}
904     +
905     /**
906     * blk_stat_activate_msecs() - Gather block statistics during a time window in
907     * milliseconds.
908     diff --git a/block/blk-wbt.c b/block/blk-wbt.c
909     index 8ac93fcbaa2e..0c62bf4eca75 100644
910     --- a/block/blk-wbt.c
911     +++ b/block/blk-wbt.c
912     @@ -760,8 +760,10 @@ void wbt_disable_default(struct request_queue *q)
913     if (!rqos)
914     return;
915     rwb = RQWB(rqos);
916     - if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
917     + if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
918     + blk_stat_deactivate(rwb->cb);
919     rwb->wb_normal = 0;
920     + }
921     }
922     EXPORT_SYMBOL_GPL(wbt_disable_default);
923    
924     diff --git a/block/mq-deadline.c b/block/mq-deadline.c
925     index 099a9e05854c..d5e21ce44d2c 100644
926     --- a/block/mq-deadline.c
927     +++ b/block/mq-deadline.c
928     @@ -373,9 +373,16 @@ done:
929    
930     /*
931     * One confusing aspect here is that we get called for a specific
932     - * hardware queue, but we return a request that may not be for a
933     + * hardware queue, but we may return a request that is for a
934     * different hardware queue. This is because mq-deadline has shared
935     * state for all hardware queues, in terms of sorting, FIFOs, etc.
936     + *
937     + * For a zoned block device, __dd_dispatch_request() may return NULL
938     + * if all the queued write requests are directed at zones that are already
939     + * locked due to on-going write requests. In this case, make sure to mark
940     + * the queue as needing a restart to ensure that the queue is run again
941     + * and the pending writes dispatched once the target zones for the ongoing
942     + * write requests are unlocked in dd_finish_request().
943     */
944     static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
945     {
946     @@ -384,6 +391,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
947    
948     spin_lock(&dd->lock);
949     rq = __dd_dispatch_request(dd);
950     + if (!rq && blk_queue_is_zoned(hctx->queue) &&
951     + !list_empty(&dd->fifo_list[WRITE]))
952     + blk_mq_sched_mark_restart_hctx(hctx);
953     spin_unlock(&dd->lock);
954    
955     return rq;
956     diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
957     index 81c22d20d9d9..60e0b772673f 100644
958     --- a/drivers/auxdisplay/charlcd.c
959     +++ b/drivers/auxdisplay/charlcd.c
960     @@ -538,6 +538,9 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
961     }
962     case 'x': /* gotoxy : LxXXX[yYYY]; */
963     case 'y': /* gotoxy : LyYYY[xXXX]; */
964     + if (priv->esc_seq.buf[priv->esc_seq.len - 1] != ';')
965     + break;
966     +
967     /* If the command is valid, move to the new address */
968     if (parse_xy(esc, &priv->addr.x, &priv->addr.y))
969     charlcd_gotoxy(lcd);
970     diff --git a/drivers/base/dd.c b/drivers/base/dd.c
971     index edfc9f0b1180..2607f859881a 100644
972     --- a/drivers/base/dd.c
973     +++ b/drivers/base/dd.c
974     @@ -931,11 +931,11 @@ static void __device_release_driver(struct device *dev, struct device *parent)
975    
976     while (device_links_busy(dev)) {
977     device_unlock(dev);
978     - if (parent)
979     + if (parent && dev->bus->need_parent_lock)
980     device_unlock(parent);
981    
982     device_links_unbind_consumers(dev);
983     - if (parent)
984     + if (parent && dev->bus->need_parent_lock)
985     device_lock(parent);
986    
987     device_lock(dev);
988     diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
989     index 66921427d109..e19bf0a750cf 100644
990     --- a/drivers/block/zram/zram_drv.c
991     +++ b/drivers/block/zram/zram_drv.c
992     @@ -382,8 +382,10 @@ static ssize_t backing_dev_store(struct device *dev,
993    
994     bdev = bdgrab(I_BDEV(inode));
995     err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
996     - if (err < 0)
997     + if (err < 0) {
998     + bdev = NULL;
999     goto out;
1000     + }
1001    
1002     nr_pages = i_size_read(inode) >> PAGE_SHIFT;
1003     bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
1004     diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
1005     index 99e2aace8078..2c1f459c0c63 100644
1006     --- a/drivers/dax/pmem.c
1007     +++ b/drivers/dax/pmem.c
1008     @@ -48,9 +48,8 @@ static void dax_pmem_percpu_exit(void *data)
1009     percpu_ref_exit(ref);
1010     }
1011    
1012     -static void dax_pmem_percpu_kill(void *data)
1013     +static void dax_pmem_percpu_kill(struct percpu_ref *ref)
1014     {
1015     - struct percpu_ref *ref = data;
1016     struct dax_pmem *dax_pmem = to_dax_pmem(ref);
1017    
1018     dev_dbg(dax_pmem->dev, "trace\n");
1019     @@ -112,17 +111,10 @@ static int dax_pmem_probe(struct device *dev)
1020     }
1021    
1022     dax_pmem->pgmap.ref = &dax_pmem->ref;
1023     + dax_pmem->pgmap.kill = dax_pmem_percpu_kill;
1024     addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
1025     - if (IS_ERR(addr)) {
1026     - devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
1027     - percpu_ref_exit(&dax_pmem->ref);
1028     + if (IS_ERR(addr))
1029     return PTR_ERR(addr);
1030     - }
1031     -
1032     - rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
1033     - &dax_pmem->ref);
1034     - if (rc)
1035     - return rc;
1036    
1037     /* adjust the dax_region resource to the start of data */
1038     memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
1039     diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
1040     index 6e02148c208b..adc768f908f1 100644
1041     --- a/drivers/gpio/gpio-mvebu.c
1042     +++ b/drivers/gpio/gpio-mvebu.c
1043     @@ -773,9 +773,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
1044     "marvell,armada-370-gpio"))
1045     return 0;
1046    
1047     - if (IS_ERR(mvchip->clk))
1048     - return PTR_ERR(mvchip->clk);
1049     -
1050     /*
1051     * There are only two sets of PWM configuration registers for
1052     * all the GPIO lines on those SoCs which this driver reserves
1053     @@ -786,6 +783,9 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
1054     if (!res)
1055     return 0;
1056    
1057     + if (IS_ERR(mvchip->clk))
1058     + return PTR_ERR(mvchip->clk);
1059     +
1060     /*
1061     * Use set A for lines of GPIO chip with id 0, B for GPIO chip
1062     * with id 1. Don't allow further GPIO chips to be used for PWM.
1063     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1064     index b31d121a876b..81001d879322 100644
1065     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1066     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1067     @@ -122,14 +122,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
1068     goto free_chunk;
1069     }
1070    
1071     + mutex_lock(&p->ctx->lock);
1072     +
1073     /* skip guilty context job */
1074     if (atomic_read(&p->ctx->guilty) == 1) {
1075     ret = -ECANCELED;
1076     goto free_chunk;
1077     }
1078    
1079     - mutex_lock(&p->ctx->lock);
1080     -
1081     /* get chunks */
1082     chunk_array_user = u64_to_user_ptr(cs->in.chunks);
1083     if (copy_from_user(chunk_array, chunk_array_user,
1084     diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1085     index 299def84e69c..d792735f1365 100644
1086     --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1087     +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1088     @@ -2894,6 +2894,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
1089     state->underscan_enable = false;
1090     state->underscan_hborder = 0;
1091     state->underscan_vborder = 0;
1092     + state->max_bpc = 8;
1093    
1094     __drm_atomic_helper_connector_reset(connector, &state->base);
1095     }
1096     @@ -2911,6 +2912,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
1097     if (new_state) {
1098     __drm_atomic_helper_connector_duplicate_state(connector,
1099     &new_state->base);
1100     + new_state->max_bpc = state->max_bpc;
1101     return &new_state->base;
1102     }
1103    
1104     diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
1105     index 2abcd7bf104f..f889d41a281f 100644
1106     --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
1107     +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
1108     @@ -1224,8 +1224,16 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
1109     static void
1110     nv50_mstm_init(struct nv50_mstm *mstm)
1111     {
1112     - if (mstm && mstm->mgr.mst_state)
1113     - drm_dp_mst_topology_mgr_resume(&mstm->mgr);
1114     + int ret;
1115     +
1116     + if (!mstm || !mstm->mgr.mst_state)
1117     + return;
1118     +
1119     + ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
1120     + if (ret == -1) {
1121     + drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
1122     + drm_kms_helper_hotplug_event(mstm->mgr.dev);
1123     + }
1124     }
1125    
1126     static void
1127     diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
1128     index 79d00d861a31..01ff3c858875 100644
1129     --- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
1130     +++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
1131     @@ -189,12 +189,14 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
1132     int rockchip_drm_psr_register(struct drm_encoder *encoder,
1133     int (*psr_set)(struct drm_encoder *, bool enable))
1134     {
1135     - struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
1136     + struct rockchip_drm_private *drm_drv;
1137     struct psr_drv *psr;
1138    
1139     if (!encoder || !psr_set)
1140     return -EINVAL;
1141    
1142     + drm_drv = encoder->dev->dev_private;
1143     +
1144     psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
1145     if (!psr)
1146     return -ENOMEM;
1147     diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
1148     index a3275fa66b7b..629f40424bba 100644
1149     --- a/drivers/gpu/drm/vc4/vc4_plane.c
1150     +++ b/drivers/gpu/drm/vc4/vc4_plane.c
1151     @@ -322,6 +322,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
1152     if (vc4_state->is_unity)
1153     vc4_state->x_scaling[0] = VC4_SCALING_PPF;
1154     } else {
1155     + vc4_state->is_yuv = false;
1156     vc4_state->x_scaling[1] = VC4_SCALING_NONE;
1157     vc4_state->y_scaling[1] = VC4_SCALING_NONE;
1158     }
1159     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1160     index 46182d4dd1ce..b7870e7e41d4 100644
1161     --- a/drivers/hid/hid-ids.h
1162     +++ b/drivers/hid/hid-ids.h
1163     @@ -17,6 +17,9 @@
1164     #ifndef HID_IDS_H_FILE
1165     #define HID_IDS_H_FILE
1166    
1167     +#define USB_VENDOR_ID_258A 0x258a
1168     +#define USB_DEVICE_ID_258A_6A88 0x6a88
1169     +
1170     #define USB_VENDOR_ID_3M 0x0596
1171     #define USB_DEVICE_ID_3M1968 0x0500
1172     #define USB_DEVICE_ID_3M2256 0x0502
1173     diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
1174     index 1882a4ab0f29..98b059d79bc8 100644
1175     --- a/drivers/hid/hid-ite.c
1176     +++ b/drivers/hid/hid-ite.c
1177     @@ -42,6 +42,7 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
1178    
1179     static const struct hid_device_id ite_devices[] = {
1180     { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
1181     + { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
1182     { }
1183     };
1184     MODULE_DEVICE_TABLE(hid, ite_devices);
1185     diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
1186     index 97954f575c3f..1c1a2514d6f3 100644
1187     --- a/drivers/hv/Kconfig
1188     +++ b/drivers/hv/Kconfig
1189     @@ -4,7 +4,7 @@ menu "Microsoft Hyper-V guest support"
1190    
1191     config HYPERV
1192     tristate "Microsoft Hyper-V client drivers"
1193     - depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST
1194     + depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST
1195     select PARAVIRT
1196     help
1197     Select this option to run Linux as a Hyper-V client operating
1198     diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
1199     index d293e55553bd..ba7aaf421f36 100644
1200     --- a/drivers/hwtracing/intel_th/msu.c
1201     +++ b/drivers/hwtracing/intel_th/msu.c
1202     @@ -1423,7 +1423,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr,
1203     if (!end)
1204     break;
1205    
1206     - len -= end - p;
1207     + /* consume the number and the following comma, hence +1 */
1208     + len -= end - p + 1;
1209     p = end + 1;
1210     } while (len);
1211    
1212     diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
1213     index 2ddbfc3fdbae..cba62ad26cd8 100644
1214     --- a/drivers/iio/dac/ad5686.c
1215     +++ b/drivers/iio/dac/ad5686.c
1216     @@ -124,7 +124,8 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
1217     mutex_unlock(&indio_dev->mlock);
1218     if (ret < 0)
1219     return ret;
1220     - *val = ret;
1221     + *val = (ret >> chan->scan_type.shift) &
1222     + GENMASK(chan->scan_type.realbits - 1, 0);
1223     return IIO_VAL_INT;
1224     case IIO_CHAN_INFO_SCALE:
1225     *val = st->vref_mv;
1226     diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
1227     index 25d43c8f1c2a..558de0b9895c 100644
1228     --- a/drivers/infiniband/core/roce_gid_mgmt.c
1229     +++ b/drivers/infiniband/core/roce_gid_mgmt.c
1230     @@ -267,6 +267,9 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
1231     struct net_device *cookie_ndev = cookie;
1232     bool match = false;
1233    
1234     + if (!rdma_ndev)
1235     + return false;
1236     +
1237     rcu_read_lock();
1238     if (netif_is_bond_master(cookie_ndev) &&
1239     rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
1240     diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
1241     index f2f11e652dcd..02f36ab72ad4 100644
1242     --- a/drivers/infiniband/hw/mlx5/devx.c
1243     +++ b/drivers/infiniband/hw/mlx5/devx.c
1244     @@ -857,7 +857,9 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
1245    
1246     err = uverbs_get_flags32(&access, attrs,
1247     MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
1248     - IB_ACCESS_SUPPORTED);
1249     + IB_ACCESS_LOCAL_WRITE |
1250     + IB_ACCESS_REMOTE_WRITE |
1251     + IB_ACCESS_REMOTE_READ);
1252     if (err)
1253     return err;
1254    
1255     diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
1256     index fc6c880756da..4111b798fd3c 100644
1257     --- a/drivers/infiniband/sw/rxe/rxe_resp.c
1258     +++ b/drivers/infiniband/sw/rxe/rxe_resp.c
1259     @@ -844,11 +844,16 @@ static enum resp_states do_complete(struct rxe_qp *qp,
1260    
1261     memset(&cqe, 0, sizeof(cqe));
1262    
1263     - wc->wr_id = wqe->wr_id;
1264     - wc->status = qp->resp.status;
1265     - wc->qp = &qp->ibqp;
1266     + if (qp->rcq->is_user) {
1267     + uwc->status = qp->resp.status;
1268     + uwc->qp_num = qp->ibqp.qp_num;
1269     + uwc->wr_id = wqe->wr_id;
1270     + } else {
1271     + wc->status = qp->resp.status;
1272     + wc->qp = &qp->ibqp;
1273     + wc->wr_id = wqe->wr_id;
1274     + }
1275    
1276     - /* fields after status are not required for errors */
1277     if (wc->status == IB_WC_SUCCESS) {
1278     wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
1279     pkt->mask & RXE_WRITE_MASK) ?
1280     diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
1281     index f37cbad022a2..f4bce5aa0ff8 100644
1282     --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
1283     +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
1284     @@ -2009,6 +2009,14 @@ static void srpt_free_ch(struct kref *kref)
1285     kfree_rcu(ch, rcu);
1286     }
1287    
1288     +/*
1289     + * Shut down the SCSI target session, tell the connection manager to
1290     + * disconnect the associated RDMA channel, transition the QP to the error
1291     + * state and remove the channel from the channel list. This function is
1292     + * typically called from inside srpt_zerolength_write_done(). Concurrent
1293     + * srpt_zerolength_write() calls from inside srpt_close_ch() are possible
1294     + * as long as the channel is on sport->nexus_list.
1295     + */
1296     static void srpt_release_channel_work(struct work_struct *w)
1297     {
1298     struct srpt_rdma_ch *ch;
1299     @@ -2036,6 +2044,11 @@ static void srpt_release_channel_work(struct work_struct *w)
1300     else
1301     ib_destroy_cm_id(ch->ib_cm.cm_id);
1302    
1303     + sport = ch->sport;
1304     + mutex_lock(&sport->mutex);
1305     + list_del_rcu(&ch->list);
1306     + mutex_unlock(&sport->mutex);
1307     +
1308     srpt_destroy_ch_ib(ch);
1309    
1310     srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
1311     @@ -2046,11 +2059,6 @@ static void srpt_release_channel_work(struct work_struct *w)
1312     sdev, ch->rq_size,
1313     srp_max_req_size, DMA_FROM_DEVICE);
1314    
1315     - sport = ch->sport;
1316     - mutex_lock(&sport->mutex);
1317     - list_del_rcu(&ch->list);
1318     - mutex_unlock(&sport->mutex);
1319     -
1320     wake_up(&sport->ch_releaseQ);
1321    
1322     kref_put(&ch->kref, srpt_free_ch);
1323     diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
1324     index a7dc286f406c..840e53732753 100644
1325     --- a/drivers/input/keyboard/omap4-keypad.c
1326     +++ b/drivers/input/keyboard/omap4-keypad.c
1327     @@ -126,12 +126,8 @@ static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id)
1328     {
1329     struct omap4_keypad *keypad_data = dev_id;
1330    
1331     - if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) {
1332     - /* Disable interrupts */
1333     - kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
1334     - OMAP4_VAL_IRQDISABLE);
1335     + if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS))
1336     return IRQ_WAKE_THREAD;
1337     - }
1338    
1339     return IRQ_NONE;
1340     }
1341     @@ -173,11 +169,6 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
1342     kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
1343     kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
1344    
1345     - /* enable interrupts */
1346     - kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
1347     - OMAP4_DEF_IRQENABLE_EVENTEN |
1348     - OMAP4_DEF_IRQENABLE_LONGKEY);
1349     -
1350     return IRQ_HANDLED;
1351     }
1352    
1353     @@ -214,9 +205,10 @@ static void omap4_keypad_close(struct input_dev *input)
1354    
1355     disable_irq(keypad_data->irq);
1356    
1357     - /* Disable interrupts */
1358     + /* Disable interrupts and wake-up events */
1359     kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
1360     OMAP4_VAL_IRQDISABLE);
1361     + kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE, 0);
1362    
1363     /* clear pending interrupts */
1364     kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
1365     @@ -365,7 +357,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
1366     }
1367    
1368     error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
1369     - omap4_keypad_irq_thread_fn, 0,
1370     + omap4_keypad_irq_thread_fn, IRQF_ONESHOT,
1371     "omap4-keypad", keypad_data);
1372     if (error) {
1373     dev_err(&pdev->dev, "failed to register interrupt\n");
1374     diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1375     index 2bd5bb11c8ba..b6da0c1267e3 100644
1376     --- a/drivers/input/mouse/synaptics.c
1377     +++ b/drivers/input/mouse/synaptics.c
1378     @@ -171,6 +171,7 @@ static const char * const smbus_pnp_ids[] = {
1379     "LEN0046", /* X250 */
1380     "LEN004a", /* W541 */
1381     "LEN005b", /* P50 */
1382     + "LEN005e", /* T560 */
1383     "LEN0071", /* T480 */
1384     "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
1385     "LEN0073", /* X1 Carbon G5 (Elantech) */
1386     @@ -178,6 +179,7 @@ static const char * const smbus_pnp_ids[] = {
1387     "LEN0096", /* X280 */
1388     "LEN0097", /* X280 -> ALPS trackpoint */
1389     "LEN200f", /* T450s */
1390     + "SYN3052", /* HP EliteBook 840 G4 */
1391     "SYN3221", /* HP 15-ay000 */
1392     NULL
1393     };
1394     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1395     index a76c47f20587..4339177629e3 100644
1396     --- a/drivers/iommu/intel-iommu.c
1397     +++ b/drivers/iommu/intel-iommu.c
1398     @@ -2069,7 +2069,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1399     * than default. Unnecessary for PT mode.
1400     */
1401     if (translation != CONTEXT_TT_PASS_THROUGH) {
1402     - for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1403     + for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
1404     ret = -ENOMEM;
1405     pgd = phys_to_virt(dma_pte_addr(pgd));
1406     if (!dma_pte_present(pgd))
1407     @@ -2083,7 +2083,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1408     translation = CONTEXT_TT_MULTI_LEVEL;
1409    
1410     context_set_address_root(context, virt_to_phys(pgd));
1411     - context_set_address_width(context, iommu->agaw);
1412     + context_set_address_width(context, agaw);
1413     } else {
1414     /*
1415     * In pass through mode, AW must be programmed to
1416     diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
1417     index df80c89ebe7f..5d3faae51d59 100644
1418     --- a/drivers/leds/leds-pwm.c
1419     +++ b/drivers/leds/leds-pwm.c
1420     @@ -100,8 +100,9 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
1421     led_data->pwm = devm_pwm_get(dev, led->name);
1422     if (IS_ERR(led_data->pwm)) {
1423     ret = PTR_ERR(led_data->pwm);
1424     - dev_err(dev, "unable to request PWM for %s: %d\n",
1425     - led->name, ret);
1426     + if (ret != -EPROBE_DEFER)
1427     + dev_err(dev, "unable to request PWM for %s: %d\n",
1428     + led->name, ret);
1429     return ret;
1430     }
1431    
1432     diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
1433     index 39804d830305..fd5c52b21436 100644
1434     --- a/drivers/media/pci/cx23885/cx23885-core.c
1435     +++ b/drivers/media/pci/cx23885/cx23885-core.c
1436     @@ -23,6 +23,7 @@
1437     #include <linux/moduleparam.h>
1438     #include <linux/kmod.h>
1439     #include <linux/kernel.h>
1440     +#include <linux/pci.h>
1441     #include <linux/slab.h>
1442     #include <linux/interrupt.h>
1443     #include <linux/delay.h>
1444     @@ -41,6 +42,18 @@ MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
1445     MODULE_LICENSE("GPL");
1446     MODULE_VERSION(CX23885_VERSION);
1447    
1448     +/*
1449     + * Some platforms have been found to require periodic resetting of the DMA
1450     + * engine. Ryzen and XEON platforms are known to be affected. The symptom
1451     + * encountered is "mpeg risc op code error". Only Ryzen platforms employ
1452     + * this workaround if the option equals 1. The workaround can be explicitly
1453     + * disabled for all platforms by setting to 0, the workaround can be forced
1454     + * on for any platform by setting to 2.
1455     + */
1456     +static unsigned int dma_reset_workaround = 1;
1457     +module_param(dma_reset_workaround, int, 0644);
1458     +MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
1459     +
1460     static unsigned int debug;
1461     module_param(debug, int, 0644);
1462     MODULE_PARM_DESC(debug, "enable debug messages");
1463     @@ -603,8 +616,13 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port,
1464    
1465     static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
1466     {
1467     - uint32_t reg1_val = cx_read(TC_REQ); /* read-only */
1468     - uint32_t reg2_val = cx_read(TC_REQ_SET);
1469     + uint32_t reg1_val, reg2_val;
1470     +
1471     + if (!dev->need_dma_reset)
1472     + return;
1473     +
1474     + reg1_val = cx_read(TC_REQ); /* read-only */
1475     + reg2_val = cx_read(TC_REQ_SET);
1476    
1477     if (reg1_val && reg2_val) {
1478     cx_write(TC_REQ, reg1_val);
1479     @@ -2058,6 +2076,37 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1480     /* TODO: 23-19 */
1481     }
1482    
1483     +static struct {
1484     + int vendor, dev;
1485     +} const broken_dev_id[] = {
1486     + /* According with
1487     + * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
1488     + * 0x1451 is PCI ID for the IOMMU found on Ryzen
1489     + */
1490     + { PCI_VENDOR_ID_AMD, 0x1451 },
1491     +};
1492     +
1493     +static bool cx23885_does_need_dma_reset(void)
1494     +{
1495     + int i;
1496     + struct pci_dev *pdev = NULL;
1497     +
1498     + if (dma_reset_workaround == 0)
1499     + return false;
1500     + else if (dma_reset_workaround == 2)
1501     + return true;
1502     +
1503     + for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
1504     + pdev = pci_get_device(broken_dev_id[i].vendor,
1505     + broken_dev_id[i].dev, NULL);
1506     + if (pdev) {
1507     + pci_dev_put(pdev);
1508     + return true;
1509     + }
1510     + }
1511     + return false;
1512     +}
1513     +
1514     static int cx23885_initdev(struct pci_dev *pci_dev,
1515     const struct pci_device_id *pci_id)
1516     {
1517     @@ -2069,6 +2118,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
1518     if (NULL == dev)
1519     return -ENOMEM;
1520    
1521     + dev->need_dma_reset = cx23885_does_need_dma_reset();
1522     +
1523     err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1524     if (err < 0)
1525     goto fail_free;
1526     diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
1527     index d54c7ee1ab21..cf965efabe66 100644
1528     --- a/drivers/media/pci/cx23885/cx23885.h
1529     +++ b/drivers/media/pci/cx23885/cx23885.h
1530     @@ -451,6 +451,8 @@ struct cx23885_dev {
1531     /* Analog raw audio */
1532     struct cx23885_audio_dev *audio_dev;
1533    
1534     + /* Does the system require periodic DMA resets? */
1535     + unsigned int need_dma_reset:1;
1536     };
1537    
1538     static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev)
1539     diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
1540     index 8679e0bd8ec2..f4f8ab602442 100644
1541     --- a/drivers/misc/genwqe/card_utils.c
1542     +++ b/drivers/misc/genwqe/card_utils.c
1543     @@ -217,7 +217,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
1544     void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
1545     dma_addr_t *dma_handle)
1546     {
1547     - if (get_order(size) > MAX_ORDER)
1548     + if (get_order(size) >= MAX_ORDER)
1549     return NULL;
1550    
1551     return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
1552     diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
1553     index 3b889efddf78..50dd6bf176d0 100644
1554     --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
1555     +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
1556     @@ -29,9 +29,6 @@
1557     #define RES_RING_CSR 1
1558     #define RES_RING_CMD 2
1559    
1560     -static const struct of_device_id xgene_enet_of_match[];
1561     -static const struct acpi_device_id xgene_enet_acpi_match[];
1562     -
1563     static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
1564     {
1565     struct xgene_enet_raw_desc16 *raw_desc;
1566     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
1567     index 0de487a8f0eb..3db54b664aed 100644
1568     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
1569     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
1570     @@ -1282,6 +1282,7 @@ enum sp_rtnl_flag {
1571     BNX2X_SP_RTNL_TX_STOP,
1572     BNX2X_SP_RTNL_GET_DRV_VERSION,
1573     BNX2X_SP_RTNL_CHANGE_UDP_PORT,
1574     + BNX2X_SP_RTNL_UPDATE_SVID,
1575     };
1576    
1577     enum bnx2x_iov_flag {
1578     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1579     index fcc2328bb0d9..a585f1025a58 100644
1580     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1581     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1582     @@ -2925,6 +2925,10 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
1583     func_params.f_obj = &bp->func_obj;
1584     func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
1585    
1586     + /* Prepare parameters for function state transitions */
1587     + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1588     + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
1589     +
1590     if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
1591     int func = BP_ABS_FUNC(bp);
1592     u32 val;
1593     @@ -4301,7 +4305,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
1594     bnx2x_handle_eee_event(bp);
1595    
1596     if (val & DRV_STATUS_OEM_UPDATE_SVID)
1597     - bnx2x_handle_update_svid_cmd(bp);
1598     + bnx2x_schedule_sp_rtnl(bp,
1599     + BNX2X_SP_RTNL_UPDATE_SVID, 0);
1600    
1601     if (bp->link_vars.periodic_flags &
1602     PERIODIC_FLAGS_LINK_EVENT) {
1603     @@ -8462,6 +8467,7 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
1604     /* Fill a user request section if needed */
1605     if (!test_bit(RAMROD_CONT, ramrod_flags)) {
1606     ramrod_param.user_req.u.vlan.vlan = vlan;
1607     + __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
1608     /* Set the command: ADD or DEL */
1609     if (set)
1610     ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
1611     @@ -8482,6 +8488,27 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
1612     return rc;
1613     }
1614    
1615     +static int bnx2x_del_all_vlans(struct bnx2x *bp)
1616     +{
1617     + struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
1618     + unsigned long ramrod_flags = 0, vlan_flags = 0;
1619     + struct bnx2x_vlan_entry *vlan;
1620     + int rc;
1621     +
1622     + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1623     + __set_bit(BNX2X_VLAN, &vlan_flags);
1624     + rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
1625     + if (rc)
1626     + return rc;
1627     +
1628     + /* Mark that hw forgot all entries */
1629     + list_for_each_entry(vlan, &bp->vlan_reg, link)
1630     + vlan->hw = false;
1631     + bp->vlan_cnt = 0;
1632     +
1633     + return 0;
1634     +}
1635     +
1636     int bnx2x_del_all_macs(struct bnx2x *bp,
1637     struct bnx2x_vlan_mac_obj *mac_obj,
1638     int mac_type, bool wait_for_comp)
1639     @@ -9320,6 +9347,17 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
1640     BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
1641     rc);
1642    
1643     + /* The whole *vlan_obj structure may be not initialized if VLAN
1644     + * filtering offload is not supported by hardware. Currently this is
1645     + * true for all hardware covered by CHIP_IS_E1x().
1646     + */
1647     + if (!CHIP_IS_E1x(bp)) {
1648     + /* Remove all currently configured VLANs */
1649     + rc = bnx2x_del_all_vlans(bp);
1650     + if (rc < 0)
1651     + BNX2X_ERR("Failed to delete all VLANs\n");
1652     + }
1653     +
1654     /* Disable LLH */
1655     if (!CHIP_IS_E1(bp))
1656     REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1657     @@ -10349,6 +10387,9 @@ sp_rtnl_not_reset:
1658     &bp->sp_rtnl_state))
1659     bnx2x_update_mng_version(bp);
1660    
1661     + if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
1662     + bnx2x_handle_update_svid_cmd(bp);
1663     +
1664     if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
1665     &bp->sp_rtnl_state)) {
1666     if (bnx2x_udp_port_update(bp)) {
1667     @@ -11740,8 +11781,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
1668     * If maximum allowed number of connections is zero -
1669     * disable the feature.
1670     */
1671     - if (!bp->cnic_eth_dev.max_fcoe_conn)
1672     + if (!bp->cnic_eth_dev.max_fcoe_conn) {
1673     bp->flags |= NO_FCOE_FLAG;
1674     + eth_zero_addr(bp->fip_mac);
1675     + }
1676     }
1677    
1678     static void bnx2x_get_cnic_info(struct bnx2x *bp)
1679     @@ -13014,13 +13057,6 @@ static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
1680    
1681     int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
1682     {
1683     - struct bnx2x_vlan_entry *vlan;
1684     -
1685     - /* The hw forgot all entries after reload */
1686     - list_for_each_entry(vlan, &bp->vlan_reg, link)
1687     - vlan->hw = false;
1688     - bp->vlan_cnt = 0;
1689     -
1690     /* Don't set rx mode here. Our caller will do it. */
1691     bnx2x_vlan_configure(bp, false);
1692    
1693     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
1694     index 0bf2fd470819..7a6e82db4231 100644
1695     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
1696     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
1697     @@ -265,6 +265,7 @@ enum {
1698     BNX2X_ETH_MAC,
1699     BNX2X_ISCSI_ETH_MAC,
1700     BNX2X_NETQ_ETH_MAC,
1701     + BNX2X_VLAN,
1702     BNX2X_DONT_CONSUME_CAM_CREDIT,
1703     BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1704     };
1705     @@ -272,7 +273,8 @@ enum {
1706     #define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
1707     1 << BNX2X_ETH_MAC | \
1708     1 << BNX2X_ISCSI_ETH_MAC | \
1709     - 1 << BNX2X_NETQ_ETH_MAC)
1710     + 1 << BNX2X_NETQ_ETH_MAC | \
1711     + 1 << BNX2X_VLAN)
1712     #define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
1713     ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
1714    
1715     diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1716     index 240fd36b5074..8f4b2f9a8e07 100644
1717     --- a/drivers/net/ethernet/cadence/macb_main.c
1718     +++ b/drivers/net/ethernet/cadence/macb_main.c
1719     @@ -682,6 +682,11 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
1720     if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
1721     desc_64 = macb_64b_desc(bp, desc);
1722     desc_64->addrh = upper_32_bits(addr);
1723     + /* The low bits of RX address contain the RX_USED bit, clearing
1724     + * of which allows packet RX. Make sure the high bits are also
1725     + * visible to HW at that point.
1726     + */
1727     + dma_wmb();
1728     }
1729     #endif
1730     desc->addr = lower_32_bits(addr);
1731     @@ -930,14 +935,19 @@ static void gem_rx_refill(struct macb_queue *queue)
1732    
1733     if (entry == bp->rx_ring_size - 1)
1734     paddr |= MACB_BIT(RX_WRAP);
1735     - macb_set_addr(bp, desc, paddr);
1736     desc->ctrl = 0;
1737     + /* Setting addr clears RX_USED and allows reception,
1738     + * make sure ctrl is cleared first to avoid a race.
1739     + */
1740     + dma_wmb();
1741     + macb_set_addr(bp, desc, paddr);
1742    
1743     /* properly align Ethernet header */
1744     skb_reserve(skb, NET_IP_ALIGN);
1745     } else {
1746     - desc->addr &= ~MACB_BIT(RX_USED);
1747     desc->ctrl = 0;
1748     + dma_wmb();
1749     + desc->addr &= ~MACB_BIT(RX_USED);
1750     }
1751     }
1752    
1753     @@ -991,11 +1001,15 @@ static int gem_rx(struct macb_queue *queue, int budget)
1754    
1755     rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
1756     addr = macb_get_addr(bp, desc);
1757     - ctrl = desc->ctrl;
1758    
1759     if (!rxused)
1760     break;
1761    
1762     + /* Ensure ctrl is at least as up-to-date as rxused */
1763     + dma_rmb();
1764     +
1765     + ctrl = desc->ctrl;
1766     +
1767     queue->rx_tail++;
1768     count++;
1769    
1770     @@ -1170,11 +1184,14 @@ static int macb_rx(struct macb_queue *queue, int budget)
1771     /* Make hw descriptor updates visible to CPU */
1772     rmb();
1773    
1774     - ctrl = desc->ctrl;
1775     -
1776     if (!(desc->addr & MACB_BIT(RX_USED)))
1777     break;
1778    
1779     + /* Ensure ctrl is at least as up-to-date as addr */
1780     + dma_rmb();
1781     +
1782     + ctrl = desc->ctrl;
1783     +
1784     if (ctrl & MACB_BIT(RX_SOF)) {
1785     if (first_frag != -1)
1786     discard_partial_frame(queue, first_frag, tail);
1787     diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
1788     index cd5296b84229..a6dc47edc4cf 100644
1789     --- a/drivers/net/ethernet/cadence/macb_ptp.c
1790     +++ b/drivers/net/ethernet/cadence/macb_ptp.c
1791     @@ -319,6 +319,8 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
1792     desc_ptp = macb_ptp_desc(queue->bp, desc);
1793     tx_timestamp = &queue->tx_timestamps[head];
1794     tx_timestamp->skb = skb;
1795     + /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
1796     + dma_rmb();
1797     tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
1798     tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
1799     /* move head */
1800     diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1801     index b52029e26d15..ad1779fc410e 100644
1802     --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1803     +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1804     @@ -379,6 +379,9 @@ static void hns_ae_stop(struct hnae_handle *handle)
1805    
1806     hns_ae_ring_enable_all(handle, 0);
1807    
1808     + /* clean rx fbd. */
1809     + hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
1810     +
1811     (void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
1812     }
1813    
1814     diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
1815     index 09e4061d1fa6..aa2c25d7a61d 100644
1816     --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
1817     +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
1818     @@ -67,11 +67,14 @@ static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
1819     struct mac_driver *drv = (struct mac_driver *)mac_drv;
1820    
1821     /*enable GE rX/tX */
1822     - if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
1823     + if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
1824     dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
1825    
1826     - if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
1827     + if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
1828     + /* enable rx pcs */
1829     + dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
1830     dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
1831     + }
1832     }
1833    
1834     static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
1835     @@ -79,11 +82,14 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
1836     struct mac_driver *drv = (struct mac_driver *)mac_drv;
1837    
1838     /*disable GE rX/tX */
1839     - if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
1840     + if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
1841     dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
1842    
1843     - if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
1844     + if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
1845     + /* disable rx pcs */
1846     + dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
1847     dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
1848     + }
1849     }
1850    
1851     /* hns_gmac_get_en - get port enable
1852     diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
1853     index 6ed6f142427e..cfdc92de9dc0 100644
1854     --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
1855     +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
1856     @@ -778,6 +778,17 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
1857     return rc;
1858     }
1859    
1860     +static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
1861     +{
1862     + if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
1863     + return;
1864     +
1865     + phy_device_remove(mac_cb->phy_dev);
1866     + phy_device_free(mac_cb->phy_dev);
1867     +
1868     + mac_cb->phy_dev = NULL;
1869     +}
1870     +
1871     #define MAC_MEDIA_TYPE_MAX_LEN 16
1872    
1873     static const struct {
1874     @@ -1117,7 +1128,11 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev)
1875     int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
1876    
1877     for (i = 0; i < max_port_num; i++) {
1878     + if (!dsaf_dev->mac_cb[i])
1879     + continue;
1880     +
1881     dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
1882     + hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
1883     dsaf_dev->mac_cb[i] = NULL;
1884     }
1885     }
1886     diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1887     index e557a4ef5996..3b9e74be5fbd 100644
1888     --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1889     +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1890     @@ -934,6 +934,62 @@ static void hns_dsaf_tcam_mc_cfg(
1891     spin_unlock_bh(&dsaf_dev->tcam_lock);
1892     }
1893    
1894     +/**
1895     + * hns_dsaf_tcam_uc_cfg_vague - INT
1896     + * @dsaf_dev: dsa fabric device struct pointer
1897     + * @address,
1898     + * @ptbl_tcam_data,
1899     + */
1900     +static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
1901     + u32 address,
1902     + struct dsaf_tbl_tcam_data *tcam_data,
1903     + struct dsaf_tbl_tcam_data *tcam_mask,
1904     + struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
1905     +{
1906     + spin_lock_bh(&dsaf_dev->tcam_lock);
1907     + hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
1908     + hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
1909     + hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
1910     + hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
1911     + hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
1912     +
1913     + /*Restore Match Data*/
1914     + tcam_mask->tbl_tcam_data_high = 0xffffffff;
1915     + tcam_mask->tbl_tcam_data_low = 0xffffffff;
1916     + hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
1917     +
1918     + spin_unlock_bh(&dsaf_dev->tcam_lock);
1919     +}
1920     +
1921     +/**
1922     + * hns_dsaf_tcam_mc_cfg_vague - INT
1923     + * @dsaf_dev: dsa fabric device struct pointer
1924     + * @address,
1925     + * @ptbl_tcam_data,
1926     + * @ptbl_tcam_mask
1927     + * @ptbl_tcam_mcast
1928     + */
1929     +static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
1930     + u32 address,
1931     + struct dsaf_tbl_tcam_data *tcam_data,
1932     + struct dsaf_tbl_tcam_data *tcam_mask,
1933     + struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
1934     +{
1935     + spin_lock_bh(&dsaf_dev->tcam_lock);
1936     + hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
1937     + hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
1938     + hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
1939     + hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
1940     + hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
1941     +
1942     + /*Restore Match Data*/
1943     + tcam_mask->tbl_tcam_data_high = 0xffffffff;
1944     + tcam_mask->tbl_tcam_data_low = 0xffffffff;
1945     + hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
1946     +
1947     + spin_unlock_bh(&dsaf_dev->tcam_lock);
1948     +}
1949     +
1950     /**
1951     * hns_dsaf_tcam_mc_invld - INT
1952     * @dsaf_id: dsa fabric id
1953     @@ -1492,6 +1548,27 @@ static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
1954     return DSAF_INVALID_ENTRY_IDX;
1955     }
1956    
1957     +/**
1958     + * hns_dsaf_find_empty_mac_entry_reverse
1959     + * search dsa fabric soft empty-entry from the end
1960     + * @dsaf_dev: dsa fabric device struct pointer
1961     + */
1962     +static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
1963     +{
1964     + struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
1965     + struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
1966     + int i;
1967     +
1968     + soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
1969     + for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
1970     + /* search all entry from end to start.*/
1971     + if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
1972     + return i;
1973     + soft_mac_entry--;
1974     + }
1975     + return DSAF_INVALID_ENTRY_IDX;
1976     +}
1977     +
1978     /**
1979     * hns_dsaf_set_mac_key - set mac key
1980     * @dsaf_dev: dsa fabric device struct pointer
1981     @@ -2166,9 +2243,9 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
1982     DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
1983    
1984     hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
1985     - DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
1986     + DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
1987     hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
1988     - DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
1989     + DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
1990    
1991     /* pfc pause frame statistics stored in dsaf inode*/
1992     if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
1993     @@ -2285,237 +2362,237 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
1994     DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
1995     p[223 + i] = dsaf_read_dev(ddev,
1996     DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
1997     - p[224 + i] = dsaf_read_dev(ddev,
1998     + p[226 + i] = dsaf_read_dev(ddev,
1999     DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
2000     }
2001    
2002     - p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
2003     + p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
2004    
2005     for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
2006     j = i * DSAF_COMM_CHN + port;
2007     - p[228 + i] = dsaf_read_dev(ddev,
2008     + p[230 + i] = dsaf_read_dev(ddev,
2009     DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
2010     }
2011    
2012     - p[231] = dsaf_read_dev(ddev,
2013     - DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
2014     + p[233] = dsaf_read_dev(ddev,
2015     + DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
2016    
2017     /* dsaf inode registers */
2018     for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
2019     j = i * DSAF_COMM_CHN + port;
2020     - p[232 + i] = dsaf_read_dev(ddev,
2021     + p[234 + i] = dsaf_read_dev(ddev,
2022     DSAF_SBM_CFG_REG_0_REG + j * 0x80);
2023     - p[235 + i] = dsaf_read_dev(ddev,
2024     + p[237 + i] = dsaf_read_dev(ddev,
2025     DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
2026     - p[238 + i] = dsaf_read_dev(ddev,
2027     + p[240 + i] = dsaf_read_dev(ddev,
2028     DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
2029     - p[241 + i] = dsaf_read_dev(ddev,
2030     + p[243 + i] = dsaf_read_dev(ddev,
2031     DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
2032     - p[244 + i] = dsaf_read_dev(ddev,
2033     + p[246 + i] = dsaf_read_dev(ddev,
2034     DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
2035     - p[245 + i] = dsaf_read_dev(ddev,
2036     + p[249 + i] = dsaf_read_dev(ddev,
2037     DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
2038     - p[248 + i] = dsaf_read_dev(ddev,
2039     + p[252 + i] = dsaf_read_dev(ddev,
2040     DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
2041     - p[251 + i] = dsaf_read_dev(ddev,
2042     + p[255 + i] = dsaf_read_dev(ddev,
2043     DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
2044     - p[254 + i] = dsaf_read_dev(ddev,
2045     + p[258 + i] = dsaf_read_dev(ddev,
2046     DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
2047     - p[257 + i] = dsaf_read_dev(ddev,
2048     + p[261 + i] = dsaf_read_dev(ddev,
2049     DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
2050     - p[260 + i] = dsaf_read_dev(ddev,
2051     + p[264 + i] = dsaf_read_dev(ddev,
2052     DSAF_SBM_INER_ST_0_REG + j * 0x80);
2053     - p[263 + i] = dsaf_read_dev(ddev,
2054     + p[267 + i] = dsaf_read_dev(ddev,
2055     DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
2056     - p[266 + i] = dsaf_read_dev(ddev,
2057     + p[270 + i] = dsaf_read_dev(ddev,
2058     DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
2059     - p[269 + i] = dsaf_read_dev(ddev,
2060     + p[273 + i] = dsaf_read_dev(ddev,
2061     DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
2062     - p[272 + i] = dsaf_read_dev(ddev,
2063     + p[276 + i] = dsaf_read_dev(ddev,
2064     DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
2065     - p[275 + i] = dsaf_read_dev(ddev,
2066     + p[279 + i] = dsaf_read_dev(ddev,
2067     DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
2068     - p[278 + i] = dsaf_read_dev(ddev,
2069     + p[282 + i] = dsaf_read_dev(ddev,
2070     DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
2071     - p[281 + i] = dsaf_read_dev(ddev,
2072     + p[285 + i] = dsaf_read_dev(ddev,
2073     DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
2074     - p[284 + i] = dsaf_read_dev(ddev,
2075     + p[288 + i] = dsaf_read_dev(ddev,
2076     DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
2077     - p[287 + i] = dsaf_read_dev(ddev,
2078     + p[291 + i] = dsaf_read_dev(ddev,
2079     DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
2080     - p[290 + i] = dsaf_read_dev(ddev,
2081     + p[294 + i] = dsaf_read_dev(ddev,
2082     DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
2083     - p[293 + i] = dsaf_read_dev(ddev,
2084     + p[297 + i] = dsaf_read_dev(ddev,
2085     DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
2086     - p[296 + i] = dsaf_read_dev(ddev,
2087     + p[300 + i] = dsaf_read_dev(ddev,
2088     DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
2089     - p[299 + i] = dsaf_read_dev(ddev,
2090     + p[303 + i] = dsaf_read_dev(ddev,
2091     DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
2092     - p[302 + i] = dsaf_read_dev(ddev,
2093     + p[306 + i] = dsaf_read_dev(ddev,
2094     DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
2095     - p[305 + i] = dsaf_read_dev(ddev,
2096     + p[309 + i] = dsaf_read_dev(ddev,
2097     DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
2098     - p[308 + i] = dsaf_read_dev(ddev,
2099     + p[312 + i] = dsaf_read_dev(ddev,
2100     DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
2101     }
2102    
2103     /* dsaf onode registers */
2104     for (i = 0; i < DSAF_XOD_NUM; i++) {
2105     - p[311 + i] = dsaf_read_dev(ddev,
2106     + p[315 + i] = dsaf_read_dev(ddev,
2107     DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
2108     - p[319 + i] = dsaf_read_dev(ddev,
2109     + p[323 + i] = dsaf_read_dev(ddev,
2110     DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
2111     - p[327 + i] = dsaf_read_dev(ddev,
2112     + p[331 + i] = dsaf_read_dev(ddev,
2113     DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
2114     - p[335 + i] = dsaf_read_dev(ddev,
2115     + p[339 + i] = dsaf_read_dev(ddev,
2116     DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
2117     - p[343 + i] = dsaf_read_dev(ddev,
2118     + p[347 + i] = dsaf_read_dev(ddev,
2119     DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
2120     - p[351 + i] = dsaf_read_dev(ddev,
2121     + p[355 + i] = dsaf_read_dev(ddev,
2122     DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
2123     }
2124    
2125     - p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
2126     - p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
2127     - p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
2128     + p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
2129     + p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
2130     + p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
2131    
2132     for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
2133     j = i * DSAF_COMM_CHN + port;
2134     - p[362 + i] = dsaf_read_dev(ddev,
2135     + p[366 + i] = dsaf_read_dev(ddev,
2136     DSAF_XOD_GNT_L_0_REG + j * 0x90);
2137     - p[365 + i] = dsaf_read_dev(ddev,
2138     + p[369 + i] = dsaf_read_dev(ddev,
2139     DSAF_XOD_GNT_H_0_REG + j * 0x90);
2140     - p[368 + i] = dsaf_read_dev(ddev,
2141     + p[372 + i] = dsaf_read_dev(ddev,
2142     DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
2143     - p[371 + i] = dsaf_read_dev(ddev,
2144     + p[375 + i] = dsaf_read_dev(ddev,
2145     DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
2146     - p[374 + i] = dsaf_read_dev(ddev,
2147     + p[378 + i] = dsaf_read_dev(ddev,
2148     DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
2149     - p[377 + i] = dsaf_read_dev(ddev,
2150     + p[381 + i] = dsaf_read_dev(ddev,
2151     DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
2152     - p[380 + i] = dsaf_read_dev(ddev,
2153     + p[384 + i] = dsaf_read_dev(ddev,
2154     DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
2155     - p[383 + i] = dsaf_read_dev(ddev,
2156     + p[387 + i] = dsaf_read_dev(ddev,
2157     DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
2158     - p[386 + i] = dsaf_read_dev(ddev,
2159     + p[390 + i] = dsaf_read_dev(ddev,
2160     DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
2161     - p[389 + i] = dsaf_read_dev(ddev,
2162     + p[393 + i] = dsaf_read_dev(ddev,
2163     DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
2164     }
2165    
2166     - p[392] = dsaf_read_dev(ddev,
2167     + p[396] = dsaf_read_dev(ddev,
2168     DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
2169     - p[393] = dsaf_read_dev(ddev,
2170     + p[397] = dsaf_read_dev(ddev,
2171     DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
2172     - p[394] = dsaf_read_dev(ddev,
2173     + p[398] = dsaf_read_dev(ddev,
2174     DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
2175     - p[395] = dsaf_read_dev(ddev,
2176     + p[399] = dsaf_read_dev(ddev,
2177     DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
2178     - p[396] = dsaf_read_dev(ddev,
2179     + p[400] = dsaf_read_dev(ddev,
2180     DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
2181     - p[397] = dsaf_read_dev(ddev,
2182     + p[401] = dsaf_read_dev(ddev,
2183     DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
2184     - p[398] = dsaf_read_dev(ddev,
2185     + p[402] = dsaf_read_dev(ddev,
2186     DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
2187     - p[399] = dsaf_read_dev(ddev,
2188     + p[403] = dsaf_read_dev(ddev,
2189     DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
2190     - p[400] = dsaf_read_dev(ddev,
2191     + p[404] = dsaf_read_dev(ddev,
2192     DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
2193     - p[401] = dsaf_read_dev(ddev,
2194     + p[405] = dsaf_read_dev(ddev,
2195     DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
2196     - p[402] = dsaf_read_dev(ddev,
2197     + p[406] = dsaf_read_dev(ddev,
2198     DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
2199     - p[403] = dsaf_read_dev(ddev,
2200     + p[407] = dsaf_read_dev(ddev,
2201     DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
2202     - p[404] = dsaf_read_dev(ddev,
2203     + p[408] = dsaf_read_dev(ddev,
2204     DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
2205    
2206     /* dsaf voq registers */
2207     for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
2208     j = (i * DSAF_COMM_CHN + port) * 0x90;
2209     - p[405 + i] = dsaf_read_dev(ddev,
2210     + p[409 + i] = dsaf_read_dev(ddev,
2211     DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
2212     - p[408 + i] = dsaf_read_dev(ddev,
2213     + p[412 + i] = dsaf_read_dev(ddev,
2214     DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
2215     - p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
2216     - p[414 + i] = dsaf_read_dev(ddev,
2217     + p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
2218     + p[418 + i] = dsaf_read_dev(ddev,
2219     DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
2220     - p[417 + i] = dsaf_read_dev(ddev,
2221     + p[421 + i] = dsaf_read_dev(ddev,
2222     DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
2223     - p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
2224     - p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
2225     - p[426 + i] = dsaf_read_dev(ddev,
2226     + p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
2227     + p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
2228     + p[430 + i] = dsaf_read_dev(ddev,
2229     DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
2230     - p[429 + i] = dsaf_read_dev(ddev,
2231     + p[433 + i] = dsaf_read_dev(ddev,
2232     DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
2233     - p[432 + i] = dsaf_read_dev(ddev,
2234     + p[436 + i] = dsaf_read_dev(ddev,
2235     DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
2236     - p[435 + i] = dsaf_read_dev(ddev,
2237     + p[439 + i] = dsaf_read_dev(ddev,
2238     DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
2239     - p[438 + i] = dsaf_read_dev(ddev,
2240     + p[442 + i] = dsaf_read_dev(ddev,
2241     DSAF_VOQ_BP_ALL_THRD_0_REG + j);
2242     }
2243    
2244     /* dsaf tbl registers */
2245     - p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
2246     - p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
2247     - p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
2248     - p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
2249     - p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
2250     - p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
2251     - p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
2252     - p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
2253     - p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
2254     - p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
2255     - p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
2256     - p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
2257     - p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
2258     - p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
2259     - p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
2260     - p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
2261     - p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
2262     - p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
2263     - p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
2264     - p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
2265     - p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
2266     - p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
2267     - p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
2268     + p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
2269     + p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
2270     + p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
2271     + p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
2272     + p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
2273     + p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
2274     + p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
2275     + p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
2276     + p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
2277     + p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
2278     + p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
2279     + p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
2280     + p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
2281     + p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
2282     + p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
2283     + p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
2284     + p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
2285     + p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
2286     + p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
2287     + p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
2288     + p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
2289     + p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
2290     + p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
2291    
2292     for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
2293     j = i * 0x8;
2294     - p[464 + 2 * i] = dsaf_read_dev(ddev,
2295     + p[468 + 2 * i] = dsaf_read_dev(ddev,
2296     DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
2297     - p[465 + 2 * i] = dsaf_read_dev(ddev,
2298     + p[469 + 2 * i] = dsaf_read_dev(ddev,
2299     DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
2300     }
2301    
2302     - p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
2303     - p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
2304     - p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
2305     - p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
2306     - p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
2307     - p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
2308     - p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
2309     - p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
2310     - p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
2311     - p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
2312     - p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
2313     - p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
2314     + p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
2315     + p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
2316     + p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
2317     + p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
2318     + p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
2319     + p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
2320     + p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
2321     + p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
2322     + p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
2323     + p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
2324     + p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
2325     + p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
2326    
2327     /* dsaf other registers */
2328     - p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
2329     - p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
2330     - p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
2331     - p[495] = dsaf_read_dev(ddev,
2332     + p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
2333     + p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
2334     + p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
2335     + p[499] = dsaf_read_dev(ddev,
2336     DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
2337     - p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
2338     - p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
2339     + p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
2340     + p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
2341    
2342     if (!is_ver1)
2343     - p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
2344     + p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
2345    
2346     /* mark end of dsaf regs */
2347     - for (i = 499; i < 504; i++)
2348     + for (i = 503; i < 504; i++)
2349     p[i] = 0xdddddddd;
2350     }
2351    
2352     @@ -2673,58 +2750,156 @@ int hns_dsaf_get_regs_count(void)
2353     return DSAF_DUMP_REGS_NUM;
2354     }
2355    
2356     -/* Reserve the last TCAM entry for promisc support */
2357     -#define dsaf_promisc_tcam_entry(port) \
2358     - (DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port))
2359     -void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
2360     - u32 port, bool enable)
2361     +static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
2362     {
2363     + struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
2364     + struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
2365     + struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
2366     + struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
2367     struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
2368     - struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
2369     - u16 entry_index;
2370     - struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask;
2371     - struct dsaf_tbl_tcam_mcast_cfg mac_data = {0};
2372     + struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
2373     + struct dsaf_drv_mac_single_dest_entry mask_entry;
2374     + struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
2375     + struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
2376     + u16 entry_index = DSAF_INVALID_ENTRY_IDX;
2377     + struct dsaf_drv_tbl_tcam_key mac_key;
2378     + struct hns_mac_cb *mac_cb;
2379     + u8 addr[ETH_ALEN] = {0};
2380     + u8 port_num;
2381     + u16 mskid;
2382     +
2383     + /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
2384     + hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
2385     + entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
2386     + if (entry_index != DSAF_INVALID_ENTRY_IDX)
2387     + return;
2388     +
2389     + /* put promisc tcam entry in the end. */
2390     + /* 1. set promisc unicast vague tcam entry. */
2391     + entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
2392     + if (entry_index == DSAF_INVALID_ENTRY_IDX) {
2393     + dev_err(dsaf_dev->dev,
2394     + "enable uc promisc failed (port:%#x)\n",
2395     + port);
2396     + return;
2397     + }
2398     +
2399     + mac_cb = dsaf_dev->mac_cb[port];
2400     + (void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
2401     + tbl_tcam_ucast.tbl_ucast_out_port = port_num;
2402    
2403     - if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev))
2404     + /* config uc vague table */
2405     + hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
2406     + &tbl_tcam_mask_uc, &tbl_tcam_ucast);
2407     +
2408     + /* update software entry */
2409     + soft_mac_entry = priv->soft_mac_tbl;
2410     + soft_mac_entry += entry_index;
2411     + soft_mac_entry->index = entry_index;
2412     + soft_mac_entry->tcam_key.high.val = mac_key.high.val;
2413     + soft_mac_entry->tcam_key.low.val = mac_key.low.val;
2414     + /* step back to the START for mc. */
2415     + soft_mac_entry = priv->soft_mac_tbl;
2416     +
2417     + /* 2. set promisc multicast vague tcam entry. */
2418     + entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
2419     + if (entry_index == DSAF_INVALID_ENTRY_IDX) {
2420     + dev_err(dsaf_dev->dev,
2421     + "enable mc promisc failed (port:%#x)\n",
2422     + port);
2423     return;
2424     + }
2425     +
2426     + memset(&mask_entry, 0x0, sizeof(mask_entry));
2427     + memset(&mask_key, 0x0, sizeof(mask_key));
2428     + memset(&temp_key, 0x0, sizeof(temp_key));
2429     + mask_entry.addr[0] = 0x01;
2430     + hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
2431     + port, mask_entry.addr);
2432     + tbl_tcam_mcast.tbl_mcast_item_vld = 1;
2433     + tbl_tcam_mcast.tbl_mcast_old_en = 0;
2434    
2435     - /* find the tcam entry index for promisc */
2436     - entry_index = dsaf_promisc_tcam_entry(port);
2437     -
2438     - memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
2439     - memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
2440     -
2441     - /* config key mask */
2442     - if (enable) {
2443     - dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
2444     - DSAF_TBL_TCAM_KEY_PORT_M,
2445     - DSAF_TBL_TCAM_KEY_PORT_S, port);
2446     - dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan,
2447     - DSAF_TBL_TCAM_KEY_PORT_M,
2448     - DSAF_TBL_TCAM_KEY_PORT_S, 0xf);
2449     -
2450     - /* SUB_QID */
2451     - dsaf_set_bit(mac_data.tbl_mcast_port_msk[0],
2452     - DSAF_SERVICE_NW_NUM, true);
2453     - mac_data.tbl_mcast_item_vld = true; /* item_vld bit */
2454     + if (port < DSAF_SERVICE_NW_NUM) {
2455     + mskid = port;
2456     + } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
2457     + mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
2458     } else {
2459     - mac_data.tbl_mcast_item_vld = false; /* item_vld bit */
2460     + dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
2461     + dsaf_dev->ae_dev.name, port,
2462     + mask_key.high.val, mask_key.low.val);
2463     + return;
2464     }
2465    
2466     - dev_dbg(dsaf_dev->dev,
2467     - "set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
2468     - dsaf_dev->ae_dev.name, tbl_tcam_data.high.val,
2469     - tbl_tcam_data.low.val, entry_index);
2470     + dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
2471     + mskid % 32, 1);
2472     + memcpy(&temp_key, &mask_key, sizeof(mask_key));
2473     + hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
2474     + (struct dsaf_tbl_tcam_data *)(&mask_key),
2475     + &tbl_tcam_mcast);
2476     +
2477     + /* update software entry */
2478     + soft_mac_entry += entry_index;
2479     + soft_mac_entry->index = entry_index;
2480     + soft_mac_entry->tcam_key.high.val = temp_key.high.val;
2481     + soft_mac_entry->tcam_key.low.val = temp_key.low.val;
2482     +}
2483    
2484     - /* config promisc entry with mask */
2485     - hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
2486     - (struct dsaf_tbl_tcam_data *)&tbl_tcam_data,
2487     - (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask,
2488     - &mac_data);
2489     +static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
2490     +{
2491     + struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
2492     + struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
2493     + struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
2494     + struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
2495     + struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
2496     + struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
2497     + struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
2498     + u16 entry_index = DSAF_INVALID_ENTRY_IDX;
2499     + struct dsaf_drv_tbl_tcam_key mac_key;
2500     + u8 addr[ETH_ALEN] = {0};
2501    
2502     - /* config software entry */
2503     + /* 1. delete uc vague tcam entry. */
2504     + /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
2505     + hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
2506     + entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
2507     +
2508     + if (entry_index == DSAF_INVALID_ENTRY_IDX)
2509     + return;
2510     +
2511     + /* config uc vague table */
2512     + hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
2513     + &tbl_tcam_mask, &tbl_tcam_ucast);
2514     + /* update soft management table. */
2515     + soft_mac_entry = priv->soft_mac_tbl;
2516     + soft_mac_entry += entry_index;
2517     + soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
2518     + /* step back to the START for mc. */
2519     + soft_mac_entry = priv->soft_mac_tbl;
2520     +
2521     + /* 2. delete mc vague tcam entry. */
2522     + addr[0] = 0x01;
2523     + memset(&mac_key, 0x0, sizeof(mac_key));
2524     + hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
2525     + entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
2526     +
2527     + if (entry_index == DSAF_INVALID_ENTRY_IDX)
2528     + return;
2529     +
2530     + /* config mc vague table */
2531     + hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
2532     + &tbl_tcam_mask, &tbl_tcam_mcast);
2533     + /* update soft management table. */
2534     soft_mac_entry += entry_index;
2535     - soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
2536     + soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
2537     +}
2538     +
2539     +/* Reserve the last TCAM entry for promisc support */
2540     +void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
2541     + u32 port, bool enable)
2542     +{
2543     + if (enable)
2544     + set_promisc_tcam_enable(dsaf_dev, port);
2545     + else
2546     + set_promisc_tcam_disable(dsaf_dev, port);
2547     }
2548    
2549     int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
2550     diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
2551     index 74d935d82cbc..b9733b0b8482 100644
2552     --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
2553     +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
2554     @@ -176,7 +176,7 @@
2555     #define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50
2556     #define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
2557     #define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
2558     -#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00
2559     +#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x103C
2560     #define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00
2561     #define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100
2562     #define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50
2563     @@ -404,11 +404,11 @@
2564     #define RCB_ECC_ERR_ADDR4_REG 0x460
2565     #define RCB_ECC_ERR_ADDR5_REG 0x464
2566    
2567     -#define RCB_COM_SF_CFG_INTMASK_RING 0x480
2568     -#define RCB_COM_SF_CFG_RING_STS 0x484
2569     -#define RCB_COM_SF_CFG_RING 0x488
2570     -#define RCB_COM_SF_CFG_INTMASK_BD 0x48C
2571     -#define RCB_COM_SF_CFG_BD_RINT_STS 0x470
2572     +#define RCB_COM_SF_CFG_INTMASK_RING 0x470
2573     +#define RCB_COM_SF_CFG_RING_STS 0x474
2574     +#define RCB_COM_SF_CFG_RING 0x478
2575     +#define RCB_COM_SF_CFG_INTMASK_BD 0x47C
2576     +#define RCB_COM_SF_CFG_BD_RINT_STS 0x480
2577     #define RCB_COM_RCB_RD_BD_BUSY 0x490
2578     #define RCB_COM_RCB_FBD_CRT_EN 0x494
2579     #define RCB_COM_AXI_WR_ERR_INTMASK 0x498
2580     @@ -534,6 +534,7 @@
2581     #define GMAC_LD_LINK_COUNTER_REG 0x01D0UL
2582     #define GMAC_LOOP_REG 0x01DCUL
2583     #define GMAC_RECV_CONTROL_REG 0x01E0UL
2584     +#define GMAC_PCS_RX_EN_REG 0x01E4UL
2585     #define GMAC_VLAN_CODE_REG 0x01E8UL
2586     #define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL
2587     #define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL
2588     diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2589     index 28e907831b0e..6242249c9f4c 100644
2590     --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2591     +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2592     @@ -1186,6 +1186,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
2593     if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
2594     phy_dev->autoneg = false;
2595    
2596     + if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
2597     + phy_stop(phy_dev);
2598     +
2599     return 0;
2600     }
2601    
2602     @@ -1281,6 +1284,22 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
2603     return cpu;
2604     }
2605    
2606     +static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
2607     +{
2608     + int i;
2609     +
2610     + for (i = 0; i < q_num * 2; i++) {
2611     + if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
2612     + irq_set_affinity_hint(priv->ring_data[i].ring->irq,
2613     + NULL);
2614     + free_irq(priv->ring_data[i].ring->irq,
2615     + &priv->ring_data[i]);
2616     + priv->ring_data[i].ring->irq_init_flag =
2617     + RCB_IRQ_NOT_INITED;
2618     + }
2619     + }
2620     +}
2621     +
2622     static int hns_nic_init_irq(struct hns_nic_priv *priv)
2623     {
2624     struct hnae_handle *h = priv->ae_handle;
2625     @@ -1306,7 +1325,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
2626     if (ret) {
2627     netdev_err(priv->netdev, "request irq(%d) fail\n",
2628     rd->ring->irq);
2629     - return ret;
2630     + goto out_free_irq;
2631     }
2632     disable_irq(rd->ring->irq);
2633    
2634     @@ -1321,6 +1340,10 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
2635     }
2636    
2637     return 0;
2638     +
2639     +out_free_irq:
2640     + hns_nic_free_irq(h->q_num, priv);
2641     + return ret;
2642     }
2643    
2644     static int hns_nic_net_up(struct net_device *ndev)
2645     @@ -1330,6 +1353,9 @@ static int hns_nic_net_up(struct net_device *ndev)
2646     int i, j;
2647     int ret;
2648    
2649     + if (!test_bit(NIC_STATE_DOWN, &priv->state))
2650     + return 0;
2651     +
2652     ret = hns_nic_init_irq(priv);
2653     if (ret != 0) {
2654     netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
2655     @@ -1365,6 +1391,7 @@ out_has_some_queues:
2656     for (j = i - 1; j >= 0; j--)
2657     hns_nic_ring_close(ndev, j);
2658    
2659     + hns_nic_free_irq(h->q_num, priv);
2660     set_bit(NIC_STATE_DOWN, &priv->state);
2661    
2662     return ret;
2663     @@ -1482,11 +1509,19 @@ static int hns_nic_net_stop(struct net_device *ndev)
2664     }
2665    
2666     static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
2667     +#define HNS_TX_TIMEO_LIMIT (40 * HZ)
2668     static void hns_nic_net_timeout(struct net_device *ndev)
2669     {
2670     struct hns_nic_priv *priv = netdev_priv(ndev);
2671    
2672     - hns_tx_timeout_reset(priv);
2673     + if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
2674     + ndev->watchdog_timeo *= 2;
2675     + netdev_info(ndev, "watchdog_timo changed to %d.\n",
2676     + ndev->watchdog_timeo);
2677     + } else {
2678     + ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
2679     + hns_tx_timeout_reset(priv);
2680     + }
2681     }
2682    
2683     static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
2684     @@ -2049,11 +2084,11 @@ static void hns_nic_service_task(struct work_struct *work)
2685     = container_of(work, struct hns_nic_priv, service_task);
2686     struct hnae_handle *h = priv->ae_handle;
2687    
2688     + hns_nic_reset_subtask(priv);
2689     hns_nic_update_link_status(priv->netdev);
2690     h->dev->ops->update_led_status(h);
2691     hns_nic_update_stats(priv->netdev);
2692    
2693     - hns_nic_reset_subtask(priv);
2694     hns_nic_service_event_complete(priv);
2695     }
2696    
2697     @@ -2339,7 +2374,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
2698     ndev->min_mtu = MAC_MIN_MTU;
2699     switch (priv->enet_ver) {
2700     case AE_VERSION_2:
2701     - ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2702     + ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
2703     ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2704     NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2705     NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
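
The hns_enet timeout rework above stops resetting the hardware on the first TX stall: it doubles ndev->watchdog_timeo until it reaches HNS_TX_TIMEO_LIMIT, and only once the limit is hit does it restore the default timeout and schedule a reset. As a rough illustration of that capped back-off (not part of the patch; the demo_* names and constants are hypothetical stand-ins), a standalone C sketch:

#include <stdio.h>

#define DEMO_TIMEOUT_INIT   5      /* stand-in for the default watchdog timeout */
#define DEMO_TIMEOUT_LIMIT  40     /* stand-in for the back-off limit */

static void demo_reset(void)
{
	printf("timeout limit hit: scheduling full reset\n");
}

/* Double the timeout while it is below the limit; otherwise restore the
 * default and fall back to a real reset. */
static int handle_tx_timeout(int timeo)
{
	if (timeo < DEMO_TIMEOUT_LIMIT) {
		timeo *= 2;
		printf("watchdog timeout raised to %d\n", timeo);
	} else {
		timeo = DEMO_TIMEOUT_INIT;
		demo_reset();
	}
	return timeo;
}

int main(void)
{
	int timeo = DEMO_TIMEOUT_INIT;
	int i;

	for (i = 0; i < 5; i++)
		timeo = handle_tx_timeout(timeo);
	return 0;
}
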
2706     diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
2707     index 5ab21a1b5444..c8704b1690eb 100644
2708     --- a/drivers/net/ethernet/ibm/ibmvnic.c
2709     +++ b/drivers/net/ethernet/ibm/ibmvnic.c
2710     @@ -1939,8 +1939,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
2711     static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2712     {
2713     struct ibmvnic_rwi *rwi;
2714     + unsigned long flags;
2715    
2716     - mutex_lock(&adapter->rwi_lock);
2717     + spin_lock_irqsave(&adapter->rwi_lock, flags);
2718    
2719     if (!list_empty(&adapter->rwi_list)) {
2720     rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2721     @@ -1950,7 +1951,7 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2722     rwi = NULL;
2723     }
2724    
2725     - mutex_unlock(&adapter->rwi_lock);
2726     + spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2727     return rwi;
2728     }
2729    
2730     @@ -2025,6 +2026,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2731     struct list_head *entry, *tmp_entry;
2732     struct ibmvnic_rwi *rwi, *tmp;
2733     struct net_device *netdev = adapter->netdev;
2734     + unsigned long flags;
2735     int ret;
2736    
2737     if (adapter->state == VNIC_REMOVING ||
2738     @@ -2041,21 +2043,21 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2739     goto err;
2740     }
2741    
2742     - mutex_lock(&adapter->rwi_lock);
2743     + spin_lock_irqsave(&adapter->rwi_lock, flags);
2744    
2745     list_for_each(entry, &adapter->rwi_list) {
2746     tmp = list_entry(entry, struct ibmvnic_rwi, list);
2747     if (tmp->reset_reason == reason) {
2748     netdev_dbg(netdev, "Skipping matching reset\n");
2749     - mutex_unlock(&adapter->rwi_lock);
2750     + spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2751     ret = EBUSY;
2752     goto err;
2753     }
2754     }
2755    
2756     - rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
2757     + rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2758     if (!rwi) {
2759     - mutex_unlock(&adapter->rwi_lock);
2760     + spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2761     ibmvnic_close(netdev);
2762     ret = ENOMEM;
2763     goto err;
2764     @@ -2069,7 +2071,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2765     }
2766     rwi->reset_reason = reason;
2767     list_add_tail(&rwi->list, &adapter->rwi_list);
2768     - mutex_unlock(&adapter->rwi_lock);
2769     + spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2770     adapter->resetting = true;
2771     netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2772     schedule_work(&adapter->ibmvnic_reset);
2773     @@ -4700,7 +4702,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
2774    
2775     INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
2776     INIT_LIST_HEAD(&adapter->rwi_list);
2777     - mutex_init(&adapter->rwi_lock);
2778     + spin_lock_init(&adapter->rwi_lock);
2779     adapter->resetting = false;
2780    
2781     adapter->mac_change_pending = false;
2782     diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
2783     index 735f481b1870..09465397b7ff 100644
2784     --- a/drivers/net/ethernet/ibm/ibmvnic.h
2785     +++ b/drivers/net/ethernet/ibm/ibmvnic.h
2786     @@ -1068,7 +1068,7 @@ struct ibmvnic_adapter {
2787     struct tasklet_struct tasklet;
2788     enum vnic_state state;
2789     enum ibmvnic_reset_reason reset_reason;
2790     - struct mutex rwi_lock;
2791     + spinlock_t rwi_lock;
2792     struct list_head rwi_list;
2793     struct work_struct ibmvnic_reset;
2794     bool resetting;
2795     diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
2796     index 3c342700bf5f..ed9d3fc4aaba 100644
2797     --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
2798     +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
2799     @@ -1539,17 +1539,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
2800     netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
2801    
2802     /* Copy the address first, so that we avoid a possible race with
2803     - * .set_rx_mode(). If we copy after changing the address in the filter
2804     - * list, we might open ourselves to a narrow race window where
2805     - * .set_rx_mode could delete our dev_addr filter and prevent traffic
2806     - * from passing.
2807     + * .set_rx_mode().
2808     + * - Remove old address from MAC filter
2809     + * - Copy new address
2810     + * - Add new address to MAC filter
2811     */
2812     - ether_addr_copy(netdev->dev_addr, addr->sa_data);
2813     -
2814     spin_lock_bh(&vsi->mac_filter_hash_lock);
2815     i40e_del_mac_filter(vsi, netdev->dev_addr);
2816     - i40e_add_mac_filter(vsi, addr->sa_data);
2817     + ether_addr_copy(netdev->dev_addr, addr->sa_data);
2818     + i40e_add_mac_filter(vsi, netdev->dev_addr);
2819     spin_unlock_bh(&vsi->mac_filter_hash_lock);
2820     +
2821     if (vsi->type == I40E_VSI_MAIN) {
2822     i40e_status ret;
2823    
2824     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
2825     index eea63a99f29c..f6ffd9fb2079 100644
2826     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
2827     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
2828     @@ -699,7 +699,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
2829     u8 num_tcs = adapter->hw_tcs;
2830     u32 reg_val;
2831     u32 queue;
2832     - u32 word;
2833    
2834     /* remove VLAN filters beloning to this VF */
2835     ixgbe_clear_vf_vlans(adapter, vf);
2836     @@ -754,6 +753,14 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
2837     }
2838     }
2839    
2840     + IXGBE_WRITE_FLUSH(hw);
2841     +}
2842     +
2843     +static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
2844     +{
2845     + struct ixgbe_hw *hw = &adapter->hw;
2846     + u32 word;
2847     +
2848     /* Clear VF's mailbox memory */
2849     for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
2850     IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
2851     @@ -827,6 +834,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
2852     /* reset the filters for the device */
2853     ixgbe_vf_reset_event(adapter, vf);
2854    
2855     + ixgbe_vf_clear_mbx(adapter, vf);
2856     +
2857     /* set vf mac address */
2858     if (!is_zero_ether_addr(vf_mac))
2859     ixgbe_set_vf_mac(adapter, vf, vf_mac);
2860     diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
2861     index aaedf1072460..1393252c6e3c 100644
2862     --- a/drivers/net/ethernet/microchip/lan743x_main.c
2863     +++ b/drivers/net/ethernet/microchip/lan743x_main.c
2864     @@ -802,14 +802,8 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
2865     u32 mac_addr_hi = 0;
2866     u32 mac_addr_lo = 0;
2867     u32 data;
2868     - int ret;
2869    
2870     netdev = adapter->netdev;
2871     - lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
2872     - ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
2873     - 0, 1000, 20000, 100);
2874     - if (ret)
2875     - return ret;
2876    
2877     /* setup auto duplex, and speed detection */
2878     data = lan743x_csr_read(adapter, MAC_CR);
2879     @@ -2722,8 +2716,9 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
2880     snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
2881     "pci-%s", pci_name(adapter->pdev));
2882    
2883     - /* set to internal PHY id */
2884     - adapter->mdiobus->phy_mask = ~(u32)BIT(1);
2885     + if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
2886     + /* LAN7430 uses internal phy at address 1 */
2887     + adapter->mdiobus->phy_mask = ~(u32)BIT(1);
2888    
2889     /* register mdiobus */
2890     ret = mdiobus_register(adapter->mdiobus);
2891     diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
2892     index 398011c87643..bf4302e45dcd 100644
2893     --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
2894     +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
2895     @@ -807,7 +807,7 @@ __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
2896     struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2897     struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2898     struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2899     - u64 data0, data1 = 0, steer_ctrl = 0;
2900     + u64 data0 = 0, data1 = 0, steer_ctrl = 0;
2901     enum vxge_hw_status status;
2902    
2903     status = vxge_hw_vpath_fw_api(vpath,
2904     diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
2905     index 052b3d2c07a1..c662c6f5bee3 100644
2906     --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
2907     +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
2908     @@ -912,7 +912,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
2909     .ndo_validate_addr = eth_validate_addr,
2910     };
2911    
2912     -static void __init get_mac_address(struct net_device *dev)
2913     +static void get_mac_address(struct net_device *dev)
2914     {
2915     struct w90p910_ether *ether = netdev_priv(dev);
2916     struct platform_device *pdev;
2917     diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
2918     index 0ea141ece19e..6547a9dd5935 100644
2919     --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
2920     +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
2921     @@ -1125,7 +1125,8 @@ netxen_validate_firmware(struct netxen_adapter *adapter)
2922     return -EINVAL;
2923     }
2924     val = nx_get_bios_version(adapter);
2925     - netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
2926     + if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios))
2927     + return -EIO;
2928     if ((__force u32)val != bios) {
2929     dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
2930     fw_name[fw_type]);
2931     diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2932     index 14ac9cab2653..2fa1c050a14b 100644
2933     --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2934     +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2935     @@ -2485,6 +2485,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2936     if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2937     DP_NOTICE(cdev,
2938     "Unable to map frag - dropping packet\n");
2939     + rc = -ENOMEM;
2940     goto err;
2941     }
2942    
2943     diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
2944     index 0ff5a403a8dc..b2ff903a9cb6 100644
2945     --- a/drivers/net/ieee802154/ca8210.c
2946     +++ b/drivers/net/ieee802154/ca8210.c
2947     @@ -721,7 +721,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work)
2948     static void ca8210_rx_done(struct cas_control *cas_ctl)
2949     {
2950     u8 *buf;
2951     - u8 len;
2952     + unsigned int len;
2953     struct work_priv_container *mlme_reset_wpc;
2954     struct ca8210_priv *priv = cas_ctl->priv;
2955    
2956     @@ -730,7 +730,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
2957     if (len > CA8210_SPI_BUF_SIZE) {
2958     dev_crit(
2959     &priv->spi->dev,
2960     - "Received packet len (%d) erroneously long\n",
2961     + "Received packet len (%u) erroneously long\n",
2962     len
2963     );
2964     goto finish;
2965     diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
2966     index bf70ab892e69..624bff4d3636 100644
2967     --- a/drivers/net/ieee802154/mac802154_hwsim.c
2968     +++ b/drivers/net/ieee802154/mac802154_hwsim.c
2969     @@ -500,7 +500,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
2970     !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
2971     return -EINVAL;
2972    
2973     - if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
2974     + if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
2975     info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
2976     hwsim_edge_policy, NULL))
2977     return -EINVAL;
2978     @@ -550,7 +550,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
2979     !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
2980     return -EINVAL;
2981    
2982     - if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
2983     + if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
2984     info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
2985     hwsim_edge_policy, NULL))
2986     return -EINVAL;
2987     diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
2988     index c3c9ba44e2a1..8d140495da79 100644
2989     --- a/drivers/net/usb/lan78xx.c
2990     +++ b/drivers/net/usb/lan78xx.c
2991     @@ -2335,6 +2335,10 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2992     ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2993     ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2994    
2995     + /* Added to support MAC address changes */
2996     + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2997     + ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2998     +
2999     return 0;
3000     }
3001    
3002     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
3003     index f5bac5075386..774e1ff01c9a 100644
3004     --- a/drivers/net/usb/qmi_wwan.c
3005     +++ b/drivers/net/usb/qmi_wwan.c
3006     @@ -151,17 +151,18 @@ static bool qmimux_has_slaves(struct usbnet *dev)
3007    
3008     static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
3009     {
3010     - unsigned int len, offset = sizeof(struct qmimux_hdr);
3011     + unsigned int len, offset = 0;
3012     struct qmimux_hdr *hdr;
3013     struct net_device *net;
3014     struct sk_buff *skbn;
3015     + u8 qmimux_hdr_sz = sizeof(*hdr);
3016    
3017     - while (offset < skb->len) {
3018     - hdr = (struct qmimux_hdr *)skb->data;
3019     + while (offset + qmimux_hdr_sz < skb->len) {
3020     + hdr = (struct qmimux_hdr *)(skb->data + offset);
3021     len = be16_to_cpu(hdr->pkt_len);
3022    
3023     /* drop the packet, bogus length */
3024     - if (offset + len > skb->len)
3025     + if (offset + len + qmimux_hdr_sz > skb->len)
3026     return 0;
3027    
3028     /* control packet, we do not know what to do */
3029     @@ -176,7 +177,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
3030     return 0;
3031     skbn->dev = net;
3032    
3033     - switch (skb->data[offset] & 0xf0) {
3034     + switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
3035     case 0x40:
3036     skbn->protocol = htons(ETH_P_IP);
3037     break;
3038     @@ -188,12 +189,12 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
3039     goto skip;
3040     }
3041    
3042     - skb_put_data(skbn, skb->data + offset, len);
3043     + skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, len);
3044     if (netif_rx(skbn) != NET_RX_SUCCESS)
3045     return 0;
3046    
3047     skip:
3048     - offset += len + sizeof(struct qmimux_hdr);
3049     + offset += len + qmimux_hdr_sz;
3050     }
3051     return 1;
3052     }
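
The qmi_wwan rework above keeps the running offset and the header size separate, reads each mux header only after checking that a whole header still fits in the skb, and validates header plus advertised payload length against skb->len before copying the payload. A standalone sketch of that bounds-checked walk (illustrative only; the 4-byte demo header layout is a hypothetical stand-in for struct qmimux_hdr):

#include <stdint.h>
#include <stdio.h>

#define DEMO_HDR_SZ 4   /* mux_id, pad, 16-bit big-endian payload length */

static void walk_frames(const uint8_t *buf, size_t total)
{
	size_t offset = 0;

	/* only read a header if a complete header is still available */
	while (offset + DEMO_HDR_SZ < total) {
		size_t len = ((size_t)buf[offset + 2] << 8) | buf[offset + 3];

		/* drop everything on a bogus advertised length */
		if (offset + DEMO_HDR_SZ + len > total) {
			printf("bogus length %zu at offset %zu, stopping\n",
			       len, offset);
			return;
		}
		printf("frame at offset %zu: mux_id %u, %zu payload bytes\n",
		       offset, (unsigned)buf[offset], len);
		offset += DEMO_HDR_SZ + len;
	}
}

int main(void)
{
	/* two frames: mux_id 1 with 2 payload bytes, mux_id 2 with 1 byte */
	const uint8_t buf[] = { 1, 0, 0, 2, 0xaa, 0xbb,
				2, 0, 0, 1, 0xcc };

	walk_frames(buf, sizeof(buf));
	return 0;
}

Each payload is located at offset + DEMO_HDR_SZ, mirroring the fixed driver, which indexes skb->data at offset + qmimux_hdr_sz instead of assuming the header has already been skipped.
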
3053     diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
3054     index 85f2ca989565..ef3ffa5ad466 100644
3055     --- a/drivers/net/wireless/broadcom/b43/phy_common.c
3056     +++ b/drivers/net/wireless/broadcom/b43/phy_common.c
3057     @@ -616,7 +616,7 @@ struct b43_c32 b43_cordic(int theta)
3058     u8 i;
3059     s32 tmp;
3060     s8 signx = 1;
3061     - u32 angle = 0;
3062     + s32 angle = 0;
3063     struct b43_c32 ret = { .i = 39797, .q = 0, };
3064    
3065     while (theta > (180 << 16))
3066     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
3067     index 7cdb3e740522..0a3e046d78db 100644
3068     --- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
3069     +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
3070     @@ -681,6 +681,7 @@ int mt76x0_register_device(struct mt76x0_dev *dev)
3071     ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
3072     ieee80211_hw_set(hw, AMPDU_AGGREGATION);
3073     ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
3074     + ieee80211_hw_set(hw, MFP_CAPABLE);
3075     hw->max_rates = 1;
3076     hw->max_report_rates = 7;
3077     hw->max_rate_tries = 1;
3078     diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
3079     index af48d43bb7dc..20447fdce4c3 100644
3080     --- a/drivers/net/wireless/mediatek/mt76/tx.c
3081     +++ b/drivers/net/wireless/mediatek/mt76/tx.c
3082     @@ -385,7 +385,12 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
3083    
3084     for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
3085     struct ieee80211_txq *txq = sta->txq[i];
3086     - struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
3087     + struct mt76_txq *mtxq;
3088     +
3089     + if (!txq)
3090     + continue;
3091     +
3092     + mtxq = (struct mt76_txq *)txq->drv_priv;
3093    
3094     spin_lock_bh(&mtxq->hwq->lock);
3095     mtxq->send_bar = mtxq->aggr && send_bar;
3096     diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
3097     index 2082ae01b9c8..1d432c5ed275 100644
3098     --- a/drivers/nvdimm/pmem.c
3099     +++ b/drivers/nvdimm/pmem.c
3100     @@ -309,8 +309,11 @@ static void pmem_release_queue(void *q)
3101     blk_cleanup_queue(q);
3102     }
3103    
3104     -static void pmem_freeze_queue(void *q)
3105     +static void pmem_freeze_queue(struct percpu_ref *ref)
3106     {
3107     + struct request_queue *q;
3108     +
3109     + q = container_of(ref, typeof(*q), q_usage_counter);
3110     blk_freeze_queue_start(q);
3111     }
3112    
3113     @@ -402,6 +405,7 @@ static int pmem_attach_disk(struct device *dev,
3114    
3115     pmem->pfn_flags = PFN_DEV;
3116     pmem->pgmap.ref = &q->q_usage_counter;
3117     + pmem->pgmap.kill = pmem_freeze_queue;
3118     if (is_nd_pfn(dev)) {
3119     if (setup_pagemap_fsdax(dev, &pmem->pgmap))
3120     return -ENOMEM;
3121     @@ -427,13 +431,6 @@ static int pmem_attach_disk(struct device *dev,
3122     memcpy(&bb_res, &nsio->res, sizeof(bb_res));
3123     }
3124    
3125     - /*
3126     - * At release time the queue must be frozen before
3127     - * devm_memremap_pages is unwound
3128     - */
3129     - if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
3130     - return -ENOMEM;
3131     -
3132     if (IS_ERR(addr))
3133     return PTR_ERR(addr);
3134     pmem->virt_addr = addr;
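
With the pmem change above, the queue-freeze hook becomes the dev_pagemap ->kill callback, so it receives only the embedded percpu_ref and recovers the owning request_queue with container_of(). A standalone sketch of that embedded-member recovery idiom (illustrative only; the demo_* types stand in for percpu_ref and request_queue):

#include <stddef.h>
#include <stdio.h>

/* local copy of the container_of idiom: given a pointer to an embedded
 * member, recover the enclosing structure */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_ref {		/* stand-in for struct percpu_ref */
	int count;
};

struct demo_queue {		/* stand-in for struct request_queue */
	const char *name;
	struct demo_ref usage_counter;
};

/* callback that only receives the embedded ref, as pgmap->kill() does */
static void demo_kill(struct demo_ref *ref)
{
	struct demo_queue *q = container_of(ref, struct demo_queue,
					    usage_counter);

	printf("freezing queue %s\n", q->name);
}

int main(void)
{
	struct demo_queue q = { .name = "pmem0", .usage_counter = { 1 } };

	demo_kill(&q.usage_counter);
	return 0;
}
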
3135     diff --git a/drivers/of/base.c b/drivers/of/base.c
3136     index 70f5fd08891b..3f21ea6a90dc 100644
3137     --- a/drivers/of/base.c
3138     +++ b/drivers/of/base.c
3139     @@ -115,9 +115,6 @@ int __weak of_node_to_nid(struct device_node *np)
3140     }
3141     #endif
3142    
3143     -static struct device_node **phandle_cache;
3144     -static u32 phandle_cache_mask;
3145     -
3146     /*
3147     * Assumptions behind phandle_cache implementation:
3148     * - phandle property values are in a contiguous range of 1..n
3149     @@ -126,6 +123,66 @@ static u32 phandle_cache_mask;
3150     * - the phandle lookup overhead reduction provided by the cache
3151     * will likely be less
3152     */
3153     +
3154     +static struct device_node **phandle_cache;
3155     +static u32 phandle_cache_mask;
3156     +
3157     +/*
3158     + * Caller must hold devtree_lock.
3159     + */
3160     +static void __of_free_phandle_cache(void)
3161     +{
3162     + u32 cache_entries = phandle_cache_mask + 1;
3163     + u32 k;
3164     +
3165     + if (!phandle_cache)
3166     + return;
3167     +
3168     + for (k = 0; k < cache_entries; k++)
3169     + of_node_put(phandle_cache[k]);
3170     +
3171     + kfree(phandle_cache);
3172     + phandle_cache = NULL;
3173     +}
3174     +
3175     +int of_free_phandle_cache(void)
3176     +{
3177     + unsigned long flags;
3178     +
3179     + raw_spin_lock_irqsave(&devtree_lock, flags);
3180     +
3181     + __of_free_phandle_cache();
3182     +
3183     + raw_spin_unlock_irqrestore(&devtree_lock, flags);
3184     +
3185     + return 0;
3186     +}
3187     +#if !defined(CONFIG_MODULES)
3188     +late_initcall_sync(of_free_phandle_cache);
3189     +#endif
3190     +
3191     +/*
3192     + * Caller must hold devtree_lock.
3193     + */
3194     +void __of_free_phandle_cache_entry(phandle handle)
3195     +{
3196     + phandle masked_handle;
3197     + struct device_node *np;
3198     +
3199     + if (!handle)
3200     + return;
3201     +
3202     + masked_handle = handle & phandle_cache_mask;
3203     +
3204     + if (phandle_cache) {
3205     + np = phandle_cache[masked_handle];
3206     + if (np && handle == np->phandle) {
3207     + of_node_put(np);
3208     + phandle_cache[masked_handle] = NULL;
3209     + }
3210     + }
3211     +}
3212     +
3213     void of_populate_phandle_cache(void)
3214     {
3215     unsigned long flags;
3216     @@ -135,8 +192,7 @@ void of_populate_phandle_cache(void)
3217    
3218     raw_spin_lock_irqsave(&devtree_lock, flags);
3219    
3220     - kfree(phandle_cache);
3221     - phandle_cache = NULL;
3222     + __of_free_phandle_cache();
3223    
3224     for_each_of_allnodes(np)
3225     if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
3226     @@ -154,30 +210,15 @@ void of_populate_phandle_cache(void)
3227     goto out;
3228    
3229     for_each_of_allnodes(np)
3230     - if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
3231     + if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
3232     + of_node_get(np);
3233     phandle_cache[np->phandle & phandle_cache_mask] = np;
3234     + }
3235    
3236     out:
3237     raw_spin_unlock_irqrestore(&devtree_lock, flags);
3238     }
3239    
3240     -int of_free_phandle_cache(void)
3241     -{
3242     - unsigned long flags;
3243     -
3244     - raw_spin_lock_irqsave(&devtree_lock, flags);
3245     -
3246     - kfree(phandle_cache);
3247     - phandle_cache = NULL;
3248     -
3249     - raw_spin_unlock_irqrestore(&devtree_lock, flags);
3250     -
3251     - return 0;
3252     -}
3253     -#if !defined(CONFIG_MODULES)
3254     -late_initcall_sync(of_free_phandle_cache);
3255     -#endif
3256     -
3257     void __init of_core_init(void)
3258     {
3259     struct device_node *np;
3260     @@ -1150,13 +1191,23 @@ struct device_node *of_find_node_by_phandle(phandle handle)
3261     if (phandle_cache[masked_handle] &&
3262     handle == phandle_cache[masked_handle]->phandle)
3263     np = phandle_cache[masked_handle];
3264     + if (np && of_node_check_flag(np, OF_DETACHED)) {
3265     + WARN_ON(1); /* did not uncache np on node removal */
3266     + of_node_put(np);
3267     + phandle_cache[masked_handle] = NULL;
3268     + np = NULL;
3269     + }
3270     }
3271    
3272     if (!np) {
3273     for_each_of_allnodes(np)
3274     - if (np->phandle == handle) {
3275     - if (phandle_cache)
3276     + if (np->phandle == handle &&
3277     + !of_node_check_flag(np, OF_DETACHED)) {
3278     + if (phandle_cache) {
3279     + /* will put when removed from cache */
3280     + of_node_get(np);
3281     phandle_cache[masked_handle] = np;
3282     + }
3283     break;
3284     }
3285     }
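
The of/base.c rework above makes the phandle cache hold a reference on every node it stores: of_node_get() when an entry is inserted, of_node_put() when the entry is evicted, the node is detached, or the cache is freed. A standalone sketch of that cache-owns-a-reference rule (illustrative only; demo_node and its get/put helpers are hypothetical stand-ins for device_node refcounting):

#include <stdio.h>

struct demo_node {
	const char *name;
	int refcount;
};

static struct demo_node *demo_get(struct demo_node *np)
{
	if (np)
		np->refcount++;
	return np;
}

static void demo_put(struct demo_node *np)
{
	if (np && --np->refcount == 0)
		printf("node %s released\n", np->name);
}

/* single-slot cache: insertion takes a reference, eviction drops it */
static struct demo_node *cache_slot;

static void cache_insert(struct demo_node *np)
{
	if (cache_slot)
		demo_put(cache_slot);
	cache_slot = demo_get(np);
}

static void cache_free(void)
{
	demo_put(cache_slot);
	cache_slot = NULL;
}

int main(void)
{
	struct demo_node a = { "a", 1 }, b = { "b", 1 };

	cache_insert(&a);	/* a is now pinned by the cache */
	cache_insert(&b);	/* evicts a, dropping the cache's reference */
	cache_free();		/* drops the reference held for b */
	demo_put(&a);		/* callers drop their own references last */
	demo_put(&b);
	return 0;
}
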
3286     diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
3287     index f4f8ed9b5454..ecea92f68c87 100644
3288     --- a/drivers/of/dynamic.c
3289     +++ b/drivers/of/dynamic.c
3290     @@ -268,6 +268,9 @@ void __of_detach_node(struct device_node *np)
3291     }
3292    
3293     of_node_set_flag(np, OF_DETACHED);
3294     +
3295     + /* race with of_find_node_by_phandle() prevented by devtree_lock */
3296     + __of_free_phandle_cache_entry(np->phandle);
3297     }
3298    
3299     /**
3300     diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
3301     index 216175d11d3d..f5da842841e5 100644
3302     --- a/drivers/of/of_private.h
3303     +++ b/drivers/of/of_private.h
3304     @@ -76,6 +76,10 @@ static inline void __of_detach_node_sysfs(struct device_node *np) {}
3305     int of_resolve_phandles(struct device_node *tree);
3306     #endif
3307    
3308     +#if defined(CONFIG_OF_DYNAMIC)
3309     +void __of_free_phandle_cache_entry(phandle handle);
3310     +#endif
3311     +
3312     #if defined(CONFIG_OF_OVERLAY)
3313     void of_overlay_mutex_lock(void);
3314     void of_overlay_mutex_unlock(void);
3315     diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
3316     index bef17c3fca67..33f3f475e5c6 100644
3317     --- a/drivers/pci/pci-driver.c
3318     +++ b/drivers/pci/pci-driver.c
3319     @@ -1251,30 +1251,29 @@ static int pci_pm_runtime_suspend(struct device *dev)
3320     return 0;
3321     }
3322    
3323     - if (!pm || !pm->runtime_suspend)
3324     - return -ENOSYS;
3325     -
3326     pci_dev->state_saved = false;
3327     - error = pm->runtime_suspend(dev);
3328     - if (error) {
3329     + if (pm && pm->runtime_suspend) {
3330     + error = pm->runtime_suspend(dev);
3331     /*
3332     * -EBUSY and -EAGAIN is used to request the runtime PM core
3333     * to schedule a new suspend, so log the event only with debug
3334     * log level.
3335     */
3336     - if (error == -EBUSY || error == -EAGAIN)
3337     + if (error == -EBUSY || error == -EAGAIN) {
3338     dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
3339     pm->runtime_suspend, error);
3340     - else
3341     + return error;
3342     + } else if (error) {
3343     dev_err(dev, "can't suspend (%pf returned %d)\n",
3344     pm->runtime_suspend, error);
3345     -
3346     - return error;
3347     + return error;
3348     + }
3349     }
3350    
3351     pci_fixup_device(pci_fixup_suspend, pci_dev);
3352    
3353     - if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
3354     + if (pm && pm->runtime_suspend
3355     + && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
3356     && pci_dev->current_state != PCI_UNKNOWN) {
3357     WARN_ONCE(pci_dev->current_state != prev,
3358     "PCI PM: State of device not saved by %pF\n",
3359     @@ -1292,7 +1291,7 @@ static int pci_pm_runtime_suspend(struct device *dev)
3360    
3361     static int pci_pm_runtime_resume(struct device *dev)
3362     {
3363     - int rc;
3364     + int rc = 0;
3365     struct pci_dev *pci_dev = to_pci_dev(dev);
3366     const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
3367    
3368     @@ -1306,14 +1305,12 @@ static int pci_pm_runtime_resume(struct device *dev)
3369     if (!pci_dev->driver)
3370     return 0;
3371    
3372     - if (!pm || !pm->runtime_resume)
3373     - return -ENOSYS;
3374     -
3375     pci_fixup_device(pci_fixup_resume_early, pci_dev);
3376     pci_enable_wake(pci_dev, PCI_D0, false);
3377     pci_fixup_device(pci_fixup_resume, pci_dev);
3378    
3379     - rc = pm->runtime_resume(dev);
3380     + if (pm && pm->runtime_resume)
3381     + rc = pm->runtime_resume(dev);
3382    
3383     pci_dev->runtime_d3cold = false;
3384    
3385     diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
3386     index 1b10ea05a914..69372e2bc93c 100644
3387     --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
3388     +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
3389     @@ -30,8 +30,8 @@
3390     #define DDRC_FLUX_RCMD 0x38c
3391     #define DDRC_PRE_CMD 0x3c0
3392     #define DDRC_ACT_CMD 0x3c4
3393     -#define DDRC_BNK_CHG 0x3c8
3394     #define DDRC_RNK_CHG 0x3cc
3395     +#define DDRC_RW_CHG 0x3d0
3396     #define DDRC_EVENT_CTRL 0x6C0
3397     #define DDRC_INT_MASK 0x6c8
3398     #define DDRC_INT_STATUS 0x6cc
3399     @@ -51,7 +51,7 @@
3400    
3401     static const u32 ddrc_reg_off[] = {
3402     DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
3403     - DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG
3404     + DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
3405     };
3406    
3407     /*
3408     diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
3409     index 4f3ab18636a3..c8eff70fdb1c 100644
3410     --- a/drivers/pinctrl/meson/pinctrl-meson.c
3411     +++ b/drivers/pinctrl/meson/pinctrl-meson.c
3412     @@ -191,7 +191,8 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
3413     case PIN_CONFIG_BIAS_DISABLE:
3414     dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
3415    
3416     - meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
3417     + meson_calc_reg_and_bit(bank, pin, REG_PULLEN, &reg,
3418     + &bit);
3419     ret = regmap_update_bits(pc->reg_pullen, reg,
3420     BIT(bit), 0);
3421     if (ret)
3422     diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
3423     index 6da79ae14860..5a97e42a3547 100644
3424     --- a/drivers/power/supply/olpc_battery.c
3425     +++ b/drivers/power/supply/olpc_battery.c
3426     @@ -428,14 +428,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
3427     if (ret)
3428     return ret;
3429    
3430     - val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
3431     + val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
3432     break;
3433     case POWER_SUPPLY_PROP_TEMP_AMBIENT:
3434     ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
3435     if (ret)
3436     return ret;
3437    
3438     - val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
3439     + val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
3440     break;
3441     case POWER_SUPPLY_PROP_CHARGE_COUNTER:
3442     ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
3443     diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
3444     index 94f4d8fe85e0..d1b531fe9ada 100644
3445     --- a/drivers/s390/scsi/zfcp_aux.c
3446     +++ b/drivers/s390/scsi/zfcp_aux.c
3447     @@ -275,16 +275,16 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
3448     */
3449     int zfcp_status_read_refill(struct zfcp_adapter *adapter)
3450     {
3451     - while (atomic_read(&adapter->stat_miss) > 0)
3452     + while (atomic_add_unless(&adapter->stat_miss, -1, 0))
3453     if (zfcp_fsf_status_read(adapter->qdio)) {
3454     + atomic_inc(&adapter->stat_miss); /* undo add -1 */
3455     if (atomic_read(&adapter->stat_miss) >=
3456     adapter->stat_read_buf_num) {
3457     zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
3458     return 1;
3459     }
3460     break;
3461     - } else
3462     - atomic_dec(&adapter->stat_miss);
3463     + }
3464     return 0;
3465     }
3466    
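
The zfcp hunk above replaces a read-then-decrement pair with atomic_add_unless(&adapter->stat_miss, -1, 0), so the counter is decremented only while it is still positive and the check and update happen as one atomic step; the explicit atomic_inc() undoes that decrement when posting a status-read buffer fails. A minimal standalone C11 sketch of the same decrement-unless-zero idiom (illustrative only; dec_unless_zero is a hypothetical helper, not a kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add -1 to *v unless *v is already 0; return true if the decrement
 * happened, mirroring atomic_add_unless(v, -1, 0). */
static bool dec_unless_zero(atomic_int *v)
{
	int cur = atomic_load(v);

	while (cur != 0) {
		if (atomic_compare_exchange_weak(v, &cur, cur - 1))
			return true;
		/* cur was reloaded by the failed CAS; retry */
	}
	return false;
}

int main(void)
{
	atomic_int stat_miss = 2;

	while (dec_unless_zero(&stat_miss))
		printf("consumed one slot, %d left\n",
		       atomic_load(&stat_miss));

	printf("final value: %d (never below zero)\n",
	       atomic_load(&stat_miss));
	return 0;
}

Because the zero test and the update form a single compare-and-swap, concurrent callers cannot drive the counter negative, which is the race the original open-coded read/decrement allowed.
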
3467     diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
3468     index f00045813378..3f97ec4aac4b 100644
3469     --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
3470     +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
3471     @@ -2371,7 +2371,7 @@ static int _bnx2fc_create(struct net_device *netdev,
3472     if (!interface) {
3473     printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
3474     rc = -ENOMEM;
3475     - goto ifput_err;
3476     + goto netdev_err;
3477     }
3478    
3479     if (is_vlan_dev(netdev)) {
3480     diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
3481     index be2bac9355cd..a490e63c94b6 100644
3482     --- a/drivers/scsi/lpfc/lpfc_sli.c
3483     +++ b/drivers/scsi/lpfc/lpfc_sli.c
3484     @@ -14221,7 +14221,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
3485     hw_page_size))/hw_page_size;
3486    
3487     /* If needed, Adjust page count to match the max the adapter supports */
3488     - if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
3489     + if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
3490     + (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
3491     queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
3492    
3493     INIT_LIST_HEAD(&queue->list);
3494     diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
3495     index ae9fd2d01004..42b8f0d3e580 100644
3496     --- a/drivers/scsi/qla2xxx/qla_os.c
3497     +++ b/drivers/scsi/qla2xxx/qla_os.c
3498     @@ -4808,10 +4808,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
3499     fcport->d_id = e->u.new_sess.id;
3500     fcport->flags |= FCF_FABRIC_DEVICE;
3501     fcport->fw_login_state = DSC_LS_PLOGI_PEND;
3502     - if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
3503     + if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
3504     fcport->fc4_type = FC4_TYPE_FCP_SCSI;
3505    
3506     - if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
3507     + if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
3508     fcport->fc4_type = FC4_TYPE_OTHER;
3509     fcport->fc4f_nvme = FC4_TYPE_NVME;
3510     }
3511     diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
3512     index 8de16016b6de..b289b90ae6dc 100644
3513     --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
3514     +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
3515     @@ -631,8 +631,11 @@ static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
3516    
3517     static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
3518     {
3519     + struct cxgbit_sock *csk = handle;
3520     +
3521     pr_debug("%s cxgbit_device %p\n", __func__, handle);
3522     kfree_skb(skb);
3523     + cxgbit_put_csk(csk);
3524     }
3525    
3526     static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
3527     @@ -1190,7 +1193,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
3528     rpl5->opt0 = cpu_to_be64(opt0);
3529     rpl5->opt2 = cpu_to_be32(opt2);
3530     set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
3531     - t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
3532     + t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
3533     cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
3534     }
3535    
3536     diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
3537     index f3f8856bfb68..c011c826fc26 100644
3538     --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
3539     +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
3540     @@ -58,6 +58,7 @@ static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
3541     return ERR_PTR(-ENOMEM);
3542    
3543     kref_init(&cdev->kref);
3544     + spin_lock_init(&cdev->np_lock);
3545    
3546     cdev->lldi = *lldi;
3547    
3548     diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
3549     index 6cf3e9b0728f..3e77475668c0 100644
3550     --- a/drivers/tty/serial/sunsu.c
3551     +++ b/drivers/tty/serial/sunsu.c
3552     @@ -1394,22 +1394,43 @@ static inline struct console *SUNSU_CONSOLE(void)
3553     static enum su_type su_get_type(struct device_node *dp)
3554     {
3555     struct device_node *ap = of_find_node_by_path("/aliases");
3556     + enum su_type rc = SU_PORT_PORT;
3557    
3558     if (ap) {
3559     const char *keyb = of_get_property(ap, "keyboard", NULL);
3560     const char *ms = of_get_property(ap, "mouse", NULL);
3561     + struct device_node *match;
3562    
3563     if (keyb) {
3564     - if (dp == of_find_node_by_path(keyb))
3565     - return SU_PORT_KBD;
3566     + match = of_find_node_by_path(keyb);
3567     +
3568     + /*
3569     + * The pointer is used as an identifier not
3570     + * as a pointer, we can drop the refcount on
3571     + * the of__node immediately after getting it.
3572     + */
3573     + of_node_put(match);
3574     +
3575     + if (dp == match) {
3576     + rc = SU_PORT_KBD;
3577     + goto out;
3578     + }
3579     }
3580     if (ms) {
3581     - if (dp == of_find_node_by_path(ms))
3582     - return SU_PORT_MS;
3583     + match = of_find_node_by_path(ms);
3584     +
3585     + of_node_put(match);
3586     +
3587     + if (dp == match) {
3588     + rc = SU_PORT_MS;
3589     + goto out;
3590     + }
3591     }
3592     }
3593    
3594     - return SU_PORT_PORT;
3595     +out:
3596     + of_node_put(ap);
3597     + return rc;
3598     }
3599    
3600     static int su_probe(struct platform_device *op)
3601     diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
3602     index bbed039617a4..d59c8a59f582 100644
3603     --- a/drivers/video/fbdev/pxafb.c
3604     +++ b/drivers/video/fbdev/pxafb.c
3605     @@ -2234,10 +2234,8 @@ static struct pxafb_mach_info *of_pxafb_of_mach_info(struct device *dev)
3606     if (!info)
3607     return ERR_PTR(-ENOMEM);
3608     ret = of_get_pxafb_mode_info(dev, info);
3609     - if (ret) {
3610     - kfree(info->modes);
3611     + if (ret)
3612     return ERR_PTR(ret);
3613     - }
3614    
3615     /*
3616     * On purpose, neither lccrX registers nor video memory size can be
3617     diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
3618     index dd7dfdd2ba13..eadffaa39f4e 100644
3619     --- a/fs/ceph/caps.c
3620     +++ b/fs/ceph/caps.c
3621     @@ -3566,7 +3566,6 @@ retry:
3622     tcap->cap_id = t_cap_id;
3623     tcap->seq = t_seq - 1;
3624     tcap->issue_seq = t_seq - 1;
3625     - tcap->mseq = t_mseq;
3626     tcap->issued |= issued;
3627     tcap->implemented |= issued;
3628     if (cap == ci->i_auth_cap)
3629     diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
3630     index cc91963683de..a928ba008d7d 100644
3631     --- a/fs/dlm/lock.c
3632     +++ b/fs/dlm/lock.c
3633     @@ -1209,6 +1209,7 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
3634    
3635     if (rv < 0) {
3636     log_error(ls, "create_lkb idr error %d", rv);
3637     + dlm_free_lkb(lkb);
3638     return rv;
3639     }
3640    
3641     @@ -4179,6 +4180,7 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
3642     (unsigned long long)lkb->lkb_recover_seq,
3643     ms->m_header.h_nodeid, ms->m_lkid);
3644     error = -ENOENT;
3645     + dlm_put_lkb(lkb);
3646     goto fail;
3647     }
3648    
3649     @@ -4232,6 +4234,7 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
3650     lkb->lkb_id, lkb->lkb_remid,
3651     ms->m_header.h_nodeid, ms->m_lkid);
3652     error = -ENOENT;
3653     + dlm_put_lkb(lkb);
3654     goto fail;
3655     }
3656    
3657     @@ -5792,20 +5795,20 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
3658     goto out;
3659     }
3660     }
3661     -
3662     - /* After ua is attached to lkb it will be freed by dlm_free_lkb().
3663     - When DLM_IFL_USER is set, the dlm knows that this is a userspace
3664     - lock and that lkb_astparam is the dlm_user_args structure. */
3665     -
3666     error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
3667     fake_astfn, ua, fake_bastfn, &args);
3668     - lkb->lkb_flags |= DLM_IFL_USER;
3669     -
3670     if (error) {
3671     + kfree(ua->lksb.sb_lvbptr);
3672     + ua->lksb.sb_lvbptr = NULL;
3673     + kfree(ua);
3674     __put_lkb(ls, lkb);
3675     goto out;
3676     }
3677    
3678     + /* After ua is attached to lkb it will be freed by dlm_free_lkb().
3679     + When DLM_IFL_USER is set, the dlm knows that this is a userspace
3680     + lock and that lkb_astparam is the dlm_user_args structure. */
3681     + lkb->lkb_flags |= DLM_IFL_USER;
3682     error = request_lock(ls, lkb, name, namelen, &args);
3683    
3684     switch (error) {
3685     diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
3686     index 5ba94be006ee..6a1529e478f3 100644
3687     --- a/fs/dlm/lockspace.c
3688     +++ b/fs/dlm/lockspace.c
3689     @@ -680,11 +680,11 @@ static int new_lockspace(const char *name, const char *cluster,
3690     kfree(ls->ls_recover_buf);
3691     out_lkbidr:
3692     idr_destroy(&ls->ls_lkbidr);
3693     + out_rsbtbl:
3694     for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
3695     if (ls->ls_remove_names[i])
3696     kfree(ls->ls_remove_names[i]);
3697     }
3698     - out_rsbtbl:
3699     vfree(ls->ls_rsbtbl);
3700     out_lsfree:
3701     if (do_unreg)
3702     diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
3703     index 648f0ca1ad57..998051c4aea7 100644
3704     --- a/fs/gfs2/inode.c
3705     +++ b/fs/gfs2/inode.c
3706     @@ -744,17 +744,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
3707     the gfs2 structures. */
3708     if (default_acl) {
3709     error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
3710     + if (error)
3711     + goto fail_gunlock3;
3712     posix_acl_release(default_acl);
3713     + default_acl = NULL;
3714     }
3715     if (acl) {
3716     - if (!error)
3717     - error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
3718     + error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
3719     + if (error)
3720     + goto fail_gunlock3;
3721     posix_acl_release(acl);
3722     + acl = NULL;
3723     }
3724    
3725     - if (error)
3726     - goto fail_gunlock3;
3727     -
3728     error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
3729     &gfs2_initxattrs, NULL);
3730     if (error)
3731     @@ -789,10 +791,8 @@ fail_free_inode:
3732     }
3733     gfs2_rsqa_delete(ip, NULL);
3734     fail_free_acls:
3735     - if (default_acl)
3736     - posix_acl_release(default_acl);
3737     - if (acl)
3738     - posix_acl_release(acl);
3739     + posix_acl_release(default_acl);
3740     + posix_acl_release(acl);
3741     fail_gunlock:
3742     gfs2_dir_no_add(&da);
3743     gfs2_glock_dq_uninit(ghs);
3744     diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
3745     index 449d0cb45a84..e978f6930575 100644
3746     --- a/fs/gfs2/rgrp.c
3747     +++ b/fs/gfs2/rgrp.c
3748     @@ -1747,9 +1747,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
3749     goto next_iter;
3750     }
3751     if (ret == -E2BIG) {
3752     + n += rbm->bii - initial_bii;
3753     rbm->bii = 0;
3754     rbm->offset = 0;
3755     - n += (rbm->bii - initial_bii);
3756     goto res_covered_end_of_rgrp;
3757     }
3758     return ret;
3759     diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
3760     index d20b92f271c2..0a67dd4250e9 100644
3761     --- a/fs/lockd/clntproc.c
3762     +++ b/fs/lockd/clntproc.c
3763     @@ -442,7 +442,7 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
3764     fl->fl_start = req->a_res.lock.fl.fl_start;
3765     fl->fl_end = req->a_res.lock.fl.fl_end;
3766     fl->fl_type = req->a_res.lock.fl.fl_type;
3767     - fl->fl_pid = 0;
3768     + fl->fl_pid = -req->a_res.lock.fl.fl_pid;
3769     break;
3770     default:
3771     status = nlm_stat_to_errno(req->a_res.status);
3772     diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
3773     index 7147e4aebecc..9846f7e95282 100644
3774     --- a/fs/lockd/xdr.c
3775     +++ b/fs/lockd/xdr.c
3776     @@ -127,7 +127,7 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
3777    
3778     locks_init_lock(fl);
3779     fl->fl_owner = current->files;
3780     - fl->fl_pid = (pid_t)lock->svid;
3781     + fl->fl_pid = current->tgid;
3782     fl->fl_flags = FL_POSIX;
3783     fl->fl_type = F_RDLCK; /* as good as anything else */
3784     start = ntohl(*p++);
3785     @@ -269,7 +269,7 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
3786     memset(lock, 0, sizeof(*lock));
3787     locks_init_lock(&lock->fl);
3788     lock->svid = ~(u32) 0;
3789     - lock->fl.fl_pid = (pid_t)lock->svid;
3790     + lock->fl.fl_pid = current->tgid;
3791    
3792     if (!(p = nlm_decode_cookie(p, &argp->cookie))
3793     || !(p = xdr_decode_string_inplace(p, &lock->caller,
3794     diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
3795     index 7ed9edf9aed4..70154f376695 100644
3796     --- a/fs/lockd/xdr4.c
3797     +++ b/fs/lockd/xdr4.c
3798     @@ -119,7 +119,7 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
3799    
3800     locks_init_lock(fl);
3801     fl->fl_owner = current->files;
3802     - fl->fl_pid = (pid_t)lock->svid;
3803     + fl->fl_pid = current->tgid;
3804     fl->fl_flags = FL_POSIX;
3805     fl->fl_type = F_RDLCK; /* as good as anything else */
3806     p = xdr_decode_hyper(p, &start);
3807     @@ -266,7 +266,7 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
3808     memset(lock, 0, sizeof(*lock));
3809     locks_init_lock(&lock->fl);
3810     lock->svid = ~(u32) 0;
3811     - lock->fl.fl_pid = (pid_t)lock->svid;
3812     + lock->fl.fl_pid = current->tgid;
3813    
3814     if (!(p = nlm4_decode_cookie(p, &argp->cookie))
3815     || !(p = xdr_decode_string_inplace(p, &lock->caller,
3816     diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
3817     index 9d6b4f0f1a25..f35aa9f88b5e 100644
3818     --- a/fs/nfsd/nfs4proc.c
3819     +++ b/fs/nfsd/nfs4proc.c
3820     @@ -1015,8 +1015,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3821    
3822     nvecs = svc_fill_write_vector(rqstp, write->wr_pagelist,
3823     &write->wr_head, write->wr_buflen);
3824     - if (!nvecs)
3825     - return nfserr_io;
3826     WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
3827    
3828     status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp,
3829     diff --git a/include/linux/hmm.h b/include/linux/hmm.h
3830     index 4c92e3ba3e16..5ec8635f602c 100644
3831     --- a/include/linux/hmm.h
3832     +++ b/include/linux/hmm.h
3833     @@ -499,8 +499,7 @@ struct hmm_devmem {
3834     * enough and allocate struct page for it.
3835     *
3836     * The device driver can wrap the hmm_devmem struct inside a private device
3837     - * driver struct. The device driver must call hmm_devmem_remove() before the
3838     - * device goes away and before freeing the hmm_devmem struct memory.
3839     + * driver struct.
3840     */
3841     struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
3842     struct device *device,
3843     @@ -508,7 +507,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
3844     struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
3845     struct device *device,
3846     struct resource *res);
3847     -void hmm_devmem_remove(struct hmm_devmem *devmem);
3848    
3849     /*
3850     * hmm_devmem_page_set_drvdata - set per-page driver data field
3851     diff --git a/include/linux/memremap.h b/include/linux/memremap.h
3852     index f91f9e763557..a84572cdc438 100644
3853     --- a/include/linux/memremap.h
3854     +++ b/include/linux/memremap.h
3855     @@ -106,6 +106,7 @@ typedef void (*dev_page_free_t)(struct page *page, void *data);
3856     * @altmap: pre-allocated/reserved memory for vmemmap allocations
3857     * @res: physical address range covered by @ref
3858     * @ref: reference count that pins the devm_memremap_pages() mapping
3859     + * @kill: callback to transition @ref to the dead state
3860     * @dev: host device of the mapping for debug
3861     * @data: private data pointer for page_free()
3862     * @type: memory type: see MEMORY_* in memory_hotplug.h
3863     @@ -117,6 +118,7 @@ struct dev_pagemap {
3864     bool altmap_valid;
3865     struct resource res;
3866     struct percpu_ref *ref;
3867     + void (*kill)(struct percpu_ref *ref);
3868     struct device *dev;
3869     void *data;
3870     enum memory_type type;
3871     diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
3872     index 4a520d3304a2..cf09ab37b45b 100644
3873     --- a/include/linux/netfilter/nfnetlink.h
3874     +++ b/include/linux/netfilter/nfnetlink.h
3875     @@ -62,18 +62,6 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
3876     }
3877     #endif /* CONFIG_PROVE_LOCKING */
3878    
3879     -/*
3880     - * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
3881     - *
3882     - * @p: The pointer to read, prior to dereferencing
3883     - * @ss: The nfnetlink subsystem ID
3884     - *
3885     - * Return the value of the specified RCU-protected pointer, but omit
3886     - * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
3887     - */
3888     -#define nfnl_dereference(p, ss) \
3889     - rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
3890     -
3891     #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
3892     MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
3893    
3894     diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
3895     index 53fbae27b280..61a5799b440b 100644
3896     --- a/include/uapi/linux/input-event-codes.h
3897     +++ b/include/uapi/linux/input-event-codes.h
3898     @@ -744,6 +744,15 @@
3899    
3900     #define ABS_MISC 0x28
3901    
3902     +/*
3903     + * 0x2e is reserved and should not be used in input drivers.
3904     + * It was used by HID as ABS_MISC+6 and userspace needs to detect if
3905     + * the next ABS_* event is correct or is just ABS_MISC + n.
3906     + * We define here ABS_RESERVED so userspace can rely on it and detect
3907     + * the situation described above.
3908     + */
3909     +#define ABS_RESERVED 0x2e
3910     +
3911     #define ABS_MT_SLOT 0x2f /* MT slot being modified */
3912     #define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
3913     #define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
3914     diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
3915     index de87b0282e74..1d2f147f737d 100644
3916     --- a/kernel/dma/direct.c
3917     +++ b/kernel/dma/direct.c
3918     @@ -168,7 +168,12 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
3919     int dma_direct_supported(struct device *dev, u64 mask)
3920     {
3921     #ifdef CONFIG_ZONE_DMA
3922     - if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
3923     + /*
3924     + * This check needs to be against the actual bit mask value, so
3925     + * use __phys_to_dma() here so that the SME encryption mask isn't
3926     + * part of the check.
3927     + */
3928     + if (mask < __phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
3929     return 0;
3930     #else
3931     /*
3932     @@ -176,8 +181,12 @@ int dma_direct_supported(struct device *dev, u64 mask)
3933     * to be able to satisfy them - either by not supporting more physical
3934     * memory, or by providing a ZONE_DMA32. If neither is the case, the
3935     * architecture needs to use an IOMMU instead of the direct mapping.
3936     + *
3937     + * This check needs to be against the actual bit mask value, so
3938     + * use __phys_to_dma() here so that the SME encryption mask isn't
3939     + * part of the check.
3940     */
3941     - if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
3942     + if (mask < __phys_to_dma(dev, DMA_BIT_MASK(32)))
3943     return 0;
3944     #endif
3945     /*
3946     diff --git a/kernel/fork.c b/kernel/fork.c
3947     index f0b58479534f..64ef113e387e 100644
3948     --- a/kernel/fork.c
3949     +++ b/kernel/fork.c
3950     @@ -1784,8 +1784,6 @@ static __latent_entropy struct task_struct *copy_process(
3951    
3952     posix_cpu_timers_init(p);
3953    
3954     - p->start_time = ktime_get_ns();
3955     - p->real_start_time = ktime_get_boot_ns();
3956     p->io_context = NULL;
3957     audit_set_context(p, NULL);
3958     cgroup_fork(p);
3959     @@ -1949,6 +1947,17 @@ static __latent_entropy struct task_struct *copy_process(
3960     if (retval)
3961     goto bad_fork_free_pid;
3962    
3963     + /*
3964     + * From this point on we must avoid any synchronous user-space
3965     + * communication until we take the tasklist-lock. In particular, we do
3966     + * not want user-space to be able to predict the process start-time by
3967     + * stalling fork(2) after we recorded the start_time but before it is
3968     + * visible to the system.
3969     + */
3970     +
3971     + p->start_time = ktime_get_ns();
3972     + p->real_start_time = ktime_get_boot_ns();
3973     +
3974     /*
3975     * Make it visible to the rest of the system, but dont wake it up yet.
3976     * Need tasklist lock for parent etc handling!
3977     diff --git a/kernel/memremap.c b/kernel/memremap.c
3978     index 5b8600d39931..7c5fb8a208ac 100644
3979     --- a/kernel/memremap.c
3980     +++ b/kernel/memremap.c
3981     @@ -122,23 +122,25 @@ static void devm_memremap_pages_release(void *data)
3982     resource_size_t align_start, align_size;
3983     unsigned long pfn;
3984    
3985     + pgmap->kill(pgmap->ref);
3986     for_each_device_pfn(pfn, pgmap)
3987     put_page(pfn_to_page(pfn));
3988    
3989     - if (percpu_ref_tryget_live(pgmap->ref)) {
3990     - dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
3991     - percpu_ref_put(pgmap->ref);
3992     - }
3993     -
3994     /* pages are dead and unused, undo the arch mapping */
3995     align_start = res->start & ~(SECTION_SIZE - 1);
3996     align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
3997     - align_start;
3998    
3999     mem_hotplug_begin();
4000     - arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
4001     - &pgmap->altmap : NULL);
4002     - kasan_remove_zero_shadow(__va(align_start), align_size);
4003     + if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
4004     + pfn = align_start >> PAGE_SHIFT;
4005     + __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
4006     + align_size >> PAGE_SHIFT, NULL);
4007     + } else {
4008     + arch_remove_memory(align_start, align_size,
4009     + pgmap->altmap_valid ? &pgmap->altmap : NULL);
4010     + kasan_remove_zero_shadow(__va(align_start), align_size);
4011     + }
4012     mem_hotplug_done();
4013    
4014     untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
4015     @@ -150,7 +152,7 @@ static void devm_memremap_pages_release(void *data)
4016     /**
4017     * devm_memremap_pages - remap and provide memmap backing for the given resource
4018     * @dev: hosting device for @res
4019     - * @pgmap: pointer to a struct dev_pgmap
4020     + * @pgmap: pointer to a struct dev_pagemap
4021     *
4022     * Notes:
4023     * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
4024     @@ -159,11 +161,8 @@ static void devm_memremap_pages_release(void *data)
4025     * 2/ The altmap field may optionally be initialized, in which case altmap_valid
4026     * must be set to true
4027     *
4028     - * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
4029     - * time (or devm release event). The expected order of events is that ref has
4030     - * been through percpu_ref_kill() before devm_memremap_pages_release(). The
4031     - * wait for the completion of all references being dropped and
4032     - * percpu_ref_exit() must occur after devm_memremap_pages_release().
4033     + * 3/ pgmap->ref must be 'live' on entry and will be killed at
4034     + * devm_memremap_pages_release() time, or if this routine fails.
4035     *
4036     * 4/ res is expected to be a host memory range that could feasibly be
4037     * treated as a "System RAM" range, i.e. not a device mmio range, but
4038     @@ -180,6 +179,9 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
4039     int error, nid, is_ram;
4040     struct dev_pagemap *conflict_pgmap;
4041    
4042     + if (!pgmap->ref || !pgmap->kill)
4043     + return ERR_PTR(-EINVAL);
4044     +
4045     align_start = res->start & ~(SECTION_SIZE - 1);
4046     align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
4047     - align_start;
4048     @@ -202,18 +204,13 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
4049     is_ram = region_intersects(align_start, align_size,
4050     IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
4051    
4052     - if (is_ram == REGION_MIXED) {
4053     - WARN_ONCE(1, "%s attempted on mixed region %pr\n",
4054     - __func__, res);
4055     - return ERR_PTR(-ENXIO);
4056     + if (is_ram != REGION_DISJOINT) {
4057     + WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
4058     + is_ram == REGION_MIXED ? "mixed" : "ram", res);
4059     + error = -ENXIO;
4060     + goto err_array;
4061     }
4062    
4063     - if (is_ram == REGION_INTERSECTS)
4064     - return __va(res->start);
4065     -
4066     - if (!pgmap->ref)
4067     - return ERR_PTR(-EINVAL);
4068     -
4069     pgmap->dev = dev;
4070    
4071     mutex_lock(&pgmap_lock);
4072     @@ -241,17 +238,40 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
4073     goto err_pfn_remap;
4074    
4075     mem_hotplug_begin();
4076     - error = kasan_add_zero_shadow(__va(align_start), align_size);
4077     - if (error) {
4078     - mem_hotplug_done();
4079     - goto err_kasan;
4080     +
4081     + /*
4082     + * For device private memory we call add_pages() as we only need to
4083     + * allocate and initialize struct page for the device memory. More-
4084     + * over the device memory is un-accessible thus we do not want to
4085     + * create a linear mapping for the memory like arch_add_memory()
4086     + * would do.
4087     + *
4088     + * For all other device memory types, which are accessible by
4089     + * the CPU, we do want the linear mapping and thus use
4090     + * arch_add_memory().
4091     + */
4092     + if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
4093     + error = add_pages(nid, align_start >> PAGE_SHIFT,
4094     + align_size >> PAGE_SHIFT, NULL, false);
4095     + } else {
4096     + error = kasan_add_zero_shadow(__va(align_start), align_size);
4097     + if (error) {
4098     + mem_hotplug_done();
4099     + goto err_kasan;
4100     + }
4101     +
4102     + error = arch_add_memory(nid, align_start, align_size, altmap,
4103     + false);
4104     + }
4105     +
4106     + if (!error) {
4107     + struct zone *zone;
4108     +
4109     + zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
4110     + move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
4111     + align_size >> PAGE_SHIFT, altmap);
4112     }
4113    
4114     - error = arch_add_memory(nid, align_start, align_size, altmap, false);
4115     - if (!error)
4116     - move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
4117     - align_start >> PAGE_SHIFT,
4118     - align_size >> PAGE_SHIFT, altmap);
4119     mem_hotplug_done();
4120     if (error)
4121     goto err_add_memory;
4122     @@ -270,7 +290,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
4123     percpu_ref_get(pgmap->ref);
4124     }
4125    
4126     - devm_add_action(dev, devm_memremap_pages_release, pgmap);
4127     + error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
4128     + pgmap);
4129     + if (error)
4130     + return ERR_PTR(error);
4131    
4132     return __va(res->start);
4133    
4134     @@ -281,9 +304,11 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
4135     err_pfn_remap:
4136     err_radix:
4137     pgmap_radix_release(res, pgoff);
4138     + err_array:
4139     + pgmap->kill(pgmap->ref);
4140     return ERR_PTR(error);
4141     }
4142     -EXPORT_SYMBOL(devm_memremap_pages);
4143     +EXPORT_SYMBOL_GPL(devm_memremap_pages);
4144    
4145     unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
4146     {
4147     diff --git a/kernel/pid.c b/kernel/pid.c
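With this change the caller contract tightens: both pgmap->ref and pgmap->kill must be populated before devm_memremap_pages() is called, and the kill callback is what stops new page references at teardown. A hedged caller sketch (hypothetical driver names, not code from this patch):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/memremap.h>
    #include <linux/percpu-refcount.h>
    #include <linux/slab.h>

    static void my_pgmap_kill(struct percpu_ref *ref)         /* hypothetical */
    {
            percpu_ref_kill(ref);
    }

    static int my_probe(struct device *dev, struct resource *res,
                        struct percpu_ref *ref)                /* ref already live */
    {
            struct dev_pagemap *pgmap;
            void *addr;

            pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
            if (!pgmap)
                    return -ENOMEM;

            pgmap->res  = *res;
            pgmap->ref  = ref;              /* mandatory, else -EINVAL */
            pgmap->kill = my_pgmap_kill;    /* mandatory, else -EINVAL */

            addr = devm_memremap_pages(dev, pgmap);
            return PTR_ERR_OR_ZERO(addr);
    }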
4148     index cdf63e53a014..b88fe5e494cc 100644
4149     --- a/kernel/pid.c
4150     +++ b/kernel/pid.c
4151     @@ -233,8 +233,10 @@ out_unlock:
4152    
4153     out_free:
4154     spin_lock_irq(&pidmap_lock);
4155     - while (++i <= ns->level)
4156     - idr_remove(&ns->idr, (pid->numbers + i)->nr);
4157     + while (++i <= ns->level) {
4158     + upid = pid->numbers + i;
4159     + idr_remove(&upid->ns->idr, upid->nr);
4160     + }
4161    
4162     /* On failure to allocate the first pid, reset the state */
4163     if (ns->pid_allocated == PIDNS_ADDING)
4164     diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
4165     index 6c9866a854b1..1ff17e297f0c 100644
4166     --- a/kernel/rcu/srcutree.c
4167     +++ b/kernel/rcu/srcutree.c
4168     @@ -448,10 +448,12 @@ static void srcu_gp_start(struct srcu_struct *sp)
4169    
4170     lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
4171     WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
4172     + spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
4173     rcu_segcblist_advance(&sdp->srcu_cblist,
4174     rcu_seq_current(&sp->srcu_gp_seq));
4175     (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
4176     rcu_seq_snap(&sp->srcu_gp_seq));
4177     + spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
4178     smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
4179     rcu_seq_start(&sp->srcu_gp_seq);
4180     state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
4181     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
4182     index eabbf6b10b44..7137bc343b4a 100644
4183     --- a/kernel/sched/fair.c
4184     +++ b/kernel/sched/fair.c
4185     @@ -352,10 +352,9 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
4186     }
4187     }
4188    
4189     -/* Iterate thr' all leaf cfs_rq's on a runqueue */
4190     -#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
4191     - list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
4192     - leaf_cfs_rq_list)
4193     +/* Iterate through all leaf cfs_rq's on a runqueue: */
4194     +#define for_each_leaf_cfs_rq(rq, cfs_rq) \
4195     + list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
4196    
4197     /* Do the two (enqueued) entities belong to the same group ? */
4198     static inline struct cfs_rq *
4199     @@ -447,8 +446,8 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
4200     {
4201     }
4202    
4203     -#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
4204     - for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
4205     +#define for_each_leaf_cfs_rq(rq, cfs_rq) \
4206     + for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
4207    
4208     static inline struct sched_entity *parent_entity(struct sched_entity *se)
4209     {
4210     @@ -7371,27 +7370,10 @@ static inline bool others_have_blocked(struct rq *rq)
4211    
4212     #ifdef CONFIG_FAIR_GROUP_SCHED
4213    
4214     -static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
4215     -{
4216     - if (cfs_rq->load.weight)
4217     - return false;
4218     -
4219     - if (cfs_rq->avg.load_sum)
4220     - return false;
4221     -
4222     - if (cfs_rq->avg.util_sum)
4223     - return false;
4224     -
4225     - if (cfs_rq->avg.runnable_load_sum)
4226     - return false;
4227     -
4228     - return true;
4229     -}
4230     -
4231     static void update_blocked_averages(int cpu)
4232     {
4233     struct rq *rq = cpu_rq(cpu);
4234     - struct cfs_rq *cfs_rq, *pos;
4235     + struct cfs_rq *cfs_rq;
4236     const struct sched_class *curr_class;
4237     struct rq_flags rf;
4238     bool done = true;
4239     @@ -7403,7 +7385,7 @@ static void update_blocked_averages(int cpu)
4240     * Iterates the task_group tree in a bottom up fashion, see
4241     * list_add_leaf_cfs_rq() for details.
4242     */
4243     - for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
4244     + for_each_leaf_cfs_rq(rq, cfs_rq) {
4245     struct sched_entity *se;
4246    
4247     /* throttled entities do not contribute to load */
4248     @@ -7418,13 +7400,6 @@ static void update_blocked_averages(int cpu)
4249     if (se && !skip_blocked_update(se))
4250     update_load_avg(cfs_rq_of(se), se, 0);
4251    
4252     - /*
4253     - * There can be a lot of idle CPU cgroups. Don't let fully
4254     - * decayed cfs_rqs linger on the list.
4255     - */
4256     - if (cfs_rq_is_decayed(cfs_rq))
4257     - list_del_leaf_cfs_rq(cfs_rq);
4258     -
4259     /* Don't need periodic decay once load/util_avg are null */
4260     if (cfs_rq_has_blocked(cfs_rq))
4261     done = false;
4262     @@ -10196,10 +10171,10 @@ const struct sched_class fair_sched_class = {
4263     #ifdef CONFIG_SCHED_DEBUG
4264     void print_cfs_stats(struct seq_file *m, int cpu)
4265     {
4266     - struct cfs_rq *cfs_rq, *pos;
4267     + struct cfs_rq *cfs_rq;
4268    
4269     rcu_read_lock();
4270     - for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
4271     + for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
4272     print_cfs_rq(m, cpu, cfs_rq);
4273     rcu_read_unlock();
4274     }
4275     diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
4276     index 2f8b61dfd9b0..7ed43eaa02ef 100644
4277     --- a/lib/raid6/Makefile
4278     +++ b/lib/raid6/Makefile
4279     @@ -18,6 +18,21 @@ quiet_cmd_unroll = UNROLL $@
4280    
4281     ifeq ($(CONFIG_ALTIVEC),y)
4282     altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
4283     +
4284     +ifdef CONFIG_CC_IS_CLANG
4285     +# clang ppc port does not yet support -maltivec when -msoft-float is
4286     +# enabled. A future release of clang will resolve this
4287     +# https://bugs.llvm.org/show_bug.cgi?id=31177
4288     +CFLAGS_REMOVE_altivec1.o += -msoft-float
4289     +CFLAGS_REMOVE_altivec2.o += -msoft-float
4290     +CFLAGS_REMOVE_altivec4.o += -msoft-float
4291     +CFLAGS_REMOVE_altivec8.o += -msoft-float
4292     +CFLAGS_REMOVE_altivec8.o += -msoft-float
4293     +CFLAGS_REMOVE_vpermxor1.o += -msoft-float
4294     +CFLAGS_REMOVE_vpermxor2.o += -msoft-float
4295     +CFLAGS_REMOVE_vpermxor4.o += -msoft-float
4296     +CFLAGS_REMOVE_vpermxor8.o += -msoft-float
4297     +endif
4298     endif
4299    
4300     # The GCC option -ffreestanding is required in order to compile code containing
4301     diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
4302     index d5a06addeb27..bf864c73e462 100644
4303     --- a/lib/test_debug_virtual.c
4304     +++ b/lib/test_debug_virtual.c
4305     @@ -5,6 +5,7 @@
4306     #include <linux/vmalloc.h>
4307     #include <linux/slab.h>
4308     #include <linux/sizes.h>
4309     +#include <linux/io.h>
4310    
4311     #include <asm/page.h>
4312     #ifdef CONFIG_MIPS
4313     diff --git a/mm/hmm.c b/mm/hmm.c
4314     index 90193a7fabce..57f0d2a4ff34 100644
4315     --- a/mm/hmm.c
4316     +++ b/mm/hmm.c
4317     @@ -945,7 +945,6 @@ static void hmm_devmem_ref_exit(void *data)
4318    
4319     devmem = container_of(ref, struct hmm_devmem, ref);
4320     percpu_ref_exit(ref);
4321     - devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
4322     }
4323    
4324     static void hmm_devmem_ref_kill(void *data)
4325     @@ -956,7 +955,6 @@ static void hmm_devmem_ref_kill(void *data)
4326     devmem = container_of(ref, struct hmm_devmem, ref);
4327     percpu_ref_kill(ref);
4328     wait_for_completion(&devmem->completion);
4329     - devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
4330     }
4331    
4332     static int hmm_devmem_fault(struct vm_area_struct *vma,
4333     @@ -994,7 +992,7 @@ static void hmm_devmem_radix_release(struct resource *resource)
4334     mutex_unlock(&hmm_devmem_lock);
4335     }
4336    
4337     -static void hmm_devmem_release(struct device *dev, void *data)
4338     +static void hmm_devmem_release(void *data)
4339     {
4340     struct hmm_devmem *devmem = data;
4341     struct resource *resource = devmem->resource;
4342     @@ -1002,11 +1000,6 @@ static void hmm_devmem_release(struct device *dev, void *data)
4343     struct zone *zone;
4344     struct page *page;
4345    
4346     - if (percpu_ref_tryget_live(&devmem->ref)) {
4347     - dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
4348     - percpu_ref_put(&devmem->ref);
4349     - }
4350     -
4351     /* pages are dead and unused, undo the arch mapping */
4352     start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
4353     npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
4354     @@ -1130,19 +1123,6 @@ error:
4355     return ret;
4356     }
4357    
4358     -static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
4359     -{
4360     - struct hmm_devmem *devmem = data;
4361     -
4362     - return devmem->resource == match_data;
4363     -}
4364     -
4365     -static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
4366     -{
4367     - devres_release(devmem->device, &hmm_devmem_release,
4368     - &hmm_devmem_match, devmem->resource);
4369     -}
4370     -
4371     /*
4372     * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
4373     *
4374     @@ -1170,8 +1150,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
4375    
4376     dev_pagemap_get_ops();
4377    
4378     - devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
4379     - GFP_KERNEL, dev_to_node(device));
4380     + devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
4381     if (!devmem)
4382     return ERR_PTR(-ENOMEM);
4383    
4384     @@ -1185,11 +1164,11 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
4385     ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
4386     0, GFP_KERNEL);
4387     if (ret)
4388     - goto error_percpu_ref;
4389     + return ERR_PTR(ret);
4390    
4391     - ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
4392     + ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
4393     if (ret)
4394     - goto error_devm_add_action;
4395     + return ERR_PTR(ret);
4396    
4397     size = ALIGN(size, PA_SECTION_SIZE);
4398     addr = min((unsigned long)iomem_resource.end,
4399     @@ -1209,16 +1188,12 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
4400    
4401     devmem->resource = devm_request_mem_region(device, addr, size,
4402     dev_name(device));
4403     - if (!devmem->resource) {
4404     - ret = -ENOMEM;
4405     - goto error_no_resource;
4406     - }
4407     + if (!devmem->resource)
4408     + return ERR_PTR(-ENOMEM);
4409     break;
4410     }
4411     - if (!devmem->resource) {
4412     - ret = -ERANGE;
4413     - goto error_no_resource;
4414     - }
4415     + if (!devmem->resource)
4416     + return ERR_PTR(-ERANGE);
4417    
4418     devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
4419     devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
4420     @@ -1227,30 +1202,15 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
4421    
4422     ret = hmm_devmem_pages_create(devmem);
4423     if (ret)
4424     - goto error_pages;
4425     -
4426     - devres_add(device, devmem);
4427     + return ERR_PTR(ret);
4428    
4429     - ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
4430     - if (ret) {
4431     - hmm_devmem_remove(devmem);
4432     + ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
4433     + if (ret)
4434     return ERR_PTR(ret);
4435     - }
4436    
4437     return devmem;
4438     -
4439     -error_pages:
4440     - devm_release_mem_region(device, devmem->resource->start,
4441     - resource_size(devmem->resource));
4442     -error_no_resource:
4443     -error_devm_add_action:
4444     - hmm_devmem_ref_kill(&devmem->ref);
4445     - hmm_devmem_ref_exit(&devmem->ref);
4446     -error_percpu_ref:
4447     - devres_free(devmem);
4448     - return ERR_PTR(ret);
4449     }
4450     -EXPORT_SYMBOL(hmm_devmem_add);
4451     +EXPORT_SYMBOL_GPL(hmm_devmem_add);
4452    
4453     struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
4454     struct device *device,
4455     @@ -1264,8 +1224,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
4456    
4457     dev_pagemap_get_ops();
4458    
4459     - devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
4460     - GFP_KERNEL, dev_to_node(device));
4461     + devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
4462     if (!devmem)
4463     return ERR_PTR(-ENOMEM);
4464    
4465     @@ -1279,12 +1238,12 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
4466     ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
4467     0, GFP_KERNEL);
4468     if (ret)
4469     - goto error_percpu_ref;
4470     + return ERR_PTR(ret);
4471    
4472     - ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
4473     + ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
4474     + &devmem->ref);
4475     if (ret)
4476     - goto error_devm_add_action;
4477     -
4478     + return ERR_PTR(ret);
4479    
4480     devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
4481     devmem->pfn_last = devmem->pfn_first +
4482     @@ -1292,58 +1251,20 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
4483    
4484     ret = hmm_devmem_pages_create(devmem);
4485     if (ret)
4486     - goto error_devm_add_action;
4487     + return ERR_PTR(ret);
4488    
4489     - devres_add(device, devmem);
4490     + ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
4491     + if (ret)
4492     + return ERR_PTR(ret);
4493    
4494     - ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
4495     - if (ret) {
4496     - hmm_devmem_remove(devmem);
4497     + ret = devm_add_action_or_reset(device, hmm_devmem_ref_kill,
4498     + &devmem->ref);
4499     + if (ret)
4500     return ERR_PTR(ret);
4501     - }
4502    
4503     return devmem;
4504     -
4505     -error_devm_add_action:
4506     - hmm_devmem_ref_kill(&devmem->ref);
4507     - hmm_devmem_ref_exit(&devmem->ref);
4508     -error_percpu_ref:
4509     - devres_free(devmem);
4510     - return ERR_PTR(ret);
4511     -}
4512     -EXPORT_SYMBOL(hmm_devmem_add_resource);
4513     -
4514     -/*
4515     - * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
4516     - *
4517     - * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory
4518     - *
4519     - * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
4520     - * of the device driver. It will free struct page and remove the resource that
4521     - * reserved the physical address range for this device memory.
4522     - */
4523     -void hmm_devmem_remove(struct hmm_devmem *devmem)
4524     -{
4525     - resource_size_t start, size;
4526     - struct device *device;
4527     - bool cdm = false;
4528     -
4529     - if (!devmem)
4530     - return;
4531     -
4532     - device = devmem->device;
4533     - start = devmem->resource->start;
4534     - size = resource_size(devmem->resource);
4535     -
4536     - cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
4537     - hmm_devmem_ref_kill(&devmem->ref);
4538     - hmm_devmem_ref_exit(&devmem->ref);
4539     - hmm_devmem_pages_remove(devmem);
4540     -
4541     - if (!cdm)
4542     - devm_release_mem_region(device, start, size);
4543     }
4544     -EXPORT_SYMBOL(hmm_devmem_remove);
4545     +EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
4546    
4547     /*
4548     * A device driver that wants to handle multiple devices memory through a
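The conversions in this file all move to the same devres idiom: devm_add_action_or_reset() runs the action immediately when registration fails, so the error paths collapse to a plain return. A generic sketch of that idiom (the my_* names are hypothetical):

    #include <linux/device.h>

    static void my_teardown(void *data)
    {
            /* undo whatever my_setup() established */
    }

    static int my_setup(void *data)
    {
            return 0;       /* stand-in for the real initialisation */
    }

    static int my_init(struct device *dev, void *state)
    {
            int ret = my_setup(state);

            if (ret)
                    return ret;

            /* on registration failure this calls my_teardown(state) first */
            return devm_add_action_or_reset(dev, my_teardown, state);
    }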
4549     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4550     index e79cb59552d9..9518aefd8cbb 100644
4551     --- a/mm/memcontrol.c
4552     +++ b/mm/memcontrol.c
4553     @@ -1666,6 +1666,9 @@ enum oom_status {
4554    
4555     static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
4556     {
4557     + enum oom_status ret;
4558     + bool locked;
4559     +
4560     if (order > PAGE_ALLOC_COSTLY_ORDER)
4561     return OOM_SKIPPED;
4562    
4563     @@ -1698,10 +1701,23 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
4564     return OOM_ASYNC;
4565     }
4566    
4567     + mem_cgroup_mark_under_oom(memcg);
4568     +
4569     + locked = mem_cgroup_oom_trylock(memcg);
4570     +
4571     + if (locked)
4572     + mem_cgroup_oom_notify(memcg);
4573     +
4574     + mem_cgroup_unmark_under_oom(memcg);
4575     if (mem_cgroup_out_of_memory(memcg, mask, order))
4576     - return OOM_SUCCESS;
4577     + ret = OOM_SUCCESS;
4578     + else
4579     + ret = OOM_FAILED;
4580    
4581     - return OOM_FAILED;
4582     + if (locked)
4583     + mem_cgroup_oom_unlock(memcg);
4584     +
4585     + return ret;
4586     }
4587    
4588     /**
4589     diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4590     index f3f919728f5c..8a136ffda370 100644
4591     --- a/mm/memory_hotplug.c
4592     +++ b/mm/memory_hotplug.c
4593     @@ -35,6 +35,7 @@
4594     #include <linux/memblock.h>
4595     #include <linux/bootmem.h>
4596     #include <linux/compaction.h>
4597     +#include <linux/rmap.h>
4598    
4599     #include <asm/tlbflush.h>
4600    
4601     @@ -1393,6 +1394,21 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
4602     pfn = page_to_pfn(compound_head(page))
4603     + hpage_nr_pages(page) - 1;
4604    
4605     + /*
4606     + * HWPoison pages have elevated reference counts so the migration would
4607     + * fail on them. It also doesn't make any sense to migrate them in the
4608     + * first place. Still try to unmap such a page in case it is still mapped
4609     + * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep
4610     + * the unmap as the catch all safety net).
4611     + */
4612     + if (PageHWPoison(page)) {
4613     + if (WARN_ON(PageLRU(page)))
4614     + isolate_lru_page(page);
4615     + if (page_mapped(page))
4616     + try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
4617     + continue;
4618     + }
4619     +
4620     if (!get_page_unless_zero(page))
4621     continue;
4622     /*
4623     diff --git a/mm/swapfile.c b/mm/swapfile.c
4624     index 8810a6d7d67f..67aaf7ae22ff 100644
4625     --- a/mm/swapfile.c
4626     +++ b/mm/swapfile.c
4627     @@ -2208,7 +2208,8 @@ int try_to_unuse(unsigned int type, bool frontswap,
4628     */
4629     if (PageSwapCache(page) &&
4630     likely(page_private(page) == entry.val) &&
4631     - !page_swapped(page))
4632     + (!PageTransCompound(page) ||
4633     + !swap_page_trans_huge_swapped(si, entry)))
4634     delete_from_swap_cache(compound_head(page));
4635    
4636     /*
4637     diff --git a/net/9p/client.c b/net/9p/client.c
4638     index deae53a7dffc..75b7bf7c7f07 100644
4639     --- a/net/9p/client.c
4640     +++ b/net/9p/client.c
4641     @@ -181,6 +181,12 @@ static int parse_opts(char *opts, struct p9_client *clnt)
4642     ret = r;
4643     continue;
4644     }
4645     + if (option < 4096) {
4646     + p9_debug(P9_DEBUG_ERROR,
4647     + "msize should be at least 4k\n");
4648     + ret = -EINVAL;
4649     + continue;
4650     + }
4651     clnt->msize = option;
4652     break;
4653     case Opt_trans:
4654     @@ -993,10 +999,18 @@ static int p9_client_version(struct p9_client *c)
4655     else if (!strncmp(version, "9P2000", 6))
4656     c->proto_version = p9_proto_legacy;
4657     else {
4658     + p9_debug(P9_DEBUG_ERROR,
4659     + "server returned an unknown version: %s\n", version);
4660     err = -EREMOTEIO;
4661     goto error;
4662     }
4663    
4664     + if (msize < 4096) {
4665     + p9_debug(P9_DEBUG_ERROR,
4666     + "server returned a msize < 4096: %d\n", msize);
4667     + err = -EREMOTEIO;
4668     + goto error;
4669     + }
4670     if (msize < c->msize)
4671     c->msize = msize;
4672    
4673     @@ -1055,6 +1069,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
4674     if (clnt->msize > clnt->trans_mod->maxsize)
4675     clnt->msize = clnt->trans_mod->maxsize;
4676    
4677     + if (clnt->msize < 4096) {
4678     + p9_debug(P9_DEBUG_ERROR,
4679     + "Please specify a msize of at least 4k\n");
4680     + err = -EINVAL;
4681     + goto free_client;
4682     + }
4683     +
4684     err = p9_client_version(clnt);
4685     if (err)
4686     goto close_trans;
4687     diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
4688     index 5f3c81e705c7..3a0171a65db3 100644
4689     --- a/net/mac80211/iface.c
4690     +++ b/net/mac80211/iface.c
4691     @@ -7,6 +7,7 @@
4692     * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
4693     * Copyright 2013-2014 Intel Mobile Communications GmbH
4694     * Copyright (c) 2016 Intel Deutschland GmbH
4695     + * Copyright (C) 2018 Intel Corporation
4696     *
4697     * This program is free software; you can redistribute it and/or modify
4698     * it under the terms of the GNU General Public License version 2 as
4699     @@ -1951,6 +1952,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
4700     WARN(local->open_count, "%s: open count remains %d\n",
4701     wiphy_name(local->hw.wiphy), local->open_count);
4702    
4703     + ieee80211_txq_teardown_flows(local);
4704     +
4705     mutex_lock(&local->iflist_mtx);
4706     list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
4707     list_del(&sdata->list);
4708     diff --git a/net/mac80211/main.c b/net/mac80211/main.c
4709     index 513627896204..68db2a356443 100644
4710     --- a/net/mac80211/main.c
4711     +++ b/net/mac80211/main.c
4712     @@ -1198,7 +1198,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
4713     rtnl_unlock();
4714     ieee80211_led_exit(local);
4715     ieee80211_wep_free(local);
4716     - ieee80211_txq_teardown_flows(local);
4717     fail_flows:
4718     destroy_workqueue(local->workqueue);
4719     fail_workqueue:
4720     @@ -1224,7 +1223,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
4721     #if IS_ENABLED(CONFIG_IPV6)
4722     unregister_inet6addr_notifier(&local->ifa6_notifier);
4723     #endif
4724     - ieee80211_txq_teardown_flows(local);
4725    
4726     rtnl_lock();
4727    
4728     diff --git a/net/mac80211/status.c b/net/mac80211/status.c
4729     index 7fa10d06cc51..534a604b75c2 100644
4730     --- a/net/mac80211/status.c
4731     +++ b/net/mac80211/status.c
4732     @@ -556,6 +556,11 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
4733     }
4734    
4735     ieee80211_led_tx(local);
4736     +
4737     + if (skb_has_frag_list(skb)) {
4738     + kfree_skb_list(skb_shinfo(skb)->frag_list);
4739     + skb_shinfo(skb)->frag_list = NULL;
4740     + }
4741     }
4742    
4743     /*
4744     diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
4745     index 4eef55da0878..8da228da53ae 100644
4746     --- a/net/netfilter/ipset/ip_set_list_set.c
4747     +++ b/net/netfilter/ipset/ip_set_list_set.c
4748     @@ -531,8 +531,8 @@ nla_put_failure:
4749     ret = -EMSGSIZE;
4750     } else {
4751     cb->args[IPSET_CB_ARG0] = i;
4752     + ipset_nest_end(skb, atd);
4753     }
4754     - ipset_nest_end(skb, atd);
4755     out:
4756     rcu_read_unlock();
4757     return ret;
4758     diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
4759     index b6d0f6deea86..9cd180bda092 100644
4760     --- a/net/netfilter/nf_conncount.c
4761     +++ b/net/netfilter/nf_conncount.c
4762     @@ -427,7 +427,7 @@ insert_tree(struct net *net,
4763     count = 1;
4764     rbconn->list.count = count;
4765    
4766     - rb_link_node(&rbconn->node, parent, rbnode);
4767     + rb_link_node_rcu(&rbconn->node, parent, rbnode);
4768     rb_insert_color(&rbconn->node, root);
4769     out_unlock:
4770     spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
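This one-word change matters because nf_conncount lookups can walk the tree without taking the insert lock; rb_link_node_rcu() publishes the new node with rcu_assign_pointer() semantics, so a concurrent reader never observes a half-initialised child pointer. A generic sketch of the pattern (struct item is hypothetical):

    #include <linux/rbtree.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct item {
            struct rb_node  node;
            u32             key;
    };

    static void insert_item(struct rb_root *root, spinlock_t *lock,
                            struct item *new)
    {
            struct rb_node **p, *parent = NULL;

            spin_lock_bh(lock);
            p = &root->rb_node;
            while (*p) {
                    struct item *cur = rb_entry(*p, struct item, node);

                    parent = *p;
                    p = new->key < cur->key ? &(*p)->rb_left : &(*p)->rb_right;
            }
            rb_link_node_rcu(&new->node, parent, p);  /* RCU-safe publication */
            rb_insert_color(&new->node, root);
            spin_unlock_bh(lock);
    }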
4771     diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
4772     index a975efd6b8c3..9da303461069 100644
4773     --- a/net/netfilter/nf_conntrack_seqadj.c
4774     +++ b/net/netfilter/nf_conntrack_seqadj.c
4775     @@ -115,12 +115,12 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
4776     /* TCP SACK sequence number adjustment */
4777     static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
4778     unsigned int protoff,
4779     - struct tcphdr *tcph,
4780     struct nf_conn *ct,
4781     enum ip_conntrack_info ctinfo)
4782     {
4783     - unsigned int dir, optoff, optend;
4784     + struct tcphdr *tcph = (void *)skb->data + protoff;
4785     struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
4786     + unsigned int dir, optoff, optend;
4787    
4788     optoff = protoff + sizeof(struct tcphdr);
4789     optend = protoff + tcph->doff * 4;
4790     @@ -128,6 +128,7 @@ static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
4791     if (!skb_make_writable(skb, optend))
4792     return 0;
4793    
4794     + tcph = (void *)skb->data + protoff;
4795     dir = CTINFO2DIR(ctinfo);
4796    
4797     while (optoff < optend) {
4798     @@ -207,7 +208,7 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
4799     ntohl(newack));
4800     tcph->ack_seq = newack;
4801    
4802     - res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
4803     + res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo);
4804     out:
4805     spin_unlock_bh(&ct->lock);
4806    
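The underlying hazard: skb_make_writable() may reallocate the skb data area, so a TCP header pointer computed from skb->data before the call is stale afterwards. The fix recomputes it after the call; the same pattern sketched in isolation:

    #include <linux/netfilter.h>
    #include <linux/skbuff.h>
    #include <linux/tcp.h>

    static unsigned int example_adjust(struct sk_buff *skb, unsigned int protoff,
                                       unsigned int optend)
    {
            struct tcphdr *tcph;

            if (!skb_make_writable(skb, optend))
                    return 0;

            /* only now is it safe to derive pointers into the packet */
            tcph = (struct tcphdr *)(skb->data + protoff);
            return tcph->doff * 4;
    }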
4807     diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
4808     index e2b196054dfc..2268b10a9dcf 100644
4809     --- a/net/netfilter/nf_nat_core.c
4810     +++ b/net/netfilter/nf_nat_core.c
4811     @@ -117,7 +117,8 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
4812     dst = skb_dst(skb);
4813     if (dst->xfrm)
4814     dst = ((struct xfrm_dst *)dst)->route;
4815     - dst_hold(dst);
4816     + if (!dst_hold_safe(dst))
4817     + return -EHOSTUNREACH;
4818    
4819     if (sk && !net_eq(net, sock_net(sk)))
4820     sk = NULL;
4821     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4822     index fe0558b15fd3..ed9af46720e1 100644
4823     --- a/net/netfilter/nf_tables_api.c
4824     +++ b/net/netfilter/nf_tables_api.c
4825     @@ -1199,7 +1199,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
4826     if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
4827     goto nla_put_failure;
4828    
4829     - if (basechain->stats && nft_dump_stats(skb, basechain->stats))
4830     + if (rcu_access_pointer(basechain->stats) &&
4831     + nft_dump_stats(skb, rcu_dereference(basechain->stats)))
4832     goto nla_put_failure;
4833     }
4834    
4835     @@ -1375,7 +1376,8 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
4836     return newstats;
4837     }
4838    
4839     -static void nft_chain_stats_replace(struct nft_base_chain *chain,
4840     +static void nft_chain_stats_replace(struct net *net,
4841     + struct nft_base_chain *chain,
4842     struct nft_stats __percpu *newstats)
4843     {
4844     struct nft_stats __percpu *oldstats;
4845     @@ -1383,8 +1385,9 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
4846     if (newstats == NULL)
4847     return;
4848    
4849     - if (chain->stats) {
4850     - oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES);
4851     + if (rcu_access_pointer(chain->stats)) {
4852     + oldstats = rcu_dereference_protected(chain->stats,
4853     + lockdep_commit_lock_is_held(net));
4854     rcu_assign_pointer(chain->stats, newstats);
4855     synchronize_rcu();
4856     free_percpu(oldstats);
4857     @@ -1421,9 +1424,10 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx)
4858     struct nft_base_chain *basechain = nft_base_chain(chain);
4859    
4860     module_put(basechain->type->owner);
4861     - free_percpu(basechain->stats);
4862     - if (basechain->stats)
4863     + if (rcu_access_pointer(basechain->stats)) {
4864     static_branch_dec(&nft_counters_enabled);
4865     + free_percpu(rcu_dereference_raw(basechain->stats));
4866     + }
4867     kfree(chain->name);
4868     kfree(basechain);
4869     } else {
4870     @@ -1572,7 +1576,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
4871     kfree(basechain);
4872     return PTR_ERR(stats);
4873     }
4874     - basechain->stats = stats;
4875     + rcu_assign_pointer(basechain->stats, stats);
4876     static_branch_inc(&nft_counters_enabled);
4877     }
4878    
4879     @@ -6145,7 +6149,8 @@ static void nft_chain_commit_update(struct nft_trans *trans)
4880     return;
4881    
4882     basechain = nft_base_chain(trans->ctx.chain);
4883     - nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
4884     + nft_chain_stats_replace(trans->ctx.net, basechain,
4885     + nft_trans_chain_stats(trans));
4886    
4887     switch (nft_trans_chain_policy(trans)) {
4888     case NF_DROP:
4889     diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
4890     index ffd5c0f9412b..60f258f2c707 100644
4891     --- a/net/netfilter/nf_tables_core.c
4892     +++ b/net/netfilter/nf_tables_core.c
4893     @@ -101,7 +101,7 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
4894     struct nft_stats *stats;
4895    
4896     base_chain = nft_base_chain(chain);
4897     - if (!base_chain->stats)
4898     + if (!rcu_access_pointer(base_chain->stats))
4899     return;
4900    
4901     local_bh_disable();
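Both nf_tables hunks treat the base chain statistics as an RCU-managed pointer: readers sample it with rcu_access_pointer()/rcu_dereference(), and the updater swaps it with rcu_assign_pointer(), waits a grace period, then frees the old per-cpu block. A generic sketch of that replace step (my_stats and my_lock are hypothetical stand-ins, not the nft structures):

    #include <linux/mutex.h>
    #include <linux/percpu.h>
    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct my_stats { u64 pkts, bytes; };

    static DEFINE_MUTEX(my_lock);
    static struct my_stats __percpu *stats;

    static void replace_stats(struct my_stats __percpu *newstats)
    {
            struct my_stats __percpu *old;

            if (!newstats)
                    return;

            mutex_lock(&my_lock);
            old = rcu_dereference_protected(stats, lockdep_is_held(&my_lock));
            rcu_assign_pointer(stats, newstats);
            mutex_unlock(&my_lock);

            if (old) {
                    synchronize_rcu();      /* let readers of the old block finish */
                    free_percpu(old);
            }
    }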
4902     diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
4903     index 860f2a1bbb67..1a65f88d021a 100644
4904     --- a/net/sunrpc/auth_gss/svcauth_gss.c
4905     +++ b/net/sunrpc/auth_gss/svcauth_gss.c
4906     @@ -1122,7 +1122,7 @@ static int svcauth_gss_legacy_init(struct svc_rqst *rqstp,
4907     struct kvec *resv = &rqstp->rq_res.head[0];
4908     struct rsi *rsip, rsikey;
4909     int ret;
4910     - struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
4911     + struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
4912    
4913     memset(&rsikey, 0, sizeof(rsikey));
4914     ret = gss_read_verf(gc, argv, authp,
4915     @@ -1233,7 +1233,7 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
4916     uint64_t handle;
4917     int status;
4918     int ret;
4919     - struct net *net = rqstp->rq_xprt->xpt_net;
4920     + struct net *net = SVC_NET(rqstp);
4921     struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
4922    
4923     memset(&ud, 0, sizeof(ud));
4924     @@ -1424,7 +1424,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
4925     __be32 *rpcstart;
4926     __be32 *reject_stat = resv->iov_base + resv->iov_len;
4927     int ret;
4928     - struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
4929     + struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
4930    
4931     dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",
4932     argv->iov_len);
4933     @@ -1714,7 +1714,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
4934     struct rpc_gss_wire_cred *gc = &gsd->clcred;
4935     struct xdr_buf *resbuf = &rqstp->rq_res;
4936     int stat = -EINVAL;
4937     - struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
4938     + struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
4939    
4940     if (gc->gc_proc != RPC_GSS_PROC_DATA)
4941     goto out;
4942     diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
4943     index 109fbe591e7b..b6e8eccf2a52 100644
4944     --- a/net/sunrpc/cache.c
4945     +++ b/net/sunrpc/cache.c
4946     @@ -54,6 +54,11 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
4947     h->last_refresh = now;
4948     }
4949    
4950     +static void cache_fresh_locked(struct cache_head *head, time_t expiry,
4951     + struct cache_detail *detail);
4952     +static void cache_fresh_unlocked(struct cache_head *head,
4953     + struct cache_detail *detail);
4954     +
4955     struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
4956     struct cache_head *key, int hash)
4957     {
4958     @@ -95,6 +100,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
4959     if (cache_is_expired(detail, tmp)) {
4960     hlist_del_init(&tmp->cache_list);
4961     detail->entries --;
4962     + cache_fresh_locked(tmp, 0, detail);
4963     freeme = tmp;
4964     break;
4965     }
4966     @@ -110,8 +116,10 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
4967     cache_get(new);
4968     write_unlock(&detail->hash_lock);
4969    
4970     - if (freeme)
4971     + if (freeme) {
4972     + cache_fresh_unlocked(freeme, detail);
4973     cache_put(freeme, detail);
4974     + }
4975     return new;
4976     }
4977     EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
4978     diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
4979     index 6b7539c0466e..7d8cce1dfcad 100644
4980     --- a/net/sunrpc/xprtsock.c
4981     +++ b/net/sunrpc/xprtsock.c
4982     @@ -2244,8 +2244,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
4983     trace_rpc_socket_connect(xprt, sock, 0);
4984     status = 0;
4985     out:
4986     - xprt_unlock_connect(xprt, transport);
4987     xprt_clear_connecting(xprt);
4988     + xprt_unlock_connect(xprt, transport);
4989     xprt_wake_pending_tasks(xprt, status);
4990     }
4991    
4992     @@ -2480,8 +2480,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
4993     }
4994     status = -EAGAIN;
4995     out:
4996     - xprt_unlock_connect(xprt, transport);
4997     xprt_clear_connecting(xprt);
4998     + xprt_unlock_connect(xprt, transport);
4999     xprt_wake_pending_tasks(xprt, status);
5000     }
5001    
5002     diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
5003     index 7fab2891ce7f..a091c03abcb2 100644
5004     --- a/net/tls/tls_main.c
5005     +++ b/net/tls/tls_main.c
5006     @@ -555,6 +555,9 @@ static struct tls_context *create_ctx(struct sock *sk)
5007     return NULL;
5008    
5009     icsk->icsk_ulp_data = ctx;
5010     + ctx->setsockopt = sk->sk_prot->setsockopt;
5011     + ctx->getsockopt = sk->sk_prot->getsockopt;
5012     + ctx->sk_proto_close = sk->sk_prot->close;
5013     return ctx;
5014     }
5015    
5016     @@ -685,9 +688,6 @@ static int tls_init(struct sock *sk)
5017     rc = -ENOMEM;
5018     goto out;
5019     }
5020     - ctx->setsockopt = sk->sk_prot->setsockopt;
5021     - ctx->getsockopt = sk->sk_prot->getsockopt;
5022     - ctx->sk_proto_close = sk->sk_prot->close;
5023    
5024     /* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
5025     if (ip_ver == TLSV6 &&
5026     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5027     index 176edfefcbaa..295cd8d5554f 100644
5028     --- a/net/wireless/nl80211.c
5029     +++ b/net/wireless/nl80211.c
5030     @@ -8993,8 +8993,10 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
5031     if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) {
5032     int r = validate_pae_over_nl80211(rdev, info);
5033    
5034     - if (r < 0)
5035     + if (r < 0) {
5036     + kzfree(connkeys);
5037     return r;
5038     + }
5039    
5040     ibss.control_port_over_nl80211 = true;
5041     }
5042     diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
5043     index be3520e429c9..790b514f86b6 100644
5044     --- a/net/xfrm/xfrm_input.c
5045     +++ b/net/xfrm/xfrm_input.c
5046     @@ -346,6 +346,12 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
5047    
5048     skb->sp->xvec[skb->sp->len++] = x;
5049    
5050     + skb_dst_force(skb);
5051     + if (!skb_dst(skb)) {
5052     + XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
5053     + goto drop;
5054     + }
5055     +
5056     lock:
5057     spin_lock(&x->lock);
5058    
5059     @@ -385,7 +391,6 @@ lock:
5060     XFRM_SKB_CB(skb)->seq.input.low = seq;
5061     XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
5062    
5063     - skb_dst_force(skb);
5064     dev_hold(skb->dev);
5065    
5066     if (crypto_done)
5067     diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
5068     index 261995d37ced..6d20fbcde000 100644
5069     --- a/net/xfrm/xfrm_output.c
5070     +++ b/net/xfrm/xfrm_output.c
5071     @@ -102,6 +102,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
5072     skb_dst_force(skb);
5073     if (!skb_dst(skb)) {
5074     XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
5075     + err = -EHOSTUNREACH;
5076     goto error_nolock;
5077     }
5078    
5079     diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
5080     index 7a34990a68b1..cc0203efb584 100644
5081     --- a/net/xfrm/xfrm_state.c
5082     +++ b/net/xfrm/xfrm_state.c
5083     @@ -794,7 +794,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
5084     {
5085     spin_lock_bh(&net->xfrm.xfrm_state_lock);
5086     si->sadcnt = net->xfrm.state_num;
5087     - si->sadhcnt = net->xfrm.state_hmask;
5088     + si->sadhcnt = net->xfrm.state_hmask + 1;
5089     si->sadhmcnt = xfrm_state_hashmax;
5090     spin_unlock_bh(&net->xfrm.xfrm_state_lock);
5091     }
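The off-by-one being corrected: state_hmask is the power-of-two table size minus one (the value actually used for hashing), so the bucket count reported through sadhcnt is hmask + 1. As a one-line illustration:

    /* illustration only: the mask of a 1024-bucket table is 0x3ff */
    static unsigned int sad_hash_buckets(unsigned int state_hmask)
    {
            return state_hmask + 1;         /* 0x3ff + 1 == 1024 */
    }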
5092     diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
5093     index 8081b6cf67d2..34414c6efad6 100755
5094     --- a/scripts/checkstack.pl
5095     +++ b/scripts/checkstack.pl
5096     @@ -47,8 +47,8 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
5097     $xs = "[0-9a-f ]"; # hex character or space
5098     $funcre = qr/^$x* <(.*)>:$/;
5099     if ($arch eq 'aarch64') {
5100     - #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp,#-80]!
5101     - $re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o;
5102     + #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
5103     + $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
5104     } elsif ($arch eq 'arm') {
5105     #c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64
5106     $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
5107     diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
5108     index f4eadd3f7350..b63ef865ce1e 100644
5109     --- a/security/selinux/ss/policydb.c
5110     +++ b/security/selinux/ss/policydb.c
5111     @@ -2108,6 +2108,7 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
5112     {
5113     int i, j, rc;
5114     u32 nel, len;
5115     + __be64 prefixbuf[1];
5116     __le32 buf[3];
5117     struct ocontext *l, *c;
5118     u32 nodebuf[8];
5119     @@ -2217,21 +2218,30 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
5120     goto out;
5121     break;
5122     }
5123     - case OCON_IBPKEY:
5124     - rc = next_entry(nodebuf, fp, sizeof(u32) * 4);
5125     + case OCON_IBPKEY: {
5126     + u32 pkey_lo, pkey_hi;
5127     +
5128     + rc = next_entry(prefixbuf, fp, sizeof(u64));
5129     + if (rc)
5130     + goto out;
5131     +
5132     + /* we need to have subnet_prefix in CPU order */
5133     + c->u.ibpkey.subnet_prefix = be64_to_cpu(prefixbuf[0]);
5134     +
5135     + rc = next_entry(buf, fp, sizeof(u32) * 2);
5136     if (rc)
5137     goto out;
5138    
5139     - c->u.ibpkey.subnet_prefix = be64_to_cpu(*((__be64 *)nodebuf));
5140     + pkey_lo = le32_to_cpu(buf[0]);
5141     + pkey_hi = le32_to_cpu(buf[1]);
5142    
5143     - if (nodebuf[2] > 0xffff ||
5144     - nodebuf[3] > 0xffff) {
5145     + if (pkey_lo > U16_MAX || pkey_hi > U16_MAX) {
5146     rc = -EINVAL;
5147     goto out;
5148     }
5149    
5150     - c->u.ibpkey.low_pkey = le32_to_cpu(nodebuf[2]);
5151     - c->u.ibpkey.high_pkey = le32_to_cpu(nodebuf[3]);
5152     + c->u.ibpkey.low_pkey = pkey_lo;
5153     + c->u.ibpkey.high_pkey = pkey_hi;
5154    
5155     rc = context_read_and_validate(&c->context[0],
5156     p,
5157     @@ -2239,7 +2249,10 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
5158     if (rc)
5159     goto out;
5160     break;
5161     - case OCON_IBENDPORT:
5162     + }
5163     + case OCON_IBENDPORT: {
5164     + u32 port;
5165     +
5166     rc = next_entry(buf, fp, sizeof(u32) * 2);
5167     if (rc)
5168     goto out;
5169     @@ -2249,12 +2262,13 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
5170     if (rc)
5171     goto out;
5172    
5173     - if (buf[1] > 0xff || buf[1] == 0) {
5174     + port = le32_to_cpu(buf[1]);
5175     + if (port > U8_MAX || port == 0) {
5176     rc = -EINVAL;
5177     goto out;
5178     }
5179    
5180     - c->u.ibendport.port = le32_to_cpu(buf[1]);
5181     + c->u.ibendport.port = port;
5182    
5183     rc = context_read_and_validate(&c->context[0],
5184     p,
5185     @@ -2262,7 +2276,8 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
5186     if (rc)
5187     goto out;
5188     break;
5189     - }
5190     + } /* end case */
5191     + } /* end switch */
5192     }
5193     }
5194     rc = 0;
5195     @@ -3105,6 +3120,7 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
5196     {
5197     unsigned int i, j, rc;
5198     size_t nel, len;
5199     + __be64 prefixbuf[1];
5200     __le32 buf[3];
5201     u32 nodebuf[8];
5202     struct ocontext *c;
5203     @@ -3192,12 +3208,17 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
5204     return rc;
5205     break;
5206     case OCON_IBPKEY:
5207     - *((__be64 *)nodebuf) = cpu_to_be64(c->u.ibpkey.subnet_prefix);
5208     + /* subnet_prefix is in CPU order */
5209     + prefixbuf[0] = cpu_to_be64(c->u.ibpkey.subnet_prefix);
5210    
5211     - nodebuf[2] = cpu_to_le32(c->u.ibpkey.low_pkey);
5212     - nodebuf[3] = cpu_to_le32(c->u.ibpkey.high_pkey);
5213     + rc = put_entry(prefixbuf, sizeof(u64), 1, fp);
5214     + if (rc)
5215     + return rc;
5216     +
5217     + buf[0] = cpu_to_le32(c->u.ibpkey.low_pkey);
5218     + buf[1] = cpu_to_le32(c->u.ibpkey.high_pkey);
5219    
5220     - rc = put_entry(nodebuf, sizeof(u32), 4, fp);
5221     + rc = put_entry(buf, sizeof(u32), 2, fp);
5222     if (rc)
5223     return rc;
5224     rc = context_write(p, &c->context[0], fp);
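The reworked reader and writer now agree on one on-disk layout for OCON_IBPKEY: an 8-byte big-endian subnet prefix followed by two 4-byte little-endian pkeys that must fit in 16 bits. A condensed sketch of the read side (ibpkey_rec is a hypothetical staging struct, not a policydb type):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct ibpkey_rec {
            u64 subnet_prefix;      /* kept in CPU order */
            u16 low_pkey, high_pkey;
    };

    static int read_ibpkey(const __be64 *prefixbuf, const __le32 *buf,
                           struct ibpkey_rec *out)
    {
            u32 lo = le32_to_cpu(buf[0]);
            u32 hi = le32_to_cpu(buf[1]);

            if (lo > U16_MAX || hi > U16_MAX)
                    return -EINVAL;

            out->subnet_prefix = be64_to_cpu(prefixbuf[0]);
            out->low_pkey  = lo;
            out->high_pkey = hi;
            return 0;
    }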
5225     diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
5226     index 598d140bb7cb..5fc497c6d738 100644
5227     --- a/sound/pci/cs46xx/dsp_spos.c
5228     +++ b/sound/pci/cs46xx/dsp_spos.c
5229     @@ -903,6 +903,9 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip)
5230     struct dsp_spos_instance * ins = chip->dsp_spos_instance;
5231     int i;
5232    
5233     + if (!ins)
5234     + return 0;
5235     +
5236     snd_info_free_entry(ins->proc_sym_info_entry);
5237     ins->proc_sym_info_entry = NULL;
5238    
5239     diff --git a/sound/usb/card.c b/sound/usb/card.c
5240     index a105947eaf55..746a72e23cf9 100644
5241     --- a/sound/usb/card.c
5242     +++ b/sound/usb/card.c
5243     @@ -246,7 +246,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
5244     h1 = snd_usb_find_csint_desc(host_iface->extra,
5245     host_iface->extralen,
5246     NULL, UAC_HEADER);
5247     - if (!h1) {
5248     + if (!h1 || h1->bLength < sizeof(*h1)) {
5249     dev_err(&dev->dev, "cannot find UAC_HEADER\n");
5250     return -EINVAL;
5251     }
5252     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
5253     index c63c84b54969..e7d441d0e839 100644
5254     --- a/sound/usb/mixer.c
5255     +++ b/sound/usb/mixer.c
5256     @@ -753,8 +753,9 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
5257     struct uac_mixer_unit_descriptor *desc)
5258     {
5259     int mu_channels;
5260     + void *c;
5261    
5262     - if (desc->bLength < 11)
5263     + if (desc->bLength < sizeof(*desc))
5264     return -EINVAL;
5265     if (!desc->bNrInPins)
5266     return -EINVAL;
5267     @@ -763,6 +764,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
5268     case UAC_VERSION_1:
5269     case UAC_VERSION_2:
5270     default:
5271     + if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1)
5272     + return 0; /* no bmControls -> skip */
5273     mu_channels = uac_mixer_unit_bNrChannels(desc);
5274     break;
5275     case UAC_VERSION_3:
5276     @@ -772,7 +775,11 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
5277     }
5278    
5279     if (!mu_channels)
5280     - return -EINVAL;
5281     + return 0;
5282     +
5283     + c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
5284     + if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
5285     + return 0; /* no bmControls -> skip */
5286    
5287     return mu_channels;
5288     }
5289     @@ -944,7 +951,7 @@ static int check_input_term(struct mixer_build *state, int id,
5290     struct uac_mixer_unit_descriptor *d = p1;
5291    
5292     err = uac_mixer_unit_get_channels(state, d);
5293     - if (err < 0)
5294     + if (err <= 0)
5295     return err;
5296    
5297     term->channels = err;
5298     @@ -2068,11 +2075,15 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
5299    
5300     if (state->mixer->protocol == UAC_VERSION_2) {
5301     struct uac2_input_terminal_descriptor *d_v2 = raw_desc;
5302     + if (d_v2->bLength < sizeof(*d_v2))
5303     + return -EINVAL;
5304     control = UAC2_TE_CONNECTOR;
5305     term_id = d_v2->bTerminalID;
5306     bmctls = le16_to_cpu(d_v2->bmControls);
5307     } else if (state->mixer->protocol == UAC_VERSION_3) {
5308     struct uac3_input_terminal_descriptor *d_v3 = raw_desc;
5309     + if (d_v3->bLength < sizeof(*d_v3))
5310     + return -EINVAL;
5311     control = UAC3_TE_INSERTION;
5312     term_id = d_v3->bTerminalID;
5313     bmctls = le32_to_cpu(d_v3->bmControls);
5314     @@ -2118,7 +2129,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
5315     if (err < 0)
5316     continue;
5317     /* no bmControls field (e.g. Maya44) -> ignore */
5318     - if (desc->bLength <= 10 + input_pins)
5319     + if (!num_outs)
5320     continue;
5321     err = check_input_term(state, desc->baSourceID[pin], &iterm);
5322     if (err < 0)
5323     @@ -2314,7 +2325,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
5324     char *name)
5325     {
5326     struct uac_processing_unit_descriptor *desc = raw_desc;
5327     - int num_ins = desc->bNrInPins;
5328     + int num_ins;
5329     struct usb_mixer_elem_info *cval;
5330     struct snd_kcontrol *kctl;
5331     int i, err, nameid, type, len;
5332     @@ -2329,7 +2340,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
5333     0, NULL, default_value_info
5334     };
5335    
5336     - if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
5337     + if (desc->bLength < 13) {
5338     + usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
5339     + return -EINVAL;
5340     + }
5341     +
5342     + num_ins = desc->bNrInPins;
5343     + if (desc->bLength < 13 + num_ins ||
5344     desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
5345     usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
5346     return -EINVAL;
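The mixer fixes above all follow one rule: check that a descriptor's advertised bLength actually covers a field before reading it, and treat a merely-missing optional field (such as bmControls) as "skip", not as an error. Condensed into a sketch with a hypothetical UAC-like descriptor:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct my_unit_desc {                   /* hypothetical layout */
            u8 bLength;
            u8 bDescriptorType;
            u8 bNrInPins;
            u8 baSourceID[];                /* bNrInPins bytes, then bmControls */
    };

    static int unit_has_bmcontrols(const struct my_unit_desc *d)
    {
            if (d->bLength < sizeof(*d))
                    return -EINVAL;                 /* malformed: reject */
            if (d->bLength < sizeof(*d) + d->bNrInPins + 1)
                    return 0;                       /* no bmControls: skip quietly */
            return 1;                               /* safe to read the field */
    }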
5347     diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
5348     index 1c73b9ed44a6..57c6209a4ccb 100644
5349     --- a/sound/usb/quirks-table.h
5350     +++ b/sound/usb/quirks-table.h
5351     @@ -3326,6 +3326,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
5352     }
5353     }
5354     },
5355     + {
5356     + .ifnum = -1
5357     + },
5358     }
5359     }
5360     },
5361     @@ -3374,6 +3377,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
5362     }
5363     }
5364     },
5365     + {
5366     + .ifnum = -1
5367     + },
5368     }
5369     }
5370     },
5371     diff --git a/sound/usb/stream.c b/sound/usb/stream.c
5372     index 67cf849aa16b..d9e3de495c16 100644
5373     --- a/sound/usb/stream.c
5374     +++ b/sound/usb/stream.c
5375     @@ -596,12 +596,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
5376     csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
5377    
5378     if (!csep || csep->bLength < 7 ||
5379     - csep->bDescriptorSubtype != UAC_EP_GENERAL) {
5380     - usb_audio_warn(chip,
5381     - "%u:%d : no or invalid class specific endpoint descriptor\n",
5382     - iface_no, altsd->bAlternateSetting);
5383     - return 0;
5384     - }
5385     + csep->bDescriptorSubtype != UAC_EP_GENERAL)
5386     + goto error;
5387    
5388     if (protocol == UAC_VERSION_1) {
5389     attributes = csep->bmAttributes;
5390     @@ -609,6 +605,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
5391     struct uac2_iso_endpoint_descriptor *csep2 =
5392     (struct uac2_iso_endpoint_descriptor *) csep;
5393    
5394     + if (csep2->bLength < sizeof(*csep2))
5395     + goto error;
5396     attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX;
5397    
5398     /* emulate the endpoint attributes of a v1 device */
5399     @@ -618,12 +616,20 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
5400     struct uac3_iso_endpoint_descriptor *csep3 =
5401     (struct uac3_iso_endpoint_descriptor *) csep;
5402    
5403     + if (csep3->bLength < sizeof(*csep3))
5404     + goto error;
5405     /* emulate the endpoint attributes of a v1 device */
5406     if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH)
5407     attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
5408     }
5409    
5410     return attributes;
5411     +
5412     + error:
5413     + usb_audio_warn(chip,
5414     + "%u:%d : no or invalid class specific endpoint descriptor\n",
5415     + iface_no, altsd->bAlternateSetting);
5416     + return 0;
5417     }
5418    
5419     /* find an input terminal descriptor (either UAC1 or UAC2) with the given
5420     @@ -631,13 +637,17 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
5421     */
5422     static void *
5423     snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
5424     - int terminal_id)
5425     + int terminal_id, bool uac23)
5426     {
5427     struct uac2_input_terminal_descriptor *term = NULL;
5428     + size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) :
5429     + sizeof(struct uac_input_terminal_descriptor);
5430    
5431     while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
5432     ctrl_iface->extralen,
5433     term, UAC_INPUT_TERMINAL))) {
5434     + if (term->bLength < minlen)
5435     + continue;
5436     if (term->bTerminalID == terminal_id)
5437     return term;
5438     }
5439     @@ -655,7 +665,8 @@ snd_usb_find_output_terminal_descriptor(struct usb_host_interface *ctrl_iface,
5440     while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
5441     ctrl_iface->extralen,
5442     term, UAC_OUTPUT_TERMINAL))) {
5443     - if (term->bTerminalID == terminal_id)
5444     + if (term->bLength >= sizeof(*term) &&
5445     + term->bTerminalID == terminal_id)
5446     return term;
5447     }
5448    
5449     @@ -729,7 +740,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
5450     format = le16_to_cpu(as->wFormatTag); /* remember the format value */
5451    
5452     iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
5453     - as->bTerminalLink);
5454     + as->bTerminalLink,
5455     + false);
5456     if (iterm) {
5457     num_channels = iterm->bNrChannels;
5458     chconfig = le16_to_cpu(iterm->wChannelConfig);
5459     @@ -764,7 +776,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
5460     * to extract the clock
5461     */
5462     input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
5463     - as->bTerminalLink);
5464     + as->bTerminalLink,
5465     + true);
5466     if (input_term) {
5467     clock = input_term->bCSourceID;
5468     if (!chconfig && (num_channels == input_term->bNrChannels))
5469     @@ -998,7 +1011,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
5470     * to extract the clock
5471     */
5472     input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
5473     - as->bTerminalLink);
5474     + as->bTerminalLink,
5475     + true);
5476     if (input_term) {
5477     clock = input_term->bCSourceID;
5478     goto found_clock;
5479     diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
5480     index ff9d3a5825e1..c6635fee27d8 100644
5481     --- a/tools/testing/nvdimm/test/iomap.c
5482     +++ b/tools/testing/nvdimm/test/iomap.c
5483     @@ -104,16 +104,29 @@ void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
5484     }
5485     EXPORT_SYMBOL(__wrap_devm_memremap);
5486    
5487     +static void nfit_test_kill(void *_pgmap)
5488     +{
5489     + struct dev_pagemap *pgmap = _pgmap;
5490     +
5491     + pgmap->kill(pgmap->ref);
5492     +}
5493     +
5494     void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
5495     {
5496     resource_size_t offset = pgmap->res.start;
5497     struct nfit_test_resource *nfit_res = get_nfit_res(offset);
5498    
5499     - if (nfit_res)
5500     + if (nfit_res) {
5501     + int rc;
5502     +
5503     + rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
5504     + if (rc)
5505     + return ERR_PTR(rc);
5506     return nfit_res->buf + offset - nfit_res->res.start;
5507     + }
5508     return devm_memremap_pages(dev, pgmap);
5509     }
5510     -EXPORT_SYMBOL(__wrap_devm_memremap_pages);
5511     +EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
5512    
5513     pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
5514     {
5515     diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
5516     index fb22bccfbc8a..7ef45a4a3cba 100644
5517     --- a/tools/virtio/linux/kernel.h
5518     +++ b/tools/virtio/linux/kernel.h
5519     @@ -23,6 +23,10 @@
5520     #define PAGE_MASK (~(PAGE_SIZE-1))
5521     #define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
5522    
5523     +/* generic data direction definitions */
5524     +#define READ 0
5525     +#define WRITE 1
5526     +
5527     typedef unsigned long long phys_addr_t;
5528     typedef unsigned long long dma_addr_t;
5529     typedef size_t __kernel_size_t;