Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0124-5.4.25-all-fixes.patch



Revision 3505
Mon May 11 14:36:27 2020 UTC by niro
File size: 207817 bytes
-linux-5.4.25
1 niro 3505 diff --git a/Makefile b/Makefile
2     index c32c78cf2fe5..85e41313f078 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 4
9     -SUBLEVEL = 24
10     +SUBLEVEL = 25
11     EXTRAVERSION =
12     NAME = Kleptomaniac Octopus
13    
14     diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
15     index f3ced6df0c9b..9f66f96d09c9 100644
16     --- a/arch/arm/boot/dts/am437x-idk-evm.dts
17     +++ b/arch/arm/boot/dts/am437x-idk-evm.dts
18     @@ -526,11 +526,11 @@
19     * Supply voltage supervisor on board will not allow opp50 so
20     * disable it and set opp100 as suspend OPP.
21     */
22     - opp50@300000000 {
23     + opp50-300000000 {
24     status = "disabled";
25     };
26    
27     - opp100@600000000 {
28     + opp100-600000000 {
29     opp-suspend;
30     };
31     };
32     diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
33     index cdcba3f561c4..9f6fbe4c1fee 100644
34     --- a/arch/arm/boot/dts/dra76x.dtsi
35     +++ b/arch/arm/boot/dts/dra76x.dtsi
36     @@ -86,3 +86,8 @@
37     &usb4_tm {
38     status = "disabled";
39     };
40     +
41     +&mmc3 {
42     + /* dra76x is not affected by i887 */
43     + max-frequency = <96000000>;
44     +};
45     diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
46     index 93e1eb83bed9..d7d98d2069df 100644
47     --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
48     +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
49     @@ -796,16 +796,6 @@
50     clock-div = <1>;
51     };
52    
53     - ipu1_gfclk_mux: ipu1_gfclk_mux@520 {
54     - #clock-cells = <0>;
55     - compatible = "ti,mux-clock";
56     - clocks = <&dpll_abe_m2x2_ck>, <&dpll_core_h22x2_ck>;
57     - ti,bit-shift = <24>;
58     - reg = <0x0520>;
59     - assigned-clocks = <&ipu1_gfclk_mux>;
60     - assigned-clock-parents = <&dpll_core_h22x2_ck>;
61     - };
62     -
63     dummy_ck: dummy_ck {
64     #clock-cells = <0>;
65     compatible = "fixed-clock";
66     @@ -1564,6 +1554,8 @@
67     compatible = "ti,clkctrl";
68     reg = <0x20 0x4>;
69     #clock-cells = <2>;
70     + assigned-clocks = <&ipu1_clkctrl DRA7_IPU1_MMU_IPU1_CLKCTRL 24>;
71     + assigned-clock-parents = <&dpll_core_h22x2_ck>;
72     };
73    
74     ipu_clkctrl: ipu-clkctrl@50 {
75     diff --git a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
76     index 6486df3e2942..881cea0b61ba 100644
77     --- a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
78     +++ b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
79     @@ -183,7 +183,6 @@
80     pinctrl-0 = <&pinctrl_usdhc4>;
81     bus-width = <8>;
82     non-removable;
83     - vmmc-supply = <&vdd_emmc_1p8>;
84     status = "disabled";
85     };
86    
87     diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
88     index 917eb0b58b13..eed78f12e79e 100644
89     --- a/arch/arm/boot/dts/imx7-colibri.dtsi
90     +++ b/arch/arm/boot/dts/imx7-colibri.dtsi
91     @@ -337,7 +337,6 @@
92     assigned-clock-rates = <400000000>;
93     bus-width = <8>;
94     fsl,tuning-step = <2>;
95     - max-frequency = <100000000>;
96     vmmc-supply = <&reg_module_3v3>;
97     vqmmc-supply = <&reg_DCDC3>;
98     non-removable;
99     diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
100     index 2f6977ada447..63d9f4a066e3 100644
101     --- a/arch/arm/boot/dts/ls1021a.dtsi
102     +++ b/arch/arm/boot/dts/ls1021a.dtsi
103     @@ -728,7 +728,7 @@
104     };
105    
106     mdio0: mdio@2d24000 {
107     - compatible = "fsl,etsec2-mdio";
108     + compatible = "gianfar";
109     device_type = "mdio";
110     #address-cells = <1>;
111     #size-cells = <0>;
112     @@ -737,7 +737,7 @@
113     };
114    
115     mdio1: mdio@2d64000 {
116     - compatible = "fsl,etsec2-mdio";
117     + compatible = "gianfar";
118     device_type = "mdio";
119     #address-cells = <1>;
120     #size-cells = <0>;
121     diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
122     index 35ff620537e6..03506ce46149 100644
123     --- a/arch/arm/mach-imx/Makefile
124     +++ b/arch/arm/mach-imx/Makefile
125     @@ -91,6 +91,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
126     obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
127     obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
128     endif
129     +AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
130     +obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
131     obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
132    
133     obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
134     diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
135     index 912aeceb4ff8..5aa5796cff0e 100644
136     --- a/arch/arm/mach-imx/common.h
137     +++ b/arch/arm/mach-imx/common.h
138     @@ -109,17 +109,17 @@ void imx_cpu_die(unsigned int cpu);
139     int imx_cpu_kill(unsigned int cpu);
140    
141     #ifdef CONFIG_SUSPEND
142     -void v7_cpu_resume(void);
143     void imx53_suspend(void __iomem *ocram_vbase);
144     extern const u32 imx53_suspend_sz;
145     void imx6_suspend(void __iomem *ocram_vbase);
146     #else
147     -static inline void v7_cpu_resume(void) {}
148     static inline void imx53_suspend(void __iomem *ocram_vbase) {}
149     static const u32 imx53_suspend_sz;
150     static inline void imx6_suspend(void __iomem *ocram_vbase) {}
151     #endif
152    
153     +void v7_cpu_resume(void);
154     +
155     void imx6_pm_ccm_init(const char *ccm_compat);
156     void imx6q_pm_init(void);
157     void imx6dl_pm_init(void);
158     diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S
159     new file mode 100644
160     index 000000000000..5bd1ba7ef15b
161     --- /dev/null
162     +++ b/arch/arm/mach-imx/resume-imx6.S
163     @@ -0,0 +1,24 @@
164     +/* SPDX-License-Identifier: GPL-2.0-or-later */
165     +/*
166     + * Copyright 2014 Freescale Semiconductor, Inc.
167     + */
168     +
169     +#include <linux/linkage.h>
170     +#include <asm/assembler.h>
171     +#include <asm/asm-offsets.h>
172     +#include <asm/hardware/cache-l2x0.h>
173     +#include "hardware.h"
174     +
175     +/*
176     + * The following code must assume it is running from physical address
177     + * where absolute virtual addresses to the data section have to be
178     + * turned into relative ones.
179     + */
180     +
181     +ENTRY(v7_cpu_resume)
182     + bl v7_invalidate_l1
183     +#ifdef CONFIG_CACHE_L2X0
184     + bl l2c310_early_resume
185     +#endif
186     + b cpu_resume
187     +ENDPROC(v7_cpu_resume)
188     diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
189     index 062391ff13da..1eabf2d2834b 100644
190     --- a/arch/arm/mach-imx/suspend-imx6.S
191     +++ b/arch/arm/mach-imx/suspend-imx6.S
192     @@ -327,17 +327,3 @@ resume:
193    
194     ret lr
195     ENDPROC(imx6_suspend)
196     -
197     -/*
198     - * The following code must assume it is running from physical address
199     - * where absolute virtual addresses to the data section have to be
200     - * turned into relative ones.
201     - */
202     -
203     -ENTRY(v7_cpu_resume)
204     - bl v7_invalidate_l1
205     -#ifdef CONFIG_CACHE_L2X0
206     - bl l2c310_early_resume
207     -#endif
208     - b cpu_resume
209     -ENDPROC(v7_cpu_resume)
210     diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
211     index 4d67eb715b91..3f43716d5c45 100644
212     --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
213     +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
214     @@ -327,7 +327,7 @@
215     #size-cells = <0>;
216    
217     bus-width = <4>;
218     - max-frequency = <50000000>;
219     + max-frequency = <60000000>;
220    
221     non-removable;
222     disable-wp;
223     diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
224     index 5d6a8dafe8dc..29ac78ddc057 100644
225     --- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
226     +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
227     @@ -363,6 +363,7 @@
228     compatible = "brcm,bcm43438-bt";
229     interrupt-parent = <&gpio_intc>;
230     interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
231     + interrupt-names = "host-wakeup";
232     shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
233     max-speed = <2000000>;
234     clocks = <&wifi32k>;
235     diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
236     index 19468058e6ae..8148196902dd 100644
237     --- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
238     +++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
239     @@ -52,11 +52,6 @@
240     compatible = "ethernet-phy-ieee802.3-c22";
241     reg = <0>;
242     };
243     -
244     - ethphy1: ethernet-phy@1 {
245     - compatible = "ethernet-phy-ieee802.3-c22";
246     - reg = <1>;
247     - };
248     };
249     };
250    
251     diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
252     index d2cb28da3fff..d911d38877e5 100644
253     --- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
254     +++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
255     @@ -82,7 +82,7 @@
256     ranges = <0 0 0 0xffffffff>;
257    
258     gmac0: ethernet@ff800000 {
259     - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
260     + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
261     reg = <0xff800000 0x2000>;
262     interrupts = <0 90 4>;
263     interrupt-names = "macirq";
264     @@ -97,7 +97,7 @@
265     };
266    
267     gmac1: ethernet@ff802000 {
268     - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
269     + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
270     reg = <0xff802000 0x2000>;
271     interrupts = <0 91 4>;
272     interrupt-names = "macirq";
273     @@ -112,7 +112,7 @@
274     };
275    
276     gmac2: ethernet@ff804000 {
277     - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
278     + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
279     reg = <0xff804000 0x2000>;
280     interrupts = <0 92 4>;
281     interrupt-names = "macirq";
282     diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
283     index 3973847b5f42..48b2e1b59119 100644
284     --- a/arch/csky/Kconfig
285     +++ b/arch/csky/Kconfig
286     @@ -36,6 +36,7 @@ config CSKY
287     select GX6605S_TIMER if CPU_CK610
288     select HAVE_ARCH_TRACEHOOK
289     select HAVE_ARCH_AUDITSYSCALL
290     + select HAVE_COPY_THREAD_TLS
291     select HAVE_DYNAMIC_FTRACE
292     select HAVE_FUNCTION_TRACER
293     select HAVE_FUNCTION_GRAPH_TRACER
294     @@ -74,7 +75,7 @@ config CPU_HAS_TLBI
295     config CPU_HAS_LDSTEX
296     bool
297     help
298     - For SMP, CPU needs "ldex&stex" instrcutions to atomic operations.
299     + For SMP, CPU needs "ldex&stex" instructions for atomic operations.
300    
301     config CPU_NEED_TLBSYNC
302     bool
303     diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h
304     index 7ab78bd0f3b1..f35a9f3315ee 100644
305     --- a/arch/csky/abiv1/inc/abi/entry.h
306     +++ b/arch/csky/abiv1/inc/abi/entry.h
307     @@ -16,14 +16,16 @@
308     #define LSAVE_A4 40
309     #define LSAVE_A5 44
310    
311     +#define usp ss1
312     +
313     .macro USPTOKSP
314     - mtcr sp, ss1
315     + mtcr sp, usp
316     mfcr sp, ss0
317     .endm
318    
319     .macro KSPTOUSP
320     mtcr sp, ss0
321     - mfcr sp, ss1
322     + mfcr sp, usp
323     .endm
324    
325     .macro SAVE_ALL epc_inc
326     @@ -45,7 +47,13 @@
327     add lr, r13
328     stw lr, (sp, 8)
329    
330     + mov lr, sp
331     + addi lr, 32
332     + addi lr, 32
333     + addi lr, 16
334     + bt 2f
335     mfcr lr, ss1
336     +2:
337     stw lr, (sp, 16)
338    
339     stw a0, (sp, 20)
340     @@ -79,9 +87,10 @@
341     ldw a0, (sp, 12)
342     mtcr a0, epsr
343     btsti a0, 31
344     + bt 1f
345     ldw a0, (sp, 16)
346     mtcr a0, ss1
347     -
348     +1:
349     ldw a0, (sp, 24)
350     ldw a1, (sp, 28)
351     ldw a2, (sp, 32)
352     @@ -102,9 +111,9 @@
353     addi sp, 32
354     addi sp, 8
355    
356     - bt 1f
357     + bt 2f
358     KSPTOUSP
359     -1:
360     +2:
361     rte
362     .endm
363    
364     diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
365     index 9897a16b45e5..94a7a58765df 100644
366     --- a/arch/csky/abiv2/inc/abi/entry.h
367     +++ b/arch/csky/abiv2/inc/abi/entry.h
368     @@ -31,7 +31,13 @@
369    
370     mfcr lr, epsr
371     stw lr, (sp, 12)
372     + btsti lr, 31
373     + bf 1f
374     + addi lr, sp, 152
375     + br 2f
376     +1:
377     mfcr lr, usp
378     +2:
379     stw lr, (sp, 16)
380    
381     stw a0, (sp, 20)
382     @@ -64,8 +70,10 @@
383     mtcr a0, epc
384     ldw a0, (sp, 12)
385     mtcr a0, epsr
386     + btsti a0, 31
387     ldw a0, (sp, 16)
388     mtcr a0, usp
389     + mtcr a0, ss0
390    
391     #ifdef CONFIG_CPU_HAS_HILO
392     ldw a0, (sp, 140)
393     @@ -86,6 +94,9 @@
394     addi sp, 40
395     ldm r16-r30, (sp)
396     addi sp, 72
397     + bf 1f
398     + mfcr sp, ss0
399     +1:
400     rte
401     .endm
402    
403     diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
404     index 211c983c7282..ba4018929733 100644
405     --- a/arch/csky/include/uapi/asm/unistd.h
406     +++ b/arch/csky/include/uapi/asm/unistd.h
407     @@ -1,7 +1,10 @@
408     /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
409     // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
410    
411     +#define __ARCH_WANT_STAT64
412     +#define __ARCH_WANT_NEW_STAT
413     #define __ARCH_WANT_SYS_CLONE
414     +#define __ARCH_WANT_SYS_CLONE3
415     #define __ARCH_WANT_SET_GET_RLIMIT
416     #define __ARCH_WANT_TIME32_SYSCALLS
417     #include <asm-generic/unistd.h>
418     diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S
419     index 5b84f11485ae..3821ef9b7567 100644
420     --- a/arch/csky/kernel/atomic.S
421     +++ b/arch/csky/kernel/atomic.S
422     @@ -17,10 +17,12 @@ ENTRY(csky_cmpxchg)
423     mfcr a3, epc
424     addi a3, TRAP0_SIZE
425    
426     - subi sp, 8
427     + subi sp, 16
428     stw a3, (sp, 0)
429     mfcr a3, epsr
430     stw a3, (sp, 4)
431     + mfcr a3, usp
432     + stw a3, (sp, 8)
433    
434     psrset ee
435     #ifdef CONFIG_CPU_HAS_LDSTEX
436     @@ -47,7 +49,9 @@ ENTRY(csky_cmpxchg)
437     mtcr a3, epc
438     ldw a3, (sp, 4)
439     mtcr a3, epsr
440     - addi sp, 8
441     + ldw a3, (sp, 8)
442     + mtcr a3, usp
443     + addi sp, 16
444     KSPTOUSP
445     rte
446     END(csky_cmpxchg)
447     diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
448     index f320d9248a22..397962e11bd1 100644
449     --- a/arch/csky/kernel/process.c
450     +++ b/arch/csky/kernel/process.c
451     @@ -34,10 +34,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
452     return sw->r15;
453     }
454    
455     -int copy_thread(unsigned long clone_flags,
456     +int copy_thread_tls(unsigned long clone_flags,
457     unsigned long usp,
458     unsigned long kthread_arg,
459     - struct task_struct *p)
460     + struct task_struct *p,
461     + unsigned long tls)
462     {
463     struct switch_stack *childstack;
464     struct pt_regs *childregs = task_pt_regs(p);
465     @@ -64,7 +65,7 @@ int copy_thread(unsigned long clone_flags,
466     childregs->usp = usp;
467     if (clone_flags & CLONE_SETTLS)
468     task_thread_info(p)->tp_value = childregs->tls
469     - = childregs->regs[0];
470     + = tls;
471    
472     childregs->a0 = 0;
473     childstack->r15 = (unsigned long) ret_from_fork;
474     diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
475     index b753d382e4ce..0bb0954d5570 100644
476     --- a/arch/csky/kernel/smp.c
477     +++ b/arch/csky/kernel/smp.c
478     @@ -120,7 +120,7 @@ void __init setup_smp_ipi(void)
479     int rc;
480    
481     if (ipi_irq == 0)
482     - panic("%s IRQ mapping failed\n", __func__);
483     + return;
484    
485     rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
486     &ipi_dummy_dev);
487     diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile
488     index c94ef6481098..efb7ebab342b 100644
489     --- a/arch/csky/mm/Makefile
490     +++ b/arch/csky/mm/Makefile
491     @@ -1,8 +1,10 @@
492     # SPDX-License-Identifier: GPL-2.0-only
493     ifeq ($(CONFIG_CPU_HAS_CACHEV2),y)
494     obj-y += cachev2.o
495     +CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE)
496     else
497     obj-y += cachev1.o
498     +CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE)
499     endif
500    
501     obj-y += dma-mapping.o
502     diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
503     index d4c2292ea46b..00e96278b377 100644
504     --- a/arch/csky/mm/init.c
505     +++ b/arch/csky/mm/init.c
506     @@ -31,6 +31,7 @@
507    
508     pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
509     pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
510     +EXPORT_SYMBOL(invalid_pte_table);
511     unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
512     __page_aligned_bss;
513     EXPORT_SYMBOL(empty_zero_page);
514     diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
515     index 45e3137ccd71..72b81015cebe 100644
516     --- a/arch/powerpc/include/asm/cache.h
517     +++ b/arch/powerpc/include/asm/cache.h
518     @@ -55,42 +55,48 @@ struct ppc64_caches {
519    
520     extern struct ppc64_caches ppc64_caches;
521    
522     -static inline u32 l1_cache_shift(void)
523     +static inline u32 l1_dcache_shift(void)
524     {
525     return ppc64_caches.l1d.log_block_size;
526     }
527    
528     -static inline u32 l1_cache_bytes(void)
529     +static inline u32 l1_dcache_bytes(void)
530     {
531     return ppc64_caches.l1d.block_size;
532     }
533     +
534     +static inline u32 l1_icache_shift(void)
535     +{
536     + return ppc64_caches.l1i.log_block_size;
537     +}
538     +
539     +static inline u32 l1_icache_bytes(void)
540     +{
541     + return ppc64_caches.l1i.block_size;
542     +}
543     #else
544     -static inline u32 l1_cache_shift(void)
545     +static inline u32 l1_dcache_shift(void)
546     {
547     return L1_CACHE_SHIFT;
548     }
549    
550     -static inline u32 l1_cache_bytes(void)
551     +static inline u32 l1_dcache_bytes(void)
552     {
553     return L1_CACHE_BYTES;
554     }
555     +
556     +static inline u32 l1_icache_shift(void)
557     +{
558     + return L1_CACHE_SHIFT;
559     +}
560     +
561     +static inline u32 l1_icache_bytes(void)
562     +{
563     + return L1_CACHE_BYTES;
564     +}
565     +
566     #endif
567     -#endif /* ! __ASSEMBLY__ */
568     -
569     -#if defined(__ASSEMBLY__)
570     -/*
571     - * For a snooping icache, we still need a dummy icbi to purge all the
572     - * prefetched instructions from the ifetch buffers. We also need a sync
573     - * before the icbi to order the the actual stores to memory that might
574     - * have modified instructions with the icbi.
575     - */
576     -#define PURGE_PREFETCHED_INS \
577     - sync; \
578     - icbi 0,r3; \
579     - sync; \
580     - isync
581    
582     -#else
583     #define __read_mostly __attribute__((__section__(".data..read_mostly")))
584    
585     #ifdef CONFIG_PPC_BOOK3S_32
586     @@ -124,6 +130,17 @@ static inline void dcbst(void *addr)
587     {
588     __asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
589     }
590     +
591     +static inline void icbi(void *addr)
592     +{
593     + asm volatile ("icbi 0, %0" : : "r"(addr) : "memory");
594     +}
595     +
596     +static inline void iccci(void *addr)
597     +{
598     + asm volatile ("iccci 0, %0" : : "r"(addr) : "memory");
599     +}
600     +
601     #endif /* !__ASSEMBLY__ */
602     #endif /* __KERNEL__ */
603     #endif /* _ASM_POWERPC_CACHE_H */
604     diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
605     index eef388f2659f..4a1c9f0200e1 100644
606     --- a/arch/powerpc/include/asm/cacheflush.h
607     +++ b/arch/powerpc/include/asm/cacheflush.h
608     @@ -42,29 +42,25 @@ extern void flush_dcache_page(struct page *page);
609     #define flush_dcache_mmap_lock(mapping) do { } while (0)
610     #define flush_dcache_mmap_unlock(mapping) do { } while (0)
611    
612     -extern void flush_icache_range(unsigned long, unsigned long);
613     +void flush_icache_range(unsigned long start, unsigned long stop);
614     extern void flush_icache_user_range(struct vm_area_struct *vma,
615     struct page *page, unsigned long addr,
616     int len);
617     -extern void __flush_dcache_icache(void *page_va);
618     extern void flush_dcache_icache_page(struct page *page);
619     -#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
620     -extern void __flush_dcache_icache_phys(unsigned long physaddr);
621     -#else
622     -static inline void __flush_dcache_icache_phys(unsigned long physaddr)
623     -{
624     - BUG();
625     -}
626     -#endif
627     -
628     -/*
629     - * Write any modified data cache blocks out to memory and invalidate them.
630     - * Does not invalidate the corresponding instruction cache blocks.
631     +void __flush_dcache_icache(void *page);
632     +
633     +/**
634     + * flush_dcache_range(): Write any modified data cache blocks out to memory and
635     + * invalidate them. Does not invalidate the corresponding instruction cache
636     + * blocks.
637     + *
638     + * @start: the start address
639     + * @stop: the stop address (exclusive)
640     */
641     static inline void flush_dcache_range(unsigned long start, unsigned long stop)
642     {
643     - unsigned long shift = l1_cache_shift();
644     - unsigned long bytes = l1_cache_bytes();
645     + unsigned long shift = l1_dcache_shift();
646     + unsigned long bytes = l1_dcache_bytes();
647     void *addr = (void *)(start & ~(bytes - 1));
648     unsigned long size = stop - (unsigned long)addr + (bytes - 1);
649     unsigned long i;
650     @@ -89,8 +85,8 @@ static inline void flush_dcache_range(unsigned long start, unsigned long stop)
651     */
652     static inline void clean_dcache_range(unsigned long start, unsigned long stop)
653     {
654     - unsigned long shift = l1_cache_shift();
655     - unsigned long bytes = l1_cache_bytes();
656     + unsigned long shift = l1_dcache_shift();
657     + unsigned long bytes = l1_dcache_bytes();
658     void *addr = (void *)(start & ~(bytes - 1));
659     unsigned long size = stop - (unsigned long)addr + (bytes - 1);
660     unsigned long i;
661     @@ -108,8 +104,8 @@ static inline void clean_dcache_range(unsigned long start, unsigned long stop)
662     static inline void invalidate_dcache_range(unsigned long start,
663     unsigned long stop)
664     {
665     - unsigned long shift = l1_cache_shift();
666     - unsigned long bytes = l1_cache_bytes();
667     + unsigned long shift = l1_dcache_shift();
668     + unsigned long bytes = l1_dcache_bytes();
669     void *addr = (void *)(start & ~(bytes - 1));
670     unsigned long size = stop - (unsigned long)addr + (bytes - 1);
671     unsigned long i;
672     diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
673     index e745abc5457a..245be4fafe13 100644
674     --- a/arch/powerpc/kernel/cputable.c
675     +++ b/arch/powerpc/kernel/cputable.c
676     @@ -2193,11 +2193,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
677     * oprofile_cpu_type already has a value, then we are
678     * possibly overriding a real PVR with a logical one,
679     * and, in that case, keep the current value for
680     - * oprofile_cpu_type.
681     + * oprofile_cpu_type. Futhermore, let's ensure that the
682     + * fix for the PMAO bug is enabled on compatibility mode.
683     */
684     if (old.oprofile_cpu_type != NULL) {
685     t->oprofile_cpu_type = old.oprofile_cpu_type;
686     t->oprofile_type = old.oprofile_type;
687     + t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
688     }
689     }
690    
691     diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
692     index 82df4b09e79f..f4e4a1926a7a 100644
693     --- a/arch/powerpc/kernel/misc_32.S
694     +++ b/arch/powerpc/kernel/misc_32.S
695     @@ -316,126 +316,6 @@ _GLOBAL(flush_instruction_cache)
696     EXPORT_SYMBOL(flush_instruction_cache)
697     #endif /* CONFIG_PPC_8xx */
698    
699     -/*
700     - * Write any modified data cache blocks out to memory
701     - * and invalidate the corresponding instruction cache blocks.
702     - * This is a no-op on the 601.
703     - *
704     - * flush_icache_range(unsigned long start, unsigned long stop)
705     - */
706     -_GLOBAL(flush_icache_range)
707     -#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200)
708     - PURGE_PREFETCHED_INS
709     - blr /* for 601 and e200, do nothing */
710     -#else
711     - rlwinm r3,r3,0,0,31 - L1_CACHE_SHIFT
712     - subf r4,r3,r4
713     - addi r4,r4,L1_CACHE_BYTES - 1
714     - srwi. r4,r4,L1_CACHE_SHIFT
715     - beqlr
716     - mtctr r4
717     - mr r6,r3
718     -1: dcbst 0,r3
719     - addi r3,r3,L1_CACHE_BYTES
720     - bdnz 1b
721     - sync /* wait for dcbst's to get to ram */
722     -#ifndef CONFIG_44x
723     - mtctr r4
724     -2: icbi 0,r6
725     - addi r6,r6,L1_CACHE_BYTES
726     - bdnz 2b
727     -#else
728     - /* Flash invalidate on 44x because we are passed kmapped addresses and
729     - this doesn't work for userspace pages due to the virtually tagged
730     - icache. Sigh. */
731     - iccci 0, r0
732     -#endif
733     - sync /* additional sync needed on g4 */
734     - isync
735     - blr
736     -#endif
737     -_ASM_NOKPROBE_SYMBOL(flush_icache_range)
738     -EXPORT_SYMBOL(flush_icache_range)
739     -
740     -/*
741     - * Flush a particular page from the data cache to RAM.
742     - * Note: this is necessary because the instruction cache does *not*
743     - * snoop from the data cache.
744     - * This is a no-op on the 601 and e200 which have a unified cache.
745     - *
746     - * void __flush_dcache_icache(void *page)
747     - */
748     -_GLOBAL(__flush_dcache_icache)
749     -#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200)
750     - PURGE_PREFETCHED_INS
751     - blr
752     -#else
753     - rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
754     - li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
755     - mtctr r4
756     - mr r6,r3
757     -0: dcbst 0,r3 /* Write line to ram */
758     - addi r3,r3,L1_CACHE_BYTES
759     - bdnz 0b
760     - sync
761     -#ifdef CONFIG_44x
762     - /* We don't flush the icache on 44x. Those have a virtual icache
763     - * and we don't have access to the virtual address here (it's
764     - * not the page vaddr but where it's mapped in user space). The
765     - * flushing of the icache on these is handled elsewhere, when
766     - * a change in the address space occurs, before returning to
767     - * user space
768     - */
769     -BEGIN_MMU_FTR_SECTION
770     - blr
771     -END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
772     -#endif /* CONFIG_44x */
773     - mtctr r4
774     -1: icbi 0,r6
775     - addi r6,r6,L1_CACHE_BYTES
776     - bdnz 1b
777     - sync
778     - isync
779     - blr
780     -#endif
781     -
782     -#ifndef CONFIG_BOOKE
783     -/*
784     - * Flush a particular page from the data cache to RAM, identified
785     - * by its physical address. We turn off the MMU so we can just use
786     - * the physical address (this may be a highmem page without a kernel
787     - * mapping).
788     - *
789     - * void __flush_dcache_icache_phys(unsigned long physaddr)
790     - */
791     -_GLOBAL(__flush_dcache_icache_phys)
792     -#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200)
793     - PURGE_PREFETCHED_INS
794     - blr /* for 601 and e200, do nothing */
795     -#else
796     - mfmsr r10
797     - rlwinm r0,r10,0,28,26 /* clear DR */
798     - mtmsr r0
799     - isync
800     - rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
801     - li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
802     - mtctr r4
803     - mr r6,r3
804     -0: dcbst 0,r3 /* Write line to ram */
805     - addi r3,r3,L1_CACHE_BYTES
806     - bdnz 0b
807     - sync
808     - mtctr r4
809     -1: icbi 0,r6
810     - addi r6,r6,L1_CACHE_BYTES
811     - bdnz 1b
812     - sync
813     - mtmsr r10 /* restore DR */
814     - isync
815     - blr
816     -#endif
817     -#endif /* CONFIG_BOOKE */
818     -
819     /*
820     * Copy a whole page. We use the dcbz instruction on the destination
821     * to reduce memory traffic (it eliminates the unnecessary reads of
822     diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
823     index 9bc0aa9aeb65..ff20c253f273 100644
824     --- a/arch/powerpc/kernel/misc_64.S
825     +++ b/arch/powerpc/kernel/misc_64.S
826     @@ -49,108 +49,6 @@ _GLOBAL(call_do_irq)
827     mtlr r0
828     blr
829    
830     - .section ".toc","aw"
831     -PPC64_CACHES:
832     - .tc ppc64_caches[TC],ppc64_caches
833     - .section ".text"
834     -
835     -/*
836     - * Write any modified data cache blocks out to memory
837     - * and invalidate the corresponding instruction cache blocks.
838     - *
839     - * flush_icache_range(unsigned long start, unsigned long stop)
840     - *
841     - * flush all bytes from start through stop-1 inclusive
842     - */
843     -
844     -_GLOBAL_TOC(flush_icache_range)
845     -BEGIN_FTR_SECTION
846     - PURGE_PREFETCHED_INS
847     - blr
848     -END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
849     -/*
850     - * Flush the data cache to memory
851     - *
852     - * Different systems have different cache line sizes
853     - * and in some cases i-cache and d-cache line sizes differ from
854     - * each other.
855     - */
856     - ld r10,PPC64_CACHES@toc(r2)
857     - lwz r7,DCACHEL1BLOCKSIZE(r10)/* Get cache block size */
858     - addi r5,r7,-1
859     - andc r6,r3,r5 /* round low to line bdy */
860     - subf r8,r6,r4 /* compute length */
861     - add r8,r8,r5 /* ensure we get enough */
862     - lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of cache block size */
863     - srd. r8,r8,r9 /* compute line count */
864     - beqlr /* nothing to do? */
865     - mtctr r8
866     -1: dcbst 0,r6
867     - add r6,r6,r7
868     - bdnz 1b
869     - sync
870     -
871     -/* Now invalidate the instruction cache */
872     -
873     - lwz r7,ICACHEL1BLOCKSIZE(r10) /* Get Icache block size */
874     - addi r5,r7,-1
875     - andc r6,r3,r5 /* round low to line bdy */
876     - subf r8,r6,r4 /* compute length */
877     - add r8,r8,r5
878     - lwz r9,ICACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of Icache block size */
879     - srd. r8,r8,r9 /* compute line count */
880     - beqlr /* nothing to do? */
881     - mtctr r8
882     -2: icbi 0,r6
883     - add r6,r6,r7
884     - bdnz 2b
885     - isync
886     - blr
887     -_ASM_NOKPROBE_SYMBOL(flush_icache_range)
888     -EXPORT_SYMBOL(flush_icache_range)
889     -
890     -/*
891     - * Flush a particular page from the data cache to RAM.
892     - * Note: this is necessary because the instruction cache does *not*
893     - * snoop from the data cache.
894     - *
895     - * void __flush_dcache_icache(void *page)
896     - */
897     -_GLOBAL(__flush_dcache_icache)
898     -/*
899     - * Flush the data cache to memory
900     - *
901     - * Different systems have different cache line sizes
902     - */
903     -
904     -BEGIN_FTR_SECTION
905     - PURGE_PREFETCHED_INS
906     - blr
907     -END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
908     -
909     -/* Flush the dcache */
910     - ld r7,PPC64_CACHES@toc(r2)
911     - clrrdi r3,r3,PAGE_SHIFT /* Page align */
912     - lwz r4,DCACHEL1BLOCKSPERPAGE(r7) /* Get # dcache blocks per page */
913     - lwz r5,DCACHEL1BLOCKSIZE(r7) /* Get dcache block size */
914     - mr r6,r3
915     - mtctr r4
916     -0: dcbst 0,r6
917     - add r6,r6,r5
918     - bdnz 0b
919     - sync
920     -
921     -/* Now invalidate the icache */
922     -
923     - lwz r4,ICACHEL1BLOCKSPERPAGE(r7) /* Get # icache blocks per page */
924     - lwz r5,ICACHEL1BLOCKSIZE(r7) /* Get icache block size */
925     - mtctr r4
926     -1: icbi 0,r3
927     - add r3,r3,r5
928     - bdnz 1b
929     - isync
930     - blr
931     -
932     _GLOBAL(__bswapdi2)
933     EXPORT_SYMBOL(__bswapdi2)
934     srdi r8,r3,32
935     diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
936     index 9f5b32163bda..96ca90ce0264 100644
937     --- a/arch/powerpc/mm/mem.c
938     +++ b/arch/powerpc/mm/mem.c
939     @@ -348,6 +348,122 @@ void free_initmem(void)
940     free_initmem_default(POISON_FREE_INITMEM);
941     }
942    
943     +/**
944     + * flush_coherent_icache() - if a CPU has a coherent icache, flush it
945     + * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
946     + * Return true if the cache was flushed, false otherwise
947     + */
948     +static inline bool flush_coherent_icache(unsigned long addr)
949     +{
950     + /*
951     + * For a snooping icache, we still need a dummy icbi to purge all the
952     + * prefetched instructions from the ifetch buffers. We also need a sync
953     + * before the icbi to order the the actual stores to memory that might
954     + * have modified instructions with the icbi.
955     + */
956     + if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
957     + mb(); /* sync */
958     + allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
959     + icbi((void *)addr);
960     + prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
961     + mb(); /* sync */
962     + isync();
963     + return true;
964     + }
965     +
966     + return false;
967     +}
968     +
969     +/**
970     + * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
971     + * @start: the start address
972     + * @stop: the stop address (exclusive)
973     + */
974     +static void invalidate_icache_range(unsigned long start, unsigned long stop)
975     +{
976     + unsigned long shift = l1_icache_shift();
977     + unsigned long bytes = l1_icache_bytes();
978     + char *addr = (char *)(start & ~(bytes - 1));
979     + unsigned long size = stop - (unsigned long)addr + (bytes - 1);
980     + unsigned long i;
981     +
982     + for (i = 0; i < size >> shift; i++, addr += bytes)
983     + icbi(addr);
984     +
985     + mb(); /* sync */
986     + isync();
987     +}
988     +
989     +/**
990     + * flush_icache_range: Write any modified data cache blocks out to memory
991     + * and invalidate the corresponding blocks in the instruction cache
992     + *
993     + * Generic code will call this after writing memory, before executing from it.
994     + *
995     + * @start: the start address
996     + * @stop: the stop address (exclusive)
997     + */
998     +void flush_icache_range(unsigned long start, unsigned long stop)
999     +{
1000     + if (flush_coherent_icache(start))
1001     + return;
1002     +
1003     + clean_dcache_range(start, stop);
1004     +
1005     + if (IS_ENABLED(CONFIG_44x)) {
1006     + /*
1007     + * Flash invalidate on 44x because we are passed kmapped
1008     + * addresses and this doesn't work for userspace pages due to
1009     + * the virtually tagged icache.
1010     + */
1011     + iccci((void *)start);
1012     + mb(); /* sync */
1013     + isync();
1014     + } else
1015     + invalidate_icache_range(start, stop);
1016     +}
1017     +EXPORT_SYMBOL(flush_icache_range);
1018     +
1019     +#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
1020     +/**
1021     + * flush_dcache_icache_phys() - Flush a page by it's physical address
1022     + * @physaddr: the physical address of the page
1023     + */
1024     +static void flush_dcache_icache_phys(unsigned long physaddr)
1025     +{
1026     + unsigned long bytes = l1_dcache_bytes();
1027     + unsigned long nb = PAGE_SIZE / bytes;
1028     + unsigned long addr = physaddr & PAGE_MASK;
1029     + unsigned long msr, msr0;
1030     + unsigned long loop1 = addr, loop2 = addr;
1031     +
1032     + msr0 = mfmsr();
1033     + msr = msr0 & ~MSR_DR;
1034     + /*
1035     + * This must remain as ASM to prevent potential memory accesses
1036     + * while the data MMU is disabled
1037     + */
1038     + asm volatile(
1039     + " mtctr %2;\n"
1040     + " mtmsr %3;\n"
1041     + " isync;\n"
1042     + "0: dcbst 0, %0;\n"
1043     + " addi %0, %0, %4;\n"
1044     + " bdnz 0b;\n"
1045     + " sync;\n"
1046     + " mtctr %2;\n"
1047     + "1: icbi 0, %1;\n"
1048     + " addi %1, %1, %4;\n"
1049     + " bdnz 1b;\n"
1050     + " sync;\n"
1051     + " mtmsr %5;\n"
1052     + " isync;\n"
1053     + : "+&r" (loop1), "+&r" (loop2)
1054     + : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
1055     + : "ctr", "memory");
1056     +}
1057     +#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
1058     +
1059     /*
1060     * This is called when a page has been modified by the kernel.
1061     * It just marks the page as not i-cache clean. We do the i-cache
1062     @@ -380,12 +496,46 @@ void flush_dcache_icache_page(struct page *page)
1063     __flush_dcache_icache(start);
1064     kunmap_atomic(start);
1065     } else {
1066     - __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
1067     + unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;
1068     +
1069     + if (flush_coherent_icache(addr))
1070     + return;
1071     + flush_dcache_icache_phys(addr);
1072     }
1073     #endif
1074     }
1075     EXPORT_SYMBOL(flush_dcache_icache_page);
1076    
1077     +/**
1078     + * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
1079     + * Note: this is necessary because the instruction cache does *not*
1080     + * snoop from the data cache.
1081     + *
1082     + * @page: the address of the page to flush
1083     + */
1084     +void __flush_dcache_icache(void *p)
1085     +{
1086     + unsigned long addr = (unsigned long)p;
1087     +
1088     + if (flush_coherent_icache(addr))
1089     + return;
1090     +
1091     + clean_dcache_range(addr, addr + PAGE_SIZE);
1092     +
1093     + /*
1094     + * We don't flush the icache on 44x. Those have a virtual icache and we
1095     + * don't have access to the virtual address here (it's not the page
1096     + * vaddr but where it's mapped in user space). The flushing of the
1097     + * icache on these is handled elsewhere, when a change in the address
1098     + * space occurs, before returning to user space.
1099     + */
1100     +
1101     + if (cpu_has_feature(MMU_FTR_TYPE_44x))
1102     + return;
1103     +
1104     + invalidate_icache_range(addr, addr + PAGE_SIZE);
1105     +}
1106     +
1107     void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
1108     {
1109     clear_page(page);
1110     diff --git a/arch/s390/Makefile b/arch/s390/Makefile
1111     index 9ce1baeac2b2..2faaf456956a 100644
1112     --- a/arch/s390/Makefile
1113     +++ b/arch/s390/Makefile
1114     @@ -146,7 +146,7 @@ all: bzImage
1115     #KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
1116     KBUILD_IMAGE := $(boot)/bzImage
1117    
1118     -install: vmlinux
1119     +install:
1120     $(Q)$(MAKE) $(build)=$(boot) $@
1121    
1122     bzImage: vmlinux
1123     diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
1124     index e2c47d3a1c89..0ff9261c915e 100644
1125     --- a/arch/s390/boot/Makefile
1126     +++ b/arch/s390/boot/Makefile
1127     @@ -70,7 +70,7 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
1128     $(obj)/startup.a: $(OBJECTS) FORCE
1129     $(call if_changed,ar)
1130    
1131     -install: $(CONFIGURE) $(obj)/bzImage
1132     +install:
1133     sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
1134     System.map "$(INSTALL_PATH)"
1135    
1136     diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
1137     index a9e46b83c536..34a655ad7123 100644
1138     --- a/arch/s390/include/asm/pgtable.h
1139     +++ b/arch/s390/include/asm/pgtable.h
1140     @@ -756,6 +756,12 @@ static inline int pmd_write(pmd_t pmd)
1141     return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
1142     }
1143    
1144     +#define pud_write pud_write
1145     +static inline int pud_write(pud_t pud)
1146     +{
1147     + return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
1148     +}
1149     +
1150     static inline int pmd_dirty(pmd_t pmd)
1151     {
1152     int dirty = 1;
1153     diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
1154     index e3f238e8c611..4f35b1055f30 100644
1155     --- a/arch/s390/include/asm/qdio.h
1156     +++ b/arch/s390/include/asm/qdio.h
1157     @@ -227,7 +227,7 @@ struct qdio_buffer {
1158     * @sbal: absolute SBAL address
1159     */
1160     struct sl_element {
1161     - unsigned long sbal;
1162     + u64 sbal;
1163     } __attribute__ ((packed));
1164    
1165     /**
1166     diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
1167     index 5b24fcc9c361..6105b1b6e49b 100644
1168     --- a/arch/s390/pci/pci.c
1169     +++ b/arch/s390/pci/pci.c
1170     @@ -423,7 +423,7 @@ static void zpci_map_resources(struct pci_dev *pdev)
1171    
1172     if (zpci_use_mio(zdev))
1173     pdev->resource[i].start =
1174     - (resource_size_t __force) zdev->bars[i].mio_wb;
1175     + (resource_size_t __force) zdev->bars[i].mio_wt;
1176     else
1177     pdev->resource[i].start = (resource_size_t __force)
1178     pci_iomap_range_fh(pdev, i, 0, 0);
1179     @@ -530,7 +530,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
1180     flags |= IORESOURCE_MEM_64;
1181    
1182     if (zpci_use_mio(zdev))
1183     - addr = (unsigned long) zdev->bars[i].mio_wb;
1184     + addr = (unsigned long) zdev->bars[i].mio_wt;
1185     else
1186     addr = ZPCI_ADDR(entry);
1187     size = 1UL << zdev->bars[i].size;
1188     diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
1189     index 748456c365f4..9557c5a15b91 100644
1190     --- a/arch/x86/boot/compressed/kaslr_64.c
1191     +++ b/arch/x86/boot/compressed/kaslr_64.c
1192     @@ -29,9 +29,6 @@
1193     #define __PAGE_OFFSET __PAGE_OFFSET_BASE
1194     #include "../../mm/ident_map.c"
1195    
1196     -/* Used by pgtable.h asm code to force instruction serialization. */
1197     -unsigned long __force_order;
1198     -
1199     /* Used to track our page table allocation area. */
1200     struct alloc_pgt_data {
1201     unsigned char *pgt_buf;
1202     diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
1203     index fffe21945374..704caec136cf 100644
1204     --- a/arch/x86/kernel/cpu/common.c
1205     +++ b/arch/x86/kernel/cpu/common.c
1206     @@ -464,7 +464,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
1207     * cpuid bit to be set. We need to ensure that we
1208     * update that bit in this CPU's "cpu_info".
1209     */
1210     - get_cpu_cap(c);
1211     + set_cpu_cap(c, X86_FEATURE_OSPKE);
1212     }
1213    
1214     #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
1215     diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
1216     index 52a1e5192fa8..fe0e647411da 100644
1217     --- a/arch/x86/platform/efi/efi_64.c
1218     +++ b/arch/x86/platform/efi/efi_64.c
1219     @@ -316,7 +316,7 @@ void efi_sync_low_kernel_mappings(void)
1220     static inline phys_addr_t
1221     virt_to_phys_or_null_size(void *va, unsigned long size)
1222     {
1223     - bool bad_size;
1224     + phys_addr_t pa;
1225    
1226     if (!va)
1227     return 0;
1228     @@ -324,16 +324,13 @@ virt_to_phys_or_null_size(void *va, unsigned long size)
1229     if (virt_addr_valid(va))
1230     return virt_to_phys(va);
1231    
1232     - /*
1233     - * A fully aligned variable on the stack is guaranteed not to
1234     - * cross a page bounary. Try to catch strings on the stack by
1235     - * checking that 'size' is a power of two.
1236     - */
1237     - bad_size = size > PAGE_SIZE || !is_power_of_2(size);
1238     + pa = slow_virt_to_phys(va);
1239    
1240     - WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
1241     + /* check if the object crosses a page boundary */
1242     + if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
1243     + return 0;
1244    
1245     - return slow_virt_to_phys(va);
1246     + return pa;
1247     }
1248    
1249     #define virt_to_phys_or_null(addr) \
1250     @@ -791,6 +788,8 @@ static efi_status_t
1251     efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
1252     u32 *attr, unsigned long *data_size, void *data)
1253     {
1254     + u8 buf[24] __aligned(8);
1255     + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
1256     efi_status_t status;
1257     u32 phys_name, phys_vendor, phys_attr;
1258     u32 phys_data_size, phys_data;
1259     @@ -798,14 +797,19 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
1260    
1261     spin_lock_irqsave(&efi_runtime_lock, flags);
1262    
1263     + *vnd = *vendor;
1264     +
1265     phys_data_size = virt_to_phys_or_null(data_size);
1266     - phys_vendor = virt_to_phys_or_null(vendor);
1267     + phys_vendor = virt_to_phys_or_null(vnd);
1268     phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
1269     phys_attr = virt_to_phys_or_null(attr);
1270     phys_data = virt_to_phys_or_null_size(data, *data_size);
1271    
1272     - status = efi_thunk(get_variable, phys_name, phys_vendor,
1273     - phys_attr, phys_data_size, phys_data);
1274     + if (!phys_name || (data && !phys_data))
1275     + status = EFI_INVALID_PARAMETER;
1276     + else
1277     + status = efi_thunk(get_variable, phys_name, phys_vendor,
1278     + phys_attr, phys_data_size, phys_data);
1279    
1280     spin_unlock_irqrestore(&efi_runtime_lock, flags);
1281    
1282     @@ -816,19 +820,25 @@ static efi_status_t
1283     efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
1284     u32 attr, unsigned long data_size, void *data)
1285     {
1286     + u8 buf[24] __aligned(8);
1287     + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
1288     u32 phys_name, phys_vendor, phys_data;
1289     efi_status_t status;
1290     unsigned long flags;
1291    
1292     spin_lock_irqsave(&efi_runtime_lock, flags);
1293    
1294     + *vnd = *vendor;
1295     +
1296     phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
1297     - phys_vendor = virt_to_phys_or_null(vendor);
1298     + phys_vendor = virt_to_phys_or_null(vnd);
1299     phys_data = virt_to_phys_or_null_size(data, data_size);
1300    
1301     - /* If data_size is > sizeof(u32) we've got problems */
1302     - status = efi_thunk(set_variable, phys_name, phys_vendor,
1303     - attr, data_size, phys_data);
1304     + if (!phys_name || !phys_data)
1305     + status = EFI_INVALID_PARAMETER;
1306     + else
1307     + status = efi_thunk(set_variable, phys_name, phys_vendor,
1308     + attr, data_size, phys_data);
1309    
1310     spin_unlock_irqrestore(&efi_runtime_lock, flags);
1311    
1312     @@ -840,6 +850,8 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
1313     u32 attr, unsigned long data_size,
1314     void *data)
1315     {
1316     + u8 buf[24] __aligned(8);
1317     + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
1318     u32 phys_name, phys_vendor, phys_data;
1319     efi_status_t status;
1320     unsigned long flags;
1321     @@ -847,13 +859,17 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
1322     if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
1323     return EFI_NOT_READY;
1324    
1325     + *vnd = *vendor;
1326     +
1327     phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
1328     - phys_vendor = virt_to_phys_or_null(vendor);
1329     + phys_vendor = virt_to_phys_or_null(vnd);
1330     phys_data = virt_to_phys_or_null_size(data, data_size);
1331    
1332     - /* If data_size is > sizeof(u32) we've got problems */
1333     - status = efi_thunk(set_variable, phys_name, phys_vendor,
1334     - attr, data_size, phys_data);
1335     + if (!phys_name || !phys_data)
1336     + status = EFI_INVALID_PARAMETER;
1337     + else
1338     + status = efi_thunk(set_variable, phys_name, phys_vendor,
1339     + attr, data_size, phys_data);
1340    
1341     spin_unlock_irqrestore(&efi_runtime_lock, flags);
1342    
1343     @@ -865,21 +881,29 @@ efi_thunk_get_next_variable(unsigned long *name_size,
1344     efi_char16_t *name,
1345     efi_guid_t *vendor)
1346     {
1347     + u8 buf[24] __aligned(8);
1348     + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
1349     efi_status_t status;
1350     u32 phys_name_size, phys_name, phys_vendor;
1351     unsigned long flags;
1352    
1353     spin_lock_irqsave(&efi_runtime_lock, flags);
1354    
1355     + *vnd = *vendor;
1356     +
1357     phys_name_size = virt_to_phys_or_null(name_size);
1358     - phys_vendor = virt_to_phys_or_null(vendor);
1359     + phys_vendor = virt_to_phys_or_null(vnd);
1360     phys_name = virt_to_phys_or_null_size(name, *name_size);
1361    
1362     - status = efi_thunk(get_next_variable, phys_name_size,
1363     - phys_name, phys_vendor);
1364     + if (!phys_name)
1365     + status = EFI_INVALID_PARAMETER;
1366     + else
1367     + status = efi_thunk(get_next_variable, phys_name_size,
1368     + phys_name, phys_vendor);
1369    
1370     spin_unlock_irqrestore(&efi_runtime_lock, flags);
1371    
1372     + *vendor = *vnd;
1373     return status;
1374     }
1375    
1376     diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
1377     index 6ea215cdeada..6d4d8a5700b7 100644
1378     --- a/arch/x86/xen/enlighten_pv.c
1379     +++ b/arch/x86/xen/enlighten_pv.c
1380     @@ -905,14 +905,15 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
1381     static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
1382     {
1383     int ret;
1384     +#ifdef CONFIG_X86_64
1385     + unsigned int which;
1386     + u64 base;
1387     +#endif
1388    
1389     ret = 0;
1390    
1391     switch (msr) {
1392     #ifdef CONFIG_X86_64
1393     - unsigned which;
1394     - u64 base;
1395     -
1396     case MSR_FS_BASE: which = SEGBASE_FS; goto set;
1397     case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set;
1398     case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set;
1399     diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
1400     index 86a607cf19a1..d0e36d652264 100644
1401     --- a/block/bfq-cgroup.c
1402     +++ b/block/bfq-cgroup.c
1403     @@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg)
1404     kfree(bfqg);
1405     }
1406    
1407     -static void bfqg_and_blkg_get(struct bfq_group *bfqg)
1408     +void bfqg_and_blkg_get(struct bfq_group *bfqg)
1409     {
1410     /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
1411     bfqg_get(bfqg);
1412     @@ -634,6 +634,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1413     bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
1414     false, BFQQE_PREEMPTED);
1415    
1416     + /*
1417     + * get extra reference to prevent bfqq from being freed in
1418     + * next possible deactivate
1419     + */
1420     + bfqq->ref++;
1421     +
1422     if (bfq_bfqq_busy(bfqq))
1423     bfq_deactivate_bfqq(bfqd, bfqq, false, false);
1424     else if (entity->on_st)
1425     @@ -653,6 +659,8 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1426    
1427     if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
1428     bfq_schedule_dispatch(bfqd);
1429     + /* release extra ref taken above */
1430     + bfq_put_queue(bfqq);
1431     }
1432    
1433     /**
1434     @@ -1380,6 +1388,10 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
1435     return bfqq->bfqd->root_group;
1436     }
1437    
1438     +void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
1439     +
1440     +void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
1441     +
1442     struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1443     {
1444     struct bfq_group *bfqg;
1445     diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
1446     index 5498d05b873d..48189ff88916 100644
1447     --- a/block/bfq-iosched.c
1448     +++ b/block/bfq-iosched.c
1449     @@ -614,6 +614,10 @@ bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1450     bfqq->pos_root = NULL;
1451     }
1452    
1453     + /* oom_bfqq does not participate in queue merging */
1454     + if (bfqq == &bfqd->oom_bfqq)
1455     + return;
1456     +
1457     /*
1458     * bfqq cannot be merged any longer (see comments in
1459     * bfq_setup_cooperator): no point in adding bfqq into the
1460     @@ -4822,9 +4826,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
1461     {
1462     struct bfq_queue *item;
1463     struct hlist_node *n;
1464     -#ifdef CONFIG_BFQ_GROUP_IOSCHED
1465     struct bfq_group *bfqg = bfqq_group(bfqq);
1466     -#endif
1467    
1468     if (bfqq->bfqd)
1469     bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
1470     @@ -4897,9 +4899,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
1471     bfqq->bfqd->last_completed_rq_bfqq = NULL;
1472    
1473     kmem_cache_free(bfq_pool, bfqq);
1474     -#ifdef CONFIG_BFQ_GROUP_IOSCHED
1475     bfqg_and_blkg_put(bfqg);
1476     -#endif
1477     }
1478    
1479     static void bfq_put_cooperator(struct bfq_queue *bfqq)
1480     @@ -6383,10 +6383,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
1481    
1482     hrtimer_cancel(&bfqd->idle_slice_timer);
1483    
1484     -#ifdef CONFIG_BFQ_GROUP_IOSCHED
1485     /* release oom-queue reference to root group */
1486     bfqg_and_blkg_put(bfqd->root_group);
1487    
1488     +#ifdef CONFIG_BFQ_GROUP_IOSCHED
1489     blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
1490     #else
1491     spin_lock_irq(&bfqd->lock);
1492     diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
1493     index 5d1a519640f6..1553a4e8f7ad 100644
1494     --- a/block/bfq-iosched.h
1495     +++ b/block/bfq-iosched.h
1496     @@ -916,6 +916,7 @@ struct bfq_group {
1497    
1498     #else
1499     struct bfq_group {
1500     + struct bfq_entity entity;
1501     struct bfq_sched_data sched_data;
1502    
1503     struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
1504     @@ -978,6 +979,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
1505     struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
1506     struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
1507     struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
1508     +void bfqg_and_blkg_get(struct bfq_group *bfqg);
1509     void bfqg_and_blkg_put(struct bfq_group *bfqg);
1510    
1511     #ifdef CONFIG_BFQ_GROUP_IOSCHED
1512     diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
1513     index 05f0bf4a1144..44079147e396 100644
1514     --- a/block/bfq-wf2q.c
1515     +++ b/block/bfq-wf2q.c
1516     @@ -536,7 +536,9 @@ static void bfq_get_entity(struct bfq_entity *entity)
1517     bfqq->ref++;
1518     bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
1519     bfqq, bfqq->ref);
1520     - }
1521     + } else
1522     + bfqg_and_blkg_get(container_of(entity, struct bfq_group,
1523     + entity));
1524     }
1525    
1526     /**
1527     @@ -650,8 +652,14 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
1528    
1529     entity->on_st = false;
1530     st->wsum -= entity->weight;
1531     - if (bfqq && !is_in_service)
1532     + if (is_in_service)
1533     + return;
1534     +
1535     + if (bfqq)
1536     bfq_put_queue(bfqq);
1537     + else
1538     + bfqg_and_blkg_put(container_of(entity, struct bfq_group,
1539     + entity));
1540     }
1541    
1542     /**
1543     diff --git a/drivers/android/binder.c b/drivers/android/binder.c
1544     index 254f87b627fe..34a6de65aa7e 100644
1545     --- a/drivers/android/binder.c
1546     +++ b/drivers/android/binder.c
1547     @@ -5230,6 +5230,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
1548     binder_dev = container_of(filp->private_data,
1549     struct binder_device, miscdev);
1550     }
1551     + refcount_inc(&binder_dev->ref);
1552     proc->context = &binder_dev->context;
1553     binder_alloc_init(&proc->alloc);
1554    
1555     @@ -5407,6 +5408,7 @@ static int binder_node_release(struct binder_node *node, int refs)
1556     static void binder_deferred_release(struct binder_proc *proc)
1557     {
1558     struct binder_context *context = proc->context;
1559     + struct binder_device *device;
1560     struct rb_node *n;
1561     int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
1562    
1563     @@ -5423,6 +5425,12 @@ static void binder_deferred_release(struct binder_proc *proc)
1564     context->binder_context_mgr_node = NULL;
1565     }
1566     mutex_unlock(&context->context_mgr_node_lock);
1567     + device = container_of(proc->context, struct binder_device, context);
1568     + if (refcount_dec_and_test(&device->ref)) {
1569     + kfree(context->name);
1570     + kfree(device);
1571     + }
1572     + proc->context = NULL;
1573     binder_inner_proc_lock(proc);
1574     /*
1575     * Make sure proc stays alive after we
1576     @@ -6079,6 +6087,7 @@ static int __init init_binder_device(const char *name)
1577     binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
1578     binder_device->miscdev.name = name;
1579    
1580     + refcount_set(&binder_device->ref, 1);
1581     binder_device->context.binder_context_mgr_uid = INVALID_UID;
1582     binder_device->context.name = name;
1583     mutex_init(&binder_device->context.context_mgr_node_lock);
1584     diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
1585     index ae991097d14d..283d3cb9c16e 100644
1586     --- a/drivers/android/binder_internal.h
1587     +++ b/drivers/android/binder_internal.h
1588     @@ -8,6 +8,7 @@
1589     #include <linux/list.h>
1590     #include <linux/miscdevice.h>
1591     #include <linux/mutex.h>
1592     +#include <linux/refcount.h>
1593     #include <linux/stddef.h>
1594     #include <linux/types.h>
1595     #include <linux/uidgid.h>
1596     @@ -33,6 +34,7 @@ struct binder_device {
1597     struct miscdevice miscdev;
1598     struct binder_context context;
1599     struct inode *binderfs_inode;
1600     + refcount_t ref;
1601     };
1602    
1603     /**
1604     diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
1605     index e2580e5316a2..110e41f920c2 100644
1606     --- a/drivers/android/binderfs.c
1607     +++ b/drivers/android/binderfs.c
1608     @@ -154,6 +154,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
1609     if (!name)
1610     goto err;
1611    
1612     + refcount_set(&device->ref, 1);
1613     device->binderfs_inode = inode;
1614     device->context.binder_context_mgr_uid = INVALID_UID;
1615     device->context.name = name;
1616     @@ -257,8 +258,10 @@ static void binderfs_evict_inode(struct inode *inode)
1617     ida_free(&binderfs_minors, device->miscdev.minor);
1618     mutex_unlock(&binderfs_minors_mutex);
1619    
1620     - kfree(device->context.name);
1621     - kfree(device);
1622     + if (refcount_dec_and_test(&device->ref)) {
1623     + kfree(device->context.name);
1624     + kfree(device);
1625     + }
1626     }
1627    
1628     /**
1629     diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
1630     index a0cecb12b6f9..f0bc0841cbc4 100644
1631     --- a/drivers/bus/ti-sysc.c
1632     +++ b/drivers/bus/ti-sysc.c
1633     @@ -1406,7 +1406,7 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
1634     }
1635    
1636     /* 1-wire needs module's internal clocks enabled for reset */
1637     -static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata)
1638     +static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
1639     {
1640     int offset = 0x0c; /* HDQ_CTRL_STATUS */
1641     u16 val;
1642     @@ -1494,7 +1494,7 @@ static void sysc_init_module_quirks(struct sysc *ddata)
1643     return;
1644    
1645     if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
1646     - ddata->clk_enable_quirk = sysc_clk_enable_quirk_hdq1w;
1647     + ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w;
1648    
1649     return;
1650     }
1651     diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
1652     index 433d91d710e4..0fb0358f0073 100644
1653     --- a/drivers/dma-buf/dma-buf.c
1654     +++ b/drivers/dma-buf/dma-buf.c
1655     @@ -108,6 +108,7 @@ static int dma_buf_release(struct inode *inode, struct file *file)
1656     dma_resv_fini(dmabuf->resv);
1657    
1658     module_put(dmabuf->owner);
1659     + kfree(dmabuf->name);
1660     kfree(dmabuf);
1661     return 0;
1662     }
1663     diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
1664     index e51d836afcc7..1092d4ce723e 100644
1665     --- a/drivers/dma/coh901318.c
1666     +++ b/drivers/dma/coh901318.c
1667     @@ -1947,8 +1947,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
1668     return;
1669     }
1670    
1671     - spin_lock(&cohc->lock);
1672     -
1673     /*
1674     * When we reach this point, at least one queue item
1675     * should have been moved over from cohc->queue to
1676     @@ -1969,8 +1967,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
1677     if (coh901318_queue_start(cohc) == NULL)
1678     cohc->busy = 0;
1679    
1680     - spin_unlock(&cohc->lock);
1681     -
1682     /*
1683     * This tasklet will remove items from cohc->active
1684     * and thus terminates them.
1685     diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
1686     index c27e206a764c..67736c801f3c 100644
1687     --- a/drivers/dma/imx-sdma.c
1688     +++ b/drivers/dma/imx-sdma.c
1689     @@ -1328,13 +1328,14 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
1690    
1691     sdma_channel_synchronize(chan);
1692    
1693     - if (sdmac->event_id0)
1694     + if (sdmac->event_id0 >= 0)
1695     sdma_event_disable(sdmac, sdmac->event_id0);
1696     if (sdmac->event_id1)
1697     sdma_event_disable(sdmac, sdmac->event_id1);
1698    
1699     sdmac->event_id0 = 0;
1700     sdmac->event_id1 = 0;
1701     + sdmac->context_loaded = false;
1702    
1703     sdma_set_channel_priority(sdmac, 0);
1704    
1705     @@ -1628,7 +1629,7 @@ static int sdma_config(struct dma_chan *chan,
1706     memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
1707    
1708     /* Set ENBLn earlier to make sure dma request triggered after that */
1709     - if (sdmac->event_id0) {
1710     + if (sdmac->event_id0 >= 0) {
1711     if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
1712     return -EINVAL;
1713     sdma_event_enable(sdmac, sdmac->event_id0);
1714     diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
1715     index 3a45079d11ec..4a750e29bfb5 100644
1716     --- a/drivers/dma/tegra20-apb-dma.c
1717     +++ b/drivers/dma/tegra20-apb-dma.c
1718     @@ -281,7 +281,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
1719    
1720     /* Do not allocate if desc are waiting for ack */
1721     list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
1722     - if (async_tx_test_ack(&dma_desc->txd)) {
1723     + if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
1724     list_del(&dma_desc->node);
1725     spin_unlock_irqrestore(&tdc->lock, flags);
1726     dma_desc->txd.flags = 0;
1727     @@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
1728     bool was_busy;
1729    
1730     spin_lock_irqsave(&tdc->lock, flags);
1731     - if (list_empty(&tdc->pending_sg_req)) {
1732     - spin_unlock_irqrestore(&tdc->lock, flags);
1733     - return 0;
1734     - }
1735    
1736     if (!tdc->busy)
1737     goto skip_dma_stop;
1738     diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
1739     index 2d263382d797..880ffd833718 100644
1740     --- a/drivers/edac/synopsys_edac.c
1741     +++ b/drivers/edac/synopsys_edac.c
1742     @@ -479,20 +479,14 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
1743     pinf = &p->ceinfo;
1744     if (!priv->p_data->quirks) {
1745     snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
1746     - "DDR ECC error type:%s Row %d Bank %d Col %d ",
1747     - "CE", pinf->row, pinf->bank, pinf->col);
1748     - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
1749     - "Bit Position: %d Data: 0x%08x\n",
1750     + "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
1751     + "CE", pinf->row, pinf->bank, pinf->col,
1752     pinf->bitpos, pinf->data);
1753     } else {
1754     snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
1755     - "DDR ECC error type:%s Row %d Bank %d Col %d ",
1756     - "CE", pinf->row, pinf->bank, pinf->col);
1757     - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
1758     - "BankGroup Number %d Block Number %d ",
1759     - pinf->bankgrpnr, pinf->blknr);
1760     - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
1761     - "Bit Position: %d Data: 0x%08x\n",
1762     + "DDR ECC error type:%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
1763     + "CE", pinf->row, pinf->bank, pinf->col,
1764     + pinf->bankgrpnr, pinf->blknr,
1765     pinf->bitpos, pinf->data);
1766     }
1767    
1768     @@ -509,10 +503,8 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
1769     "UE", pinf->row, pinf->bank, pinf->col);
1770     } else {
1771     snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
1772     - "DDR ECC error type :%s Row %d Bank %d Col %d ",
1773     - "UE", pinf->row, pinf->bank, pinf->col);
1774     - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
1775     - "BankGroup Number %d Block Number %d",
1776     + "DDR ECC error type :%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d",
1777     + "UE", pinf->row, pinf->bank, pinf->col,
1778     pinf->bankgrpnr, pinf->blknr);
1779     }
1780    
1781     diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
1782     index 34d41f67b54d..ad8a4bc074fb 100644
1783     --- a/drivers/firmware/efi/efi.c
1784     +++ b/drivers/firmware/efi/efi.c
1785     @@ -544,7 +544,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
1786    
1787     seed = early_memremap(efi.rng_seed, sizeof(*seed));
1788     if (seed != NULL) {
1789     - size = seed->size;
1790     + size = READ_ONCE(seed->size);
1791     early_memunmap(seed, sizeof(*seed));
1792     } else {
1793     pr_err("Could not map UEFI random seed!\n");
1794     @@ -554,7 +554,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
1795     sizeof(*seed) + size);
1796     if (seed != NULL) {
1797     pr_notice("seeding entropy pool\n");
1798     - add_bootloader_randomness(seed->bits, seed->size);
1799     + add_bootloader_randomness(seed->bits, size);
1800     early_memunmap(seed, sizeof(*seed) + size);
1801     } else {
1802     pr_err("Could not map UEFI random seed!\n");
1803     diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
1804     index 04a24a863d6e..35a5f8f8eea5 100644
1805     --- a/drivers/firmware/imx/imx-scu.c
1806     +++ b/drivers/firmware/imx/imx-scu.c
1807     @@ -29,6 +29,7 @@ struct imx_sc_chan {
1808     struct mbox_client cl;
1809     struct mbox_chan *ch;
1810     int idx;
1811     + struct completion tx_done;
1812     };
1813    
1814     struct imx_sc_ipc {
1815     @@ -100,6 +101,14 @@ int imx_scu_get_handle(struct imx_sc_ipc **ipc)
1816     }
1817     EXPORT_SYMBOL(imx_scu_get_handle);
1818    
1819     +/* Callback called when the word of a message is ack-ed, eg read by SCU */
1820     +static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r)
1821     +{
1822     + struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl);
1823     +
1824     + complete(&sc_chan->tx_done);
1825     +}
1826     +
1827     static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
1828     {
1829     struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl);
1830     @@ -143,6 +152,19 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
1831    
1832     for (i = 0; i < hdr->size; i++) {
1833     sc_chan = &sc_ipc->chans[i % 4];
1834     +
1835     + /*
1836     + * SCU requires that all messages words are written
1837     + * sequentially but linux MU driver implements multiple
1838     + * independent channels for each register so ordering between
1839     + * different channels must be ensured by SCU API interface.
1840     + *
1841     + * Wait for tx_done before every send to ensure that no
1842     + * queueing happens at the mailbox channel level.
1843     + */
1844     + wait_for_completion(&sc_chan->tx_done);
1845     + reinit_completion(&sc_chan->tx_done);
1846     +
1847     ret = mbox_send_message(sc_chan->ch, &data[i]);
1848     if (ret < 0)
1849     return ret;
1850     @@ -225,6 +247,11 @@ static int imx_scu_probe(struct platform_device *pdev)
1851     cl->knows_txdone = true;
1852     cl->rx_callback = imx_scu_rx_callback;
1853    
1854     + /* Initial tx_done completion as "done" */
1855     + cl->tx_done = imx_scu_tx_done;
1856     + init_completion(&sc_chan->tx_done);
1857     + complete(&sc_chan->tx_done);
1858     +
1859     sc_chan->sc_ipc = sc_ipc;
1860     sc_chan->idx = i % 4;
1861     sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
1862     diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
1863     index 4b56a587dacd..d073cb3ce699 100644
1864     --- a/drivers/firmware/imx/misc.c
1865     +++ b/drivers/firmware/imx/misc.c
1866     @@ -16,7 +16,7 @@ struct imx_sc_msg_req_misc_set_ctrl {
1867     u32 ctrl;
1868     u32 val;
1869     u16 resource;
1870     -} __packed;
1871     +} __packed __aligned(4);
1872    
1873     struct imx_sc_msg_req_cpu_start {
1874     struct imx_sc_rpc_msg hdr;
1875     @@ -24,18 +24,18 @@ struct imx_sc_msg_req_cpu_start {
1876     u32 address_lo;
1877     u16 resource;
1878     u8 enable;
1879     -} __packed;
1880     +} __packed __aligned(4);
1881    
1882     struct imx_sc_msg_req_misc_get_ctrl {
1883     struct imx_sc_rpc_msg hdr;
1884     u32 ctrl;
1885     u16 resource;
1886     -} __packed;
1887     +} __packed __aligned(4);
1888    
1889     struct imx_sc_msg_resp_misc_get_ctrl {
1890     struct imx_sc_rpc_msg hdr;
1891     u32 val;
1892     -} __packed;
1893     +} __packed __aligned(4);
1894    
1895     /*
1896     * This function sets a miscellaneous control value.
1897     diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
1898     index b556612207e5..af3ae0087de4 100644
1899     --- a/drivers/firmware/imx/scu-pd.c
1900     +++ b/drivers/firmware/imx/scu-pd.c
1901     @@ -61,7 +61,7 @@ struct imx_sc_msg_req_set_resource_power_mode {
1902     struct imx_sc_rpc_msg hdr;
1903     u16 resource;
1904     u8 mode;
1905     -} __packed;
1906     +} __packed __aligned(4);
1907    
1908     #define IMX_SCU_PD_NAME_SIZE 20
1909     struct imx_sc_pm_domain {
1910     diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
1911     index 12e748b202d6..18cb88b9105e 100644
1912     --- a/drivers/gpu/drm/drm_client_modeset.c
1913     +++ b/drivers/gpu/drm/drm_client_modeset.c
1914     @@ -952,7 +952,8 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
1915     * depending on the hardware this may require the framebuffer
1916     * to be in a specific tiling format.
1917     */
1918     - if ((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180 ||
1919     + if (((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0 &&
1920     + (*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180) ||
1921     !plane->rotation_property)
1922     return false;
1923    
1924     diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
1925     index 88232698d7a0..3fd35e6b9d53 100644
1926     --- a/drivers/gpu/drm/drm_modes.c
1927     +++ b/drivers/gpu/drm/drm_modes.c
1928     @@ -1672,6 +1672,13 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
1929     }
1930     }
1931    
1932     + if (!(rotation & DRM_MODE_ROTATE_MASK))
1933     + rotation |= DRM_MODE_ROTATE_0;
1934     +
1935     + /* Make sure there is exactly one rotation defined */
1936     + if (!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK))
1937     + return -EINVAL;
1938     +
1939     mode->rotation_reflection = rotation;
1940    
1941     return 0;
1942     diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
1943     index 0da860200410..e2ac09894a6d 100644
1944     --- a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
1945     +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
1946     @@ -83,7 +83,6 @@
1947     #define VSIZE_OFST 20
1948     #define LDI_INT_EN 0x741C
1949     #define FRAME_END_INT_EN_OFST 1
1950     -#define UNDERFLOW_INT_EN_OFST 2
1951     #define LDI_CTRL 0x7420
1952     #define BPP_OFST 3
1953     #define DATA_GATE_EN BIT(2)
1954     diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1955     index 73cd28a6ea07..86000127d4ee 100644
1956     --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1957     +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1958     @@ -46,7 +46,6 @@ struct ade_hw_ctx {
1959     struct clk *media_noc_clk;
1960     struct clk *ade_pix_clk;
1961     struct reset_control *reset;
1962     - struct work_struct display_reset_wq;
1963     bool power_on;
1964     int irq;
1965    
1966     @@ -136,7 +135,6 @@ static void ade_init(struct ade_hw_ctx *ctx)
1967     */
1968     ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
1969     FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
1970     - ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1);
1971     }
1972    
1973     static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
1974     @@ -304,17 +302,6 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
1975     MASK(1), 0);
1976     }
1977    
1978     -static void drm_underflow_wq(struct work_struct *work)
1979     -{
1980     - struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx,
1981     - display_reset_wq);
1982     - struct drm_device *drm_dev = ctx->crtc->dev;
1983     - struct drm_atomic_state *state;
1984     -
1985     - state = drm_atomic_helper_suspend(drm_dev);
1986     - drm_atomic_helper_resume(drm_dev, state);
1987     -}
1988     -
1989     static irqreturn_t ade_irq_handler(int irq, void *data)
1990     {
1991     struct ade_hw_ctx *ctx = data;
1992     @@ -331,12 +318,6 @@ static irqreturn_t ade_irq_handler(int irq, void *data)
1993     MASK(1), 1);
1994     drm_crtc_handle_vblank(crtc);
1995     }
1996     - if (status & BIT(UNDERFLOW_INT_EN_OFST)) {
1997     - ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST,
1998     - MASK(1), 1);
1999     - DRM_ERROR("LDI underflow!");
2000     - schedule_work(&ctx->display_reset_wq);
2001     - }
2002    
2003     return IRQ_HANDLED;
2004     }
2005     @@ -919,7 +900,6 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
2006     if (ret)
2007     return ERR_PTR(-EIO);
2008    
2009     - INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq);
2010     ctx->crtc = crtc;
2011    
2012     return ctx;
2013     diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
2014     index c002f234ff31..2422af6a99a0 100644
2015     --- a/drivers/gpu/drm/i915/display/intel_display_power.c
2016     +++ b/drivers/gpu/drm/i915/display/intel_display_power.c
2017     @@ -4205,13 +4205,19 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
2018    
2019     static void icl_mbus_init(struct drm_i915_private *dev_priv)
2020     {
2021     - u32 val;
2022     + u32 mask, val;
2023    
2024     - val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
2025     - MBUS_ABOX_BT_CREDIT_POOL2(16) |
2026     - MBUS_ABOX_B_CREDIT(1) |
2027     - MBUS_ABOX_BW_CREDIT(1);
2028     + mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
2029     + MBUS_ABOX_BT_CREDIT_POOL2_MASK |
2030     + MBUS_ABOX_B_CREDIT_MASK |
2031     + MBUS_ABOX_BW_CREDIT_MASK;
2032    
2033     + val = I915_READ(MBUS_ABOX_CTL);
2034     + val &= ~mask;
2035     + val |= MBUS_ABOX_BT_CREDIT_POOL1(16) |
2036     + MBUS_ABOX_BT_CREDIT_POOL2(16) |
2037     + MBUS_ABOX_B_CREDIT(1) |
2038     + MBUS_ABOX_BW_CREDIT(1);
2039     I915_WRITE(MBUS_ABOX_CTL, val);
2040     }
2041    
2042     diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
2043     index 1d27babff0ce..dc9d3a5ec4a6 100644
2044     --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
2045     +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
2046     @@ -375,7 +375,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
2047    
2048     obj = i915_gem_object_create_internal(i915, size);
2049     if (IS_ERR(obj))
2050     - return PTR_ERR(obj);
2051     + return false;
2052    
2053     err = create_mmap_offset(obj);
2054     i915_gem_object_put(obj);
2055     diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
2056     index eb0b4b7dc7cc..03c6d6157e4d 100644
2057     --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
2058     +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
2059     @@ -1112,8 +1112,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
2060     ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
2061     msecs_to_jiffies(50));
2062     if (ret == 0)
2063     - dev_warn(dev->dev, "pp done time out, lm=%d\n",
2064     - mdp5_cstate->pipeline.mixer->lm);
2065     + dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
2066     + mdp5_cstate->pipeline.mixer->lm);
2067     }
2068    
2069     static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
2070     diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
2071     index 271aa7bbca92..73127948f54d 100644
2072     --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
2073     +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
2074     @@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
2075     return num;
2076     }
2077    
2078     -static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
2079     +static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
2080     struct drm_display_mode *mode)
2081     {
2082     int id = dsi_mgr_connector_get_id(connector);
2083     @@ -479,6 +479,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
2084     struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
2085     struct mipi_dsi_host *host = msm_dsi->host;
2086     struct drm_panel *panel = msm_dsi->panel;
2087     + struct msm_dsi_pll *src_pll;
2088     bool is_dual_dsi = IS_DUAL_DSI();
2089     int ret;
2090    
2091     @@ -519,6 +520,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
2092     id, ret);
2093     }
2094    
2095     + /* Save PLL status if it is a clock source */
2096     + src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
2097     + msm_dsi_pll_save_state(src_pll);
2098     +
2099     ret = msm_dsi_host_power_off(host);
2100     if (ret)
2101     pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
2102     diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
2103     index 3522863a4984..21519229fe73 100644
2104     --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
2105     +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
2106     @@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
2107     if (!phy || !phy->cfg->ops.disable)
2108     return;
2109    
2110     - /* Save PLL status if it is a clock source */
2111     - if (phy->usecase != MSM_DSI_PHY_SLAVE)
2112     - msm_dsi_pll_save_state(phy->pll);
2113     -
2114     phy->cfg->ops.disable(phy);
2115    
2116     dsi_phy_regulator_disable(phy);
2117     diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
2118     index 8f6100db90ed..aa9385d5bfff 100644
2119     --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
2120     +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
2121     @@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
2122     if (pll_10nm->slave)
2123     dsi_pll_enable_pll_bias(pll_10nm->slave);
2124    
2125     + rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);
2126     + if (rc) {
2127     + pr_err("vco_set_rate failed, rc=%d\n", rc);
2128     + return rc;
2129     + }
2130     +
2131     /* Start PLL */
2132     pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
2133     0x01);
2134     diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
2135     index 3107b0738e40..5d75f8cf6477 100644
2136     --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
2137     +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
2138     @@ -601,33 +601,27 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
2139     source_id = (fault_status >> 16);
2140    
2141     /* Page fault only */
2142     - if ((status & mask) == BIT(i)) {
2143     - WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
2144     -
2145     + ret = -1;
2146     + if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
2147     ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
2148     - if (!ret) {
2149     - mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
2150     - status &= ~mask;
2151     - continue;
2152     - }
2153     - }
2154    
2155     - /* terminal fault, print info about the fault */
2156     - dev_err(pfdev->dev,
2157     - "Unhandled Page fault in AS%d at VA 0x%016llX\n"
2158     - "Reason: %s\n"
2159     - "raw fault status: 0x%X\n"
2160     - "decoded fault status: %s\n"
2161     - "exception type 0x%X: %s\n"
2162     - "access type 0x%X: %s\n"
2163     - "source id 0x%X\n",
2164     - i, addr,
2165     - "TODO",
2166     - fault_status,
2167     - (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
2168     - exception_type, panfrost_exception_name(pfdev, exception_type),
2169     - access_type, access_type_name(pfdev, fault_status),
2170     - source_id);
2171     + if (ret)
2172     + /* terminal fault, print info about the fault */
2173     + dev_err(pfdev->dev,
2174     + "Unhandled Page fault in AS%d at VA 0x%016llX\n"
2175     + "Reason: %s\n"
2176     + "raw fault status: 0x%X\n"
2177     + "decoded fault status: %s\n"
2178     + "exception type 0x%X: %s\n"
2179     + "access type 0x%X: %s\n"
2180     + "source id 0x%X\n",
2181     + i, addr,
2182     + "TODO",
2183     + fault_status,
2184     + (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
2185     + exception_type, panfrost_exception_name(pfdev, exception_type),
2186     + access_type, access_type_name(pfdev, fault_status),
2187     + source_id);
2188    
2189     mmu_write(pfdev, MMU_INT_CLEAR, mask);
2190    
2191     diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
2192     index 6d61a0eb5d64..84e6bc050bf2 100644
2193     --- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
2194     +++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
2195     @@ -53,6 +53,7 @@ cmdline_test(drm_cmdline_test_rotate_0)
2196     cmdline_test(drm_cmdline_test_rotate_90)
2197     cmdline_test(drm_cmdline_test_rotate_180)
2198     cmdline_test(drm_cmdline_test_rotate_270)
2199     +cmdline_test(drm_cmdline_test_rotate_multiple)
2200     cmdline_test(drm_cmdline_test_rotate_invalid_val)
2201     cmdline_test(drm_cmdline_test_rotate_truncated)
2202     cmdline_test(drm_cmdline_test_hmirror)
2203     diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
2204     index 013de9d27c35..035f86c5d648 100644
2205     --- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
2206     +++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
2207     @@ -856,6 +856,17 @@ static int drm_cmdline_test_rotate_270(void *ignored)
2208     return 0;
2209     }
2210    
2211     +static int drm_cmdline_test_rotate_multiple(void *ignored)
2212     +{
2213     + struct drm_cmdline_mode mode = { };
2214     +
2215     + FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=0,rotate=90",
2216     + &no_connector,
2217     + &mode));
2218     +
2219     + return 0;
2220     +}
2221     +
2222     static int drm_cmdline_test_rotate_invalid_val(void *ignored)
2223     {
2224     struct drm_cmdline_mode mode = { };
2225     @@ -888,7 +899,7 @@ static int drm_cmdline_test_hmirror(void *ignored)
2226     FAIL_ON(!mode.specified);
2227     FAIL_ON(mode.xres != 720);
2228     FAIL_ON(mode.yres != 480);
2229     - FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_X);
2230     + FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
2231    
2232     FAIL_ON(mode.refresh_specified);
2233    
2234     @@ -913,7 +924,7 @@ static int drm_cmdline_test_vmirror(void *ignored)
2235     FAIL_ON(!mode.specified);
2236     FAIL_ON(mode.xres != 720);
2237     FAIL_ON(mode.yres != 480);
2238     - FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_Y);
2239     + FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
2240    
2241     FAIL_ON(mode.refresh_specified);
2242    
2243     diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
2244     index 8b803eb903b8..18b4881f4481 100644
2245     --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
2246     +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
2247     @@ -106,48 +106,128 @@ static const struct de2_fmt_info de2_formats[] = {
2248     .rgb = true,
2249     .csc = SUN8I_CSC_MODE_OFF,
2250     },
2251     + {
2252     + /* for DE2 VI layer which ignores alpha */
2253     + .drm_fmt = DRM_FORMAT_XRGB4444,
2254     + .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
2255     + .rgb = true,
2256     + .csc = SUN8I_CSC_MODE_OFF,
2257     + },
2258     {
2259     .drm_fmt = DRM_FORMAT_ABGR4444,
2260     .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
2261     .rgb = true,
2262     .csc = SUN8I_CSC_MODE_OFF,
2263     },
2264     + {
2265     + /* for DE2 VI layer which ignores alpha */
2266     + .drm_fmt = DRM_FORMAT_XBGR4444,
2267     + .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
2268     + .rgb = true,
2269     + .csc = SUN8I_CSC_MODE_OFF,
2270     + },
2271     {
2272     .drm_fmt = DRM_FORMAT_RGBA4444,
2273     .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
2274     .rgb = true,
2275     .csc = SUN8I_CSC_MODE_OFF,
2276     },
2277     + {
2278     + /* for DE2 VI layer which ignores alpha */
2279     + .drm_fmt = DRM_FORMAT_RGBX4444,
2280     + .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
2281     + .rgb = true,
2282     + .csc = SUN8I_CSC_MODE_OFF,
2283     + },
2284     {
2285     .drm_fmt = DRM_FORMAT_BGRA4444,
2286     .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
2287     .rgb = true,
2288     .csc = SUN8I_CSC_MODE_OFF,
2289     },
2290     + {
2291     + /* for DE2 VI layer which ignores alpha */
2292     + .drm_fmt = DRM_FORMAT_BGRX4444,
2293     + .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
2294     + .rgb = true,
2295     + .csc = SUN8I_CSC_MODE_OFF,
2296     + },
2297     {
2298     .drm_fmt = DRM_FORMAT_ARGB1555,
2299     .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
2300     .rgb = true,
2301     .csc = SUN8I_CSC_MODE_OFF,
2302     },
2303     + {
2304     + /* for DE2 VI layer which ignores alpha */
2305     + .drm_fmt = DRM_FORMAT_XRGB1555,
2306     + .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
2307     + .rgb = true,
2308     + .csc = SUN8I_CSC_MODE_OFF,
2309     + },
2310     {
2311     .drm_fmt = DRM_FORMAT_ABGR1555,
2312     .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
2313     .rgb = true,
2314     .csc = SUN8I_CSC_MODE_OFF,
2315     },
2316     + {
2317     + /* for DE2 VI layer which ignores alpha */
2318     + .drm_fmt = DRM_FORMAT_XBGR1555,
2319     + .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
2320     + .rgb = true,
2321     + .csc = SUN8I_CSC_MODE_OFF,
2322     + },
2323     {
2324     .drm_fmt = DRM_FORMAT_RGBA5551,
2325     .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
2326     .rgb = true,
2327     .csc = SUN8I_CSC_MODE_OFF,
2328     },
2329     + {
2330     + /* for DE2 VI layer which ignores alpha */
2331     + .drm_fmt = DRM_FORMAT_RGBX5551,
2332     + .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
2333     + .rgb = true,
2334     + .csc = SUN8I_CSC_MODE_OFF,
2335     + },
2336     {
2337     .drm_fmt = DRM_FORMAT_BGRA5551,
2338     .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
2339     .rgb = true,
2340     .csc = SUN8I_CSC_MODE_OFF,
2341     },
2342     + {
2343     + /* for DE2 VI layer which ignores alpha */
2344     + .drm_fmt = DRM_FORMAT_BGRX5551,
2345     + .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
2346     + .rgb = true,
2347     + .csc = SUN8I_CSC_MODE_OFF,
2348     + },
2349     + {
2350     + .drm_fmt = DRM_FORMAT_ARGB2101010,
2351     + .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
2352     + .rgb = true,
2353     + .csc = SUN8I_CSC_MODE_OFF,
2354     + },
2355     + {
2356     + .drm_fmt = DRM_FORMAT_ABGR2101010,
2357     + .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
2358     + .rgb = true,
2359     + .csc = SUN8I_CSC_MODE_OFF,
2360     + },
2361     + {
2362     + .drm_fmt = DRM_FORMAT_RGBA1010102,
2363     + .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
2364     + .rgb = true,
2365     + .csc = SUN8I_CSC_MODE_OFF,
2366     + },
2367     + {
2368     + .drm_fmt = DRM_FORMAT_BGRA1010102,
2369     + .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
2370     + .rgb = true,
2371     + .csc = SUN8I_CSC_MODE_OFF,
2372     + },
2373     {
2374     .drm_fmt = DRM_FORMAT_UYVY,
2375     .de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
2376     @@ -196,12 +276,6 @@ static const struct de2_fmt_info de2_formats[] = {
2377     .rgb = false,
2378     .csc = SUN8I_CSC_MODE_YUV2RGB,
2379     },
2380     - {
2381     - .drm_fmt = DRM_FORMAT_YUV444,
2382     - .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
2383     - .rgb = true,
2384     - .csc = SUN8I_CSC_MODE_YUV2RGB,
2385     - },
2386     {
2387     .drm_fmt = DRM_FORMAT_YUV422,
2388     .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
2389     @@ -220,12 +294,6 @@ static const struct de2_fmt_info de2_formats[] = {
2390     .rgb = false,
2391     .csc = SUN8I_CSC_MODE_YUV2RGB,
2392     },
2393     - {
2394     - .drm_fmt = DRM_FORMAT_YVU444,
2395     - .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
2396     - .rgb = true,
2397     - .csc = SUN8I_CSC_MODE_YVU2RGB,
2398     - },
2399     {
2400     .drm_fmt = DRM_FORMAT_YVU422,
2401     .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
2402     @@ -244,6 +312,18 @@ static const struct de2_fmt_info de2_formats[] = {
2403     .rgb = false,
2404     .csc = SUN8I_CSC_MODE_YVU2RGB,
2405     },
2406     + {
2407     + .drm_fmt = DRM_FORMAT_P010,
2408     + .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
2409     + .rgb = false,
2410     + .csc = SUN8I_CSC_MODE_YUV2RGB,
2411     + },
2412     + {
2413     + .drm_fmt = DRM_FORMAT_P210,
2414     + .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
2415     + .rgb = false,
2416     + .csc = SUN8I_CSC_MODE_YUV2RGB,
2417     + },
2418     };
2419    
2420     const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
2421     diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
2422     index c6cc94057faf..345b28b0a80a 100644
2423     --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
2424     +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
2425     @@ -93,6 +93,10 @@
2426     #define SUN8I_MIXER_FBFMT_ABGR1555 17
2427     #define SUN8I_MIXER_FBFMT_RGBA5551 18
2428     #define SUN8I_MIXER_FBFMT_BGRA5551 19
2429     +#define SUN8I_MIXER_FBFMT_ARGB2101010 20
2430     +#define SUN8I_MIXER_FBFMT_ABGR2101010 21
2431     +#define SUN8I_MIXER_FBFMT_RGBA1010102 22
2432     +#define SUN8I_MIXER_FBFMT_BGRA1010102 23
2433    
2434     #define SUN8I_MIXER_FBFMT_YUYV 0
2435     #define SUN8I_MIXER_FBFMT_UYVY 1
2436     @@ -109,6 +113,13 @@
2437     /* format 12 is semi-planar YUV411 UVUV */
2438     /* format 13 is semi-planar YUV411 VUVU */
2439     #define SUN8I_MIXER_FBFMT_YUV411 14
2440     +/* format 15 doesn't exist */
2441     +/* format 16 is P010 YVU */
2442     +#define SUN8I_MIXER_FBFMT_P010_YUV 17
2443     +/* format 18 is P210 YVU */
2444     +#define SUN8I_MIXER_FBFMT_P210_YUV 19
2445     +/* format 20 is packed YVU444 10-bit */
2446     +/* format 21 is packed YUV444 10-bit */
2447    
2448     /*
2449     * Sub-engines listed bellow are unused for now. The EN registers are here only
2450     diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
2451     index 42d445d23773..b8398ca18b0f 100644
2452     --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
2453     +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
2454     @@ -398,24 +398,66 @@ static const struct drm_plane_funcs sun8i_vi_layer_funcs = {
2455     };
2456    
2457     /*
2458     - * While all RGB formats are supported, VI planes don't support
2459     - * alpha blending, so there is no point having formats with alpha
2460     - * channel if their opaque analog exist.
2461     + * While DE2 VI layer supports same RGB formats as UI layer, alpha
2462     + * channel is ignored. This structure lists all unique variants
2463     + * where alpha channel is replaced with "don't care" (X) channel.
2464     */
2465     static const u32 sun8i_vi_layer_formats[] = {
2466     + DRM_FORMAT_BGR565,
2467     + DRM_FORMAT_BGR888,
2468     + DRM_FORMAT_BGRX4444,
2469     + DRM_FORMAT_BGRX5551,
2470     + DRM_FORMAT_BGRX8888,
2471     + DRM_FORMAT_RGB565,
2472     + DRM_FORMAT_RGB888,
2473     + DRM_FORMAT_RGBX4444,
2474     + DRM_FORMAT_RGBX5551,
2475     + DRM_FORMAT_RGBX8888,
2476     + DRM_FORMAT_XBGR1555,
2477     + DRM_FORMAT_XBGR4444,
2478     + DRM_FORMAT_XBGR8888,
2479     + DRM_FORMAT_XRGB1555,
2480     + DRM_FORMAT_XRGB4444,
2481     + DRM_FORMAT_XRGB8888,
2482     +
2483     + DRM_FORMAT_NV16,
2484     + DRM_FORMAT_NV12,
2485     + DRM_FORMAT_NV21,
2486     + DRM_FORMAT_NV61,
2487     + DRM_FORMAT_UYVY,
2488     + DRM_FORMAT_VYUY,
2489     + DRM_FORMAT_YUYV,
2490     + DRM_FORMAT_YVYU,
2491     + DRM_FORMAT_YUV411,
2492     + DRM_FORMAT_YUV420,
2493     + DRM_FORMAT_YUV422,
2494     + DRM_FORMAT_YVU411,
2495     + DRM_FORMAT_YVU420,
2496     + DRM_FORMAT_YVU422,
2497     +};
2498     +
2499     +static const u32 sun8i_vi_layer_de3_formats[] = {
2500     DRM_FORMAT_ABGR1555,
2501     + DRM_FORMAT_ABGR2101010,
2502     DRM_FORMAT_ABGR4444,
2503     + DRM_FORMAT_ABGR8888,
2504     DRM_FORMAT_ARGB1555,
2505     + DRM_FORMAT_ARGB2101010,
2506     DRM_FORMAT_ARGB4444,
2507     + DRM_FORMAT_ARGB8888,
2508     DRM_FORMAT_BGR565,
2509     DRM_FORMAT_BGR888,
2510     + DRM_FORMAT_BGRA1010102,
2511     DRM_FORMAT_BGRA5551,
2512     DRM_FORMAT_BGRA4444,
2513     + DRM_FORMAT_BGRA8888,
2514     DRM_FORMAT_BGRX8888,
2515     DRM_FORMAT_RGB565,
2516     DRM_FORMAT_RGB888,
2517     + DRM_FORMAT_RGBA1010102,
2518     DRM_FORMAT_RGBA4444,
2519     DRM_FORMAT_RGBA5551,
2520     + DRM_FORMAT_RGBA8888,
2521     DRM_FORMAT_RGBX8888,
2522     DRM_FORMAT_XBGR8888,
2523     DRM_FORMAT_XRGB8888,
2524     @@ -424,6 +466,8 @@ static const u32 sun8i_vi_layer_formats[] = {
2525     DRM_FORMAT_NV12,
2526     DRM_FORMAT_NV21,
2527     DRM_FORMAT_NV61,
2528     + DRM_FORMAT_P010,
2529     + DRM_FORMAT_P210,
2530     DRM_FORMAT_UYVY,
2531     DRM_FORMAT_VYUY,
2532     DRM_FORMAT_YUYV,
2533     @@ -431,11 +475,9 @@ static const u32 sun8i_vi_layer_formats[] = {
2534     DRM_FORMAT_YUV411,
2535     DRM_FORMAT_YUV420,
2536     DRM_FORMAT_YUV422,
2537     - DRM_FORMAT_YUV444,
2538     DRM_FORMAT_YVU411,
2539     DRM_FORMAT_YVU420,
2540     DRM_FORMAT_YVU422,
2541     - DRM_FORMAT_YVU444,
2542     };
2543    
2544     struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
2545     @@ -443,19 +485,27 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
2546     int index)
2547     {
2548     u32 supported_encodings, supported_ranges;
2549     + unsigned int plane_cnt, format_count;
2550     struct sun8i_vi_layer *layer;
2551     - unsigned int plane_cnt;
2552     + const u32 *formats;
2553     int ret;
2554    
2555     layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
2556     if (!layer)
2557     return ERR_PTR(-ENOMEM);
2558    
2559     + if (mixer->cfg->is_de3) {
2560     + formats = sun8i_vi_layer_de3_formats;
2561     + format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
2562     + } else {
2563     + formats = sun8i_vi_layer_formats;
2564     + format_count = ARRAY_SIZE(sun8i_vi_layer_formats);
2565     + }
2566     +
2567     /* possible crtcs are set later */
2568     ret = drm_universal_plane_init(drm, &layer->plane, 0,
2569     &sun8i_vi_layer_funcs,
2570     - sun8i_vi_layer_formats,
2571     - ARRAY_SIZE(sun8i_vi_layer_formats),
2572     + formats, format_count,
2573     NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
2574     if (ret) {
2575     dev_err(drm->dev, "Couldn't initialize layer\n");
2576     diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
2577     index 09b526518f5a..8de93ccacaac 100644
2578     --- a/drivers/gpu/drm/virtio/virtgpu_object.c
2579     +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
2580     @@ -23,38 +23,44 @@
2581     * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2582     */
2583    
2584     +#include <linux/moduleparam.h>
2585     +
2586     #include <drm/ttm/ttm_execbuf_util.h>
2587    
2588     #include "virtgpu_drv.h"
2589    
2590     +static int virtio_gpu_virglrenderer_workaround = 1;
2591     +module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
2592     +
2593     static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
2594     uint32_t *resid)
2595     {
2596     -#if 0
2597     - int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
2598     -
2599     - if (handle < 0)
2600     - return handle;
2601     -#else
2602     - static int handle;
2603     -
2604     - /*
2605     - * FIXME: dirty hack to avoid re-using IDs, virglrenderer
2606     - * can't deal with that. Needs fixing in virglrenderer, also
2607     - * should figure a better way to handle that in the guest.
2608     - */
2609     - handle++;
2610     -#endif
2611     -
2612     - *resid = handle + 1;
2613     + if (virtio_gpu_virglrenderer_workaround) {
2614     + /*
2615     + * Hack to avoid re-using resource IDs.
2616     + *
2617     + * virglrenderer versions up to (and including) 0.7.0
2618     + * can't deal with that. virglrenderer commit
2619     + * "f91a9dd35715 Fix unlinking resources from hash
2620     + * table." (Feb 2019) fixes the bug.
2621     + */
2622     + static atomic_t seqno = ATOMIC_INIT(0);
2623     + int handle = atomic_inc_return(&seqno);
2624     + *resid = handle + 1;
2625     + } else {
2626     + int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
2627     + if (handle < 0)
2628     + return handle;
2629     + *resid = handle + 1;
2630     + }
2631     return 0;
2632     }
2633    
2634     static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
2635     {
2636     -#if 0
2637     - ida_free(&vgdev->resource_ida, id - 1);
2638     -#endif
2639     + if (!virtio_gpu_virglrenderer_workaround) {
2640     + ida_free(&vgdev->resource_ida, id - 1);
2641     + }
2642     }
2643    
2644     static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
2645     diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
2646     index 9632e2e3c4bb..319a0519ebdb 100644
2647     --- a/drivers/hwmon/adt7462.c
2648     +++ b/drivers/hwmon/adt7462.c
2649     @@ -413,7 +413,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
2650     return 0x95;
2651     break;
2652     }
2653     - return -ENODEV;
2654     + return 0;
2655     }
2656    
2657     /* Provide labels for sysfs */
2658     diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
2659     index 5920c0085d35..319e4b4ae639 100644
2660     --- a/drivers/infiniband/core/cm.c
2661     +++ b/drivers/infiniband/core/cm.c
2662     @@ -1228,6 +1228,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
2663     /* Sharing an ib_cm_id with different handlers is not
2664     * supported */
2665     spin_unlock_irqrestore(&cm.lock, flags);
2666     + ib_destroy_cm_id(cm_id);
2667     return ERR_PTR(-EINVAL);
2668     }
2669     atomic_inc(&cm_id_priv->refcount);
2670     diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
2671     index 9008937f8ed8..6c12da176981 100644
2672     --- a/drivers/infiniband/core/cma.c
2673     +++ b/drivers/infiniband/core/cma.c
2674     @@ -3155,19 +3155,26 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2675     int ret;
2676    
2677     id_priv = container_of(id, struct rdma_id_private, id);
2678     + memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
2679     if (id_priv->state == RDMA_CM_IDLE) {
2680     ret = cma_bind_addr(id, src_addr, dst_addr);
2681     - if (ret)
2682     + if (ret) {
2683     + memset(cma_dst_addr(id_priv), 0,
2684     + rdma_addr_size(dst_addr));
2685     return ret;
2686     + }
2687     }
2688    
2689     - if (cma_family(id_priv) != dst_addr->sa_family)
2690     + if (cma_family(id_priv) != dst_addr->sa_family) {
2691     + memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
2692     return -EINVAL;
2693     + }
2694    
2695     - if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
2696     + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
2697     + memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
2698     return -EINVAL;
2699     + }
2700    
2701     - memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
2702     if (cma_any_addr(dst_addr)) {
2703     ret = cma_resolve_loopback(id_priv);
2704     } else {
2705     diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
2706     index 9b30773f2da0..86a17a87f181 100644
2707     --- a/drivers/infiniband/core/core_priv.h
2708     +++ b/drivers/infiniband/core/core_priv.h
2709     @@ -338,6 +338,21 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
2710     qp->pd = pd;
2711     qp->uobject = uobj;
2712     qp->real_qp = qp;
2713     +
2714     + qp->qp_type = attr->qp_type;
2715     + qp->qp_context = attr->qp_context;
2716     + qp->rwq_ind_tbl = attr->rwq_ind_tbl;
2717     + qp->send_cq = attr->send_cq;
2718     + qp->recv_cq = attr->recv_cq;
2719     + qp->srq = attr->srq;
2720     + qp->rwq_ind_tbl = attr->rwq_ind_tbl;
2721     + qp->event_handler = attr->event_handler;
2722     +
2723     + atomic_set(&qp->usecnt, 0);
2724     + spin_lock_init(&qp->mr_lock);
2725     + INIT_LIST_HEAD(&qp->rdma_mrs);
2726     + INIT_LIST_HEAD(&qp->sig_mrs);
2727     +
2728     /*
2729     * We don't track XRC QPs for now, because they don't have PD
2730     * and more importantly they are created internaly by driver,
2731     diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
2732     index ade71823370f..da8adadf4755 100644
2733     --- a/drivers/infiniband/core/iwcm.c
2734     +++ b/drivers/infiniband/core/iwcm.c
2735     @@ -159,8 +159,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
2736     {
2737     struct list_head *e, *tmp;
2738    
2739     - list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
2740     + list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
2741     + list_del(e);
2742     kfree(list_entry(e, struct iwcm_work, free_list));
2743     + }
2744     }
2745    
2746     static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
2747     diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
2748     index c03af08b80e7..ad4301ecfa59 100644
2749     --- a/drivers/infiniband/core/nldev.c
2750     +++ b/drivers/infiniband/core/nldev.c
2751     @@ -1711,6 +1711,8 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
2752     if (ret)
2753     goto err_msg;
2754     } else {
2755     + if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
2756     + goto err_msg;
2757     qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
2758     if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
2759     cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
2760     diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
2761     index 5337393d4dfe..bb10cbfa0070 100644
2762     --- a/drivers/infiniband/core/rw.c
2763     +++ b/drivers/infiniband/core/rw.c
2764     @@ -268,6 +268,23 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
2765     return 1;
2766     }
2767    
2768     +static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
2769     + u32 sg_cnt, enum dma_data_direction dir)
2770     +{
2771     + if (is_pci_p2pdma_page(sg_page(sg)))
2772     + pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
2773     + else
2774     + ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
2775     +}
2776     +
2777     +static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
2778     + u32 sg_cnt, enum dma_data_direction dir)
2779     +{
2780     + if (is_pci_p2pdma_page(sg_page(sg)))
2781     + return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
2782     + return ib_dma_map_sg(dev, sg, sg_cnt, dir);
2783     +}
2784     +
2785     /**
2786     * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
2787     * @ctx: context to initialize
2788     @@ -290,11 +307,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
2789     struct ib_device *dev = qp->pd->device;
2790     int ret;
2791    
2792     - if (is_pci_p2pdma_page(sg_page(sg)))
2793     - ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
2794     - else
2795     - ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
2796     -
2797     + ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
2798     if (!ret)
2799     return -ENOMEM;
2800     sg_cnt = ret;
2801     @@ -333,7 +346,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
2802     return ret;
2803    
2804     out_unmap_sg:
2805     - ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
2806     + rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
2807     return ret;
2808     }
2809     EXPORT_SYMBOL(rdma_rw_ctx_init);
2810     @@ -583,11 +596,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
2811     break;
2812     }
2813    
2814     - if (is_pci_p2pdma_page(sg_page(sg)))
2815     - pci_p2pdma_unmap_sg(qp->pd->device->dma_device, sg,
2816     - sg_cnt, dir);
2817     - else
2818     - ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
2819     + rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
2820     }
2821     EXPORT_SYMBOL(rdma_rw_ctx_destroy);
2822    
2823     diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
2824     index 2b4d80393bd0..2d5608315dc8 100644
2825     --- a/drivers/infiniband/core/security.c
2826     +++ b/drivers/infiniband/core/security.c
2827     @@ -340,15 +340,19 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
2828     return NULL;
2829    
2830     if (qp_attr_mask & IB_QP_PORT)
2831     - new_pps->main.port_num =
2832     - (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
2833     + new_pps->main.port_num = qp_attr->port_num;
2834     + else if (qp_pps)
2835     + new_pps->main.port_num = qp_pps->main.port_num;
2836     +
2837     if (qp_attr_mask & IB_QP_PKEY_INDEX)
2838     - new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
2839     - qp_attr->pkey_index;
2840     + new_pps->main.pkey_index = qp_attr->pkey_index;
2841     + else if (qp_pps)
2842     + new_pps->main.pkey_index = qp_pps->main.pkey_index;
2843     +
2844     if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
2845     new_pps->main.state = IB_PORT_PKEY_VALID;
2846    
2847     - if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
2848     + if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
2849     new_pps->main.port_num = qp_pps->main.port_num;
2850     new_pps->main.pkey_index = qp_pps->main.pkey_index;
2851     if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
2852     diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
2853     index 300353c1e5f1..e2ddcb0dc4ee 100644
2854     --- a/drivers/infiniband/core/uverbs_cmd.c
2855     +++ b/drivers/infiniband/core/uverbs_cmd.c
2856     @@ -1431,17 +1431,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
2857     if (ret)
2858     goto err_cb;
2859    
2860     - qp->pd = pd;
2861     - qp->send_cq = attr.send_cq;
2862     - qp->recv_cq = attr.recv_cq;
2863     - qp->srq = attr.srq;
2864     - qp->rwq_ind_tbl = ind_tbl;
2865     - qp->event_handler = attr.event_handler;
2866     - qp->qp_context = attr.qp_context;
2867     - qp->qp_type = attr.qp_type;
2868     - atomic_set(&qp->usecnt, 0);
2869     atomic_inc(&pd->usecnt);
2870     - qp->port = 0;
2871     if (attr.send_cq)
2872     atomic_inc(&attr.send_cq->usecnt);
2873     if (attr.recv_cq)
2874     diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
2875     index 35c2841a569e..6c4093d0a91d 100644
2876     --- a/drivers/infiniband/core/verbs.c
2877     +++ b/drivers/infiniband/core/verbs.c
2878     @@ -1180,16 +1180,6 @@ struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
2879     if (ret)
2880     goto err;
2881    
2882     - qp->qp_type = qp_init_attr->qp_type;
2883     - qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
2884     -
2885     - atomic_set(&qp->usecnt, 0);
2886     - qp->mrs_used = 0;
2887     - spin_lock_init(&qp->mr_lock);
2888     - INIT_LIST_HEAD(&qp->rdma_mrs);
2889     - INIT_LIST_HEAD(&qp->sig_mrs);
2890     - qp->port = 0;
2891     -
2892     if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
2893     struct ib_qp *xrc_qp =
2894     create_xrc_qp_user(qp, qp_init_attr, udata);
2895     diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
2896     index 089e201d7550..2f6323ad9c59 100644
2897     --- a/drivers/infiniband/hw/hfi1/verbs.c
2898     +++ b/drivers/infiniband/hw/hfi1/verbs.c
2899     @@ -515,10 +515,11 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
2900     opa_get_lid(packet->dlid, 9B));
2901     if (!mcast)
2902     goto drop;
2903     + rcu_read_lock();
2904     list_for_each_entry_rcu(p, &mcast->qp_list, list) {
2905     packet->qp = p->qp;
2906     if (hfi1_do_pkey_check(packet))
2907     - goto drop;
2908     + goto unlock_drop;
2909     spin_lock_irqsave(&packet->qp->r_lock, flags);
2910     packet_handler = qp_ok(packet);
2911     if (likely(packet_handler))
2912     @@ -527,6 +528,7 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
2913     ibp->rvp.n_pkt_drops++;
2914     spin_unlock_irqrestore(&packet->qp->r_lock, flags);
2915     }
2916     + rcu_read_unlock();
2917     /*
2918     * Notify rvt_multicast_detach() if it is waiting for us
2919     * to finish.
2920     diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
2921     index 33778d451b82..5ef93f8f17a1 100644
2922     --- a/drivers/infiniband/hw/qib/qib_verbs.c
2923     +++ b/drivers/infiniband/hw/qib/qib_verbs.c
2924     @@ -329,8 +329,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
2925     if (mcast == NULL)
2926     goto drop;
2927     this_cpu_inc(ibp->pmastats->n_multicast_rcv);
2928     + rcu_read_lock();
2929     list_for_each_entry_rcu(p, &mcast->qp_list, list)
2930     qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
2931     + rcu_read_unlock();
2932     /*
2933     * Notify rvt_multicast_detach() if it is waiting for us
2934     * to finish.
2935     diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
2936     index fb01407a310f..130b1e31b978 100644
2937     --- a/drivers/infiniband/sw/siw/siw_main.c
2938     +++ b/drivers/infiniband/sw/siw/siw_main.c
2939     @@ -379,6 +379,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
2940     base_dev->dev.dma_ops = &dma_virt_ops;
2941     base_dev->num_comp_vectors = num_possible_cpus();
2942    
2943     + xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
2944     + xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
2945     +
2946     ib_set_device_ops(base_dev, &siw_device_ops);
2947     rv = ib_device_set_netdev(base_dev, netdev, 1);
2948     if (rv)
2949     @@ -406,9 +409,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
2950     sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
2951     sdev->attrs.max_srq_sge = SIW_MAX_SGE;
2952    
2953     - xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
2954     - xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
2955     -
2956     INIT_LIST_HEAD(&sdev->cep_list);
2957     INIT_LIST_HEAD(&sdev->qp_list);
2958    
2959     diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
2960     index d7cbca8bf2cd..b5ae9f7c0510 100644
2961     --- a/drivers/iommu/amd_iommu_init.c
2962     +++ b/drivers/iommu/amd_iommu_init.c
2963     @@ -2533,6 +2533,7 @@ static int __init early_amd_iommu_init(void)
2964     struct acpi_table_header *ivrs_base;
2965     acpi_status status;
2966     int i, remap_cache_sz, ret = 0;
2967     + u32 pci_id;
2968    
2969     if (!amd_iommu_detected)
2970     return -ENODEV;
2971     @@ -2620,6 +2621,16 @@ static int __init early_amd_iommu_init(void)
2972     if (ret)
2973     goto out;
2974    
2975     + /* Disable IOMMU if there's Stoney Ridge graphics */
2976     + for (i = 0; i < 32; i++) {
2977     + pci_id = read_pci_config(0, i, 0, 0);
2978     + if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
2979     + pr_info("Disable IOMMU on Stoney Ridge\n");
2980     + amd_iommu_disabled = true;
2981     + break;
2982     + }
2983     + }
2984     +
2985     /* Disable any previously enabled IOMMUs */
2986     if (!is_kdump_kernel() || amd_iommu_disabled)
2987     disable_iommus();
2988     @@ -2728,7 +2739,7 @@ static int __init state_next(void)
2989     ret = early_amd_iommu_init();
2990     init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
2991     if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
2992     - pr_info("AMD IOMMU disabled on kernel command-line\n");
2993     + pr_info("AMD IOMMU disabled\n");
2994     init_state = IOMMU_CMDLINE_DISABLED;
2995     ret = -EINVAL;
2996     }
2997     diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
2998     index 8346e6d1816c..f595e9867cbe 100644
2999     --- a/drivers/md/dm-cache-target.c
3000     +++ b/drivers/md/dm-cache-target.c
3001     @@ -2867,8 +2867,8 @@ static void cache_postsuspend(struct dm_target *ti)
3002     prevent_background_work(cache);
3003     BUG_ON(atomic_read(&cache->nr_io_migrations));
3004    
3005     - cancel_delayed_work(&cache->waker);
3006     - flush_workqueue(cache->wq);
3007     + cancel_delayed_work_sync(&cache->waker);
3008     + drain_workqueue(cache->wq);
3009     WARN_ON(cache->tracker.in_flight);
3010    
3011     /*
3012     diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
3013     index dab4446fe7d8..57ac603f3741 100644
3014     --- a/drivers/md/dm-integrity.c
3015     +++ b/drivers/md/dm-integrity.c
3016     @@ -199,17 +199,19 @@ struct dm_integrity_c {
3017     __u8 log2_blocks_per_bitmap_bit;
3018    
3019     unsigned char mode;
3020     - int suspending;
3021    
3022     int failed;
3023    
3024     struct crypto_shash *internal_hash;
3025    
3026     + struct dm_target *ti;
3027     +
3028     /* these variables are locked with endio_wait.lock */
3029     struct rb_root in_progress;
3030     struct list_head wait_list;
3031     wait_queue_head_t endio_wait;
3032     struct workqueue_struct *wait_wq;
3033     + struct workqueue_struct *offload_wq;
3034    
3035     unsigned char commit_seq;
3036     commit_id_t commit_ids[N_COMMIT_IDS];
3037     @@ -1434,7 +1436,7 @@ static void dec_in_flight(struct dm_integrity_io *dio)
3038     dio->range.logical_sector += dio->range.n_sectors;
3039     bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
3040     INIT_WORK(&dio->work, integrity_bio_wait);
3041     - queue_work(ic->wait_wq, &dio->work);
3042     + queue_work(ic->offload_wq, &dio->work);
3043     return;
3044     }
3045     do_endio_flush(ic, dio);
3046     @@ -1860,7 +1862,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
3047    
3048     if (need_sync_io && from_map) {
3049     INIT_WORK(&dio->work, integrity_bio_wait);
3050     - queue_work(ic->metadata_wq, &dio->work);
3051     + queue_work(ic->offload_wq, &dio->work);
3052     return;
3053     }
3054    
3055     @@ -2310,7 +2312,7 @@ static void integrity_writer(struct work_struct *w)
3056     unsigned prev_free_sectors;
3057    
3058     /* the following test is not needed, but it tests the replay code */
3059     - if (READ_ONCE(ic->suspending) && !ic->meta_dev)
3060     + if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
3061     return;
3062    
3063     spin_lock_irq(&ic->endio_wait.lock);
3064     @@ -2371,7 +2373,7 @@ static void integrity_recalc(struct work_struct *w)
3065    
3066     next_chunk:
3067    
3068     - if (unlikely(READ_ONCE(ic->suspending)))
3069     + if (unlikely(dm_suspended(ic->ti)))
3070     goto unlock_ret;
3071    
3072     range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
3073     @@ -2496,7 +2498,7 @@ static void bitmap_block_work(struct work_struct *w)
3074     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
3075     remove_range(ic, &dio->range);
3076     INIT_WORK(&dio->work, integrity_bio_wait);
3077     - queue_work(ic->wait_wq, &dio->work);
3078     + queue_work(ic->offload_wq, &dio->work);
3079     } else {
3080     block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
3081     dio->range.n_sectors, BITMAP_OP_SET);
3082     @@ -2519,7 +2521,7 @@ static void bitmap_block_work(struct work_struct *w)
3083    
3084     remove_range(ic, &dio->range);
3085     INIT_WORK(&dio->work, integrity_bio_wait);
3086     - queue_work(ic->wait_wq, &dio->work);
3087     + queue_work(ic->offload_wq, &dio->work);
3088     }
3089    
3090     queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
3091     @@ -2799,8 +2801,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
3092    
3093     del_timer_sync(&ic->autocommit_timer);
3094    
3095     - WRITE_ONCE(ic->suspending, 1);
3096     -
3097     if (ic->recalc_wq)
3098     drain_workqueue(ic->recalc_wq);
3099    
3100     @@ -2829,8 +2829,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
3101     #endif
3102     }
3103    
3104     - WRITE_ONCE(ic->suspending, 0);
3105     -
3106     BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3107    
3108     ic->journal_uptodate = true;
3109     @@ -2883,17 +2881,24 @@ static void dm_integrity_resume(struct dm_target *ti)
3110     } else {
3111     replay_journal(ic);
3112     if (ic->mode == 'B') {
3113     - int mode;
3114     ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3115     ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3116     r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3117     if (unlikely(r))
3118     dm_integrity_io_error(ic, "writing superblock", r);
3119    
3120     - mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR;
3121     - block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode);
3122     - block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode);
3123     - block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode);
3124     + block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3125     + block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3126     + block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3127     + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3128     + le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3129     + block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3130     + ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3131     + block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3132     + ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3133     + block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3134     + ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3135     + }
3136     rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3137     ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3138     }
3139     @@ -2961,7 +2966,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3140     DMEMIT(" meta_device:%s", ic->meta_dev->name);
3141     if (ic->sectors_per_block != 1)
3142     DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3143     - if (ic->recalculate_flag)
3144     + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3145     DMEMIT(" recalculate");
3146     DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3147     DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3148     @@ -3607,6 +3612,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3149     }
3150     ti->private = ic;
3151     ti->per_io_data_size = sizeof(struct dm_integrity_io);
3152     + ic->ti = ti;
3153    
3154     ic->in_progress = RB_ROOT;
3155     INIT_LIST_HEAD(&ic->wait_list);
3156     @@ -3818,6 +3824,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3157     goto bad;
3158     }
3159    
3160     + ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
3161     + METADATA_WORKQUEUE_MAX_ACTIVE);
3162     + if (!ic->offload_wq) {
3163     + ti->error = "Cannot allocate workqueue";
3164     + r = -ENOMEM;
3165     + goto bad;
3166     + }
3167     +
3168     ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
3169     if (!ic->commit_wq) {
3170     ti->error = "Cannot allocate workqueue";
3171     @@ -4122,6 +4136,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
3172     destroy_workqueue(ic->metadata_wq);
3173     if (ic->wait_wq)
3174     destroy_workqueue(ic->wait_wq);
3175     + if (ic->offload_wq)
3176     + destroy_workqueue(ic->offload_wq);
3177     if (ic->commit_wq)
3178     destroy_workqueue(ic->commit_wq);
3179     if (ic->writer_wq)
3180     diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
3181     index 8bb723f1a569..4cd8868f8004 100644
3182     --- a/drivers/md/dm-thin-metadata.c
3183     +++ b/drivers/md/dm-thin-metadata.c
3184     @@ -960,9 +960,9 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
3185     DMWARN("%s: __commit_transaction() failed, error = %d",
3186     __func__, r);
3187     }
3188     + pmd_write_unlock(pmd);
3189     if (!pmd->fail_io)
3190     __destroy_persistent_data_objects(pmd);
3191     - pmd_write_unlock(pmd);
3192    
3193     kfree(pmd);
3194     return 0;
3195     diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
3196     index 07c1b0334f57..184dabce1bad 100644
3197     --- a/drivers/md/dm-writecache.c
3198     +++ b/drivers/md/dm-writecache.c
3199     @@ -625,6 +625,12 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
3200     wc->freelist_size++;
3201     }
3202    
3203     +static inline void writecache_verify_watermark(struct dm_writecache *wc)
3204     +{
3205     + if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
3206     + queue_work(wc->writeback_wq, &wc->writeback_work);
3207     +}
3208     +
3209     static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
3210     {
3211     struct wc_entry *e;
3212     @@ -646,8 +652,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
3213     list_del(&e->lru);
3214     }
3215     wc->freelist_size--;
3216     - if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
3217     - queue_work(wc->writeback_wq, &wc->writeback_work);
3218     +
3219     + writecache_verify_watermark(wc);
3220    
3221     return e;
3222     }
3223     @@ -838,7 +844,7 @@ static void writecache_suspend(struct dm_target *ti)
3224     }
3225     wc_unlock(wc);
3226    
3227     - flush_workqueue(wc->writeback_wq);
3228     + drain_workqueue(wc->writeback_wq);
3229    
3230     wc_lock(wc);
3231     if (flush_on_suspend)
3232     @@ -961,6 +967,8 @@ erase_this:
3233     writecache_commit_flushed(wc, false);
3234     }
3235    
3236     + writecache_verify_watermark(wc);
3237     +
3238     wc_unlock(wc);
3239     }
3240    
3241     diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
3242     index 4574e0dedbd6..03267609b515 100644
3243     --- a/drivers/md/dm-zoned-target.c
3244     +++ b/drivers/md/dm-zoned-target.c
3245     @@ -533,8 +533,9 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
3246    
3247     /* Get the BIO chunk work. If one is not active yet, create one */
3248     cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
3249     - if (!cw) {
3250     -
3251     + if (cw) {
3252     + dmz_get_chunk_work(cw);
3253     + } else {
3254     /* Create a new chunk work */
3255     cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
3256     if (unlikely(!cw)) {
3257     @@ -543,7 +544,7 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
3258     }
3259    
3260     INIT_WORK(&cw->work, dmz_chunk_work);
3261     - refcount_set(&cw->refcount, 0);
3262     + refcount_set(&cw->refcount, 1);
3263     cw->target = dmz;
3264     cw->chunk = chunk;
3265     bio_list_init(&cw->bio_list);
3266     @@ -556,7 +557,6 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
3267     }
3268    
3269     bio_list_add(&cw->bio_list, bio);
3270     - dmz_get_chunk_work(cw);
3271    
3272     dmz_reclaim_bio_acc(dmz->reclaim);
3273     if (queue_work(dmz->chunk_wq, &cw->work))
3274     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3275     index 6d3cc235f842..cf71a2277d60 100644
3276     --- a/drivers/md/dm.c
3277     +++ b/drivers/md/dm.c
3278     @@ -1809,7 +1809,8 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
3279     * With request-based DM we only need to check the
3280     * top-level queue for congestion.
3281     */
3282     - r = md->queue->backing_dev_info->wb.state & bdi_bits;
3283     + struct backing_dev_info *bdi = md->queue->backing_dev_info;
3284     + r = bdi->wb.congested->state & bdi_bits;
3285     } else {
3286     map = dm_get_live_table_fast(md);
3287     if (map)
3288     @@ -1875,15 +1876,6 @@ static const struct dax_operations dm_dax_ops;
3289    
3290     static void dm_wq_work(struct work_struct *work);
3291    
3292     -static void dm_init_normal_md_queue(struct mapped_device *md)
3293     -{
3294     - /*
3295     - * Initialize aspects of queue that aren't relevant for blk-mq
3296     - */
3297     - md->queue->backing_dev_info->congested_data = md;
3298     - md->queue->backing_dev_info->congested_fn = dm_any_congested;
3299     -}
3300     -
3301     static void cleanup_mapped_device(struct mapped_device *md)
3302     {
3303     if (md->wq)
3304     @@ -2270,6 +2262,12 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
3305     }
3306     EXPORT_SYMBOL_GPL(dm_get_queue_limits);
3307    
3308     +static void dm_init_congested_fn(struct mapped_device *md)
3309     +{
3310     + md->queue->backing_dev_info->congested_data = md;
3311     + md->queue->backing_dev_info->congested_fn = dm_any_congested;
3312     +}
3313     +
3314     /*
3315     * Setup the DM device's queue based on md's type
3316     */
3317     @@ -2286,11 +2284,12 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
3318     DMERR("Cannot initialize queue for request-based dm-mq mapped device");
3319     return r;
3320     }
3321     + dm_init_congested_fn(md);
3322     break;
3323     case DM_TYPE_BIO_BASED:
3324     case DM_TYPE_DAX_BIO_BASED:
3325     case DM_TYPE_NVME_BIO_BASED:
3326     - dm_init_normal_md_queue(md);
3327     + dm_init_congested_fn(md);
3328     break;
3329     case DM_TYPE_NONE:
3330     WARN_ON_ONCE(true);
3331     @@ -2389,6 +2388,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
3332     map = dm_get_live_table(md, &srcu_idx);
3333     if (!dm_suspended_md(md)) {
3334     dm_table_presuspend_targets(map);
3335     + set_bit(DMF_SUSPENDED, &md->flags);
3336     dm_table_postsuspend_targets(map);
3337     }
3338     /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
3339     diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
3340     index 7c429ce98bae..668770e9f609 100644
3341     --- a/drivers/media/mc/mc-entity.c
3342     +++ b/drivers/media/mc/mc-entity.c
3343     @@ -639,9 +639,9 @@ int media_get_pad_index(struct media_entity *entity, bool is_sink,
3344     return -EINVAL;
3345    
3346     for (i = 0; i < entity->num_pads; i++) {
3347     - if (entity->pads[i].flags == MEDIA_PAD_FL_SINK)
3348     + if (entity->pads[i].flags & MEDIA_PAD_FL_SINK)
3349     pad_is_sink = true;
3350     - else if (entity->pads[i].flags == MEDIA_PAD_FL_SOURCE)
3351     + else if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE)
3352     pad_is_sink = false;
3353     else
3354     continue; /* This is an error! */
3355     diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
3356     index 3c93d9232c3c..b6e39fbd8ad5 100644
3357     --- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c
3358     +++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
3359     @@ -27,17 +27,17 @@ static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
3360     { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3361     { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3362     { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
3363     - { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3364     - { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3365     + { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3366     + { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3367     { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3368     - { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3369     - { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3370     + { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3371     + { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3372     { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3373     - { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3374     + { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3375     { V4L2_PIX_FMT_BGRA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3376     - { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
3377     + { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3378     { V4L2_PIX_FMT_RGBA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
3379     - { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
3380     + { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_HSV},
3381     { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB},
3382     };
3383    
3384     @@ -175,22 +175,14 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
3385     case V4L2_PIX_FMT_RGB32:
3386     case V4L2_PIX_FMT_XRGB32:
3387     case V4L2_PIX_FMT_HSV32:
3388     - rf->cr = rf->luma + 1;
3389     - rf->cb = rf->cr + 2;
3390     - rf->luma += 2;
3391     - break;
3392     - case V4L2_PIX_FMT_BGR32:
3393     - case V4L2_PIX_FMT_XBGR32:
3394     - rf->cb = rf->luma;
3395     - rf->cr = rf->cb + 2;
3396     - rf->luma++;
3397     - break;
3398     case V4L2_PIX_FMT_ARGB32:
3399     rf->alpha = rf->luma;
3400     rf->cr = rf->luma + 1;
3401     rf->cb = rf->cr + 2;
3402     rf->luma += 2;
3403     break;
3404     + case V4L2_PIX_FMT_BGR32:
3405     + case V4L2_PIX_FMT_XBGR32:
3406     case V4L2_PIX_FMT_ABGR32:
3407     rf->cb = rf->luma;
3408     rf->cr = rf->cb + 2;
3409     @@ -198,10 +190,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
3410     rf->alpha = rf->cr + 1;
3411     break;
3412     case V4L2_PIX_FMT_BGRX32:
3413     - rf->cb = rf->luma + 1;
3414     - rf->cr = rf->cb + 2;
3415     - rf->luma += 2;
3416     - break;
3417     case V4L2_PIX_FMT_BGRA32:
3418     rf->alpha = rf->luma;
3419     rf->cb = rf->luma + 1;
3420     @@ -209,10 +197,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
3421     rf->luma += 2;
3422     break;
3423     case V4L2_PIX_FMT_RGBX32:
3424     - rf->cr = rf->luma;
3425     - rf->cb = rf->cr + 2;
3426     - rf->luma++;
3427     - break;
3428     case V4L2_PIX_FMT_RGBA32:
3429     rf->alpha = rf->luma + 3;
3430     rf->cr = rf->luma;
3431     diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
3432     index 19937dd3c6f6..3d6a6306cec7 100644
3433     --- a/drivers/media/v4l2-core/v4l2-mem2mem.c
3434     +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
3435     @@ -809,12 +809,12 @@ int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
3436     goto err_rel_entity1;
3437    
3438     /* Connect the three entities */
3439     - ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
3440     + ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
3441     MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
3442     if (ret)
3443     goto err_rel_entity2;
3444    
3445     - ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
3446     + ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
3447     MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
3448     if (ret)
3449     goto err_rm_links0;
3450     diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
3451     index 459fee70a597..a7a4fed4d899 100644
3452     --- a/drivers/misc/habanalabs/device.c
3453     +++ b/drivers/misc/habanalabs/device.c
3454     @@ -600,7 +600,9 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
3455     goto out;
3456     }
3457    
3458     - hdev->asic_funcs->halt_coresight(hdev);
3459     + if (!hdev->hard_reset_pending)
3460     + hdev->asic_funcs->halt_coresight(hdev);
3461     +
3462     hdev->in_debug = 0;
3463    
3464     goto out;
3465     @@ -1185,6 +1187,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
3466     if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
3467     dev_info(hdev->dev,
3468     "H/W state is dirty, must reset before initializing\n");
3469     + hdev->asic_funcs->halt_engines(hdev, true);
3470     hdev->asic_funcs->hw_fini(hdev, true);
3471     }
3472    
3473     diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
3474     index fe3574a83b7c..13b39fd97429 100644
3475     --- a/drivers/misc/habanalabs/goya/goya.c
3476     +++ b/drivers/misc/habanalabs/goya/goya.c
3477     @@ -869,6 +869,11 @@ void goya_init_dma_qmans(struct hl_device *hdev)
3478     */
3479     static void goya_disable_external_queues(struct hl_device *hdev)
3480     {
3481     + struct goya_device *goya = hdev->asic_specific;
3482     +
3483     + if (!(goya->hw_cap_initialized & HW_CAP_DMA))
3484     + return;
3485     +
3486     WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
3487     WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
3488     WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
3489     @@ -930,6 +935,11 @@ static int goya_stop_external_queues(struct hl_device *hdev)
3490     {
3491     int rc, retval = 0;
3492    
3493     + struct goya_device *goya = hdev->asic_specific;
3494     +
3495     + if (!(goya->hw_cap_initialized & HW_CAP_DMA))
3496     + return retval;
3497     +
3498     rc = goya_stop_queue(hdev,
3499     mmDMA_QM_0_GLBL_CFG1,
3500     mmDMA_QM_0_CP_STS,
3501     @@ -1719,9 +1729,18 @@ void goya_init_tpc_qmans(struct hl_device *hdev)
3502     */
3503     static void goya_disable_internal_queues(struct hl_device *hdev)
3504     {
3505     + struct goya_device *goya = hdev->asic_specific;
3506     +
3507     + if (!(goya->hw_cap_initialized & HW_CAP_MME))
3508     + goto disable_tpc;
3509     +
3510     WREG32(mmMME_QM_GLBL_CFG0, 0);
3511     WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
3512    
3513     +disable_tpc:
3514     + if (!(goya->hw_cap_initialized & HW_CAP_TPC))
3515     + return;
3516     +
3517     WREG32(mmTPC0_QM_GLBL_CFG0, 0);
3518     WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
3519    
3520     @@ -1757,8 +1776,12 @@ static void goya_disable_internal_queues(struct hl_device *hdev)
3521     */
3522     static int goya_stop_internal_queues(struct hl_device *hdev)
3523     {
3524     + struct goya_device *goya = hdev->asic_specific;
3525     int rc, retval = 0;
3526    
3527     + if (!(goya->hw_cap_initialized & HW_CAP_MME))
3528     + goto stop_tpc;
3529     +
3530     /*
3531     * Each queue (QMAN) is a separate H/W logic. That means that each
3532     * QMAN can be stopped independently and failure to stop one does NOT
3533     @@ -1785,6 +1808,10 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
3534     retval = -EIO;
3535     }
3536    
3537     +stop_tpc:
3538     + if (!(goya->hw_cap_initialized & HW_CAP_TPC))
3539     + return retval;
3540     +
3541     rc = goya_stop_queue(hdev,
3542     mmTPC0_QM_GLBL_CFG1,
3543     mmTPC0_QM_CP_STS,
3544     @@ -1950,6 +1977,11 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
3545    
3546     static void goya_dma_stall(struct hl_device *hdev)
3547     {
3548     + struct goya_device *goya = hdev->asic_specific;
3549     +
3550     + if (!(goya->hw_cap_initialized & HW_CAP_DMA))
3551     + return;
3552     +
3553     WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
3554     WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
3555     WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
3556     @@ -1959,6 +1991,11 @@ static void goya_dma_stall(struct hl_device *hdev)
3557    
3558     static void goya_tpc_stall(struct hl_device *hdev)
3559     {
3560     + struct goya_device *goya = hdev->asic_specific;
3561     +
3562     + if (!(goya->hw_cap_initialized & HW_CAP_TPC))
3563     + return;
3564     +
3565     WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
3566     WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
3567     WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
3568     @@ -1971,6 +2008,11 @@ static void goya_tpc_stall(struct hl_device *hdev)
3569    
3570     static void goya_mme_stall(struct hl_device *hdev)
3571     {
3572     + struct goya_device *goya = hdev->asic_specific;
3573     +
3574     + if (!(goya->hw_cap_initialized & HW_CAP_MME))
3575     + return;
3576     +
3577     WREG32(mmMME_STALL, 0xFFFFFFFF);
3578     }
3579    
3580     @@ -4624,8 +4666,6 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
3581    
3582     rc = goya_send_job_on_qman0(hdev, job);
3583    
3584     - hl_cb_put(job->patched_cb);
3585     -
3586     hl_debugfs_remove_job(hdev, job);
3587     kfree(job);
3588     cb->cs_cnt--;
3589     diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
3590     index fecd5e674e04..46dc913da852 100644
3591     --- a/drivers/net/dsa/bcm_sf2.c
3592     +++ b/drivers/net/dsa/bcm_sf2.c
3593     @@ -69,8 +69,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
3594     /* Force link status for IMP port */
3595     reg = core_readl(priv, offset);
3596     reg |= (MII_SW_OR | LINK_STS);
3597     - if (priv->type == BCM7278_DEVICE_ID)
3598     - reg |= GMII_SPEED_UP_2G;
3599     + reg &= ~GMII_SPEED_UP_2G;
3600     core_writel(priv, reg, offset);
3601    
3602     /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
3603     diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
3604     index 52646855495e..873f9865f0d1 100644
3605     --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
3606     +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
3607     @@ -22,6 +22,7 @@
3608     #define HW_ATL_MIF_ADDR 0x0208U
3609     #define HW_ATL_MIF_VAL 0x020CU
3610    
3611     +#define HW_ATL_MPI_RPC_ADDR 0x0334U
3612     #define HW_ATL_RPC_CONTROL_ADR 0x0338U
3613     #define HW_ATL_RPC_STATE_ADR 0x033CU
3614    
3615     @@ -48,15 +49,14 @@
3616     #define FORCE_FLASHLESS 0
3617    
3618     static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
3619     -
3620     static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
3621     enum hal_atl_utils_fw_state_e state);
3622     -
3623     static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
3624     static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self);
3625     static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self);
3626     static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self);
3627     static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self);
3628     +static u32 aq_fw1x_rpc_get(struct aq_hw_s *self);
3629    
3630     int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
3631     {
3632     @@ -413,6 +413,10 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
3633     self, self->mbox_addr,
3634     self->mbox_addr != 0U,
3635     1000U, 10000U);
3636     + err = readx_poll_timeout_atomic(aq_fw1x_rpc_get, self,
3637     + self->rpc_addr,
3638     + self->rpc_addr != 0U,
3639     + 1000U, 100000U);
3640    
3641     return err;
3642     }
3643     @@ -469,6 +473,12 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
3644     self, fw.val,
3645     sw.tid == fw.tid,
3646     1000U, 100000U);
3647     + if (err < 0)
3648     + goto err_exit;
3649     +
3650     + err = aq_hw_err_from_flags(self);
3651     + if (err < 0)
3652     + goto err_exit;
3653    
3654     if (fw.len == 0xFFFFU) {
3655     err = hw_atl_utils_fw_rpc_call(self, sw.len);
3656     @@ -950,6 +960,11 @@ static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self)
3657     return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR);
3658     }
3659    
3660     +static u32 aq_fw1x_rpc_get(struct aq_hw_s *self)
3661     +{
3662     + return aq_hw_read_reg(self, HW_ATL_MPI_RPC_ADDR);
3663     +}
3664     +
3665     const struct aq_fw_ops aq_fw_1x_ops = {
3666     .init = hw_atl_utils_mpi_create,
3667     .deinit = hw_atl_fw1x_deinit,
3668     diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
3669     index 6cc100e7d5c0..76ff42ec3ae5 100644
3670     --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
3671     +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
3672     @@ -410,10 +410,19 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
3673     lmac = &bgx->lmac[lmacid];
3674    
3675     cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
3676     - if (enable)
3677     + if (enable) {
3678     cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
3679     - else
3680     +
3681     + /* enable TX FIFO Underflow interrupt */
3682     + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S,
3683     + GMI_TXX_INT_UNDFLW);
3684     + } else {
3685     cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
3686     +
3687     + /* Disable TX FIFO Underflow interrupt */
3688     + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C,
3689     + GMI_TXX_INT_UNDFLW);
3690     + }
3691     bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
3692    
3693     if (bgx->is_rgx)
3694     @@ -1535,6 +1544,48 @@ static int bgx_init_phy(struct bgx *bgx)
3695     return bgx_init_of_phy(bgx);
3696     }
3697    
3698     +static irqreturn_t bgx_intr_handler(int irq, void *data)
3699     +{
3700     + struct bgx *bgx = (struct bgx *)data;
3701     + u64 status, val;
3702     + int lmac;
3703     +
3704     + for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
3705     + status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT);
3706     + if (status & GMI_TXX_INT_UNDFLW) {
3707     + pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
3708     + bgx->bgx_id, lmac);
3709     + val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG);
3710     + val &= ~CMR_EN;
3711     + bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
3712     + val |= CMR_EN;
3713     + bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
3714     + }
3715     + /* clear interrupts */
3716     + bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status);
3717     + }
3718     +
3719     + return IRQ_HANDLED;
3720     +}
3721     +
3722     +static void bgx_register_intr(struct pci_dev *pdev)
3723     +{
3724     + struct bgx *bgx = pci_get_drvdata(pdev);
3725     + int ret;
3726     +
3727     + ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET,
3728     + BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES);
3729     + if (ret < 0) {
3730     + pci_err(pdev, "Req for #%d msix vectors failed\n",
3731     + BGX_LMAC_VEC_OFFSET);
3732     + return;
3733     + }
3734     + ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL,
3735     + bgx, "BGX%d", bgx->bgx_id);
3736     + if (ret)
3737     + pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
3738     +}
3739     +
3740     static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3741     {
3742     int err;
3743     @@ -1550,7 +1601,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3744    
3745     pci_set_drvdata(pdev, bgx);
3746    
3747     - err = pci_enable_device(pdev);
3748     + err = pcim_enable_device(pdev);
3749     if (err) {
3750     dev_err(dev, "Failed to enable PCI device\n");
3751     pci_set_drvdata(pdev, NULL);
3752     @@ -1604,6 +1655,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3753    
3754     bgx_init_hw(bgx);
3755    
3756     + bgx_register_intr(pdev);
3757     +
3758     /* Enable all LMACs */
3759     for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
3760     err = bgx_lmac_enable(bgx, lmac);
3761     @@ -1620,6 +1673,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3762    
3763     err_enable:
3764     bgx_vnic[bgx->bgx_id] = NULL;
3765     + pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
3766     err_release_regions:
3767     pci_release_regions(pdev);
3768     err_disable_device:
3769     @@ -1637,6 +1691,8 @@ static void bgx_remove(struct pci_dev *pdev)
3770     for (lmac = 0; lmac < bgx->lmac_count; lmac++)
3771     bgx_lmac_disable(bgx, lmac);
3772    
3773     + pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
3774     +
3775     bgx_vnic[bgx->bgx_id] = NULL;
3776     pci_release_regions(pdev);
3777     pci_disable_device(pdev);
3778     diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
3779     index 25888706bdcd..cdea49392185 100644
3780     --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
3781     +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
3782     @@ -180,6 +180,15 @@
3783     #define BGX_GMP_GMI_TXX_BURST 0x38228
3784     #define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
3785     #define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
3786     +#define BGX_GMP_GMI_TXX_INT 0x38500
3787     +#define BGX_GMP_GMI_TXX_INT_W1S 0x38508
3788     +#define BGX_GMP_GMI_TXX_INT_ENA_W1C 0x38510
3789     +#define BGX_GMP_GMI_TXX_INT_ENA_W1S 0x38518
3790     +#define GMI_TXX_INT_PTP_LOST BIT_ULL(4)
3791     +#define GMI_TXX_INT_LATE_COL BIT_ULL(3)
3792     +#define GMI_TXX_INT_XSDEF BIT_ULL(2)
3793     +#define GMI_TXX_INT_XSCOL BIT_ULL(1)
3794     +#define GMI_TXX_INT_UNDFLW BIT_ULL(0)
3795    
3796     #define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
3797     #define BGX_MSIX_VEC_0_29_CTL 0x400008
3798     diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
3799     index cce90b5925d9..70060c51854f 100644
3800     --- a/drivers/net/ethernet/davicom/dm9000.c
3801     +++ b/drivers/net/ethernet/davicom/dm9000.c
3802     @@ -1405,6 +1405,8 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
3803     mac_addr = of_get_mac_address(np);
3804     if (!IS_ERR(mac_addr))
3805     ether_addr_copy(pdata->dev_addr, mac_addr);
3806     + else if (PTR_ERR(mac_addr) == -EPROBE_DEFER)
3807     + return ERR_CAST(mac_addr);
3808    
3809     return pdata;
3810     }
3811     diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
3812     index 1fe9f6050635..62673e27af0e 100644
3813     --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
3814     +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
3815     @@ -2916,13 +2916,6 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
3816     else
3817     return -EINVAL;
3818    
3819     - /* Tell the OS link is going down, the link will go back up when fw
3820     - * says it is ready asynchronously
3821     - */
3822     - ice_print_link_msg(vsi, false);
3823     - netif_carrier_off(netdev);
3824     - netif_tx_stop_all_queues(netdev);
3825     -
3826     /* Set the FC mode and only restart AN if link is up */
3827     status = ice_set_fc(pi, &aq_failures, link_up);
3828    
3829     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
3830     index 2739ed2a2911..841abe75652c 100644
3831     --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
3832     +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
3833     @@ -2257,7 +2257,9 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
3834     struct mlx5dr_cmd_vport_cap *vport_cap;
3835     struct mlx5dr_domain *dmn = sb->dmn;
3836     struct mlx5dr_cmd_caps *caps;
3837     + u8 *bit_mask = sb->bit_mask;
3838     u8 *tag = hw_ste->tag;
3839     + bool source_gvmi_set;
3840    
3841     DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
3842    
3843     @@ -2278,7 +2280,8 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
3844     if (!vport_cap)
3845     return -EINVAL;
3846    
3847     - if (vport_cap->vport_gvmi)
3848     + source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
3849     + if (vport_cap->vport_gvmi && source_gvmi_set)
3850     MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
3851    
3852     misc->source_eswitch_owner_vhca_id = 0;
3853     diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
3854     index a41a90c589db..1c9e70c8cc30 100644
3855     --- a/drivers/net/ethernet/micrel/ks8851_mll.c
3856     +++ b/drivers/net/ethernet/micrel/ks8851_mll.c
3857     @@ -156,24 +156,6 @@ static int msg_enable;
3858     * chip is busy transferring packet data (RX/TX FIFO accesses).
3859     */
3860    
3861     -/**
3862     - * ks_rdreg8 - read 8 bit register from device
3863     - * @ks : The chip information
3864     - * @offset: The register address
3865     - *
3866     - * Read a 8bit register from the chip, returning the result
3867     - */
3868     -static u8 ks_rdreg8(struct ks_net *ks, int offset)
3869     -{
3870     - u16 data;
3871     - u8 shift_bit = offset & 0x03;
3872     - u8 shift_data = (offset & 1) << 3;
3873     - ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
3874     - iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
3875     - data = ioread16(ks->hw_addr);
3876     - return (u8)(data >> shift_data);
3877     -}
3878     -
3879     /**
3880     * ks_rdreg16 - read 16 bit register from device
3881     * @ks : The chip information
3882     @@ -184,27 +166,11 @@ static u8 ks_rdreg8(struct ks_net *ks, int offset)
3883    
3884     static u16 ks_rdreg16(struct ks_net *ks, int offset)
3885     {
3886     - ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
3887     + ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
3888     iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
3889     return ioread16(ks->hw_addr);
3890     }
3891    
3892     -/**
3893     - * ks_wrreg8 - write 8bit register value to chip
3894     - * @ks: The chip information
3895     - * @offset: The register address
3896     - * @value: The value to write
3897     - *
3898     - */
3899     -static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
3900     -{
3901     - u8 shift_bit = (offset & 0x03);
3902     - u16 value_write = (u16)(value << ((offset & 1) << 3));
3903     - ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
3904     - iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
3905     - iowrite16(value_write, ks->hw_addr);
3906     -}
3907     -
3908     /**
3909     * ks_wrreg16 - write 16bit register value to chip
3910     * @ks: The chip information
3911     @@ -215,7 +181,7 @@ static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
3912    
3913     static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
3914     {
3915     - ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
3916     + ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
3917     iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
3918     iowrite16(value, ks->hw_addr);
3919     }
3920     @@ -231,7 +197,7 @@ static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
3921     {
3922     len >>= 1;
3923     while (len--)
3924     - *wptr++ = (u16)ioread16(ks->hw_addr);
3925     + *wptr++ = be16_to_cpu(ioread16(ks->hw_addr));
3926     }
3927    
3928     /**
3929     @@ -245,7 +211,7 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
3930     {
3931     len >>= 1;
3932     while (len--)
3933     - iowrite16(*wptr++, ks->hw_addr);
3934     + iowrite16(cpu_to_be16(*wptr++), ks->hw_addr);
3935     }
3936    
3937     static void ks_disable_int(struct ks_net *ks)
3938     @@ -324,8 +290,7 @@ static void ks_read_config(struct ks_net *ks)
3939     u16 reg_data = 0;
3940    
3941     /* Regardless of bus width, 8 bit read should always work.*/
3942     - reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
3943     - reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
3944     + reg_data = ks_rdreg16(ks, KS_CCR);
3945    
3946     /* addr/data bus are multiplexed */
3947     ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
3948     @@ -429,7 +394,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
3949    
3950     /* 1. set sudo DMA mode */
3951     ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
3952     - ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
3953     + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
3954    
3955     /* 2. read prepend data */
3956     /**
3957     @@ -446,7 +411,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
3958     ks_inblk(ks, buf, ALIGN(len, 4));
3959    
3960     /* 4. reset sudo DMA Mode */
3961     - ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
3962     + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
3963     }
3964    
3965     /**
3966     @@ -679,13 +644,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
3967     ks->txh.txw[1] = cpu_to_le16(len);
3968    
3969     /* 1. set sudo-DMA mode */
3970     - ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
3971     + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
3972     /* 2. write status/lenth info */
3973     ks_outblk(ks, ks->txh.txw, 4);
3974     /* 3. write pkt data */
3975     ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
3976     /* 4. reset sudo-DMA mode */
3977     - ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
3978     + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
3979     /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
3980     ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
3981     /* 6. wait until TXQCR_METFE is auto-cleared */
3982     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3983     index 582176d869c3..89a6ae2b17e3 100644
3984     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3985     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3986     @@ -4208,6 +4208,8 @@ static void stmmac_init_fs(struct net_device *dev)
3987     {
3988     struct stmmac_priv *priv = netdev_priv(dev);
3989    
3990     + rtnl_lock();
3991     +
3992     /* Create per netdev entries */
3993     priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3994    
3995     @@ -4219,14 +4221,13 @@ static void stmmac_init_fs(struct net_device *dev)
3996     debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
3997     &stmmac_dma_cap_fops);
3998    
3999     - register_netdevice_notifier(&stmmac_notifier);
4000     + rtnl_unlock();
4001     }
4002    
4003     static void stmmac_exit_fs(struct net_device *dev)
4004     {
4005     struct stmmac_priv *priv = netdev_priv(dev);
4006    
4007     - unregister_netdevice_notifier(&stmmac_notifier);
4008     debugfs_remove_recursive(priv->dbgfs_dir);
4009     }
4010     #endif /* CONFIG_DEBUG_FS */
4011     @@ -4728,14 +4729,14 @@ int stmmac_dvr_remove(struct device *dev)
4012    
4013     netdev_info(priv->dev, "%s: removing driver", __func__);
4014    
4015     -#ifdef CONFIG_DEBUG_FS
4016     - stmmac_exit_fs(ndev);
4017     -#endif
4018     stmmac_stop_all_dma(priv);
4019    
4020     stmmac_mac_set(priv, priv->ioaddr, false);
4021     netif_carrier_off(ndev);
4022     unregister_netdev(ndev);
4023     +#ifdef CONFIG_DEBUG_FS
4024     + stmmac_exit_fs(ndev);
4025     +#endif
4026     phylink_destroy(priv->phylink);
4027     if (priv->plat->stmmac_rst)
4028     reset_control_assert(priv->plat->stmmac_rst);
4029     @@ -4955,6 +4956,7 @@ static int __init stmmac_init(void)
4030     /* Create debugfs main directory if it doesn't exist yet */
4031     if (!stmmac_fs_dir)
4032     stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4033     + register_netdevice_notifier(&stmmac_notifier);
4034     #endif
4035    
4036     return 0;
4037     @@ -4963,6 +4965,7 @@ static int __init stmmac_init(void)
4038     static void __exit stmmac_exit(void)
4039     {
4040     #ifdef CONFIG_DEBUG_FS
4041     + unregister_netdevice_notifier(&stmmac_notifier);
4042     debugfs_remove_recursive(stmmac_fs_dir);
4043     #endif
4044     }
4045     diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
4046     index 7dacfd102a99..b8fe42f4b3c5 100644
4047     --- a/drivers/nvme/host/core.c
4048     +++ b/drivers/nvme/host/core.c
4049     @@ -1161,8 +1161,8 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl,
4050     static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
4051     unsigned int dword11, void *buffer, size_t buflen, u32 *result)
4052     {
4053     + union nvme_result res = { 0 };
4054     struct nvme_command c;
4055     - union nvme_result res;
4056     int ret;
4057    
4058     memset(&c, 0, sizeof(c));
4059     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
4060     index 570c75c92e29..cd64ddb129e5 100644
4061     --- a/drivers/nvme/host/pci.c
4062     +++ b/drivers/nvme/host/pci.c
4063     @@ -2753,6 +2753,18 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
4064     (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
4065     dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
4066     return NVME_QUIRK_NO_APST;
4067     + } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
4068     + pdev->device == 0xa808 || pdev->device == 0xa809)) ||
4069     + (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
4070     + /*
4071     + * Forcing to use host managed nvme power settings for
4072     + * lowest idle power with quick resume latency on
4073     + * Samsung and Toshiba SSDs based on suspend behavior
4074     + * on Coffee Lake board for LENOVO C640
4075     + */
4076     + if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
4077     + dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
4078     + return NVME_QUIRK_SIMPLE_SUSPEND;
4079     }
4080    
4081     return 0;
4082     @@ -3114,7 +3126,8 @@ static const struct pci_device_id nvme_id_table[] = {
4083     .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
4084     NVME_QUIRK_IGNORE_DEV_SUBNQN, },
4085     { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
4086     - { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
4087     + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
4088     + .driver_data = NVME_QUIRK_SINGLE_VECTOR },
4089     { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
4090     { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
4091     .driver_data = NVME_QUIRK_SINGLE_VECTOR |
4092     diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
4093     index f20524f0c21d..94a34cf75eb3 100644
4094     --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
4095     +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
4096     @@ -20,6 +20,7 @@
4097    
4098     #define PHY_MDM6600_PHY_DELAY_MS 4000 /* PHY enable 2.2s to 3.5s */
4099     #define PHY_MDM6600_ENABLED_DELAY_MS 8000 /* 8s more total for MDM6600 */
4100     +#define PHY_MDM6600_WAKE_KICK_MS 600 /* time on after GPIO toggle */
4101     #define MDM6600_MODEM_IDLE_DELAY_MS 1000 /* modem after USB suspend */
4102     #define MDM6600_MODEM_WAKE_DELAY_MS 200 /* modem response after idle */
4103    
4104     @@ -243,10 +244,24 @@ static irqreturn_t phy_mdm6600_wakeirq_thread(int irq, void *data)
4105     {
4106     struct phy_mdm6600 *ddata = data;
4107     struct gpio_desc *mode_gpio1;
4108     + int error, wakeup;
4109    
4110     mode_gpio1 = ddata->mode_gpios->desc[PHY_MDM6600_MODE1];
4111     - dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n",
4112     - gpiod_get_value(mode_gpio1));
4113     + wakeup = gpiod_get_value(mode_gpio1);
4114     + if (!wakeup)
4115     + return IRQ_NONE;
4116     +
4117     + dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n", wakeup);
4118     + error = pm_runtime_get_sync(ddata->dev);
4119     + if (error < 0) {
4120     + pm_runtime_put_noidle(ddata->dev);
4121     +
4122     + return IRQ_NONE;
4123     + }
4124     +
4125     + /* Just wake-up and kick the autosuspend timer */
4126     + pm_runtime_mark_last_busy(ddata->dev);
4127     + pm_runtime_put_autosuspend(ddata->dev);
4128    
4129     return IRQ_HANDLED;
4130     }
4131     @@ -496,8 +511,14 @@ static void phy_mdm6600_modem_wake(struct work_struct *work)
4132    
4133     ddata = container_of(work, struct phy_mdm6600, modem_wake_work.work);
4134     phy_mdm6600_wake_modem(ddata);
4135     +
4136     + /*
4137     + * The modem does not always stay awake 1.2 seconds after toggling
4138     + * the wake GPIO, and sometimes it idles after about some 600 ms
4139     + * making writes time out.
4140     + */
4141     schedule_delayed_work(&ddata->modem_wake_work,
4142     - msecs_to_jiffies(MDM6600_MODEM_IDLE_DELAY_MS));
4143     + msecs_to_jiffies(PHY_MDM6600_WAKE_KICK_MS));
4144     }
4145    
4146     static int __maybe_unused phy_mdm6600_runtime_suspend(struct device *dev)
4147     diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
4148     index 8919a5130bec..25f24df9aa82 100644
4149     --- a/drivers/regulator/stm32-vrefbuf.c
4150     +++ b/drivers/regulator/stm32-vrefbuf.c
4151     @@ -88,7 +88,7 @@ static int stm32_vrefbuf_disable(struct regulator_dev *rdev)
4152     }
4153    
4154     val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
4155     - val = (val & ~STM32_ENVR) | STM32_HIZ;
4156     + val &= ~STM32_ENVR;
4157     writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
4158    
4159     pm_runtime_mark_last_busy(priv->dev);
4160     @@ -175,6 +175,7 @@ static const struct regulator_desc stm32_vrefbuf_regu = {
4161     .volt_table = stm32_vrefbuf_voltages,
4162     .n_voltages = ARRAY_SIZE(stm32_vrefbuf_voltages),
4163     .ops = &stm32_vrefbuf_volt_ops,
4164     + .off_on_delay = 1000,
4165     .type = REGULATOR_VOLTAGE,
4166     .owner = THIS_MODULE,
4167     };
4168     diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
4169     index 2a3f874a21d5..9cebff8e8d74 100644
4170     --- a/drivers/s390/cio/blacklist.c
4171     +++ b/drivers/s390/cio/blacklist.c
4172     @@ -303,8 +303,10 @@ static void *
4173     cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
4174     {
4175     struct ccwdev_iter *iter;
4176     + loff_t p = *offset;
4177    
4178     - if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
4179     + (*offset)++;
4180     + if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
4181     return NULL;
4182     iter = it;
4183     if (iter->devno == __MAX_SUBCHANNEL) {
4184     @@ -314,7 +316,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
4185     return NULL;
4186     } else
4187     iter->devno++;
4188     - (*offset)++;
4189     return iter;
4190     }
4191    
4192     diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
4193     index cd164886132f..ee0b3c586211 100644
4194     --- a/drivers/s390/cio/qdio_setup.c
4195     +++ b/drivers/s390/cio/qdio_setup.c
4196     @@ -8,6 +8,7 @@
4197     #include <linux/kernel.h>
4198     #include <linux/slab.h>
4199     #include <linux/export.h>
4200     +#include <linux/io.h>
4201     #include <asm/qdio.h>
4202    
4203     #include "cio.h"
4204     @@ -207,7 +208,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
4205    
4206     /* fill in sl */
4207     for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
4208     - q->sl->element[j].sbal = (unsigned long)q->sbal[j];
4209     + q->sl->element[j].sbal = virt_to_phys(q->sbal[j]);
4210     }
4211    
4212     static void setup_queues(struct qdio_irq *irq_ptr,
4213     diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
4214     index 23852888eb2c..b727d1e34523 100644
4215     --- a/drivers/s390/net/qeth_core_main.c
4216     +++ b/drivers/s390/net/qeth_core_main.c
4217     @@ -4716,10 +4716,10 @@ static void qeth_qdio_establish_cq(struct qeth_card *card,
4218     if (card->options.cq == QETH_CQ_ENABLED) {
4219     int offset = QDIO_MAX_BUFFERS_PER_Q *
4220     (card->qdio.no_in_queues - 1);
4221     - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4222     - in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
4223     - virt_to_phys(card->qdio.c_q->bufs[i].buffer);
4224     - }
4225     +
4226     + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
4227     + in_sbal_ptrs[offset + i] =
4228     + card->qdio.c_q->bufs[i].buffer;
4229    
4230     queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
4231     }
4232     @@ -4753,10 +4753,9 @@ static int qeth_qdio_establish(struct qeth_card *card)
4233     rc = -ENOMEM;
4234     goto out_free_qib_param;
4235     }
4236     - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4237     - in_sbal_ptrs[i] = (struct qdio_buffer *)
4238     - virt_to_phys(card->qdio.in_q->bufs[i].buffer);
4239     - }
4240     +
4241     + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
4242     + in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;
4243    
4244     queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
4245     GFP_KERNEL);
4246     @@ -4777,11 +4776,11 @@ static int qeth_qdio_establish(struct qeth_card *card)
4247     rc = -ENOMEM;
4248     goto out_free_queue_start_poll;
4249     }
4250     +
4251     for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
4252     - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
4253     - out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
4254     - card->qdio.out_qs[i]->bufs[j]->buffer);
4255     - }
4256     + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++)
4257     + out_sbal_ptrs[k] =
4258     + card->qdio.out_qs[i]->bufs[j]->buffer;
4259    
4260     memset(&init_data, 0, sizeof(struct qdio_initialize));
4261     init_data.cdev = CARD_DDEV(card);
4262     diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
4263     index 46bc062d873e..d86838801805 100644
4264     --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
4265     +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
4266     @@ -594,7 +594,8 @@ retry_alloc:
4267    
4268     fusion->io_request_frames =
4269     dma_pool_alloc(fusion->io_request_frames_pool,
4270     - GFP_KERNEL, &fusion->io_request_frames_phys);
4271     + GFP_KERNEL | __GFP_NOWARN,
4272     + &fusion->io_request_frames_phys);
4273     if (!fusion->io_request_frames) {
4274     if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
4275     instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
4276     @@ -632,7 +633,7 @@ retry_alloc:
4277    
4278     fusion->io_request_frames =
4279     dma_pool_alloc(fusion->io_request_frames_pool,
4280     - GFP_KERNEL,
4281     + GFP_KERNEL | __GFP_NOWARN,
4282     &fusion->io_request_frames_phys);
4283    
4284     if (!fusion->io_request_frames) {
4285     diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
4286     index c68882eb80f7..f638e7790470 100644
4287     --- a/drivers/soc/imx/soc-imx-scu.c
4288     +++ b/drivers/soc/imx/soc-imx-scu.c
4289     @@ -25,7 +25,7 @@ struct imx_sc_msg_misc_get_soc_id {
4290     u32 id;
4291     } resp;
4292     } data;
4293     -} __packed;
4294     +} __packed __aligned(4);
4295    
4296     struct imx_sc_msg_misc_get_soc_uid {
4297     struct imx_sc_rpc_msg hdr;
4298     diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
4299     index fd8007ebb145..13def7f78b9e 100644
4300     --- a/drivers/spi/atmel-quadspi.c
4301     +++ b/drivers/spi/atmel-quadspi.c
4302     @@ -149,6 +149,7 @@ struct atmel_qspi {
4303     struct clk *qspick;
4304     struct platform_device *pdev;
4305     const struct atmel_qspi_caps *caps;
4306     + resource_size_t mmap_size;
4307     u32 pending;
4308     u32 mr;
4309     u32 scr;
4310     @@ -329,6 +330,14 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
4311     u32 sr, offset;
4312     int err;
4313    
4314     + /*
4315     + * Check if the address exceeds the MMIO window size. An improvement
4316     + * would be to add support for regular SPI mode and fall back to it
4317     + * when the flash memories overrun the controller's memory space.
4318     + */
4319     + if (op->addr.val + op->data.nbytes > aq->mmap_size)
4320     + return -ENOTSUPP;
4321     +
4322     err = atmel_qspi_set_cfg(aq, op, &offset);
4323     if (err)
4324     return err;
4325     @@ -480,6 +489,8 @@ static int atmel_qspi_probe(struct platform_device *pdev)
4326     goto exit;
4327     }
4328    
4329     + aq->mmap_size = resource_size(res);
4330     +
4331     /* Get the peripheral clock */
4332     aq->pclk = devm_clk_get(&pdev->dev, "pclk");
4333     if (IS_ERR(aq->pclk))
4334     diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
4335     index c6836a931dbf..36f7eb8ab2df 100644
4336     --- a/drivers/spi/spi-bcm63xx-hsspi.c
4337     +++ b/drivers/spi/spi-bcm63xx-hsspi.c
4338     @@ -367,7 +367,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
4339     goto out_disable_clk;
4340    
4341     rate = clk_get_rate(pll_clk);
4342     - clk_disable_unprepare(pll_clk);
4343     if (!rate) {
4344     ret = -EINVAL;
4345     goto out_disable_pll_clk;
4346     diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
4347     index 3ea9d8a3e6e8..ab2c3848f5bf 100644
4348     --- a/drivers/spi/spidev.c
4349     +++ b/drivers/spi/spidev.c
4350     @@ -394,6 +394,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4351     else
4352     retval = get_user(tmp, (u32 __user *)arg);
4353     if (retval == 0) {
4354     + struct spi_controller *ctlr = spi->controller;
4355     u32 save = spi->mode;
4356    
4357     if (tmp & ~SPI_MODE_MASK) {
4358     @@ -401,6 +402,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4359     break;
4360     }
4361    
4362     + if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
4363     + ctlr->cs_gpiods[spi->chip_select])
4364     + tmp |= SPI_CS_HIGH;
4365     +
4366     tmp |= spi->mode & ~SPI_MODE_MASK;
4367     spi->mode = (u16)tmp;
4368     retval = spi_setup(spi);
4369     diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
4370     index 6d9d41170832..32e5966ba5c5 100644
4371     --- a/drivers/staging/media/hantro/hantro_drv.c
4372     +++ b/drivers/staging/media/hantro/hantro_drv.c
4373     @@ -553,13 +553,13 @@ static int hantro_attach_func(struct hantro_dev *vpu,
4374     goto err_rel_entity1;
4375    
4376     /* Connect the three entities */
4377     - ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 1,
4378     + ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
4379     MEDIA_LNK_FL_IMMUTABLE |
4380     MEDIA_LNK_FL_ENABLED);
4381     if (ret)
4382     goto err_rel_entity2;
4383    
4384     - ret = media_create_pad_link(&func->proc, 0, &func->sink, 0,
4385     + ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
4386     MEDIA_LNK_FL_IMMUTABLE |
4387     MEDIA_LNK_FL_ENABLED);
4388     if (ret)
4389     diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
4390     index a8b4d0c5ab7e..032f3264fba1 100644
4391     --- a/drivers/staging/speakup/selection.c
4392     +++ b/drivers/staging/speakup/selection.c
4393     @@ -51,9 +51,7 @@ static void __speakup_set_selection(struct work_struct *work)
4394     goto unref;
4395     }
4396    
4397     - console_lock();
4398     set_selection_kernel(&sel, tty);
4399     - console_unlock();
4400    
4401     unref:
4402     tty_kref_put(tty);
4403     diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
4404     index 597eb9d16f21..e1268646ee56 100644
4405     --- a/drivers/tty/serial/8250/8250_exar.c
4406     +++ b/drivers/tty/serial/8250/8250_exar.c
4407     @@ -25,6 +25,14 @@
4408    
4409     #include "8250.h"
4410    
4411     +#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052
4412     +#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d
4413     +#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c
4414     +#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8
4415     +#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2
4416     +#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db
4417     +#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea
4418     +
4419     #define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
4420     #define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
4421     #define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a
4422     @@ -658,6 +666,22 @@ static int __maybe_unused exar_resume(struct device *dev)
4423    
4424     static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume);
4425    
4426     +static const struct exar8250_board acces_com_2x = {
4427     + .num_ports = 2,
4428     + .setup = pci_xr17c154_setup,
4429     +};
4430     +
4431     +static const struct exar8250_board acces_com_4x = {
4432     + .num_ports = 4,
4433     + .setup = pci_xr17c154_setup,
4434     +};
4435     +
4436     +static const struct exar8250_board acces_com_8x = {
4437     + .num_ports = 8,
4438     + .setup = pci_xr17c154_setup,
4439     +};
4440     +
4441     +
4442     static const struct exar8250_board pbn_fastcom335_2 = {
4443     .num_ports = 2,
4444     .setup = pci_fastcom335_setup,
4445     @@ -726,6 +750,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
4446     }
4447    
4448     static const struct pci_device_id exar_pci_tbl[] = {
4449     + EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x),
4450     + EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x),
4451     + EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x),
4452     + EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x),
4453     + EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x),
4454     + EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x),
4455     + EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x),
4456     +
4457     +
4458     CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect),
4459     CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect),
4460     CONNECT_DEVICE(XR17C158, UART_8_232, pbn_connect),
4461     diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
4462     index 3bdd56a1021b..ea12f10610b6 100644
4463     --- a/drivers/tty/serial/ar933x_uart.c
4464     +++ b/drivers/tty/serial/ar933x_uart.c
4465     @@ -286,6 +286,10 @@ static void ar933x_uart_set_termios(struct uart_port *port,
4466     ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
4467     AR933X_UART_CS_HOST_INT_EN);
4468    
4469     + /* enable RX and TX ready overide */
4470     + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
4471     + AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
4472     +
4473     /* reenable the UART */
4474     ar933x_uart_rmw(up, AR933X_UART_CS_REG,
4475     AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
4476     @@ -418,6 +422,10 @@ static int ar933x_uart_startup(struct uart_port *port)
4477     ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
4478     AR933X_UART_CS_HOST_INT_EN);
4479    
4480     + /* enable RX and TX ready overide */
4481     + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
4482     + AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
4483     +
4484     /* Enable RX interrupts */
4485     up->ier = AR933X_UART_INT_RX_VALID;
4486     ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
4487     diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
4488     index 3f64b08f50ef..d2fc050a3445 100644
4489     --- a/drivers/tty/serial/fsl_lpuart.c
4490     +++ b/drivers/tty/serial/fsl_lpuart.c
4491     @@ -268,6 +268,7 @@ struct lpuart_port {
4492     int rx_dma_rng_buf_len;
4493     unsigned int dma_tx_nents;
4494     wait_queue_head_t dma_wait;
4495     + bool id_allocated;
4496     };
4497    
4498     struct lpuart_soc_data {
4499     @@ -2382,19 +2383,6 @@ static int lpuart_probe(struct platform_device *pdev)
4500     if (!sport)
4501     return -ENOMEM;
4502    
4503     - ret = of_alias_get_id(np, "serial");
4504     - if (ret < 0) {
4505     - ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
4506     - if (ret < 0) {
4507     - dev_err(&pdev->dev, "port line is full, add device failed\n");
4508     - return ret;
4509     - }
4510     - }
4511     - if (ret >= ARRAY_SIZE(lpuart_ports)) {
4512     - dev_err(&pdev->dev, "serial%d out of range\n", ret);
4513     - return -EINVAL;
4514     - }
4515     - sport->port.line = ret;
4516     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4517     sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
4518     if (IS_ERR(sport->port.membase))
4519     @@ -2435,9 +2423,25 @@ static int lpuart_probe(struct platform_device *pdev)
4520     }
4521     }
4522    
4523     + ret = of_alias_get_id(np, "serial");
4524     + if (ret < 0) {
4525     + ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
4526     + if (ret < 0) {
4527     + dev_err(&pdev->dev, "port line is full, add device failed\n");
4528     + return ret;
4529     + }
4530     + sport->id_allocated = true;
4531     + }
4532     + if (ret >= ARRAY_SIZE(lpuart_ports)) {
4533     + dev_err(&pdev->dev, "serial%d out of range\n", ret);
4534     + ret = -EINVAL;
4535     + goto failed_out_of_range;
4536     + }
4537     + sport->port.line = ret;
4538     +
4539     ret = lpuart_enable_clks(sport);
4540     if (ret)
4541     - return ret;
4542     + goto failed_clock_enable;
4543     sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
4544    
4545     lpuart_ports[sport->port.line] = sport;
4546     @@ -2487,6 +2491,10 @@ static int lpuart_probe(struct platform_device *pdev)
4547     failed_attach_port:
4548     failed_irq_request:
4549     lpuart_disable_clks(sport);
4550     +failed_clock_enable:
4551     +failed_out_of_range:
4552     + if (sport->id_allocated)
4553     + ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
4554     return ret;
4555     }
4556    
4557     @@ -2496,7 +2504,8 @@ static int lpuart_remove(struct platform_device *pdev)
4558    
4559     uart_remove_one_port(&lpuart_reg, &sport->port);
4560    
4561     - ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
4562     + if (sport->id_allocated)
4563     + ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
4564    
4565     lpuart_disable_clks(sport);
4566    
4567     diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
4568     index c12a12556339..4e9a590712cb 100644
4569     --- a/drivers/tty/serial/mvebu-uart.c
4570     +++ b/drivers/tty/serial/mvebu-uart.c
4571     @@ -851,7 +851,7 @@ static int mvebu_uart_probe(struct platform_device *pdev)
4572    
4573     port->membase = devm_ioremap_resource(&pdev->dev, reg);
4574     if (IS_ERR(port->membase))
4575     - return -PTR_ERR(port->membase);
4576     + return PTR_ERR(port->membase);
4577    
4578     mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
4579     GFP_KERNEL);
4580     diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
4581     index 44d974d4159f..d7d2e4b844bc 100644
4582     --- a/drivers/tty/vt/selection.c
4583     +++ b/drivers/tty/vt/selection.c
4584     @@ -16,6 +16,7 @@
4585     #include <linux/tty.h>
4586     #include <linux/sched.h>
4587     #include <linux/mm.h>
4588     +#include <linux/mutex.h>
4589     #include <linux/slab.h>
4590     #include <linux/types.h>
4591    
4592     @@ -45,6 +46,7 @@ static volatile int sel_start = -1; /* cleared by clear_selection */
4593     static int sel_end;
4594     static int sel_buffer_lth;
4595     static char *sel_buffer;
4596     +static DEFINE_MUTEX(sel_lock);
4597    
4598     /* clear_selection, highlight and highlight_pointer can be called
4599     from interrupt (via scrollback/front) */
4600     @@ -179,14 +181,14 @@ int set_selection_user(const struct tiocl_selection __user *sel,
4601     return set_selection_kernel(&v, tty);
4602     }
4603    
4604     -int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
4605     +static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
4606     {
4607     struct vc_data *vc = vc_cons[fg_console].d;
4608     int new_sel_start, new_sel_end, spc;
4609     char *bp, *obp;
4610     int i, ps, pe, multiplier;
4611     u32 c;
4612     - int mode;
4613     + int mode, ret = 0;
4614    
4615     poke_blanked_console();
4616    
4617     @@ -334,7 +336,21 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
4618     }
4619     }
4620     sel_buffer_lth = bp - sel_buffer;
4621     - return 0;
4622     +
4623     + return ret;
4624     +}
4625     +
4626     +int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
4627     +{
4628     + int ret;
4629     +
4630     + mutex_lock(&sel_lock);
4631     + console_lock();
4632     + ret = __set_selection_kernel(v, tty);
4633     + console_unlock();
4634     + mutex_unlock(&sel_lock);
4635     +
4636     + return ret;
4637     }
4638     EXPORT_SYMBOL_GPL(set_selection_kernel);
4639    
4640     @@ -364,6 +380,7 @@ int paste_selection(struct tty_struct *tty)
4641     tty_buffer_lock_exclusive(&vc->port);
4642    
4643     add_wait_queue(&vc->paste_wait, &wait);
4644     + mutex_lock(&sel_lock);
4645     while (sel_buffer && sel_buffer_lth > pasted) {
4646     set_current_state(TASK_INTERRUPTIBLE);
4647     if (signal_pending(current)) {
4648     @@ -371,7 +388,9 @@ int paste_selection(struct tty_struct *tty)
4649     break;
4650     }
4651     if (tty_throttled(tty)) {
4652     + mutex_unlock(&sel_lock);
4653     schedule();
4654     + mutex_lock(&sel_lock);
4655     continue;
4656     }
4657     __set_current_state(TASK_RUNNING);
4658     @@ -380,6 +399,7 @@ int paste_selection(struct tty_struct *tty)
4659     count);
4660     pasted += count;
4661     }
4662     + mutex_unlock(&sel_lock);
4663     remove_wait_queue(&vc->paste_wait, &wait);
4664     __set_current_state(TASK_RUNNING);
4665    
4666     diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
4667     index 3b4ccc2a30c1..e9e27ba69d5d 100644
4668     --- a/drivers/tty/vt/vt.c
4669     +++ b/drivers/tty/vt/vt.c
4670     @@ -3046,10 +3046,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
4671     switch (type)
4672     {
4673     case TIOCL_SETSEL:
4674     - console_lock();
4675     ret = set_selection_user((struct tiocl_selection
4676     __user *)(p+1), tty);
4677     - console_unlock();
4678     break;
4679     case TIOCL_PASTESEL:
4680     ret = paste_selection(tty);
4681     diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
4682     index 02f6ca2cb1ba..f624cc87cbab 100644
4683     --- a/drivers/usb/cdns3/gadget.c
4684     +++ b/drivers/usb/cdns3/gadget.c
4685     @@ -2107,7 +2107,7 @@ found:
4686     /* Update ring only if removed request is on pending_req_list list */
4687     if (req_on_hw_ring) {
4688     link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
4689     - (priv_req->start_trb * TRB_SIZE));
4690     + ((priv_req->end_trb + 1) * TRB_SIZE));
4691     link_trb->control = (link_trb->control & TRB_CYCLE) |
4692     TRB_TYPE(TRB_LINK) | TRB_CHAIN;
4693    
4694     @@ -2152,11 +2152,21 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
4695     {
4696     struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
4697     struct usb_request *request;
4698     + struct cdns3_request *priv_req;
4699     + struct cdns3_trb *trb = NULL;
4700     int ret;
4701     int val;
4702    
4703     trace_cdns3_halt(priv_ep, 0, 0);
4704    
4705     + request = cdns3_next_request(&priv_ep->pending_req_list);
4706     + if (request) {
4707     + priv_req = to_cdns3_request(request);
4708     + trb = priv_req->trb;
4709     + if (trb)
4710     + trb->control = trb->control ^ TRB_CYCLE;
4711     + }
4712     +
4713     writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
4714    
4715     /* wait for EPRST cleared */
4716     @@ -2167,10 +2177,11 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
4717    
4718     priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);
4719    
4720     - request = cdns3_next_request(&priv_ep->pending_req_list);
4721     -
4722     - if (request)
4723     + if (request) {
4724     + if (trb)
4725     + trb->control = trb->control ^ TRB_CYCLE;
4726     cdns3_rearm_transfer(priv_ep, 1);
4727     + }
4728    
4729     cdns3_start_all_request(priv_dev, priv_ep);
4730     return ret;
4731     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
4732     index f381faa10f15..243577656177 100644
4733     --- a/drivers/usb/core/hub.c
4734     +++ b/drivers/usb/core/hub.c
4735     @@ -987,13 +987,17 @@ int usb_remove_device(struct usb_device *udev)
4736     {
4737     struct usb_hub *hub;
4738     struct usb_interface *intf;
4739     + int ret;
4740    
4741     if (!udev->parent) /* Can't remove a root hub */
4742     return -EINVAL;
4743     hub = usb_hub_to_struct_hub(udev->parent);
4744     intf = to_usb_interface(hub->intfdev);
4745    
4746     - usb_autopm_get_interface(intf);
4747     + ret = usb_autopm_get_interface(intf);
4748     + if (ret < 0)
4749     + return ret;
4750     +
4751     set_bit(udev->portnum, hub->removed_bits);
4752     hub_port_logical_disconnect(hub, udev->portnum);
4753     usb_autopm_put_interface(intf);
4754     @@ -1865,7 +1869,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
4755    
4756     if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
4757     hub->quirk_disable_autosuspend = 1;
4758     - usb_autopm_get_interface(intf);
4759     + usb_autopm_get_interface_no_resume(intf);
4760     }
4761    
4762     if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
4763     diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
4764     index bbbb35fa639f..235a7c645503 100644
4765     --- a/drivers/usb/core/port.c
4766     +++ b/drivers/usb/core/port.c
4767     @@ -213,7 +213,10 @@ static int usb_port_runtime_resume(struct device *dev)
4768     if (!port_dev->is_superspeed && peer)
4769     pm_runtime_get_sync(&peer->dev);
4770    
4771     - usb_autopm_get_interface(intf);
4772     + retval = usb_autopm_get_interface(intf);
4773     + if (retval < 0)
4774     + return retval;
4775     +
4776     retval = usb_hub_set_port_power(hdev, hub, port1, true);
4777     msleep(hub_power_on_good_delay(hub));
4778     if (udev && !retval) {
4779     @@ -266,7 +269,10 @@ static int usb_port_runtime_suspend(struct device *dev)
4780     if (usb_port_block_power_off)
4781     return -EBUSY;
4782    
4783     - usb_autopm_get_interface(intf);
4784     + retval = usb_autopm_get_interface(intf);
4785     + if (retval < 0)
4786     + return retval;
4787     +
4788     retval = usb_hub_set_port_power(hdev, hub, port1, false);
4789     usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
4790     if (!port_dev->is_superspeed)
4791     diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
4792     index 2b24336a72e5..2dac3e7cdd97 100644
4793     --- a/drivers/usb/core/quirks.c
4794     +++ b/drivers/usb/core/quirks.c
4795     @@ -231,6 +231,9 @@ static const struct usb_device_id usb_quirk_list[] = {
4796     /* Logitech PTZ Pro Camera */
4797     { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
4798    
4799     + /* Logitech Screen Share */
4800     + { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM },
4801     +
4802     /* Logitech Quickcam Fusion */
4803     { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
4804    
4805     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
4806     index e0cb1c2d5675..6ac02ba5e4a1 100644
4807     --- a/drivers/usb/dwc3/gadget.c
4808     +++ b/drivers/usb/dwc3/gadget.c
4809     @@ -1068,7 +1068,14 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
4810     unsigned int rem = length % maxp;
4811     unsigned chain = true;
4812    
4813     - if (sg_is_last(s))
4814     + /*
4815     + * IOMMU driver is coalescing the list of sgs which shares a
4816     + * page boundary into one and giving it to USB driver. With
4817     + * this the number of sgs mapped is not equal to the number of
4818     + * sgs passed. So mark the chain bit to false if it isthe last
4819     + * mapped sg.
4820     + */
4821     + if (i == remaining - 1)
4822     chain = false;
4823    
4824     if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
4825     diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
4826     index 0d45d7a4f949..d7871636fced 100644
4827     --- a/drivers/usb/gadget/composite.c
4828     +++ b/drivers/usb/gadget/composite.c
4829     @@ -438,9 +438,13 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
4830     if (!val)
4831     return 0;
4832     if (speed < USB_SPEED_SUPER)
4833     - return DIV_ROUND_UP(val, 2);
4834     + return min(val, 500U) / 2;
4835     else
4836     - return DIV_ROUND_UP(val, 8);
4837     + /*
4838     + * USB 3.x supports up to 900mA, but since 900 isn't divisible
4839     + * by 8 the integral division will effectively cap to 896mA.
4840     + */
4841     + return min(val, 900U) / 8;
4842     }
4843    
4844     static int config_buf(struct usb_configuration *config,
4845     @@ -852,6 +856,10 @@ static int set_config(struct usb_composite_dev *cdev,
4846    
4847     /* when we return, be sure our power usage is valid */
4848     power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
4849     + if (gadget->speed < USB_SPEED_SUPER)
4850     + power = min(power, 500U);
4851     + else
4852     + power = min(power, 900U);
4853     done:
4854     usb_gadget_vbus_draw(gadget, power);
4855     if (result >= 0 && cdev->delayed_status)
4856     @@ -2278,7 +2286,7 @@ void composite_resume(struct usb_gadget *gadget)
4857     {
4858     struct usb_composite_dev *cdev = get_gadget_data(gadget);
4859     struct usb_function *f;
4860     - u16 maxpower;
4861     + unsigned maxpower;
4862    
4863     /* REVISIT: should we have config level
4864     * suspend/resume callbacks?
4865     @@ -2292,10 +2300,14 @@ void composite_resume(struct usb_gadget *gadget)
4866     f->resume(f);
4867     }
4868    
4869     - maxpower = cdev->config->MaxPower;
4870     + maxpower = cdev->config->MaxPower ?
4871     + cdev->config->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
4872     + if (gadget->speed < USB_SPEED_SUPER)
4873     + maxpower = min(maxpower, 500U);
4874     + else
4875     + maxpower = min(maxpower, 900U);
4876    
4877     - usb_gadget_vbus_draw(gadget, maxpower ?
4878     - maxpower : CONFIG_USB_GADGET_VBUS_DRAW);
4879     + usb_gadget_vbus_draw(gadget, maxpower);
4880     }
4881    
4882     cdev->suspended = 0;
4883     diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
4884     index ced2581cf99f..a9a711e04614 100644
4885     --- a/drivers/usb/gadget/function/f_fs.c
4886     +++ b/drivers/usb/gadget/function/f_fs.c
4887     @@ -1162,18 +1162,19 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
4888     {
4889     struct ffs_io_data *io_data = kiocb->private;
4890     struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
4891     + unsigned long flags;
4892     int value;
4893    
4894     ENTER();
4895    
4896     - spin_lock_irq(&epfile->ffs->eps_lock);
4897     + spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
4898    
4899     if (likely(io_data && io_data->ep && io_data->req))
4900     value = usb_ep_dequeue(io_data->ep, io_data->req);
4901     else
4902     value = -EINVAL;
4903    
4904     - spin_unlock_irq(&epfile->ffs->eps_lock);
4905     + spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
4906    
4907     return value;
4908     }
4909     diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
4910     index bb1e2e1d0076..038c445a4e9b 100644
4911     --- a/drivers/usb/gadget/function/u_serial.c
4912     +++ b/drivers/usb/gadget/function/u_serial.c
4913     @@ -560,8 +560,10 @@ static int gs_start_io(struct gs_port *port)
4914     port->n_read = 0;
4915     started = gs_start_rx(port);
4916    
4917     - /* unblock any pending writes into our circular buffer */
4918     if (started) {
4919     + gs_start_tx(port);
4920     + /* Unblock any pending writes into our circular buffer, in case
4921     + * we didn't in gs_start_tx() */
4922     tty_wakeup(port->port.tty);
4923     } else {
4924     gs_free_requests(ep, head, &port->read_allocated);
4925     diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
4926     index 1cd9b6305b06..1880f3e13f57 100644
4927     --- a/drivers/usb/storage/unusual_devs.h
4928     +++ b/drivers/usb/storage/unusual_devs.h
4929     @@ -1258,6 +1258,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
4930     USB_SC_RBC, USB_PR_BULK, NULL,
4931     0 ),
4932    
4933     +UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
4934     + "Samsung",
4935     + "Flash Drive FIT",
4936     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4937     + US_FL_MAX_SECTORS_64),
4938     +
4939     /* aeb */
4940     UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
4941     "Feiya",
4942     diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
4943     index c6b3bdbbdbc9..bfaa9ec4bc1f 100644
4944     --- a/drivers/video/console/vgacon.c
4945     +++ b/drivers/video/console/vgacon.c
4946     @@ -1316,6 +1316,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
4947     static int vgacon_resize(struct vc_data *c, unsigned int width,
4948     unsigned int height, unsigned int user)
4949     {
4950     + if ((width << 1) * height > vga_vram_size)
4951     + return -EINVAL;
4952     +
4953     if (width % 2 || width > screen_info.orig_video_cols ||
4954     height > (screen_info.orig_video_lines * vga_default_font_height)/
4955     c->vc_font.height)
4956     diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
4957     index e149e66a6ea9..e92f38fcb7a4 100644
4958     --- a/drivers/watchdog/da9062_wdt.c
4959     +++ b/drivers/watchdog/da9062_wdt.c
4960     @@ -94,13 +94,6 @@ static int da9062_wdt_stop(struct watchdog_device *wdd)
4961     struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
4962     int ret;
4963    
4964     - ret = da9062_reset_watchdog_timer(wdt);
4965     - if (ret) {
4966     - dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n",
4967     - ret);
4968     - return ret;
4969     - }
4970     -
4971     ret = regmap_update_bits(wdt->hw->regmap,
4972     DA9062AA_CONTROL_D,
4973     DA9062AA_TWDSCALE_MASK,
4974     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4975     index 50feb01f27f3..c056d12cbb3c 100644
4976     --- a/fs/btrfs/inode.c
4977     +++ b/fs/btrfs/inode.c
4978     @@ -8426,6 +8426,7 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
4979     {
4980     struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
4981     struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
4982     + u16 csum_size;
4983     blk_status_t ret;
4984    
4985     /*
4986     @@ -8445,7 +8446,8 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
4987    
4988     file_offset -= dip->logical_offset;
4989     file_offset >>= inode->i_sb->s_blocksize_bits;
4990     - io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
4991     + csum_size = btrfs_super_csum_size(btrfs_sb(inode->i_sb)->super_copy);
4992     + io_bio->csum = orig_io_bio->csum + csum_size * file_offset;
4993    
4994     return 0;
4995     }
4996     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
4997     index 53611d7e9d28..f9cbdfc1591b 100644
4998     --- a/fs/cifs/cifsglob.h
4999     +++ b/fs/cifs/cifsglob.h
5000     @@ -1229,6 +1229,7 @@ struct cifs_fid {
5001     __u64 volatile_fid; /* volatile file id for smb2 */
5002     __u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for smb2 */
5003     __u8 create_guid[16];
5004     + __u32 access;
5005     struct cifs_pending_open *pending_open;
5006     unsigned int epoch;
5007     #ifdef CONFIG_CIFS_DEBUG2
5008     @@ -1700,6 +1701,12 @@ static inline bool is_retryable_error(int error)
5009     return false;
5010     }
5011    
5012     +
5013     +/* cifs_get_writable_file() flags */
5014     +#define FIND_WR_ANY 0
5015     +#define FIND_WR_FSUID_ONLY 1
5016     +#define FIND_WR_WITH_DELETE 2
5017     +
5018     #define MID_FREE 0
5019     #define MID_REQUEST_ALLOCATED 1
5020     #define MID_REQUEST_SUBMITTED 2
5021     diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
5022     index fe597d3d5208..f18da99a6b55 100644
5023     --- a/fs/cifs/cifsproto.h
5024     +++ b/fs/cifs/cifsproto.h
5025     @@ -133,11 +133,12 @@ extern bool backup_cred(struct cifs_sb_info *);
5026     extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
5027     extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
5028     unsigned int bytes_written);
5029     -extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
5030     +extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
5031     extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
5032     - bool fsuid_only,
5033     + int flags,
5034     struct cifsFileInfo **ret_file);
5035     extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
5036     + int flags,
5037     struct cifsFileInfo **ret_file);
5038     extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
5039     extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
5040     diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
5041     index 4f554f019a98..c8494fa5e19d 100644
5042     --- a/fs/cifs/cifssmb.c
5043     +++ b/fs/cifs/cifssmb.c
5044     @@ -1489,6 +1489,7 @@ openRetry:
5045     *oplock = rsp->OplockLevel;
5046     /* cifs fid stays in le */
5047     oparms->fid->netfid = rsp->Fid;
5048     + oparms->fid->access = desired_access;
5049    
5050     /* Let caller know file was created so we can set the mode. */
5051     /* Do we care about the CreateAction in any other cases? */
5052     @@ -2112,7 +2113,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
5053     wdata2->tailsz = tailsz;
5054     wdata2->bytes = cur_len;
5055    
5056     - rc = cifs_get_writable_file(CIFS_I(inode), false,
5057     + rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
5058     &wdata2->cfile);
5059     if (!wdata2->cfile) {
5060     cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
5061     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
5062     index 969543034b4d..0dbe47e89720 100644
5063     --- a/fs/cifs/file.c
5064     +++ b/fs/cifs/file.c
5065     @@ -1916,7 +1916,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
5066    
5067     /* Return -EBADF if no handle is found and general rc otherwise */
5068     int
5069     -cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
5070     +cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
5071     struct cifsFileInfo **ret_file)
5072     {
5073     struct cifsFileInfo *open_file, *inv_file = NULL;
5074     @@ -1924,7 +1924,8 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
5075     bool any_available = false;
5076     int rc = -EBADF;
5077     unsigned int refind = 0;
5078     -
5079     + bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
5080     + bool with_delete = flags & FIND_WR_WITH_DELETE;
5081     *ret_file = NULL;
5082    
5083     /*
5084     @@ -1956,6 +1957,8 @@ refind_writable:
5085     continue;
5086     if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
5087     continue;
5088     + if (with_delete && !(open_file->fid.access & DELETE))
5089     + continue;
5090     if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
5091     if (!open_file->invalidHandle) {
5092     /* found a good writable file */
5093     @@ -2003,12 +2006,12 @@ refind_writable:
5094     }
5095    
5096     struct cifsFileInfo *
5097     -find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
5098     +find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
5099     {
5100     struct cifsFileInfo *cfile;
5101     int rc;
5102    
5103     - rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
5104     + rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
5105     if (rc)
5106     cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
5107    
5108     @@ -2017,6 +2020,7 @@ find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
5109    
5110     int
5111     cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
5112     + int flags,
5113     struct cifsFileInfo **ret_file)
5114     {
5115     struct list_head *tmp;
5116     @@ -2043,7 +2047,7 @@ cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
5117     kfree(full_path);
5118     cinode = CIFS_I(d_inode(cfile->dentry));
5119     spin_unlock(&tcon->open_file_lock);
5120     - return cifs_get_writable_file(cinode, 0, ret_file);
5121     + return cifs_get_writable_file(cinode, flags, ret_file);
5122     }
5123    
5124     spin_unlock(&tcon->open_file_lock);
5125     @@ -2120,7 +2124,8 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
5126     if (mapping->host->i_size - offset < (loff_t)to)
5127     to = (unsigned)(mapping->host->i_size - offset);
5128    
5129     - rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
5130     + rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
5131     + &open_file);
5132     if (!rc) {
5133     bytes_written = cifs_write(open_file, open_file->pid,
5134     write_data, to - from, &offset);
5135     @@ -2313,7 +2318,7 @@ retry:
5136     if (cfile)
5137     cifsFileInfo_put(cfile);
5138    
5139     - rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);
5140     + rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
5141    
5142     /* in case of an error store it to return later */
5143     if (rc)
5144     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
5145     index aafcd79c4772..7c5e983fe385 100644
5146     --- a/fs/cifs/inode.c
5147     +++ b/fs/cifs/inode.c
5148     @@ -2011,6 +2011,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
5149     struct inode *inode = d_inode(dentry);
5150     struct super_block *sb = dentry->d_sb;
5151     char *full_path = NULL;
5152     + int count = 0;
5153    
5154     if (inode == NULL)
5155     return -ENOENT;
5156     @@ -2032,15 +2033,18 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
5157     full_path, inode, inode->i_count.counter,
5158     dentry, cifs_get_time(dentry), jiffies);
5159    
5160     +again:
5161     if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
5162     rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
5163     else
5164     rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
5165     xid, NULL);
5166     -
5167     + if (rc == -EAGAIN && count++ < 10)
5168     + goto again;
5169     out:
5170     kfree(full_path);
5171     free_xid(xid);
5172     +
5173     return rc;
5174     }
5175    
5176     @@ -2216,7 +2220,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
5177     * writebehind data than the SMB timeout for the SetPathInfo
5178     * request would allow
5179     */
5180     - open_file = find_writable_file(cifsInode, true);
5181     + open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
5182     if (open_file) {
5183     tcon = tlink_tcon(open_file->tlink);
5184     server = tcon->ses->server;
5185     @@ -2366,7 +2370,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
5186     args->ctime = NO_CHANGE_64;
5187    
5188     args->device = 0;
5189     - open_file = find_writable_file(cifsInode, true);
5190     + open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
5191     if (open_file) {
5192     u16 nfid = open_file->fid.netfid;
5193     u32 npid = open_file->pid;
5194     @@ -2469,7 +2473,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
5195     rc = 0;
5196    
5197     if (attrs->ia_valid & ATTR_MTIME) {
5198     - rc = cifs_get_writable_file(cifsInode, false, &wfile);
5199     + rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile);
5200     if (!rc) {
5201     tcon = tlink_tcon(wfile->tlink);
5202     rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
5203     diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
5204     index 514810694c0f..195766221a7a 100644
5205     --- a/fs/cifs/smb1ops.c
5206     +++ b/fs/cifs/smb1ops.c
5207     @@ -767,7 +767,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
5208     struct cifs_tcon *tcon;
5209    
5210     /* if the file is already open for write, just use that fileid */
5211     - open_file = find_writable_file(cinode, true);
5212     + open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
5213     if (open_file) {
5214     fid.netfid = open_file->fid.netfid;
5215     netpid = open_file->pid;
5216     diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
5217     index 4121ac1163ca..f2a6f7f28340 100644
5218     --- a/fs/cifs/smb2inode.c
5219     +++ b/fs/cifs/smb2inode.c
5220     @@ -525,7 +525,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
5221     cifs_i = CIFS_I(inode);
5222     dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
5223     data.Attributes = cpu_to_le32(dosattrs);
5224     - cifs_get_writable_path(tcon, name, &cfile);
5225     + cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile);
5226     tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
5227     FILE_WRITE_ATTRIBUTES, FILE_CREATE,
5228     CREATE_NOT_FILE, ACL_NO_MODE,
5229     @@ -581,7 +581,7 @@ smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
5230     {
5231     struct cifsFileInfo *cfile;
5232    
5233     - cifs_get_writable_path(tcon, from_name, &cfile);
5234     + cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
5235    
5236     return smb2_set_path_attr(xid, tcon, from_name, to_name,
5237     cifs_sb, DELETE, SMB2_OP_RENAME, cfile);
5238     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
5239     index b75d208d4b2b..99420608d2ec 100644
5240     --- a/fs/cifs/smb2ops.c
5241     +++ b/fs/cifs/smb2ops.c
5242     @@ -1338,6 +1338,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
5243    
5244     cfile->fid.persistent_fid = fid->persistent_fid;
5245     cfile->fid.volatile_fid = fid->volatile_fid;
5246     + cfile->fid.access = fid->access;
5247     #ifdef CONFIG_CIFS_DEBUG2
5248     cfile->fid.mid = fid->mid;
5249     #endif /* CIFS_DEBUG2 */
5250     @@ -3162,7 +3163,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs
5251     * some servers (Windows2016) will not reflect recent writes in
5252     * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
5253     */
5254     - wrcfile = find_writable_file(cifsi, false);
5255     + wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
5256     if (wrcfile) {
5257     filemap_write_and_wait(inode->i_mapping);
5258     smb2_flush_file(xid, tcon, &wrcfile->fid);
5259     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
5260     index c6fc6582ee7b..c8f304cae3f3 100644
5261     --- a/fs/cifs/smb2pdu.c
5262     +++ b/fs/cifs/smb2pdu.c
5263     @@ -2650,6 +2650,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
5264     atomic_inc(&tcon->num_remote_opens);
5265     oparms->fid->persistent_fid = rsp->PersistentFileId;
5266     oparms->fid->volatile_fid = rsp->VolatileFileId;
5267     + oparms->fid->access = oparms->desired_access;
5268     #ifdef CONFIG_CIFS_DEBUG2
5269     oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId);
5270     #endif /* CIFS_DEBUG2 */
5271     diff --git a/fs/fat/inode.c b/fs/fat/inode.c
5272     index 5f04c5c810fb..d40cbad16659 100644
5273     --- a/fs/fat/inode.c
5274     +++ b/fs/fat/inode.c
5275     @@ -749,6 +749,13 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
5276     return NULL;
5277    
5278     init_rwsem(&ei->truncate_lock);
5279     + /* Zeroing to allow iput() even if partial initialized inode. */
5280     + ei->mmu_private = 0;
5281     + ei->i_start = 0;
5282     + ei->i_logstart = 0;
5283     + ei->i_attrs = 0;
5284     + ei->i_pos = 0;
5285     +
5286     return &ei->vfs_inode;
5287     }
5288    
5289     @@ -1373,16 +1380,6 @@ out:
5290     return 0;
5291     }
5292    
5293     -static void fat_dummy_inode_init(struct inode *inode)
5294     -{
5295     - /* Initialize this dummy inode to work as no-op. */
5296     - MSDOS_I(inode)->mmu_private = 0;
5297     - MSDOS_I(inode)->i_start = 0;
5298     - MSDOS_I(inode)->i_logstart = 0;
5299     - MSDOS_I(inode)->i_attrs = 0;
5300     - MSDOS_I(inode)->i_pos = 0;
5301     -}
5302     -
5303     static int fat_read_root(struct inode *inode)
5304     {
5305     struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
5306     @@ -1843,13 +1840,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
5307     fat_inode = new_inode(sb);
5308     if (!fat_inode)
5309     goto out_fail;
5310     - fat_dummy_inode_init(fat_inode);
5311     sbi->fat_inode = fat_inode;
5312    
5313     fsinfo_inode = new_inode(sb);
5314     if (!fsinfo_inode)
5315     goto out_fail;
5316     - fat_dummy_inode_init(fsinfo_inode);
5317     fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
5318     sbi->fsinfo_inode = fsinfo_inode;
5319     insert_inode_hash(fsinfo_inode);
5320     diff --git a/include/linux/mm.h b/include/linux/mm.h
5321     index b249d2e033aa..afa77b683a04 100644
5322     --- a/include/linux/mm.h
5323     +++ b/include/linux/mm.h
5324     @@ -2695,6 +2695,10 @@ static inline bool debug_pagealloc_enabled_static(void)
5325     #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
5326     extern void __kernel_map_pages(struct page *page, int numpages, int enable);
5327    
5328     +/*
5329     + * When called in DEBUG_PAGEALLOC context, the call should most likely be
5330     + * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
5331     + */
5332     static inline void
5333     kernel_map_pages(struct page *page, int numpages, int enable)
5334     {
5335     diff --git a/kernel/kprobes.c b/kernel/kprobes.c
5336     index 34e28b236d68..2625c241ac00 100644
5337     --- a/kernel/kprobes.c
5338     +++ b/kernel/kprobes.c
5339     @@ -612,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
5340     mutex_unlock(&kprobe_mutex);
5341     }
5342    
5343     +static bool optprobe_queued_unopt(struct optimized_kprobe *op)
5344     +{
5345     + struct optimized_kprobe *_op;
5346     +
5347     + list_for_each_entry(_op, &unoptimizing_list, list) {
5348     + if (op == _op)
5349     + return true;
5350     + }
5351     +
5352     + return false;
5353     +}
5354     +
5355     /* Optimize kprobe if p is ready to be optimized */
5356     static void optimize_kprobe(struct kprobe *p)
5357     {
5358     @@ -633,17 +645,21 @@ static void optimize_kprobe(struct kprobe *p)
5359     return;
5360    
5361     /* Check if it is already optimized. */
5362     - if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
5363     + if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
5364     + if (optprobe_queued_unopt(op)) {
5365     + /* This is under unoptimizing. Just dequeue the probe */
5366     + list_del_init(&op->list);
5367     + }
5368     return;
5369     + }
5370     op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
5371    
5372     - if (!list_empty(&op->list))
5373     - /* This is under unoptimizing. Just dequeue the probe */
5374     - list_del_init(&op->list);
5375     - else {
5376     - list_add(&op->list, &optimizing_list);
5377     - kick_kprobe_optimizer();
5378     - }
5379     + /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
5380     + if (WARN_ON_ONCE(!list_empty(&op->list)))
5381     + return;
5382     +
5383     + list_add(&op->list, &optimizing_list);
5384     + kick_kprobe_optimizer();
5385     }
5386    
5387     /* Short cut to direct unoptimizing */
5388     @@ -665,30 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
5389     return; /* This is not an optprobe nor optimized */
5390    
5391     op = container_of(p, struct optimized_kprobe, kp);
5392     - if (!kprobe_optimized(p)) {
5393     - /* Unoptimized or unoptimizing case */
5394     - if (force && !list_empty(&op->list)) {
5395     - /*
5396     - * Only if this is unoptimizing kprobe and forced,
5397     - * forcibly unoptimize it. (No need to unoptimize
5398     - * unoptimized kprobe again :)
5399     - */
5400     - list_del_init(&op->list);
5401     - force_unoptimize_kprobe(op);
5402     - }
5403     + if (!kprobe_optimized(p))
5404     return;
5405     - }
5406    
5407     if (!list_empty(&op->list)) {
5408     - /* Dequeue from the optimization queue */
5409     - list_del_init(&op->list);
5410     + if (optprobe_queued_unopt(op)) {
5411     + /* Queued in unoptimizing queue */
5412     + if (force) {
5413     + /*
5414     + * Forcibly unoptimize the kprobe here, and queue it
5415     + * in the freeing list for release afterwards.
5416     + */
5417     + force_unoptimize_kprobe(op);
5418     + list_move(&op->list, &freeing_list);
5419     + }
5420     + } else {
5421     + /* Dequeue from the optimizing queue */
5422     + list_del_init(&op->list);
5423     + op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
5424     + }
5425     return;
5426     }
5427     +
5428     /* Optimized kprobe case */
5429     - if (force)
5430     + if (force) {
5431     /* Forcibly update the code: this is a special case */
5432     force_unoptimize_kprobe(op);
5433     - else {
5434     + } else {
5435     list_add(&op->list, &unoptimizing_list);
5436     kick_kprobe_optimizer();
5437     }
5438     diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
5439     index 4b2ad374167b..e7e483cdbea6 100644
5440     --- a/kernel/trace/blktrace.c
5441     +++ b/kernel/trace/blktrace.c
5442     @@ -1888,8 +1888,11 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
5443     }
5444    
5445     ret = 0;
5446     - if (bt == NULL)
5447     + if (bt == NULL) {
5448     ret = blk_trace_setup_queue(q, bdev);
5449     + bt = rcu_dereference_protected(q->blk_trace,
5450     + lockdep_is_held(&q->blk_trace_mutex));
5451     + }
5452    
5453     if (ret == 0) {
5454     if (attr == &dev_attr_act_mask)
5455     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
5456     index 6c9689281c07..0d96831b6ded 100644
5457     --- a/mm/huge_memory.c
5458     +++ b/mm/huge_memory.c
5459     @@ -3032,8 +3032,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
5460     return;
5461    
5462     flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
5463     - pmdval = *pvmw->pmd;
5464     - pmdp_invalidate(vma, address, pvmw->pmd);
5465     + pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
5466     if (pmd_dirty(pmdval))
5467     set_page_dirty(page);
5468     entry = make_migration_entry(page, pmd_write(pmdval));
5469     diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
5470     index 0aa154be3a52..c054945a9a74 100644
5471     --- a/mm/memory_hotplug.c
5472     +++ b/mm/memory_hotplug.c
5473     @@ -598,7 +598,13 @@ EXPORT_SYMBOL_GPL(__online_page_free);
5474    
5475     static void generic_online_page(struct page *page, unsigned int order)
5476     {
5477     - kernel_map_pages(page, 1 << order, 1);
5478     + /*
5479     + * Freeing the page with debug_pagealloc enabled will try to unmap it,
5480     + * so we should map it first. This is better than introducing a special
5481     + * case in page freeing fast path.
5482     + */
5483     + if (debug_pagealloc_enabled_static())
5484     + kernel_map_pages(page, 1 << order, 1);
5485     __free_pages_core(page, order);
5486     totalram_pages_add(1UL << order);
5487     #ifdef CONFIG_HIGHMEM
5488     diff --git a/mm/mprotect.c b/mm/mprotect.c
5489     index 7967825f6d33..95dee88f782b 100644
5490     --- a/mm/mprotect.c
5491     +++ b/mm/mprotect.c
5492     @@ -161,6 +161,31 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
5493     return pages;
5494     }
5495    
5496     +/*
5497     + * Used when setting automatic NUMA hinting protection where it is
5498     + * critical that a numa hinting PMD is not confused with a bad PMD.
5499     + */
5500     +static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
5501     +{
5502     + pmd_t pmdval = pmd_read_atomic(pmd);
5503     +
5504     + /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
5505     +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5506     + barrier();
5507     +#endif
5508     +
5509     + if (pmd_none(pmdval))
5510     + return 1;
5511     + if (pmd_trans_huge(pmdval))
5512     + return 0;
5513     + if (unlikely(pmd_bad(pmdval))) {
5514     + pmd_clear_bad(pmd);
5515     + return 1;
5516     + }
5517     +
5518     + return 0;
5519     +}
5520     +
5521     static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
5522     pud_t *pud, unsigned long addr, unsigned long end,
5523     pgprot_t newprot, int dirty_accountable, int prot_numa)
5524     @@ -178,8 +203,17 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
5525     unsigned long this_pages;
5526    
5527     next = pmd_addr_end(addr, end);
5528     - if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
5529     - && pmd_none_or_clear_bad(pmd))
5530     +
5531     + /*
5532     + * Automatic NUMA balancing walks the tables with mmap_sem
5533     + * held for read. It's possible a parallel update to occur
5534     + * between pmd_trans_huge() and a pmd_none_or_clear_bad()
5535     + * check leading to a false positive and clearing.
5536     + * Hence, it's necessary to atomically read the PMD value
5537     + * for all the checks.
5538     + */
5539     + if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
5540     + pmd_none_or_clear_bad_unless_trans_huge(pmd))
5541     goto next;
5542    
5543     /* invoke the mmu notifier if the pmd is populated */
5544     diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
5545     index 81b19c52832b..020fc7a11ef0 100644
5546     --- a/security/integrity/platform_certs/load_uefi.c
5547     +++ b/security/integrity/platform_certs/load_uefi.c
5548     @@ -39,16 +39,18 @@ static __init bool uefi_check_ignore_db(void)
5549     * Get a certificate list blob from the named EFI variable.
5550     */
5551     static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
5552     - unsigned long *size)
5553     + unsigned long *size, efi_status_t *status)
5554     {
5555     - efi_status_t status;
5556     unsigned long lsize = 4;
5557     unsigned long tmpdb[4];
5558     void *db;
5559    
5560     - status = efi.get_variable(name, guid, NULL, &lsize, &tmpdb);
5561     - if (status != EFI_BUFFER_TOO_SMALL) {
5562     - pr_err("Couldn't get size: 0x%lx\n", status);
5563     + *status = efi.get_variable(name, guid, NULL, &lsize, &tmpdb);
5564     + if (*status == EFI_NOT_FOUND)
5565     + return NULL;
5566     +
5567     + if (*status != EFI_BUFFER_TOO_SMALL) {
5568     + pr_err("Couldn't get size: 0x%lx\n", *status);
5569     return NULL;
5570     }
5571    
5572     @@ -56,10 +58,10 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
5573     if (!db)
5574     return NULL;
5575    
5576     - status = efi.get_variable(name, guid, NULL, &lsize, db);
5577     - if (status != EFI_SUCCESS) {
5578     + *status = efi.get_variable(name, guid, NULL, &lsize, db);
5579     + if (*status != EFI_SUCCESS) {
5580     kfree(db);
5581     - pr_err("Error reading db var: 0x%lx\n", status);
5582     + pr_err("Error reading db var: 0x%lx\n", *status);
5583     return NULL;
5584     }
5585    
5586     @@ -144,6 +146,7 @@ static int __init load_uefi_certs(void)
5587     efi_guid_t mok_var = EFI_SHIM_LOCK_GUID;
5588     void *db = NULL, *dbx = NULL, *mok = NULL;
5589     unsigned long dbsize = 0, dbxsize = 0, moksize = 0;
5590     + efi_status_t status;
5591     int rc = 0;
5592    
5593     if (!efi.get_variable)
5594     @@ -153,9 +156,12 @@ static int __init load_uefi_certs(void)
5595     * an error if we can't get them.
5596     */
5597     if (!uefi_check_ignore_db()) {
5598     - db = get_cert_list(L"db", &secure_var, &dbsize);
5599     + db = get_cert_list(L"db", &secure_var, &dbsize, &status);
5600     if (!db) {
5601     - pr_err("MODSIGN: Couldn't get UEFI db list\n");
5602     + if (status == EFI_NOT_FOUND)
5603     + pr_debug("MODSIGN: db variable wasn't found\n");
5604     + else
5605     + pr_err("MODSIGN: Couldn't get UEFI db list\n");
5606     } else {
5607     rc = parse_efi_signature_list("UEFI:db",
5608     db, dbsize, get_handler_for_db);
5609     @@ -166,9 +172,12 @@ static int __init load_uefi_certs(void)
5610     }
5611     }
5612    
5613     - mok = get_cert_list(L"MokListRT", &mok_var, &moksize);
5614     + mok = get_cert_list(L"MokListRT", &mok_var, &moksize, &status);
5615     if (!mok) {
5616     - pr_info("Couldn't get UEFI MokListRT\n");
5617     + if (status == EFI_NOT_FOUND)
5618     + pr_debug("MokListRT variable wasn't found\n");
5619     + else
5620     + pr_info("Couldn't get UEFI MokListRT\n");
5621     } else {
5622     rc = parse_efi_signature_list("UEFI:MokListRT",
5623     mok, moksize, get_handler_for_db);
5624     @@ -177,9 +186,12 @@ static int __init load_uefi_certs(void)
5625     kfree(mok);
5626     }
5627    
5628     - dbx = get_cert_list(L"dbx", &secure_var, &dbxsize);
5629     + dbx = get_cert_list(L"dbx", &secure_var, &dbxsize, &status);
5630     if (!dbx) {
5631     - pr_info("Couldn't get UEFI dbx list\n");
5632     + if (status == EFI_NOT_FOUND)
5633     + pr_debug("dbx variable wasn't found\n");
5634     + else
5635     + pr_info("Couldn't get UEFI dbx list\n");
5636     } else {
5637     rc = parse_efi_signature_list("UEFI:dbx",
5638     dbx, dbxsize,
5639     diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
5640     index cfab60d88c92..09ff209df4a3 100644
5641     --- a/sound/hda/ext/hdac_ext_controller.c
5642     +++ b/sound/hda/ext/hdac_ext_controller.c
5643     @@ -254,6 +254,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all);
5644     int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
5645     struct hdac_ext_link *link)
5646     {
5647     + unsigned long codec_mask;
5648     int ret = 0;
5649    
5650     mutex_lock(&bus->lock);
5651     @@ -280,9 +281,11 @@ int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
5652     * HDA spec section 4.3 - Codec Discovery
5653     */
5654     udelay(521);
5655     - bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
5656     - dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
5657     - snd_hdac_chip_writew(bus, STATESTS, bus->codec_mask);
5658     + codec_mask = snd_hdac_chip_readw(bus, STATESTS);
5659     + dev_dbg(bus->dev, "codec_mask = 0x%lx\n", codec_mask);
5660     + snd_hdac_chip_writew(bus, STATESTS, codec_mask);
5661     + if (!bus->codec_mask)
5662     + bus->codec_mask = codec_mask;
5663     }
5664    
5665     mutex_unlock(&bus->lock);
5666     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5667     index 4f78b40831d8..b6b837a5bdaf 100644
5668     --- a/sound/pci/hda/patch_realtek.c
5669     +++ b/sound/pci/hda/patch_realtek.c
5670     @@ -2447,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5671     SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
5672     SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
5673     SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
5674     + SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
5675     SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
5676     SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
5677     SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
5678     @@ -5920,7 +5921,8 @@ enum {
5679     ALC289_FIXUP_DUAL_SPK,
5680     ALC294_FIXUP_SPK2_TO_DAC1,
5681     ALC294_FIXUP_ASUS_DUAL_SPK,
5682     -
5683     + ALC285_FIXUP_THINKPAD_HEADSET_JACK,
5684     + ALC294_FIXUP_ASUS_HPE,
5685     };
5686    
5687     static const struct hda_fixup alc269_fixups[] = {
5688     @@ -6684,6 +6686,8 @@ static const struct hda_fixup alc269_fixups[] = {
5689     [ALC285_FIXUP_SPEAKER2_TO_DAC1] = {
5690     .type = HDA_FIXUP_FUNC,
5691     .v.func = alc285_fixup_speaker2_to_dac1,
5692     + .chained = true,
5693     + .chain_id = ALC269_FIXUP_THINKPAD_ACPI
5694     },
5695     [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
5696     .type = HDA_FIXUP_PINS,
5697     @@ -7040,7 +7044,23 @@ static const struct hda_fixup alc269_fixups[] = {
5698     .chained = true,
5699     .chain_id = ALC294_FIXUP_SPK2_TO_DAC1
5700     },
5701     -
5702     + [ALC285_FIXUP_THINKPAD_HEADSET_JACK] = {
5703     + .type = HDA_FIXUP_FUNC,
5704     + .v.func = alc_fixup_headset_jack,
5705     + .chained = true,
5706     + .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1
5707     + },
5708     + [ALC294_FIXUP_ASUS_HPE] = {
5709     + .type = HDA_FIXUP_VERBS,
5710     + .v.verbs = (const struct hda_verb[]) {
5711     + /* Set EAPD high */
5712     + { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f },
5713     + { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 },
5714     + { }
5715     + },
5716     + .chained = true,
5717     + .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
5718     + },
5719     };
5720    
5721     static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5722     @@ -7115,6 +7135,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5723     SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
5724     SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
5725     SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
5726     + SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
5727     + SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
5728     SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5729     SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5730     SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
5731     @@ -7204,6 +7226,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5732     SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
5733     SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
5734     SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
5735     + SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
5736     SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
5737     SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
5738     SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
5739     @@ -7274,8 +7297,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5740     SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
5741     SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
5742     SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5743     - SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
5744     - SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
5745     + SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
5746     + SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
5747     SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
5748     SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
5749     SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
5750     diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
5751     index 861210f6bf4f..4cbef9affffd 100644
5752     --- a/sound/soc/codecs/pcm512x.c
5753     +++ b/sound/soc/codecs/pcm512x.c
5754     @@ -1564,13 +1564,15 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
5755     }
5756    
5757     pcm512x->sclk = devm_clk_get(dev, NULL);
5758     - if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
5759     - return -EPROBE_DEFER;
5760     + if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) {
5761     + ret = -EPROBE_DEFER;
5762     + goto err;
5763     + }
5764     if (!IS_ERR(pcm512x->sclk)) {
5765     ret = clk_prepare_enable(pcm512x->sclk);
5766     if (ret != 0) {
5767     dev_err(dev, "Failed to enable SCLK: %d\n", ret);
5768     - return ret;
5769     + goto err;
5770     }
5771     }
5772    
5773     diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
5774     index 3466675f2678..a15aa2ffa681 100644
5775     --- a/sound/soc/intel/skylake/skl-debug.c
5776     +++ b/sound/soc/intel/skylake/skl-debug.c
5777     @@ -34,8 +34,8 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
5778     int i;
5779     ssize_t ret = 0;
5780    
5781     - for (i = 0; i < max_pin; i++)
5782     - ret += snprintf(buf + size, MOD_BUF - size,
5783     + for (i = 0; i < max_pin; i++) {
5784     + ret += scnprintf(buf + size, MOD_BUF - size,
5785     "%s %d\n\tModule %d\n\tInstance %d\n\t"
5786     "In-used %s\n\tType %s\n"
5787     "\tState %d\n\tIndex %d\n",
5788     @@ -45,13 +45,15 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
5789     m_pin[i].in_use ? "Used" : "Unused",
5790     m_pin[i].is_dynamic ? "Dynamic" : "Static",
5791     m_pin[i].pin_state, i);
5792     + size += ret;
5793     + }
5794     return ret;
5795     }
5796    
5797     static ssize_t skl_print_fmt(struct skl_module_fmt *fmt, char *buf,
5798     ssize_t size, bool direction)
5799     {
5800     - return snprintf(buf + size, MOD_BUF - size,
5801     + return scnprintf(buf + size, MOD_BUF - size,
5802     "%s\n\tCh %d\n\tFreq %d\n\tBit depth %d\n\t"
5803     "Valid bit depth %d\n\tCh config %#x\n\tInterleaving %d\n\t"
5804     "Sample Type %d\n\tCh Map %#x\n",
5805     @@ -75,16 +77,16 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
5806     if (!buf)
5807     return -ENOMEM;
5808    
5809     - ret = snprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
5810     + ret = scnprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
5811     "\tInstance id %d\n\tPvt_id %d\n", mconfig->guid,
5812     mconfig->id.module_id, mconfig->id.instance_id,
5813     mconfig->id.pvt_id);
5814    
5815     - ret += snprintf(buf + ret, MOD_BUF - ret,
5816     + ret += scnprintf(buf + ret, MOD_BUF - ret,
5817     "Resources:\n\tCPC %#x\n\tIBS %#x\n\tOBS %#x\t\n",
5818     res->cpc, res->ibs, res->obs);
5819    
5820     - ret += snprintf(buf + ret, MOD_BUF - ret,
5821     + ret += scnprintf(buf + ret, MOD_BUF - ret,
5822     "Module data:\n\tCore %d\n\tIn queue %d\n\t"
5823     "Out queue %d\n\tType %s\n",
5824     mconfig->core_id, mconfig->max_in_queue,
5825     @@ -94,38 +96,38 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
5826     ret += skl_print_fmt(mconfig->in_fmt, buf, ret, true);
5827     ret += skl_print_fmt(mconfig->out_fmt, buf, ret, false);
5828    
5829     - ret += snprintf(buf + ret, MOD_BUF - ret,
5830     + ret += scnprintf(buf + ret, MOD_BUF - ret,
5831     "Fixup:\n\tParams %#x\n\tConverter %#x\n",
5832     mconfig->params_fixup, mconfig->converter);
5833    
5834     - ret += snprintf(buf + ret, MOD_BUF - ret,
5835     + ret += scnprintf(buf + ret, MOD_BUF - ret,
5836     "Module Gateway:\n\tType %#x\n\tVbus %#x\n\tHW conn %#x\n\tSlot %#x\n",
5837     mconfig->dev_type, mconfig->vbus_id,
5838     mconfig->hw_conn_type, mconfig->time_slot);
5839    
5840     - ret += snprintf(buf + ret, MOD_BUF - ret,
5841     + ret += scnprintf(buf + ret, MOD_BUF - ret,
5842     "Pipeline:\n\tID %d\n\tPriority %d\n\tConn Type %d\n\t"
5843     "Pages %#x\n", mconfig->pipe->ppl_id,
5844     mconfig->pipe->pipe_priority, mconfig->pipe->conn_type,
5845     mconfig->pipe->memory_pages);
5846    
5847     - ret += snprintf(buf + ret, MOD_BUF - ret,
5848     + ret += scnprintf(buf + ret, MOD_BUF - ret,
5849     "\tParams:\n\t\tHost DMA %d\n\t\tLink DMA %d\n",
5850     mconfig->pipe->p_params->host_dma_id,
5851     mconfig->pipe->p_params->link_dma_id);
5852    
5853     - ret += snprintf(buf + ret, MOD_BUF - ret,
5854     + ret += scnprintf(buf + ret, MOD_BUF - ret,
5855     "\tPCM params:\n\t\tCh %d\n\t\tFreq %d\n\t\tFormat %d\n",
5856     mconfig->pipe->p_params->ch,
5857     mconfig->pipe->p_params->s_freq,
5858     mconfig->pipe->p_params->s_fmt);
5859    
5860     - ret += snprintf(buf + ret, MOD_BUF - ret,
5861     + ret += scnprintf(buf + ret, MOD_BUF - ret,
5862     "\tLink %#x\n\tStream %#x\n",
5863     mconfig->pipe->p_params->linktype,
5864     mconfig->pipe->p_params->stream);
5865    
5866     - ret += snprintf(buf + ret, MOD_BUF - ret,
5867     + ret += scnprintf(buf + ret, MOD_BUF - ret,
5868     "\tState %d\n\tPassthru %s\n",
5869     mconfig->pipe->state,
5870     mconfig->pipe->passthru ? "true" : "false");
5871     @@ -135,7 +137,7 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
5872     ret += skl_print_pins(mconfig->m_out_pin, buf,
5873     mconfig->max_out_queue, ret, false);
5874    
5875     - ret += snprintf(buf + ret, MOD_BUF - ret,
5876     + ret += scnprintf(buf + ret, MOD_BUF - ret,
5877     "Other:\n\tDomain %d\n\tHomogeneous Input %s\n\t"
5878     "Homogeneous Output %s\n\tIn Queue Mask %d\n\t"
5879     "Out Queue Mask %d\n\tDMA ID %d\n\tMem Pages %d\n\t"
5880     @@ -191,7 +193,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
5881     __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
5882    
5883     for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
5884     - ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
5885     + ret += scnprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
5886     hex_dump_to_buffer(d->fw_read_buff + offset, 16, 16, 4,
5887     tmp + ret, FW_REG_BUF - ret, 0);
5888     ret += strlen(tmp + ret);
5889     diff --git a/sound/soc/intel/skylake/skl-ssp-clk.c b/sound/soc/intel/skylake/skl-ssp-clk.c
5890     index 1c0e5226cb5b..bd43885f3805 100644
5891     --- a/sound/soc/intel/skylake/skl-ssp-clk.c
5892     +++ b/sound/soc/intel/skylake/skl-ssp-clk.c
5893     @@ -384,9 +384,11 @@ static int skl_clk_dev_probe(struct platform_device *pdev)
5894     &clks[i], clk_pdata, i);
5895    
5896     if (IS_ERR(data->clk[data->avail_clk_cnt])) {
5897     - ret = PTR_ERR(data->clk[data->avail_clk_cnt++]);
5898     + ret = PTR_ERR(data->clk[data->avail_clk_cnt]);
5899     goto err_unreg_skl_clk;
5900     }
5901     +
5902     + data->avail_clk_cnt++;
5903     }
5904    
5905     platform_set_drvdata(pdev, data);
5906     diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
5907     index 935b5375ecc5..ebd785f9aa46 100644
5908     --- a/sound/soc/soc-dapm.c
5909     +++ b/sound/soc/soc-dapm.c
5910     @@ -4749,7 +4749,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm)
5911     continue;
5912     if (w->power) {
5913     dapm_seq_insert(w, &down_list, false);
5914     - w->power = 0;
5915     + w->new_power = 0;
5916     powerdown = 1;
5917     }
5918     }
5919     diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
5920     index 8712a91e0e3e..d978df95c5c6 100644
5921     --- a/sound/soc/soc-pcm.c
5922     +++ b/sound/soc/soc-pcm.c
5923     @@ -3169,16 +3169,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
5924     unsigned long flags;
5925    
5926     /* FE state */
5927     - offset += snprintf(buf + offset, size - offset,
5928     + offset += scnprintf(buf + offset, size - offset,
5929     "[%s - %s]\n", fe->dai_link->name,
5930     stream ? "Capture" : "Playback");
5931    
5932     - offset += snprintf(buf + offset, size - offset, "State: %s\n",
5933     + offset += scnprintf(buf + offset, size - offset, "State: %s\n",
5934     dpcm_state_string(fe->dpcm[stream].state));
5935    
5936     if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
5937     (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
5938     - offset += snprintf(buf + offset, size - offset,
5939     + offset += scnprintf(buf + offset, size - offset,
5940     "Hardware Params: "
5941     "Format = %s, Channels = %d, Rate = %d\n",
5942     snd_pcm_format_name(params_format(params)),
5943     @@ -3186,10 +3186,10 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
5944     params_rate(params));
5945    
5946     /* BEs state */
5947     - offset += snprintf(buf + offset, size - offset, "Backends:\n");
5948     + offset += scnprintf(buf + offset, size - offset, "Backends:\n");
5949    
5950     if (list_empty(&fe->dpcm[stream].be_clients)) {
5951     - offset += snprintf(buf + offset, size - offset,
5952     + offset += scnprintf(buf + offset, size - offset,
5953     " No active DSP links\n");
5954     goto out;
5955     }
5956     @@ -3199,16 +3199,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
5957     struct snd_soc_pcm_runtime *be = dpcm->be;
5958     params = &dpcm->hw_params;
5959    
5960     - offset += snprintf(buf + offset, size - offset,
5961     + offset += scnprintf(buf + offset, size - offset,
5962     "- %s\n", be->dai_link->name);
5963    
5964     - offset += snprintf(buf + offset, size - offset,
5965     + offset += scnprintf(buf + offset, size - offset,
5966     " State: %s\n",
5967     dpcm_state_string(be->dpcm[stream].state));
5968    
5969     if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
5970     (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
5971     - offset += snprintf(buf + offset, size - offset,
5972     + offset += scnprintf(buf + offset, size - offset,
5973     " Hardware Params: "
5974     "Format = %s, Channels = %d, Rate = %d\n",
5975     snd_pcm_format_name(params_format(params)),
5976     diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
5977     index d00203ef8305..b19ecaf0febf 100644
5978     --- a/sound/soc/soc-topology.c
5979     +++ b/sound/soc/soc-topology.c
5980     @@ -2320,8 +2320,11 @@ static int soc_tplg_link_elems_load(struct soc_tplg *tplg,
5981     }
5982    
5983     ret = soc_tplg_link_config(tplg, _link);
5984     - if (ret < 0)
5985     + if (ret < 0) {
5986     + if (!abi_match)
5987     + kfree(_link);
5988     return ret;
5989     + }
5990    
5991     /* offset by version-specific struct size and
5992     * real priv data size
5993     @@ -2485,7 +2488,7 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
5994     {
5995     struct snd_soc_tplg_manifest *manifest, *_manifest;
5996     bool abi_match;
5997     - int err;
5998     + int ret = 0;
5999    
6000     if (tplg->pass != SOC_TPLG_PASS_MANIFEST)
6001     return 0;
6002     @@ -2498,19 +2501,19 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
6003     _manifest = manifest;
6004     } else {
6005     abi_match = false;
6006     - err = manifest_new_ver(tplg, manifest, &_manifest);
6007     - if (err < 0)
6008     - return err;
6009     + ret = manifest_new_ver(tplg, manifest, &_manifest);
6010     + if (ret < 0)
6011     + return ret;
6012     }
6013    
6014     /* pass control to component driver for optional further init */
6015     if (tplg->comp && tplg->ops && tplg->ops->manifest)
6016     - return tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
6017     + ret = tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
6018    
6019     if (!abi_match) /* free the duplicated one */
6020     kfree(_manifest);
6021    
6022     - return 0;
6023     + return ret;
6024     }
6025    
6026     /* validate header magic, size and type */
6027     diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
6028     index 8984d965037d..e7b1a80e2a14 100644
6029     --- a/sound/soc/sof/ipc.c
6030     +++ b/sound/soc/sof/ipc.c
6031     @@ -497,7 +497,7 @@ int snd_sof_ipc_stream_posn(struct snd_sof_dev *sdev,
6032    
6033     /* send IPC to the DSP */
6034     err = sof_ipc_tx_message(sdev->ipc,
6035     - stream.hdr.cmd, &stream, sizeof(stream), &posn,
6036     + stream.hdr.cmd, &stream, sizeof(stream), posn,
6037     sizeof(*posn));
6038     if (err < 0) {
6039     dev_err(sdev->dev, "error: failed to get stream %d position\n",
6040     diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
6041     index ede040cf82ad..20e9a189ad92 100644
6042     --- a/tools/perf/arch/arm/util/cs-etm.c
6043     +++ b/tools/perf/arch/arm/util/cs-etm.c
6044     @@ -865,9 +865,12 @@ static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
6045     struct evsel *evsel;
6046    
6047     evlist__for_each_entry(ptr->evlist, evsel) {
6048     - if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
6049     + if (evsel->core.attr.type == ptr->cs_etm_pmu->type) {
6050     + if (evsel->disabled)
6051     + return 0;
6052     return perf_evlist__enable_event_idx(ptr->evlist,
6053     evsel, idx);
6054     + }
6055     }
6056    
6057     return -EINVAL;
6058     diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
6059     index eba6541ec0f1..1d993c27242b 100644
6060     --- a/tools/perf/arch/arm64/util/arm-spe.c
6061     +++ b/tools/perf/arch/arm64/util/arm-spe.c
6062     @@ -165,9 +165,12 @@ static int arm_spe_read_finish(struct auxtrace_record *itr, int idx)
6063     struct evsel *evsel;
6064    
6065     evlist__for_each_entry(sper->evlist, evsel) {
6066     - if (evsel->core.attr.type == sper->arm_spe_pmu->type)
6067     + if (evsel->core.attr.type == sper->arm_spe_pmu->type) {
6068     + if (evsel->disabled)
6069     + return 0;
6070     return perf_evlist__enable_event_idx(sper->evlist,
6071     evsel, idx);
6072     + }
6073     }
6074     return -EINVAL;
6075     }
6076     diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
6077     index f7f68a50a5cd..85799b5fa01d 100644
6078     --- a/tools/perf/arch/x86/util/intel-bts.c
6079     +++ b/tools/perf/arch/x86/util/intel-bts.c
6080     @@ -415,9 +415,12 @@ static int intel_bts_read_finish(struct auxtrace_record *itr, int idx)
6081     struct evsel *evsel;
6082    
6083     evlist__for_each_entry(btsr->evlist, evsel) {
6084     - if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
6085     + if (evsel->core.attr.type == btsr->intel_bts_pmu->type) {
6086     + if (evsel->disabled)
6087     + return 0;
6088     return perf_evlist__enable_event_idx(btsr->evlist,
6089     evsel, idx);
6090     + }
6091     }
6092     return -EINVAL;
6093     }
6094     diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
6095     index d6d26256915f..d43f9dec6998 100644
6096     --- a/tools/perf/arch/x86/util/intel-pt.c
6097     +++ b/tools/perf/arch/x86/util/intel-pt.c
6098     @@ -1099,9 +1099,12 @@ static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
6099     struct evsel *evsel;
6100    
6101     evlist__for_each_entry(ptr->evlist, evsel) {
6102     - if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
6103     + if (evsel->core.attr.type == ptr->intel_pt_pmu->type) {
6104     + if (evsel->disabled)
6105     + return 0;
6106     return perf_evlist__enable_event_idx(ptr->evlist, evsel,
6107     idx);
6108     + }
6109     }
6110     return -EINVAL;
6111     }
6112     diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
6113     index 1c8a1963d03f..3ed0134a764d 100644
6114     --- a/tools/testing/selftests/lib.mk
6115     +++ b/tools/testing/selftests/lib.mk
6116     @@ -83,17 +83,20 @@ else
6117     $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS))
6118     endif
6119    
6120     +define INSTALL_SINGLE_RULE
6121     + $(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
6122     + $(if $(INSTALL_LIST),@echo rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
6123     + $(if $(INSTALL_LIST),@rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
6124     +endef
6125     +
6126     define INSTALL_RULE
6127     - @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
6128     - mkdir -p ${INSTALL_PATH}; \
6129     - echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \
6130     - rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \
6131     - fi
6132     - @if [ "X$(TEST_GEN_PROGS)$(TEST_CUSTOM_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \
6133     - mkdir -p ${INSTALL_PATH}; \
6134     - echo "rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \
6135     - rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \
6136     - fi
6137     + $(eval INSTALL_LIST = $(TEST_PROGS)) $(INSTALL_SINGLE_RULE)
6138     + $(eval INSTALL_LIST = $(TEST_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
6139     + $(eval INSTALL_LIST = $(TEST_FILES)) $(INSTALL_SINGLE_RULE)
6140     + $(eval INSTALL_LIST = $(TEST_GEN_PROGS)) $(INSTALL_SINGLE_RULE)
6141     + $(eval INSTALL_LIST = $(TEST_CUSTOM_PROGS)) $(INSTALL_SINGLE_RULE)
6142     + $(eval INSTALL_LIST = $(TEST_GEN_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
6143     + $(eval INSTALL_LIST = $(TEST_GEN_FILES)) $(INSTALL_SINGLE_RULE)
6144     endef
6145    
6146     install: all
6147     diff --git a/tools/testing/selftests/net/forwarding/mirror_gre.sh b/tools/testing/selftests/net/forwarding/mirror_gre.sh
6148     index e6fd7a18c655..0266443601bc 100755
6149     --- a/tools/testing/selftests/net/forwarding/mirror_gre.sh
6150     +++ b/tools/testing/selftests/net/forwarding/mirror_gre.sh
6151     @@ -63,22 +63,23 @@ test_span_gre_mac()
6152     {
6153     local tundev=$1; shift
6154     local direction=$1; shift
6155     - local prot=$1; shift
6156     local what=$1; shift
6157    
6158     - local swp3mac=$(mac_get $swp3)
6159     - local h3mac=$(mac_get $h3)
6160     + case "$direction" in
6161     + ingress) local src_mac=$(mac_get $h1); local dst_mac=$(mac_get $h2)
6162     + ;;
6163     + egress) local src_mac=$(mac_get $h2); local dst_mac=$(mac_get $h1)
6164     + ;;
6165     + esac
6166    
6167     RET=0
6168    
6169     mirror_install $swp1 $direction $tundev "matchall $tcflags"
6170     - tc filter add dev $h3 ingress pref 77 prot $prot \
6171     - flower ip_proto 0x2f src_mac $swp3mac dst_mac $h3mac \
6172     - action pass
6173     + icmp_capture_install h3-${tundev} "src_mac $src_mac dst_mac $dst_mac"
6174    
6175     - mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10
6176     + mirror_test v$h1 192.0.2.1 192.0.2.2 h3-${tundev} 100 10
6177    
6178     - tc filter del dev $h3 ingress pref 77
6179     + icmp_capture_uninstall h3-${tundev}
6180     mirror_uninstall $swp1 $direction
6181    
6182     log_test "$direction $what: envelope MAC ($tcflags)"
6183     @@ -120,14 +121,14 @@ test_ip6gretap()
6184    
6185     test_gretap_mac()
6186     {
6187     - test_span_gre_mac gt4 ingress ip "mirror to gretap"
6188     - test_span_gre_mac gt4 egress ip "mirror to gretap"
6189     + test_span_gre_mac gt4 ingress "mirror to gretap"
6190     + test_span_gre_mac gt4 egress "mirror to gretap"
6191     }
6192    
6193     test_ip6gretap_mac()
6194     {
6195     - test_span_gre_mac gt6 ingress ipv6 "mirror to ip6gretap"
6196     - test_span_gre_mac gt6 egress ipv6 "mirror to ip6gretap"
6197     + test_span_gre_mac gt6 ingress "mirror to ip6gretap"
6198     + test_span_gre_mac gt6 egress "mirror to ip6gretap"
6199     }
6200    
6201     test_all()
6202     diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
6203     index bb10e33690b2..ce6bea9675c0 100755
6204     --- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
6205     +++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
6206     @@ -516,9 +516,9 @@ test_tos()
6207     RET=0
6208    
6209     tc filter add dev v1 egress pref 77 prot ip \
6210     - flower ip_tos 0x40 action pass
6211     - vxlan_ping_test $h1 192.0.2.3 "-Q 0x40" v1 egress 77 10
6212     - vxlan_ping_test $h1 192.0.2.3 "-Q 0x30" v1 egress 77 0
6213     + flower ip_tos 0x14 action pass
6214     + vxlan_ping_test $h1 192.0.2.3 "-Q 0x14" v1 egress 77 10
6215     + vxlan_ping_test $h1 192.0.2.3 "-Q 0x18" v1 egress 77 0
6216     tc filter del dev v1 egress pref 77 prot ip
6217    
6218     log_test "VXLAN: envelope TOS inheritance"
6219     diff --git a/usr/include/Makefile b/usr/include/Makefile
6220     index 47cb91d3a51d..e2840579156a 100644
6221     --- a/usr/include/Makefile
6222     +++ b/usr/include/Makefile
6223     @@ -99,7 +99,7 @@ endif
6224     # asm-generic/*.h is used by asm/*.h, and should not be included directly
6225     header-test- += asm-generic/%
6226    
6227     -extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h'))
6228     +extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h' 2>/dev/null))
6229    
6230     quiet_cmd_hdrtest = HDRTEST $<
6231     cmd_hdrtest = \