Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0295-5.4.196-all-fixes.patch

Parent Directory | Revision Log


Revision 3637 - (hide annotations) (download)
Mon Oct 24 12:40:44 2022 UTC (20 months ago) by niro
File size: 91580 byte(s)
-add missing
1 niro 3637 diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
2     index 7193505a98cab..8f8d97f65d737 100644
3     --- a/Documentation/DMA-attributes.txt
4     +++ b/Documentation/DMA-attributes.txt
5     @@ -156,13 +156,3 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged
6     subsystem that the buffer is fully accessible at the elevated privilege
7     level (and ideally inaccessible or at least read-only at the
8     lesser-privileged levels).
9     -
10     -DMA_ATTR_PRIVILEGED
11     --------------------
12     -
13     -Some advanced peripherals such as remote processors and GPUs perform
14     -accesses to DMA buffers in both privileged "supervisor" and unprivileged
15     -"user" modes. This attribute is used to indicate to the DMA-mapping
16     -subsystem that the buffer is fully accessible at the elevated privilege
17     -level (and ideally inaccessible or at least read-only at the
18     -lesser-privileged levels).
19     diff --git a/Makefile b/Makefile
20     index 4297d0107bd6c..c064ed925552d 100644
21     --- a/Makefile
22     +++ b/Makefile
23     @@ -1,7 +1,7 @@
24     # SPDX-License-Identifier: GPL-2.0
25     VERSION = 5
26     PATCHLEVEL = 4
27     -SUBLEVEL = 195
28     +SUBLEVEL = 196
29     EXTRAVERSION =
30     NAME = Kleptomaniac Octopus
31    
32     diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
33     index f310f4d3bcc7c..ac723fe898c76 100644
34     --- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
35     +++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
36     @@ -117,11 +117,6 @@
37     groups = "FWSPID";
38     };
39    
40     - pinctrl_fwqspid_default: fwqspid_default {
41     - function = "FWSPID";
42     - groups = "FWQSPID";
43     - };
44     -
45     pinctrl_fwspiwp_default: fwspiwp_default {
46     function = "FWSPIWP";
47     groups = "FWSPIWP";
48     @@ -653,12 +648,12 @@
49     };
50    
51     pinctrl_qspi1_default: qspi1_default {
52     - function = "QSPI1";
53     + function = "SPI1";
54     groups = "QSPI1";
55     };
56    
57     pinctrl_qspi2_default: qspi2_default {
58     - function = "QSPI2";
59     + function = "SPI2";
60     groups = "QSPI2";
61     };
62    
63     diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
64     index 8bba03de51ad4..1e61e38621066 100644
65     --- a/arch/arm/boot/dts/imx7-colibri.dtsi
66     +++ b/arch/arm/boot/dts/imx7-colibri.dtsi
67     @@ -77,7 +77,7 @@
68    
69     dailink_master: simple-audio-card,codec {
70     sound-dai = <&codec>;
71     - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
72     + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
73     };
74     };
75     };
76     @@ -152,7 +152,7 @@
77     compatible = "fsl,sgtl5000";
78     #sound-dai-cells = <0>;
79     reg = <0x0a>;
80     - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
81     + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
82     pinctrl-names = "default";
83     pinctrl-0 = <&pinctrl_sai1_mclk>;
84     VDDA-supply = <&reg_module_3v3_avdd>;
85     diff --git a/arch/arm/boot/dts/imx7-mba7.dtsi b/arch/arm/boot/dts/imx7-mba7.dtsi
86     index 50abf18ad30b2..887497e3bb4b8 100644
87     --- a/arch/arm/boot/dts/imx7-mba7.dtsi
88     +++ b/arch/arm/boot/dts/imx7-mba7.dtsi
89     @@ -250,7 +250,7 @@
90     tlv320aic32x4: audio-codec@18 {
91     compatible = "ti,tlv320aic32x4";
92     reg = <0x18>;
93     - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
94     + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
95     clock-names = "mclk";
96     ldoin-supply = <&reg_audio_3v3>;
97     iov-supply = <&reg_audio_3v3>;
98     diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
99     index 6b4acea1ef795..ecfa179ccab1c 100644
100     --- a/arch/arm/boot/dts/imx7d-nitrogen7.dts
101     +++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
102     @@ -284,7 +284,7 @@
103     codec: wm8960@1a {
104     compatible = "wlf,wm8960";
105     reg = <0x1a>;
106     - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
107     + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
108     clock-names = "mclk";
109     wlf,shared-lrclk;
110     };
111     diff --git a/arch/arm/boot/dts/imx7d-pico-hobbit.dts b/arch/arm/boot/dts/imx7d-pico-hobbit.dts
112     index 7b2198a9372c6..d917dc4f2f227 100644
113     --- a/arch/arm/boot/dts/imx7d-pico-hobbit.dts
114     +++ b/arch/arm/boot/dts/imx7d-pico-hobbit.dts
115     @@ -31,7 +31,7 @@
116    
117     dailink_master: simple-audio-card,codec {
118     sound-dai = <&sgtl5000>;
119     - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
120     + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
121     };
122     };
123     };
124     @@ -41,7 +41,7 @@
125     #sound-dai-cells = <0>;
126     reg = <0x0a>;
127     compatible = "fsl,sgtl5000";
128     - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
129     + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
130     VDDA-supply = <&reg_2p5v>;
131     VDDIO-supply = <&reg_vref_1v8>;
132     };
133     diff --git a/arch/arm/boot/dts/imx7d-pico-pi.dts b/arch/arm/boot/dts/imx7d-pico-pi.dts
134     index 70bea95c06d83..f263e391e24cb 100644
135     --- a/arch/arm/boot/dts/imx7d-pico-pi.dts
136     +++ b/arch/arm/boot/dts/imx7d-pico-pi.dts
137     @@ -31,7 +31,7 @@
138    
139     dailink_master: simple-audio-card,codec {
140     sound-dai = <&sgtl5000>;
141     - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
142     + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
143     };
144     };
145     };
146     @@ -41,7 +41,7 @@
147     #sound-dai-cells = <0>;
148     reg = <0x0a>;
149     compatible = "fsl,sgtl5000";
150     - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
151     + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
152     VDDA-supply = <&reg_2p5v>;
153     VDDIO-supply = <&reg_vref_1v8>;
154     };
155     diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
156     index 869efbc4af42c..a97cda17e484e 100644
157     --- a/arch/arm/boot/dts/imx7d-sdb.dts
158     +++ b/arch/arm/boot/dts/imx7d-sdb.dts
159     @@ -356,7 +356,7 @@
160     codec: wm8960@1a {
161     compatible = "wlf,wm8960";
162     reg = <0x1a>;
163     - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
164     + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
165     clock-names = "mclk";
166     wlf,shared-lrclk;
167     };
168     diff --git a/arch/arm/boot/dts/imx7s-warp.dts b/arch/arm/boot/dts/imx7s-warp.dts
169     index d6b4888fa686b..e035dd5bf4f62 100644
170     --- a/arch/arm/boot/dts/imx7s-warp.dts
171     +++ b/arch/arm/boot/dts/imx7s-warp.dts
172     @@ -75,7 +75,7 @@
173    
174     dailink_master: simple-audio-card,codec {
175     sound-dai = <&codec>;
176     - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
177     + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
178     };
179     };
180     };
181     @@ -232,7 +232,7 @@
182     #sound-dai-cells = <0>;
183     reg = <0x0a>;
184     compatible = "fsl,sgtl5000";
185     - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
186     + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
187     pinctrl-names = "default";
188     pinctrl-0 = <&pinctrl_sai1_mclk>;
189     VDDA-supply = <&vgen4_reg>;
190     diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
191     index 3d65fa56a0e5d..8e8efe28d7995 100644
192     --- a/arch/arm/kernel/entry-armv.S
193     +++ b/arch/arm/kernel/entry-armv.S
194     @@ -1043,7 +1043,7 @@ vector_bhb_loop8_\name:
195    
196     @ bhb workaround
197     mov r0, #8
198     -3: b . + 4
199     +3: W(b) . + 4
200     subs r0, r0, #1
201     bne 3b
202     dsb
203     diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
204     index db798eac74315..8247749998259 100644
205     --- a/arch/arm/kernel/stacktrace.c
206     +++ b/arch/arm/kernel/stacktrace.c
207     @@ -53,17 +53,17 @@ int notrace unwind_frame(struct stackframe *frame)
208     return -EINVAL;
209    
210     frame->sp = frame->fp;
211     - frame->fp = *(unsigned long *)(fp);
212     - frame->pc = *(unsigned long *)(fp + 4);
213     + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
214     + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4));
215     #else
216     /* check current frame pointer is within bounds */
217     if (fp < low + 12 || fp > high - 4)
218     return -EINVAL;
219    
220     /* restore the registers from the stack frame */
221     - frame->fp = *(unsigned long *)(fp - 12);
222     - frame->sp = *(unsigned long *)(fp - 8);
223     - frame->pc = *(unsigned long *)(fp - 4);
224     + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
225     + frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
226     + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
227     #endif
228    
229     return 0;
230     diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
231     index 097ef85bb7f21..bcb9181601d9b 100644
232     --- a/arch/arm/mm/proc-v7-bugs.c
233     +++ b/arch/arm/mm/proc-v7-bugs.c
234     @@ -301,6 +301,7 @@ void cpu_v7_ca15_ibe(void)
235     {
236     if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
237     cpu_v7_spectre_v2_init();
238     + cpu_v7_spectre_bhb_init();
239     }
240    
241     void cpu_v7_bugs_init(void)
242     diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
243     index 037b08f3257e0..a2837a54d9726 100644
244     --- a/arch/mips/lantiq/falcon/sysctrl.c
245     +++ b/arch/mips/lantiq/falcon/sysctrl.c
246     @@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module,
247     {
248     struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
249    
250     + if (!clk)
251     + return;
252     clk->cl.dev_id = dev;
253     clk->cl.con_id = NULL;
254     clk->cl.clk = clk;
255     diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
256     index 3d5683e75cf1e..200fe9ff641d6 100644
257     --- a/arch/mips/lantiq/xway/gptu.c
258     +++ b/arch/mips/lantiq/xway/gptu.c
259     @@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con,
260     {
261     struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
262    
263     + if (!clk)
264     + return;
265     clk->cl.dev_id = dev_name(dev);
266     clk->cl.con_id = con;
267     clk->cl.clk = clk;
268     diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
269     index 2ee68d6e8bb99..6c2d9779ac727 100644
270     --- a/arch/mips/lantiq/xway/sysctrl.c
271     +++ b/arch/mips/lantiq/xway/sysctrl.c
272     @@ -311,6 +311,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
273     {
274     struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
275    
276     + if (!clk)
277     + return;
278     clk->cl.dev_id = dev;
279     clk->cl.con_id = con;
280     clk->cl.clk = clk;
281     @@ -334,6 +336,8 @@ static void clkdev_add_cgu(const char *dev, const char *con,
282     {
283     struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
284    
285     + if (!clk)
286     + return;
287     clk->cl.dev_id = dev;
288     clk->cl.con_id = con;
289     clk->cl.clk = clk;
290     @@ -352,24 +356,28 @@ static void clkdev_add_pci(void)
291     struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
292    
293     /* main pci clock */
294     - clk->cl.dev_id = "17000000.pci";
295     - clk->cl.con_id = NULL;
296     - clk->cl.clk = clk;
297     - clk->rate = CLOCK_33M;
298     - clk->rates = valid_pci_rates;
299     - clk->enable = pci_enable;
300     - clk->disable = pmu_disable;
301     - clk->module = 0;
302     - clk->bits = PMU_PCI;
303     - clkdev_add(&clk->cl);
304     + if (clk) {
305     + clk->cl.dev_id = "17000000.pci";
306     + clk->cl.con_id = NULL;
307     + clk->cl.clk = clk;
308     + clk->rate = CLOCK_33M;
309     + clk->rates = valid_pci_rates;
310     + clk->enable = pci_enable;
311     + clk->disable = pmu_disable;
312     + clk->module = 0;
313     + clk->bits = PMU_PCI;
314     + clkdev_add(&clk->cl);
315     + }
316    
317     /* use internal/external bus clock */
318     - clk_ext->cl.dev_id = "17000000.pci";
319     - clk_ext->cl.con_id = "external";
320     - clk_ext->cl.clk = clk_ext;
321     - clk_ext->enable = pci_ext_enable;
322     - clk_ext->disable = pci_ext_disable;
323     - clkdev_add(&clk_ext->cl);
324     + if (clk_ext) {
325     + clk_ext->cl.dev_id = "17000000.pci";
326     + clk_ext->cl.con_id = "external";
327     + clk_ext->cl.clk = clk_ext;
328     + clk_ext->enable = pci_ext_enable;
329     + clk_ext->disable = pci_ext_disable;
330     + clkdev_add(&clk_ext->cl);
331     + }
332     }
333    
334     /* xway socs can generate clocks on gpio pins */
335     @@ -389,9 +397,15 @@ static void clkdev_add_clkout(void)
336     char *name;
337    
338     name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
339     + if (!name)
340     + continue;
341     sprintf(name, "clkout%d", i);
342    
343     clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
344     + if (!clk) {
345     + kfree(name);
346     + continue;
347     + }
348     clk->cl.dev_id = "1f103000.cgu";
349     clk->cl.con_id = name;
350     clk->cl.clk = clk;
351     diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S
352     index 848f9c75fd4f5..596f91e3a774e 100644
353     --- a/arch/x86/crypto/chacha-avx512vl-x86_64.S
354     +++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S
355     @@ -172,7 +172,7 @@ ENTRY(chacha_2block_xor_avx512vl)
356     # xor remaining bytes from partial register into output
357     mov %rcx,%rax
358     and $0xf,%rcx
359     - jz .Ldone8
360     + jz .Ldone2
361     mov %rax,%r9
362     and $~0xf,%r9
363    
364     @@ -438,7 +438,7 @@ ENTRY(chacha_4block_xor_avx512vl)
365     # xor remaining bytes from partial register into output
366     mov %rcx,%rax
367     and $0xf,%rcx
368     - jz .Ldone8
369     + jz .Ldone4
370     mov %rax,%r9
371     and $~0xf,%r9
372    
373     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
374     index d3877dd713aef..015da62e4ad75 100644
375     --- a/arch/x86/kvm/mmu.c
376     +++ b/arch/x86/kvm/mmu.c
377     @@ -5821,6 +5821,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
378     {
379     struct kvm_mmu_page *sp, *node;
380     int nr_zapped, batch = 0;
381     + bool unstable;
382    
383     restart:
384     list_for_each_entry_safe_reverse(sp, node,
385     @@ -5853,11 +5854,12 @@ restart:
386     goto restart;
387     }
388    
389     - if (__kvm_mmu_prepare_zap_page(kvm, sp,
390     - &kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
391     - batch += nr_zapped;
392     + unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
393     + &kvm->arch.zapped_obsolete_pages, &nr_zapped);
394     + batch += nr_zapped;
395     +
396     + if (unstable)
397     goto restart;
398     - }
399     }
400    
401     /*
402     diff --git a/arch/x86/um/shared/sysdep/syscalls_64.h b/arch/x86/um/shared/sysdep/syscalls_64.h
403     index 8a7d5e1da98e5..1e6875b4ffd83 100644
404     --- a/arch/x86/um/shared/sysdep/syscalls_64.h
405     +++ b/arch/x86/um/shared/sysdep/syscalls_64.h
406     @@ -10,13 +10,12 @@
407     #include <linux/msg.h>
408     #include <linux/shm.h>
409    
410     -typedef long syscall_handler_t(void);
411     +typedef long syscall_handler_t(long, long, long, long, long, long);
412    
413     extern syscall_handler_t *sys_call_table[];
414    
415     #define EXECUTE_SYSCALL(syscall, regs) \
416     - (((long (*)(long, long, long, long, long, long)) \
417     - (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
418     + (((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
419     UPT_SYSCALL_ARG2(&regs->regs), \
420     UPT_SYSCALL_ARG3(&regs->regs), \
421     UPT_SYSCALL_ARG4(&regs->regs), \
422     diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
423     index 64e6ec2c32a76..9d9777ded5f7b 100644
424     --- a/arch/x86/xen/smp_pv.c
425     +++ b/arch/x86/xen/smp_pv.c
426     @@ -53,6 +53,7 @@ static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
427     static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
428    
429     static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
430     +void asm_cpu_bringup_and_idle(void);
431    
432     static void cpu_bringup(void)
433     {
434     @@ -310,7 +311,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
435     * pointing just below where pt_regs would be if it were a normal
436     * kernel entry.
437     */
438     - ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
439     + ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
440     ctxt->flags = VGCF_IN_KERNEL;
441     ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
442     ctxt->user_regs.ds = __USER_DS;
443     diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
444     index c1d8b90aa4e2d..142da85474801 100644
445     --- a/arch/x86/xen/xen-head.S
446     +++ b/arch/x86/xen/xen-head.S
447     @@ -35,7 +35,11 @@ ENTRY(startup_xen)
448     rep __ASM_SIZE(stos)
449    
450     mov %_ASM_SI, xen_start_info
451     - mov $init_thread_union+THREAD_SIZE, %_ASM_SP
452     +#ifdef CONFIG_X86_64
453     + mov initial_stack(%rip), %rsp
454     +#else
455     + mov initial_stack, %esp
456     +#endif
457    
458     #ifdef CONFIG_X86_64
459     /* Set up %gs.
460     @@ -51,9 +55,19 @@ ENTRY(startup_xen)
461     wrmsr
462     #endif
463    
464     - jmp xen_start_kernel
465     + call xen_start_kernel
466     END(startup_xen)
467     __FINIT
468     +
469     +#ifdef CONFIG_XEN_PV_SMP
470     +.pushsection .text
471     +SYM_CODE_START(asm_cpu_bringup_and_idle)
472     + UNWIND_HINT_EMPTY
473     +
474     + call cpu_bringup_and_idle
475     +SYM_CODE_END(asm_cpu_bringup_and_idle)
476     +.popsection
477     +#endif
478     #endif
479    
480     .pushsection .text
481     diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
482     index 1d443d17cf7c5..d46806182b051 100644
483     --- a/block/bfq-iosched.c
484     +++ b/block/bfq-iosched.c
485     @@ -2251,6 +2251,9 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
486     __rq = bfq_find_rq_fmerge(bfqd, bio, q);
487     if (__rq && elv_bio_merge_ok(__rq, bio)) {
488     *req = __rq;
489     +
490     + if (blk_discard_mergable(__rq))
491     + return ELEVATOR_DISCARD_MERGE;
492     return ELEVATOR_FRONT_MERGE;
493     }
494    
495     diff --git a/block/blk-merge.c b/block/blk-merge.c
496     index a62692d135660..5219064cd72bb 100644
497     --- a/block/blk-merge.c
498     +++ b/block/blk-merge.c
499     @@ -721,21 +721,6 @@ static void blk_account_io_merge(struct request *req)
500     part_stat_unlock();
501     }
502     }
503     -/*
504     - * Two cases of handling DISCARD merge:
505     - * If max_discard_segments > 1, the driver takes every bio
506     - * as a range and send them to controller together. The ranges
507     - * needn't to be contiguous.
508     - * Otherwise, the bios/requests will be handled as same as
509     - * others which should be contiguous.
510     - */
511     -static inline bool blk_discard_mergable(struct request *req)
512     -{
513     - if (req_op(req) == REQ_OP_DISCARD &&
514     - queue_max_discard_segments(req->q) > 1)
515     - return true;
516     - return false;
517     -}
518    
519     static enum elv_merge blk_try_req_merge(struct request *req,
520     struct request *next)
521     diff --git a/block/elevator.c b/block/elevator.c
522     index 78805c74ea8a4..3ba826230c578 100644
523     --- a/block/elevator.c
524     +++ b/block/elevator.c
525     @@ -337,6 +337,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
526     __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
527     if (__rq && elv_bio_merge_ok(__rq, bio)) {
528     *req = __rq;
529     +
530     + if (blk_discard_mergable(__rq))
531     + return ELEVATOR_DISCARD_MERGE;
532     return ELEVATOR_BACK_MERGE;
533     }
534    
535     diff --git a/block/mq-deadline.c b/block/mq-deadline.c
536     index 19c6922e85f1b..6d6dda5cfffa3 100644
537     --- a/block/mq-deadline.c
538     +++ b/block/mq-deadline.c
539     @@ -452,6 +452,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
540    
541     if (elv_bio_merge_ok(__rq, bio)) {
542     *rq = __rq;
543     + if (blk_discard_mergable(__rq))
544     + return ELEVATOR_DISCARD_MERGE;
545     return ELEVATOR_FRONT_MERGE;
546     }
547     }
548     diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
549     index 4f6b76bd957ef..12ab50d295484 100644
550     --- a/drivers/base/firmware_loader/main.c
551     +++ b/drivers/base/firmware_loader/main.c
552     @@ -761,6 +761,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
553     enum fw_opt opt_flags)
554     {
555     struct firmware *fw = NULL;
556     + struct cred *kern_cred = NULL;
557     + const struct cred *old_cred;
558     int ret;
559    
560     if (!firmware_p)
561     @@ -776,6 +778,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
562     if (ret <= 0) /* error or already assigned */
563     goto out;
564    
565     + /*
566     + * We are about to try to access the firmware file. Because we may have been
567     + * called by a driver when serving an unrelated request from userland, we use
568     + * the kernel credentials to read the file.
569     + */
570     + kern_cred = prepare_kernel_cred(NULL);
571     + if (!kern_cred) {
572     + ret = -ENOMEM;
573     + goto out;
574     + }
575     + old_cred = override_creds(kern_cred);
576     +
577     ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
578     #ifdef CONFIG_FW_LOADER_COMPRESS
579     if (ret == -ENOENT)
580     @@ -792,6 +806,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
581     } else
582     ret = assign_fw(fw, device, opt_flags);
583    
584     + revert_creds(old_cred);
585     + put_cred(kern_cred);
586     +
587     out:
588     if (ret < 0) {
589     fw_abort_batch_reqs(fw);
590     diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
591     index a18155cdce416..ba10fa24fa1f1 100644
592     --- a/drivers/block/drbd/drbd_main.c
593     +++ b/drivers/block/drbd/drbd_main.c
594     @@ -183,7 +183,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
595     unsigned int set_size)
596     {
597     struct drbd_request *r;
598     - struct drbd_request *req = NULL;
599     + struct drbd_request *req = NULL, *tmp = NULL;
600     int expect_epoch = 0;
601     int expect_size = 0;
602    
603     @@ -237,8 +237,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
604     * to catch requests being barrier-acked "unexpectedly".
605     * It usually should find the same req again, or some READ preceding it. */
606     list_for_each_entry(req, &connection->transfer_log, tl_requests)
607     - if (req->epoch == expect_epoch)
608     + if (req->epoch == expect_epoch) {
609     + tmp = req;
610     break;
611     + }
612     + req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
613     list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
614     if (req->epoch != expect_epoch)
615     break;
616     diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
617     index f24e3791e8400..e133ff5fa5961 100644
618     --- a/drivers/block/floppy.c
619     +++ b/drivers/block/floppy.c
620     @@ -521,8 +521,8 @@ static unsigned long fdc_busy;
621     static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
622     static DECLARE_WAIT_QUEUE_HEAD(command_done);
623    
624     -/* Errors during formatting are counted here. */
625     -static int format_errors;
626     +/* errors encountered on the current (or last) request */
627     +static int floppy_errors;
628    
629     /* Format request descriptor. */
630     static struct format_descr format_req;
631     @@ -542,7 +542,6 @@ static struct format_descr format_req;
632     static char *floppy_track_buffer;
633     static int max_buffer_sectors;
634    
635     -static int *errors;
636     typedef void (*done_f)(int);
637     static const struct cont_t {
638     void (*interrupt)(void);
639     @@ -1435,7 +1434,7 @@ static int interpret_errors(void)
640     if (DP->flags & FTD_MSG)
641     DPRINT("Over/Underrun - retrying\n");
642     bad = 0;
643     - } else if (*errors >= DP->max_errors.reporting) {
644     + } else if (floppy_errors >= DP->max_errors.reporting) {
645     print_errors();
646     }
647     if (ST2 & ST2_WC || ST2 & ST2_BC)
648     @@ -2055,7 +2054,7 @@ static void bad_flp_intr(void)
649     if (!next_valid_format())
650     return;
651     }
652     - err_count = ++(*errors);
653     + err_count = ++floppy_errors;
654     INFBOUND(DRWE->badness, err_count);
655     if (err_count > DP->max_errors.abort)
656     cont->done(0);
657     @@ -2200,9 +2199,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
658     return -EINVAL;
659     }
660     format_req = *tmp_format_req;
661     - format_errors = 0;
662     cont = &format_cont;
663     - errors = &format_errors;
664     + floppy_errors = 0;
665     ret = wait_til_done(redo_format, true);
666     if (ret == -EINTR)
667     return -EINTR;
668     @@ -2677,7 +2675,7 @@ static int make_raw_rw_request(void)
669     */
670     if (!direct ||
671     (indirect * 2 > direct * 3 &&
672     - *errors < DP->max_errors.read_track &&
673     + floppy_errors < DP->max_errors.read_track &&
674     ((!probing ||
675     (DP->read_track & (1 << DRS->probed_format)))))) {
676     max_size = blk_rq_sectors(current_req);
677     @@ -2801,10 +2799,11 @@ static int set_next_request(void)
678     current_req = list_first_entry_or_null(&floppy_reqs, struct request,
679     queuelist);
680     if (current_req) {
681     - current_req->error_count = 0;
682     + floppy_errors = 0;
683     list_del_init(&current_req->queuelist);
684     + return 1;
685     }
686     - return current_req != NULL;
687     + return 0;
688     }
689    
690     static void redo_fd_request(void)
691     @@ -2860,7 +2859,6 @@ do_request:
692     _floppy = floppy_type + DP->autodetect[DRS->probed_format];
693     } else
694     probing = 0;
695     - errors = &(current_req->error_count);
696     tmp = make_raw_rw_request();
697     if (tmp < 2) {
698     request_done(tmp);
699     diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
700     index d7fe1303f79dc..0e7ec5075689a 100644
701     --- a/drivers/clk/at91/clk-generated.c
702     +++ b/drivers/clk/at91/clk-generated.c
703     @@ -105,6 +105,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
704     tmp_rate = parent_rate;
705     else
706     tmp_rate = parent_rate / div;
707     +
708     + if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
709     + return;
710     +
711     tmp_diff = abs(req->rate - tmp_rate);
712    
713     if (*best_diff < 0 || *best_diff >= tmp_diff) {
714     diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
715     index 3a633a0c40fdf..6cc4fd005fe07 100644
716     --- a/drivers/crypto/qcom-rng.c
717     +++ b/drivers/crypto/qcom-rng.c
718     @@ -64,6 +64,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
719     } else {
720     /* copy only remaining bytes */
721     memcpy(data, &val, max - currsize);
722     + break;
723     }
724     } while (currsize < max);
725    
726     diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
727     index fb640e0ea6140..2ecc970f5cae5 100644
728     --- a/drivers/crypto/stm32/stm32-crc32.c
729     +++ b/drivers/crypto/stm32/stm32-crc32.c
730     @@ -332,8 +332,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
731     struct stm32_crc *crc = platform_get_drvdata(pdev);
732     int ret = pm_runtime_get_sync(crc->dev);
733    
734     - if (ret < 0)
735     + if (ret < 0) {
736     + pm_runtime_put_noidle(crc->dev);
737     return ret;
738     + }
739    
740     spin_lock(&crc_list.lock);
741     list_del(&crc->list);
742     diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
743     index 89a053b1d2799..b5ae28fce9a89 100644
744     --- a/drivers/gpio/gpio-mvebu.c
745     +++ b/drivers/gpio/gpio-mvebu.c
746     @@ -697,6 +697,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
747     unsigned long flags;
748     unsigned int on, off;
749    
750     + if (state->polarity != PWM_POLARITY_NORMAL)
751     + return -EINVAL;
752     +
753     val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
754     do_div(val, NSEC_PER_SEC);
755     if (val > UINT_MAX)
756     diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
757     index 58776f2d69ff8..1ae612c796eef 100644
758     --- a/drivers/gpio/gpio-vf610.c
759     +++ b/drivers/gpio/gpio-vf610.c
760     @@ -125,9 +125,13 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
761     {
762     struct vf610_gpio_port *port = gpiochip_get_data(chip);
763     unsigned long mask = BIT(gpio);
764     + u32 val;
765    
766     - if (port->sdata && port->sdata->have_paddr)
767     - vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
768     + if (port->sdata && port->sdata->have_paddr) {
769     + val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
770     + val |= mask;
771     + vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
772     + }
773    
774     vf610_gpio_set(chip, gpio, value);
775    
776     diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
777     index 2de1eebe591f9..1ff13af133243 100644
778     --- a/drivers/gpu/drm/drm_dp_mst_topology.c
779     +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
780     @@ -3657,6 +3657,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
781    
782     mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
783     drm_edid_get_monitor_name(mst_edid, name, namelen);
784     + kfree(mst_edid);
785     }
786    
787     /**
788     diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
789     index 62df8379bc895..65e72101b393b 100644
790     --- a/drivers/i2c/busses/i2c-mt7621.c
791     +++ b/drivers/i2c/busses/i2c-mt7621.c
792     @@ -304,7 +304,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
793    
794     if (i2c->bus_freq == 0) {
795     dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
796     - return -EINVAL;
797     + ret = -EINVAL;
798     + goto err_disable_clk;
799     }
800    
801     adap = &i2c->adap;
802     @@ -322,10 +323,15 @@ static int mtk_i2c_probe(struct platform_device *pdev)
803    
804     ret = i2c_add_adapter(adap);
805     if (ret < 0)
806     - return ret;
807     + goto err_disable_clk;
808    
809     dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000);
810    
811     + return 0;
812     +
813     +err_disable_clk:
814     + clk_disable_unprepare(i2c->clk);
815     +
816     return ret;
817     }
818    
819     diff --git a/drivers/input/input.c b/drivers/input/input.c
820     index e2eb9b9b8363d..0e16a9980c6a1 100644
821     --- a/drivers/input/input.c
822     +++ b/drivers/input/input.c
823     @@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex);
824    
825     static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
826    
827     +static const unsigned int input_max_code[EV_CNT] = {
828     + [EV_KEY] = KEY_MAX,
829     + [EV_REL] = REL_MAX,
830     + [EV_ABS] = ABS_MAX,
831     + [EV_MSC] = MSC_MAX,
832     + [EV_SW] = SW_MAX,
833     + [EV_LED] = LED_MAX,
834     + [EV_SND] = SND_MAX,
835     + [EV_FF] = FF_MAX,
836     +};
837     +
838     static inline int is_event_supported(unsigned int code,
839     unsigned long *bm, unsigned int max)
840     {
841     @@ -1978,6 +1989,14 @@ EXPORT_SYMBOL(input_get_timestamp);
842     */
843     void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
844     {
845     + if (type < EV_CNT && input_max_code[type] &&
846     + code > input_max_code[type]) {
847     + pr_err("%s: invalid code %u for type %u\n", __func__, code,
848     + type);
849     + dump_stack();
850     + return;
851     + }
852     +
853     switch (type) {
854     case EV_KEY:
855     __set_bit(code, dev->keybit);
856     diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
857     index 22839dde1d09f..bc6f6a2ac4b9d 100644
858     --- a/drivers/input/touchscreen/ili210x.c
859     +++ b/drivers/input/touchscreen/ili210x.c
860     @@ -290,9 +290,9 @@ static int ili210x_i2c_probe(struct i2c_client *client,
861     if (error)
862     return error;
863    
864     - usleep_range(50, 100);
865     + usleep_range(12000, 15000);
866     gpiod_set_value_cansleep(reset_gpio, 0);
867     - msleep(100);
868     + msleep(160);
869     }
870    
871     priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
872     diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
873     index cd8805d71d977..be1dd504d5b1d 100644
874     --- a/drivers/input/touchscreen/stmfts.c
875     +++ b/drivers/input/touchscreen/stmfts.c
876     @@ -339,11 +339,11 @@ static int stmfts_input_open(struct input_dev *dev)
877    
878     err = pm_runtime_get_sync(&sdata->client->dev);
879     if (err < 0)
880     - return err;
881     + goto out;
882    
883     err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
884     if (err)
885     - return err;
886     + goto out;
887    
888     mutex_lock(&sdata->mutex);
889     sdata->running = true;
890     @@ -366,7 +366,9 @@ static int stmfts_input_open(struct input_dev *dev)
891     "failed to enable touchkey\n");
892     }
893    
894     - return 0;
895     +out:
896     + pm_runtime_put_noidle(&sdata->client->dev);
897     + return err;
898     }
899    
900     static void stmfts_input_close(struct input_dev *dev)
901     diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
902     index 362ad361d5861..709f117fd5772 100644
903     --- a/drivers/mmc/core/block.c
904     +++ b/drivers/mmc/core/block.c
905     @@ -1126,7 +1126,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
906     card->erase_arg == MMC_TRIM_ARG ?
907     INAND_CMD38_ARG_TRIM :
908     INAND_CMD38_ARG_ERASE,
909     - 0);
910     + card->ext_csd.generic_cmd6_time);
911     }
912     if (!err)
913     err = mmc_erase(card, from, nr, card->erase_arg);
914     @@ -1168,7 +1168,7 @@ retry:
915     arg == MMC_SECURE_TRIM1_ARG ?
916     INAND_CMD38_ARG_SECTRIM1 :
917     INAND_CMD38_ARG_SECERASE,
918     - 0);
919     + card->ext_csd.generic_cmd6_time);
920     if (err)
921     goto out_retry;
922     }
923     @@ -1186,7 +1186,7 @@ retry:
924     err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
925     INAND_CMD38_ARG_EXT_CSD,
926     INAND_CMD38_ARG_SECTRIM2,
927     - 0);
928     + card->ext_csd.generic_cmd6_time);
929     if (err)
930     goto out_retry;
931     }
932     diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
933     index 09311c2bd8588..d495ba2f368cb 100644
934     --- a/drivers/mmc/core/mmc_ops.c
935     +++ b/drivers/mmc/core/mmc_ops.c
936     @@ -19,7 +19,9 @@
937     #include "host.h"
938     #include "mmc_ops.h"
939    
940     -#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
941     +#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10min*/
942     +#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
943     +#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
944    
945     static const u8 tuning_blk_pattern_4bit[] = {
946     0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
947     @@ -458,10 +460,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
948     bool expired = false;
949     bool busy = false;
950    
951     - /* We have an unspecified cmd timeout, use the fallback value. */
952     - if (!timeout_ms)
953     - timeout_ms = MMC_OPS_TIMEOUT_MS;
954     -
955     /*
956     * In cases when not allowed to poll by using CMD13 or because we aren't
957     * capable of polling by using ->card_busy(), then rely on waiting the
958     @@ -534,6 +532,12 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
959    
960     mmc_retune_hold(host);
961    
962     + if (!timeout_ms) {
963     + pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
964     + mmc_hostname(host));
965     + timeout_ms = card->ext_csd.generic_cmd6_time;
966     + }
967     +
968     /*
969     * If the cmd timeout and the max_busy_timeout of the host are both
970     * specified, let's validate them. A failure means we need to prevent
971     @@ -542,7 +546,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
972     * which also means they are on their own when it comes to deal with the
973     * busy timeout.
974     */
975     - if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && timeout_ms &&
976     + if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) &&
977     host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
978     use_r1b_resp = false;
979    
980     @@ -554,10 +558,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
981     cmd.flags = MMC_CMD_AC;
982     if (use_r1b_resp) {
983     cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
984     - /*
985     - * A busy_timeout of zero means the host can decide to use
986     - * whatever value it finds suitable.
987     - */
988     cmd.busy_timeout = timeout_ms;
989     } else {
990     cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
991     @@ -943,7 +943,7 @@ void mmc_run_bkops(struct mmc_card *card)
992     * urgent levels by using an asynchronous background task, when idle.
993     */
994     err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
995     - EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
996     + EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
997     if (err)
998     pr_warn("%s: Error %d starting bkops\n",
999     mmc_hostname(card->host), err);
1000     @@ -961,7 +961,8 @@ int mmc_flush_cache(struct mmc_card *card)
1001    
1002     if (mmc_cache_enabled(card->host)) {
1003     err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1004     - EXT_CSD_FLUSH_CACHE, 1, 0);
1005     + EXT_CSD_FLUSH_CACHE, 1,
1006     + MMC_CACHE_FLUSH_TIMEOUT_MS);
1007     if (err)
1008     pr_err("%s: cache flush error %d\n",
1009     mmc_hostname(card->host), err);
1010     diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
1011     index 2ad3fa6316ce3..cb5954eeb4090 100644
1012     --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
1013     +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
1014     @@ -674,6 +674,13 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
1015     err = -ENXIO;
1016     goto err_exit;
1017     }
1018     +
1019     + /* Validate that the new hw_head_ is reasonable. */
1020     + if (hw_head_ >= ring->size) {
1021     + err = -ENXIO;
1022     + goto err_exit;
1023     + }
1024     +
1025     ring->hw_head = hw_head_;
1026     err = aq_hw_err_from_flags(self);
1027    
1028     diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1029     index 002a374f197bd..78219a9943a73 100644
1030     --- a/drivers/net/ethernet/cadence/macb_main.c
1031     +++ b/drivers/net/ethernet/cadence/macb_main.c
1032     @@ -927,7 +927,6 @@ static void gem_rx_refill(struct macb_queue *queue)
1033     /* Make hw descriptor updates visible to CPU */
1034     rmb();
1035    
1036     - queue->rx_prepared_head++;
1037     desc = macb_rx_desc(queue, entry);
1038    
1039     if (!queue->rx_skbuff[entry]) {
1040     @@ -966,6 +965,7 @@ static void gem_rx_refill(struct macb_queue *queue)
1041     dma_wmb();
1042     desc->addr &= ~MACB_BIT(RX_USED);
1043     }
1044     + queue->rx_prepared_head++;
1045     }
1046    
1047     /* Make descriptor updates visible to hardware */
1048     diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
1049     index 3e3e08698876b..fea4223ad6f15 100644
1050     --- a/drivers/net/ethernet/dec/tulip/tulip_core.c
1051     +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
1052     @@ -1410,8 +1410,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1053    
1054     /* alloc_etherdev ensures aligned and zeroed private structures */
1055     dev = alloc_etherdev (sizeof (*tp));
1056     - if (!dev)
1057     + if (!dev) {
1058     + pci_disable_device(pdev);
1059     return -ENOMEM;
1060     + }
1061    
1062     SET_NETDEV_DEV(dev, &pdev->dev);
1063     if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1064     @@ -1788,6 +1790,7 @@ err_out_free_res:
1065    
1066     err_out_free_netdev:
1067     free_netdev (dev);
1068     + pci_disable_device(pdev);
1069     return -ENODEV;
1070     }
1071    
1072     diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
1073     index 3eea68f3a5262..88750a96cb3f2 100644
1074     --- a/drivers/net/ethernet/intel/ice/ice_main.c
1075     +++ b/drivers/net/ethernet/intel/ice/ice_main.c
1076     @@ -3561,9 +3561,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
1077     netif_carrier_on(vsi->netdev);
1078     }
1079    
1080     - /* clear this now, and the first stats read will be used as baseline */
1081     - vsi->stat_offsets_loaded = false;
1082     -
1083     + /* Perform an initial read of the statistics registers now to
1084     + * set the baseline so counters are ready when interface is up
1085     + */
1086     + ice_update_eth_stats(vsi);
1087     ice_service_task_schedule(pf);
1088    
1089     return 0;
1090     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
1091     index 3df25b231ab5c..26c8d09ad4ddb 100644
1092     --- a/drivers/net/ethernet/intel/igb/igb_main.c
1093     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
1094     @@ -5318,7 +5318,8 @@ static void igb_watchdog_task(struct work_struct *work)
1095     break;
1096     }
1097    
1098     - if (adapter->link_speed != SPEED_1000)
1099     + if (adapter->link_speed != SPEED_1000 ||
1100     + !hw->phy.ops.read_reg)
1101     goto no_wait;
1102    
1103     /* wait for Remote receiver status OK */
1104     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1105     index 2465165cbea73..73291051808f9 100644
1106     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1107     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1108     @@ -3980,6 +3980,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
1109     }
1110     }
1111    
1112     + if (params->xdp_prog) {
1113     + if (features & NETIF_F_LRO) {
1114     + netdev_warn(netdev, "LRO is incompatible with XDP\n");
1115     + features &= ~NETIF_F_LRO;
1116     + }
1117     + }
1118     +
1119     if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
1120     features &= ~NETIF_F_RXHASH;
1121     if (netdev->features & NETIF_F_RXHASH)
1122     diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
1123     index da2862d596813..5e81cd317a32f 100644
1124     --- a/drivers/net/ethernet/qlogic/qla3xxx.c
1125     +++ b/drivers/net/ethernet/qlogic/qla3xxx.c
1126     @@ -3629,7 +3629,8 @@ static void ql_reset_work(struct work_struct *work)
1127     qdev->mem_map_registers;
1128     unsigned long hw_flags;
1129    
1130     - if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
1131     + if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
1132     + test_bit(QL_RESET_START, &qdev->flags)) {
1133     clear_bit(QL_LINK_MASTER, &qdev->flags);
1134    
1135     /*
1136     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1137     index 9cbc0179d24ec..9931724c4727d 100644
1138     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1139     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1140     @@ -4531,7 +4531,7 @@ int stmmac_dvr_probe(struct device *device,
1141     dev_info(priv->device, "TSO feature enabled\n");
1142     }
1143    
1144     - if (priv->dma_cap.sphen) {
1145     + if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
1146     ndev->hw_features |= NETIF_F_GRO;
1147     priv->sph = true;
1148     dev_info(priv->device, "SPH feature enabled\n");
1149     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1150     index 292045f4581f7..0edcf3f704b74 100644
1151     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1152     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1153     @@ -119,6 +119,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
1154     plat->has_gmac4 = 1;
1155     plat->force_sf_dma_mode = 0;
1156     plat->tso_en = 1;
1157     + plat->sph_disable = 1;
1158    
1159     plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
1160    
1161     @@ -481,7 +482,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
1162     return -ENOMEM;
1163    
1164     /* Enable pci device */
1165     - ret = pci_enable_device(pdev);
1166     + ret = pcim_enable_device(pdev);
1167     if (ret) {
1168     dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
1169     __func__);
1170     @@ -538,8 +539,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
1171     pcim_iounmap_regions(pdev, BIT(i));
1172     break;
1173     }
1174     -
1175     - pci_disable_device(pdev);
1176     }
1177    
1178     static int __maybe_unused stmmac_pci_suspend(struct device *dev)
1179     diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
1180     index cf090f88dac03..609f65530b9b0 100644
1181     --- a/drivers/net/vmxnet3/vmxnet3_drv.c
1182     +++ b/drivers/net/vmxnet3/vmxnet3_drv.c
1183     @@ -595,6 +595,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
1184     if (dma_mapping_error(&adapter->pdev->dev,
1185     rbi->dma_addr)) {
1186     dev_kfree_skb_any(rbi->skb);
1187     + rbi->skb = NULL;
1188     rq->stats.rx_buf_alloc_failure++;
1189     break;
1190     }
1191     @@ -619,6 +620,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
1192     if (dma_mapping_error(&adapter->pdev->dev,
1193     rbi->dma_addr)) {
1194     put_page(rbi->page);
1195     + rbi->page = NULL;
1196     rq->stats.rx_buf_alloc_failure++;
1197     break;
1198     }
1199     @@ -1584,6 +1586,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1200     u32 i, ring_idx;
1201     struct Vmxnet3_RxDesc *rxd;
1202    
1203     + /* ring has already been cleaned up */
1204     + if (!rq->rx_ring[0].base)
1205     + return;
1206     +
1207     for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1208     for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1209     #ifdef __BIG_ENDIAN_BITFIELD
1210     diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1211     index 6a9a42809f972..79e22618817de 100644
1212     --- a/drivers/nvme/host/core.c
1213     +++ b/drivers/nvme/host/core.c
1214     @@ -4047,6 +4047,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
1215     if (ctrl->queue_count > 1) {
1216     nvme_queue_scan(ctrl);
1217     nvme_start_queues(ctrl);
1218     + nvme_mpath_update(ctrl);
1219     }
1220     ctrl->created = true;
1221     }
1222     diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
1223     index 4d615337e6e22..811f7b96b5517 100644
1224     --- a/drivers/nvme/host/multipath.c
1225     +++ b/drivers/nvme/host/multipath.c
1226     @@ -501,8 +501,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
1227     ns->ana_grpid = le32_to_cpu(desc->grpid);
1228     ns->ana_state = desc->state;
1229     clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
1230     -
1231     - if (nvme_state_is_live(ns->ana_state))
1232     + /*
1233     + * nvme_mpath_set_live() will trigger I/O to the multipath path device
1234     + * and in turn to this path device. However we cannot accept this I/O
1235     + * if the controller is not live. This may deadlock if called from
1236     + * nvme_mpath_init_identify() and the ctrl will never complete
1237     + * initialization, preventing I/O from completing. For this case we
1238     + * will reprocess the ANA log page in nvme_mpath_update() once the
1239     + * controller is ready.
1240     + */
1241     + if (nvme_state_is_live(ns->ana_state) &&
1242     + ns->ctrl->state == NVME_CTRL_LIVE)
1243     nvme_mpath_set_live(ns);
1244     }
1245    
1246     @@ -586,6 +595,18 @@ static void nvme_ana_work(struct work_struct *work)
1247     nvme_read_ana_log(ctrl);
1248     }
1249    
1250     +void nvme_mpath_update(struct nvme_ctrl *ctrl)
1251     +{
1252     + u32 nr_change_groups = 0;
1253     +
1254     + if (!ctrl->ana_log_buf)
1255     + return;
1256     +
1257     + mutex_lock(&ctrl->ana_lock);
1258     + nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
1259     + mutex_unlock(&ctrl->ana_lock);
1260     +}
1261     +
1262     static void nvme_anatt_timeout(struct timer_list *t)
1263     {
1264     struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
1265     diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
1266     index 2df90d4355b9a..1d1431dd4f9e3 100644
1267     --- a/drivers/nvme/host/nvme.h
1268     +++ b/drivers/nvme/host/nvme.h
1269     @@ -551,6 +551,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
1270     void nvme_mpath_remove_disk(struct nvme_ns_head *head);
1271     int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
1272     void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
1273     +void nvme_mpath_update(struct nvme_ctrl *ctrl);
1274     void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
1275     void nvme_mpath_stop(struct nvme_ctrl *ctrl);
1276     bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
1277     @@ -648,6 +649,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
1278     "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
1279     return 0;
1280     }
1281     +static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
1282     +{
1283     +}
1284     static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
1285     {
1286     }
1287     diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1288     index b9550cd4280ca..d539eb379743e 100644
1289     --- a/drivers/pci/pci.c
1290     +++ b/drivers/pci/pci.c
1291     @@ -2613,6 +2613,16 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
1292     DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
1293     DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
1294     },
1295     + /*
1296     + * Downstream device is not accessible after putting a root port
1297     + * into D3cold and back into D0 on Elo i2.
1298     + */
1299     + .ident = "Elo i2",
1300     + .matches = {
1301     + DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
1302     + DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
1303     + DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
1304     + },
1305     },
1306     #endif
1307     { }
1308     diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
1309     index 6ae484989d1f5..c4b57e1df192a 100644
1310     --- a/drivers/platform/chrome/cros_ec_debugfs.c
1311     +++ b/drivers/platform/chrome/cros_ec_debugfs.c
1312     @@ -26,6 +26,9 @@
1313    
1314     #define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))
1315    
1316     +/* waitqueue for log readers */
1317     +static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
1318     +
1319     /**
1320     * struct cros_ec_debugfs - EC debugging information.
1321     *
1322     @@ -34,7 +37,6 @@
1323     * @log_buffer: circular buffer for console log information
1324     * @read_msg: preallocated EC command and buffer to read console log
1325     * @log_mutex: mutex to protect circular buffer
1326     - * @log_wq: waitqueue for log readers
1327     * @log_poll_work: recurring task to poll EC for new console log data
1328     * @panicinfo_blob: panicinfo debugfs blob
1329     */
1330     @@ -45,7 +47,6 @@ struct cros_ec_debugfs {
1331     struct circ_buf log_buffer;
1332     struct cros_ec_command *read_msg;
1333     struct mutex log_mutex;
1334     - wait_queue_head_t log_wq;
1335     struct delayed_work log_poll_work;
1336     /* EC panicinfo */
1337     struct debugfs_blob_wrapper panicinfo_blob;
1338     @@ -108,7 +109,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
1339     buf_space--;
1340     }
1341    
1342     - wake_up(&debug_info->log_wq);
1343     + wake_up(&cros_ec_debugfs_log_wq);
1344     }
1345    
1346     mutex_unlock(&debug_info->log_mutex);
1347     @@ -142,7 +143,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
1348    
1349     mutex_unlock(&debug_info->log_mutex);
1350    
1351     - ret = wait_event_interruptible(debug_info->log_wq,
1352     + ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
1353     CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
1354     if (ret < 0)
1355     return ret;
1356     @@ -174,7 +175,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file,
1357     struct cros_ec_debugfs *debug_info = file->private_data;
1358     __poll_t mask = 0;
1359    
1360     - poll_wait(file, &debug_info->log_wq, wait);
1361     + poll_wait(file, &cros_ec_debugfs_log_wq, wait);
1362    
1363     mutex_lock(&debug_info->log_mutex);
1364     if (CIRC_CNT(debug_info->log_buffer.head,
1365     @@ -359,7 +360,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
1366     debug_info->log_buffer.tail = 0;
1367    
1368     mutex_init(&debug_info->log_mutex);
1369     - init_waitqueue_head(&debug_info->log_wq);
1370    
1371     debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
1372     debug_info, &cros_ec_console_log_fops);
1373     diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
1374     index 9458e6d6686ac..8b434213bc7ad 100644
1375     --- a/drivers/rtc/class.c
1376     +++ b/drivers/rtc/class.c
1377     @@ -26,6 +26,15 @@ struct class *rtc_class;
1378     static void rtc_device_release(struct device *dev)
1379     {
1380     struct rtc_device *rtc = to_rtc_device(dev);
1381     + struct timerqueue_head *head = &rtc->timerqueue;
1382     + struct timerqueue_node *node;
1383     +
1384     + mutex_lock(&rtc->ops_lock);
1385     + while ((node = timerqueue_getnext(head)))
1386     + timerqueue_del(head, node);
1387     + mutex_unlock(&rtc->ops_lock);
1388     +
1389     + cancel_work_sync(&rtc->irqwork);
1390    
1391     ida_simple_remove(&rtc_ida, rtc->id);
1392     kfree(rtc);
1393     diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
1394     index 5add637c9ad23..b036ff33fbe61 100644
1395     --- a/drivers/rtc/rtc-mc146818-lib.c
1396     +++ b/drivers/rtc/rtc-mc146818-lib.c
1397     @@ -99,6 +99,17 @@ unsigned int mc146818_get_time(struct rtc_time *time)
1398     }
1399     EXPORT_SYMBOL_GPL(mc146818_get_time);
1400    
1401     +/* AMD systems don't allow access to AltCentury with DV1 */
1402     +static bool apply_amd_register_a_behavior(void)
1403     +{
1404     +#ifdef CONFIG_X86
1405     + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1406     + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
1407     + return true;
1408     +#endif
1409     + return false;
1410     +}
1411     +
1412     /* Set the current date and time in the real time clock. */
1413     int mc146818_set_time(struct rtc_time *time)
1414     {
1415     @@ -172,7 +183,10 @@ int mc146818_set_time(struct rtc_time *time)
1416     save_control = CMOS_READ(RTC_CONTROL);
1417     CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
1418     save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
1419     - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
1420     + if (apply_amd_register_a_behavior())
1421     + CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
1422     + else
1423     + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
1424    
1425     #ifdef CONFIG_MACH_DECSTATION
1426     CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
1427     diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
1428     index df598c377161d..cb97565b6a333 100644
1429     --- a/drivers/scsi/qla2xxx/qla_target.c
1430     +++ b/drivers/scsi/qla2xxx/qla_target.c
1431     @@ -3768,6 +3768,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
1432    
1433     spin_lock_irqsave(&cmd->cmd_lock, flags);
1434     if (cmd->aborted) {
1435     + if (cmd->sg_mapped)
1436     + qlt_unmap_sg(vha, cmd);
1437     +
1438     spin_unlock_irqrestore(&cmd->cmd_lock, flags);
1439     /*
1440     * It's normal to see 2 calls in this path:
1441     diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
1442     index 1058aba8d5734..3e0267ead718d 100644
1443     --- a/drivers/vhost/net.c
1444     +++ b/drivers/vhost/net.c
1445     @@ -1446,13 +1446,9 @@ err:
1446     return ERR_PTR(r);
1447     }
1448    
1449     -static struct ptr_ring *get_tap_ptr_ring(int fd)
1450     +static struct ptr_ring *get_tap_ptr_ring(struct file *file)
1451     {
1452     struct ptr_ring *ring;
1453     - struct file *file = fget(fd);
1454     -
1455     - if (!file)
1456     - return NULL;
1457     ring = tun_get_tx_ring(file);
1458     if (!IS_ERR(ring))
1459     goto out;
1460     @@ -1461,7 +1457,6 @@ static struct ptr_ring *get_tap_ptr_ring(int fd)
1461     goto out;
1462     ring = NULL;
1463     out:
1464     - fput(file);
1465     return ring;
1466     }
1467    
1468     @@ -1548,8 +1543,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
1469     r = vhost_net_enable_vq(n, vq);
1470     if (r)
1471     goto err_used;
1472     - if (index == VHOST_NET_VQ_RX)
1473     - nvq->rx_ring = get_tap_ptr_ring(fd);
1474     + if (index == VHOST_NET_VQ_RX) {
1475     + if (sock)
1476     + nvq->rx_ring = get_tap_ptr_ring(sock->file);
1477     + else
1478     + nvq->rx_ring = NULL;
1479     + }
1480    
1481     oldubufs = nvq->ubufs;
1482     nvq->ubufs = ubufs;
1483     diff --git a/fs/afs/inode.c b/fs/afs/inode.c
1484     index 4f58b28a1edd2..90eac3ec01cbc 100644
1485     --- a/fs/afs/inode.c
1486     +++ b/fs/afs/inode.c
1487     @@ -734,10 +734,22 @@ int afs_getattr(const struct path *path, struct kstat *stat,
1488     {
1489     struct inode *inode = d_inode(path->dentry);
1490     struct afs_vnode *vnode = AFS_FS_I(inode);
1491     - int seq = 0;
1492     + struct key *key;
1493     + int ret, seq = 0;
1494    
1495     _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
1496    
1497     + if (!(query_flags & AT_STATX_DONT_SYNC) &&
1498     + !test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
1499     + key = afs_request_key(vnode->volume->cell);
1500     + if (IS_ERR(key))
1501     + return PTR_ERR(key);
1502     + ret = afs_validate(vnode, key);
1503     + key_put(key);
1504     + if (ret < 0)
1505     + return ret;
1506     + }
1507     +
1508     do {
1509     read_seqbegin_or_lock(&vnode->cb_lock, &seq);
1510     generic_fillattr(inode, stat);
1511     diff --git a/fs/file_table.c b/fs/file_table.c
1512     index 30d55c9a1744a..70e8fb68a1717 100644
1513     --- a/fs/file_table.c
1514     +++ b/fs/file_table.c
1515     @@ -375,6 +375,7 @@ void __fput_sync(struct file *file)
1516     }
1517    
1518     EXPORT_SYMBOL(fput);
1519     +EXPORT_SYMBOL(__fput_sync);
1520    
1521     void __init files_init(void)
1522     {
1523     diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
1524     index 4391fd3abd8f8..e00e184b12615 100644
1525     --- a/fs/nilfs2/btnode.c
1526     +++ b/fs/nilfs2/btnode.c
1527     @@ -20,6 +20,23 @@
1528     #include "page.h"
1529     #include "btnode.h"
1530    
1531     +
1532     +/**
1533     + * nilfs_init_btnc_inode - initialize B-tree node cache inode
1534     + * @btnc_inode: inode to be initialized
1535     + *
1536     + * nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
1537     + */
1538     +void nilfs_init_btnc_inode(struct inode *btnc_inode)
1539     +{
1540     + struct nilfs_inode_info *ii = NILFS_I(btnc_inode);
1541     +
1542     + btnc_inode->i_mode = S_IFREG;
1543     + ii->i_flags = 0;
1544     + memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
1545     + mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
1546     +}
1547     +
1548     void nilfs_btnode_cache_clear(struct address_space *btnc)
1549     {
1550     invalidate_mapping_pages(btnc, 0, -1);
1551     @@ -29,7 +46,7 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
1552     struct buffer_head *
1553     nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
1554     {
1555     - struct inode *inode = NILFS_BTNC_I(btnc);
1556     + struct inode *inode = btnc->host;
1557     struct buffer_head *bh;
1558    
1559     bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
1560     @@ -57,7 +74,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
1561     struct buffer_head **pbh, sector_t *submit_ptr)
1562     {
1563     struct buffer_head *bh;
1564     - struct inode *inode = NILFS_BTNC_I(btnc);
1565     + struct inode *inode = btnc->host;
1566     struct page *page;
1567     int err;
1568    
1569     @@ -157,7 +174,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
1570     struct nilfs_btnode_chkey_ctxt *ctxt)
1571     {
1572     struct buffer_head *obh, *nbh;
1573     - struct inode *inode = NILFS_BTNC_I(btnc);
1574     + struct inode *inode = btnc->host;
1575     __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
1576     int err;
1577    
1578     diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
1579     index 0f88dbc9bcb3e..05ab64d354dc9 100644
1580     --- a/fs/nilfs2/btnode.h
1581     +++ b/fs/nilfs2/btnode.h
1582     @@ -30,6 +30,7 @@ struct nilfs_btnode_chkey_ctxt {
1583     struct buffer_head *newbh;
1584     };
1585    
1586     +void nilfs_init_btnc_inode(struct inode *btnc_inode);
1587     void nilfs_btnode_cache_clear(struct address_space *);
1588     struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
1589     __u64 blocknr);
1590     diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
1591     index 23e043eca237b..919d1238ce45f 100644
1592     --- a/fs/nilfs2/btree.c
1593     +++ b/fs/nilfs2/btree.c
1594     @@ -58,7 +58,8 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path)
1595     static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
1596     __u64 ptr, struct buffer_head **bhp)
1597     {
1598     - struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
1599     + struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
1600     + struct address_space *btnc = btnc_inode->i_mapping;
1601     struct buffer_head *bh;
1602    
1603     bh = nilfs_btnode_create_block(btnc, ptr);
1604     @@ -470,7 +471,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
1605     struct buffer_head **bhp,
1606     const struct nilfs_btree_readahead_info *ra)
1607     {
1608     - struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
1609     + struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
1610     + struct address_space *btnc = btnc_inode->i_mapping;
1611     struct buffer_head *bh, *ra_bh;
1612     sector_t submit_ptr = 0;
1613     int ret;
1614     @@ -1742,6 +1744,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
1615     dat = nilfs_bmap_get_dat(btree);
1616     }
1617    
1618     + ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode);
1619     + if (ret < 0)
1620     + return ret;
1621     +
1622     ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
1623     if (ret < 0)
1624     return ret;
1625     @@ -1914,7 +1920,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
1626     path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
1627     path[level].bp_ctxt.bh = path[level].bp_bh;
1628     ret = nilfs_btnode_prepare_change_key(
1629     - &NILFS_BMAP_I(btree)->i_btnode_cache,
1630     + NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
1631     &path[level].bp_ctxt);
1632     if (ret < 0) {
1633     nilfs_dat_abort_update(dat,
1634     @@ -1940,7 +1946,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
1635    
1636     if (buffer_nilfs_node(path[level].bp_bh)) {
1637     nilfs_btnode_commit_change_key(
1638     - &NILFS_BMAP_I(btree)->i_btnode_cache,
1639     + NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
1640     &path[level].bp_ctxt);
1641     path[level].bp_bh = path[level].bp_ctxt.bh;
1642     }
1643     @@ -1959,7 +1965,7 @@ static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
1644     &path[level].bp_newreq.bpr_req);
1645     if (buffer_nilfs_node(path[level].bp_bh))
1646     nilfs_btnode_abort_change_key(
1647     - &NILFS_BMAP_I(btree)->i_btnode_cache,
1648     + NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
1649     &path[level].bp_ctxt);
1650     }
1651    
1652     @@ -2135,7 +2141,8 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
1653     static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
1654     struct list_head *listp)
1655     {
1656     - struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
1657     + struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
1658     + struct address_space *btcache = btnc_inode->i_mapping;
1659     struct list_head lists[NILFS_BTREE_LEVEL_MAX];
1660     struct pagevec pvec;
1661     struct buffer_head *bh, *head;
1662     @@ -2189,12 +2196,12 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
1663     path[level].bp_ctxt.newkey = blocknr;
1664     path[level].bp_ctxt.bh = *bh;
1665     ret = nilfs_btnode_prepare_change_key(
1666     - &NILFS_BMAP_I(btree)->i_btnode_cache,
1667     + NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
1668     &path[level].bp_ctxt);
1669     if (ret < 0)
1670     return ret;
1671     nilfs_btnode_commit_change_key(
1672     - &NILFS_BMAP_I(btree)->i_btnode_cache,
1673     + NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
1674     &path[level].bp_ctxt);
1675     *bh = path[level].bp_ctxt.bh;
1676     }
1677     @@ -2399,6 +2406,10 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
1678    
1679     if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
1680     ret = -EIO;
1681     + else
1682     + ret = nilfs_attach_btree_node_cache(
1683     + &NILFS_BMAP_I(bmap)->vfs_inode);
1684     +
1685     return ret;
1686     }
1687    
1688     diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
1689     index 6f4066636be9a..a3523a243e113 100644
1690     --- a/fs/nilfs2/dat.c
1691     +++ b/fs/nilfs2/dat.c
1692     @@ -497,7 +497,9 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
1693     di = NILFS_DAT_I(dat);
1694     lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
1695     nilfs_palloc_setup_cache(dat, &di->palloc_cache);
1696     - nilfs_mdt_setup_shadow_map(dat, &di->shadow);
1697     + err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
1698     + if (err)
1699     + goto failed;
1700    
1701     err = nilfs_read_inode_common(dat, raw_inode);
1702     if (err)
1703     diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
1704     index aa3c328ee189c..114774ac2185a 100644
1705     --- a/fs/nilfs2/gcinode.c
1706     +++ b/fs/nilfs2/gcinode.c
1707     @@ -126,9 +126,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
1708     int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
1709     __u64 vbn, struct buffer_head **out_bh)
1710     {
1711     + struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
1712     int ret;
1713    
1714     - ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
1715     + ret = nilfs_btnode_submit_block(btnc_inode->i_mapping,
1716     vbn ? : pbn, pbn, REQ_OP_READ, 0,
1717     out_bh, &pbn);
1718     if (ret == -EEXIST) /* internal code (cache hit) */
1719     @@ -170,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode)
1720     ii->i_flags = 0;
1721     nilfs_bmap_init_gc(ii->i_bmap);
1722    
1723     - return 0;
1724     + return nilfs_attach_btree_node_cache(inode);
1725     }
1726    
1727     /**
1728     @@ -185,7 +186,7 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
1729     ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
1730     list_del_init(&ii->i_dirty);
1731     truncate_inode_pages(&ii->vfs_inode.i_data, 0);
1732     - nilfs_btnode_cache_clear(&ii->i_btnode_cache);
1733     + nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
1734     iput(&ii->vfs_inode);
1735     }
1736     }
1737     diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
1738     index 671085512e0fd..35b0bfe9324f2 100644
1739     --- a/fs/nilfs2/inode.c
1740     +++ b/fs/nilfs2/inode.c
1741     @@ -28,12 +28,16 @@
1742     * @cno: checkpoint number
1743     * @root: pointer on NILFS root object (mounted checkpoint)
1744     * @for_gc: inode for GC flag
1745     + * @for_btnc: inode for B-tree node cache flag
1746     + * @for_shadow: inode for shadowed page cache flag
1747     */
1748     struct nilfs_iget_args {
1749     u64 ino;
1750     __u64 cno;
1751     struct nilfs_root *root;
1752     - int for_gc;
1753     + bool for_gc;
1754     + bool for_btnc;
1755     + bool for_shadow;
1756     };
1757    
1758     static int nilfs_iget_test(struct inode *inode, void *opaque);
1759     @@ -322,7 +326,8 @@ static int nilfs_insert_inode_locked(struct inode *inode,
1760     unsigned long ino)
1761     {
1762     struct nilfs_iget_args args = {
1763     - .ino = ino, .root = root, .cno = 0, .for_gc = 0
1764     + .ino = ino, .root = root, .cno = 0, .for_gc = false,
1765     + .for_btnc = false, .for_shadow = false
1766     };
1767    
1768     return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
1769     @@ -534,6 +539,19 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
1770     return 0;
1771    
1772     ii = NILFS_I(inode);
1773     + if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
1774     + if (!args->for_btnc)
1775     + return 0;
1776     + } else if (args->for_btnc) {
1777     + return 0;
1778     + }
1779     + if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
1780     + if (!args->for_shadow)
1781     + return 0;
1782     + } else if (args->for_shadow) {
1783     + return 0;
1784     + }
1785     +
1786     if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
1787     return !args->for_gc;
1788    
1789     @@ -545,15 +563,17 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
1790     struct nilfs_iget_args *args = opaque;
1791    
1792     inode->i_ino = args->ino;
1793     - if (args->for_gc) {
1794     + NILFS_I(inode)->i_cno = args->cno;
1795     + NILFS_I(inode)->i_root = args->root;
1796     + if (args->root && args->ino == NILFS_ROOT_INO)
1797     + nilfs_get_root(args->root);
1798     +
1799     + if (args->for_gc)
1800     NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
1801     - NILFS_I(inode)->i_cno = args->cno;
1802     - NILFS_I(inode)->i_root = NULL;
1803     - } else {
1804     - if (args->root && args->ino == NILFS_ROOT_INO)
1805     - nilfs_get_root(args->root);
1806     - NILFS_I(inode)->i_root = args->root;
1807     - }
1808     + if (args->for_btnc)
1809     + NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
1810     + if (args->for_shadow)
1811     + NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
1812     return 0;
1813     }
1814    
1815     @@ -561,7 +581,8 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
1816     unsigned long ino)
1817     {
1818     struct nilfs_iget_args args = {
1819     - .ino = ino, .root = root, .cno = 0, .for_gc = 0
1820     + .ino = ino, .root = root, .cno = 0, .for_gc = false,
1821     + .for_btnc = false, .for_shadow = false
1822     };
1823    
1824     return ilookup5(sb, ino, nilfs_iget_test, &args);
1825     @@ -571,7 +592,8 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
1826     unsigned long ino)
1827     {
1828     struct nilfs_iget_args args = {
1829     - .ino = ino, .root = root, .cno = 0, .for_gc = 0
1830     + .ino = ino, .root = root, .cno = 0, .for_gc = false,
1831     + .for_btnc = false, .for_shadow = false
1832     };
1833    
1834     return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
1835     @@ -602,7 +624,8 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
1836     __u64 cno)
1837     {
1838     struct nilfs_iget_args args = {
1839     - .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
1840     + .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
1841     + .for_btnc = false, .for_shadow = false
1842     };
1843     struct inode *inode;
1844     int err;
1845     @@ -622,6 +645,113 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
1846     return inode;
1847     }
1848    
1849     +/**
1850     + * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
1851     + * @inode: inode object
1852     + *
1853     + * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
1854     + * or does nothing if the inode already has it. This function allocates
1855     + * an additional inode to maintain page cache of B-tree nodes one-on-one.
1856     + *
1857     + * Return Value: On success, 0 is returned. On errors, one of the following
1858     + * negative error code is returned.
1859     + *
1860     + * %-ENOMEM - Insufficient memory available.
1861     + */
1862     +int nilfs_attach_btree_node_cache(struct inode *inode)
1863     +{
1864     + struct nilfs_inode_info *ii = NILFS_I(inode);
1865     + struct inode *btnc_inode;
1866     + struct nilfs_iget_args args;
1867     +
1868     + if (ii->i_assoc_inode)
1869     + return 0;
1870     +
1871     + args.ino = inode->i_ino;
1872     + args.root = ii->i_root;
1873     + args.cno = ii->i_cno;
1874     + args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
1875     + args.for_btnc = true;
1876     + args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
1877     +
1878     + btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
1879     + nilfs_iget_set, &args);
1880     + if (unlikely(!btnc_inode))
1881     + return -ENOMEM;
1882     + if (btnc_inode->i_state & I_NEW) {
1883     + nilfs_init_btnc_inode(btnc_inode);
1884     + unlock_new_inode(btnc_inode);
1885     + }
1886     + NILFS_I(btnc_inode)->i_assoc_inode = inode;
1887     + NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
1888     + ii->i_assoc_inode = btnc_inode;
1889     +
1890     + return 0;
1891     +}
1892     +
1893     +/**
1894     + * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
1895     + * @inode: inode object
1896     + *
1897     + * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
1898     + * holder inode bound to @inode, or does nothing if @inode doesn't have it.
1899     + */
1900     +void nilfs_detach_btree_node_cache(struct inode *inode)
1901     +{
1902     + struct nilfs_inode_info *ii = NILFS_I(inode);
1903     + struct inode *btnc_inode = ii->i_assoc_inode;
1904     +
1905     + if (btnc_inode) {
1906     + NILFS_I(btnc_inode)->i_assoc_inode = NULL;
1907     + ii->i_assoc_inode = NULL;
1908     + iput(btnc_inode);
1909     + }
1910     +}
1911     +
1912     +/**
1913     + * nilfs_iget_for_shadow - obtain inode for shadow mapping
1914     + * @inode: inode object that uses shadow mapping
1915     + *
1916     + * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
1917     + * caches for shadow mapping. The page cache for data pages is set up
1918     + * in one inode and the one for b-tree node pages is set up in the
1919     + * other inode, which is attached to the former inode.
1920     + *
1921     + * Return Value: On success, a pointer to the inode for data pages is
1922     + * returned. On errors, one of the following negative error code is returned
1923     + * in a pointer type.
1924     + *
1925     + * %-ENOMEM - Insufficient memory available.
1926     + */
1927     +struct inode *nilfs_iget_for_shadow(struct inode *inode)
1928     +{
1929     + struct nilfs_iget_args args = {
1930     + .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
1931     + .for_btnc = false, .for_shadow = true
1932     + };
1933     + struct inode *s_inode;
1934     + int err;
1935     +
1936     + s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
1937     + nilfs_iget_set, &args);
1938     + if (unlikely(!s_inode))
1939     + return ERR_PTR(-ENOMEM);
1940     + if (!(s_inode->i_state & I_NEW))
1941     + return inode;
1942     +
1943     + NILFS_I(s_inode)->i_flags = 0;
1944     + memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
1945     + mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
1946     +
1947     + err = nilfs_attach_btree_node_cache(s_inode);
1948     + if (unlikely(err)) {
1949     + iget_failed(s_inode);
1950     + return ERR_PTR(err);
1951     + }
1952     + unlock_new_inode(s_inode);
1953     + return s_inode;
1954     +}
1955     +
1956     void nilfs_write_inode_common(struct inode *inode,
1957     struct nilfs_inode *raw_inode, int has_bmap)
1958     {
1959     @@ -770,7 +900,8 @@ static void nilfs_clear_inode(struct inode *inode)
1960     if (test_bit(NILFS_I_BMAP, &ii->i_state))
1961     nilfs_bmap_clear(ii->i_bmap);
1962    
1963     - nilfs_btnode_cache_clear(&ii->i_btnode_cache);
1964     + if (!test_bit(NILFS_I_BTNC, &ii->i_state))
1965     + nilfs_detach_btree_node_cache(inode);
1966    
1967     if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
1968     nilfs_put_root(ii->i_root);
1969     diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
1970     index 700870a92bc4a..7c9055d767d16 100644
1971     --- a/fs/nilfs2/mdt.c
1972     +++ b/fs/nilfs2/mdt.c
1973     @@ -469,9 +469,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
1974     void nilfs_mdt_clear(struct inode *inode)
1975     {
1976     struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
1977     + struct nilfs_shadow_map *shadow = mdi->mi_shadow;
1978    
1979     if (mdi->mi_palloc_cache)
1980     nilfs_palloc_destroy_cache(inode);
1981     +
1982     + if (shadow) {
1983     + struct inode *s_inode = shadow->inode;
1984     +
1985     + shadow->inode = NULL;
1986     + iput(s_inode);
1987     + mdi->mi_shadow = NULL;
1988     + }
1989     }
1990    
1991     /**
1992     @@ -505,12 +514,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
1993     struct nilfs_shadow_map *shadow)
1994     {
1995     struct nilfs_mdt_info *mi = NILFS_MDT(inode);
1996     + struct inode *s_inode;
1997    
1998     INIT_LIST_HEAD(&shadow->frozen_buffers);
1999     - address_space_init_once(&shadow->frozen_data);
2000     - nilfs_mapping_init(&shadow->frozen_data, inode);
2001     - address_space_init_once(&shadow->frozen_btnodes);
2002     - nilfs_mapping_init(&shadow->frozen_btnodes, inode);
2003     +
2004     + s_inode = nilfs_iget_for_shadow(inode);
2005     + if (IS_ERR(s_inode))
2006     + return PTR_ERR(s_inode);
2007     +
2008     + shadow->inode = s_inode;
2009     mi->mi_shadow = shadow;
2010     return 0;
2011     }
2012     @@ -524,14 +536,15 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode)
2013     struct nilfs_mdt_info *mi = NILFS_MDT(inode);
2014     struct nilfs_inode_info *ii = NILFS_I(inode);
2015     struct nilfs_shadow_map *shadow = mi->mi_shadow;
2016     + struct inode *s_inode = shadow->inode;
2017     int ret;
2018    
2019     - ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
2020     + ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
2021     if (ret)
2022     goto out;
2023    
2024     - ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
2025     - &ii->i_btnode_cache);
2026     + ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
2027     + ii->i_assoc_inode->i_mapping);
2028     if (ret)
2029     goto out;
2030    
2031     @@ -547,7 +560,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
2032     struct page *page;
2033     int blkbits = inode->i_blkbits;
2034    
2035     - page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
2036     + page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index);
2037     if (!page)
2038     return -ENOMEM;
2039    
2040     @@ -579,7 +592,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
2041     struct page *page;
2042     int n;
2043    
2044     - page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
2045     + page = find_lock_page(shadow->inode->i_mapping, bh->b_page->index);
2046     if (page) {
2047     if (page_has_buffers(page)) {
2048     n = bh_offset(bh) >> inode->i_blkbits;
2049     @@ -620,10 +633,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
2050     nilfs_palloc_clear_cache(inode);
2051    
2052     nilfs_clear_dirty_pages(inode->i_mapping, true);
2053     - nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
2054     + nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
2055    
2056     - nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
2057     - nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
2058     + nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
2059     + nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
2060     + NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
2061    
2062     nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
2063    
2064     @@ -638,10 +652,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode)
2065     {
2066     struct nilfs_mdt_info *mi = NILFS_MDT(inode);
2067     struct nilfs_shadow_map *shadow = mi->mi_shadow;
2068     + struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;
2069    
2070     down_write(&mi->mi_sem);
2071     nilfs_release_frozen_buffers(shadow);
2072     - truncate_inode_pages(&shadow->frozen_data, 0);
2073     - truncate_inode_pages(&shadow->frozen_btnodes, 0);
2074     + truncate_inode_pages(shadow->inode->i_mapping, 0);
2075     + truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
2076     up_write(&mi->mi_sem);
2077     }
2078     diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
2079     index e77aea4bb921c..9d8ac0d27c16e 100644
2080     --- a/fs/nilfs2/mdt.h
2081     +++ b/fs/nilfs2/mdt.h
2082     @@ -18,14 +18,12 @@
2083     /**
2084     * struct nilfs_shadow_map - shadow mapping of meta data file
2085     * @bmap_store: shadow copy of bmap state
2086     - * @frozen_data: shadowed dirty data pages
2087     - * @frozen_btnodes: shadowed dirty b-tree nodes' pages
2088     + * @inode: holder of page caches used in shadow mapping
2089     * @frozen_buffers: list of frozen buffers
2090     */
2091     struct nilfs_shadow_map {
2092     struct nilfs_bmap_store bmap_store;
2093     - struct address_space frozen_data;
2094     - struct address_space frozen_btnodes;
2095     + struct inode *inode;
2096     struct list_head frozen_buffers;
2097     };
2098    
2099     diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
2100     index 42395ba52da62..4895d978369a1 100644
2101     --- a/fs/nilfs2/nilfs.h
2102     +++ b/fs/nilfs2/nilfs.h
2103     @@ -28,7 +28,7 @@
2104     * @i_xattr: <TODO>
2105     * @i_dir_start_lookup: page index of last successful search
2106     * @i_cno: checkpoint number for GC inode
2107     - * @i_btnode_cache: cached pages of b-tree nodes
2108     + * @i_assoc_inode: associated inode (B-tree node cache holder or back pointer)
2109     * @i_dirty: list for connecting dirty files
2110     * @xattr_sem: semaphore for extended attributes processing
2111     * @i_bh: buffer contains disk inode
2112     @@ -43,7 +43,7 @@ struct nilfs_inode_info {
2113     __u64 i_xattr; /* sector_t ??? */
2114     __u32 i_dir_start_lookup;
2115     __u64 i_cno; /* check point number for GC inode */
2116     - struct address_space i_btnode_cache;
2117     + struct inode *i_assoc_inode;
2118     struct list_head i_dirty; /* List for connecting dirty files */
2119    
2120     #ifdef CONFIG_NILFS_XATTR
2121     @@ -75,13 +75,6 @@ NILFS_BMAP_I(const struct nilfs_bmap *bmap)
2122     return container_of(bmap, struct nilfs_inode_info, i_bmap_data);
2123     }
2124    
2125     -static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
2126     -{
2127     - struct nilfs_inode_info *ii =
2128     - container_of(btnc, struct nilfs_inode_info, i_btnode_cache);
2129     - return &ii->vfs_inode;
2130     -}
2131     -
2132     /*
2133     * Dynamic state flags of NILFS on-memory inode (i_state)
2134     */
2135     @@ -98,6 +91,8 @@ enum {
2136     NILFS_I_INODE_SYNC, /* dsync is not allowed for inode */
2137     NILFS_I_BMAP, /* has bmap and btnode_cache */
2138     NILFS_I_GCINODE, /* inode for GC, on memory only */
2139     + NILFS_I_BTNC, /* inode for btree node cache */
2140     + NILFS_I_SHADOW, /* inode for shadowed page cache */
2141     };
2142    
2143     /*
2144     @@ -264,6 +259,9 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
2145     unsigned long ino);
2146     extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
2147     unsigned long ino, __u64 cno);
2148     +int nilfs_attach_btree_node_cache(struct inode *inode);
2149     +void nilfs_detach_btree_node_cache(struct inode *inode);
2150     +struct inode *nilfs_iget_for_shadow(struct inode *inode);
2151     extern void nilfs_update_inode(struct inode *, struct buffer_head *, int);
2152     extern void nilfs_truncate(struct inode *);
2153     extern void nilfs_evict_inode(struct inode *);
2154     diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
2155     index d7fc8d369d89e..98d21ad9a073f 100644
2156     --- a/fs/nilfs2/page.c
2157     +++ b/fs/nilfs2/page.c
2158     @@ -450,10 +450,9 @@ void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
2159     /*
2160     * NILFS2 needs clear_page_dirty() in the following two cases:
2161     *
2162     - * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
2163     - * page dirty flags when it copies back pages from the shadow cache
2164     - * (gcdat->{i_mapping,i_btnode_cache}) to its original cache
2165     - * (dat->{i_mapping,i_btnode_cache}).
2166     + * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
2167     + * flag of pages when it copies back pages from shadow cache to the
2168     + * original cache.
2169     *
2170     * 2) Some B-tree operations like insertion or deletion may dispose buffers
2171     * in dirty state, and this needs to cancel the dirty state of their pages.
2172     diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
2173     index 91b58c897f92d..eb3ac76190887 100644
2174     --- a/fs/nilfs2/segment.c
2175     +++ b/fs/nilfs2/segment.c
2176     @@ -738,15 +738,18 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
2177     struct list_head *listp)
2178     {
2179     struct nilfs_inode_info *ii = NILFS_I(inode);
2180     - struct address_space *mapping = &ii->i_btnode_cache;
2181     + struct inode *btnc_inode = ii->i_assoc_inode;
2182     struct pagevec pvec;
2183     struct buffer_head *bh, *head;
2184     unsigned int i;
2185     pgoff_t index = 0;
2186    
2187     + if (!btnc_inode)
2188     + return;
2189     +
2190     pagevec_init(&pvec);
2191    
2192     - while (pagevec_lookup_tag(&pvec, mapping, &index,
2193     + while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
2194     PAGECACHE_TAG_DIRTY)) {
2195     for (i = 0; i < pagevec_count(&pvec); i++) {
2196     bh = head = page_buffers(pvec.pages[i]);
2197     @@ -2410,7 +2413,7 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2198     continue;
2199     list_del_init(&ii->i_dirty);
2200     truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2201     - nilfs_btnode_cache_clear(&ii->i_btnode_cache);
2202     + nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
2203     iput(&ii->vfs_inode);
2204     }
2205     }
2206     diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
2207     index 5729ee86da9ae..b5bc9f0c6a406 100644
2208     --- a/fs/nilfs2/super.c
2209     +++ b/fs/nilfs2/super.c
2210     @@ -151,7 +151,8 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
2211     ii->i_bh = NULL;
2212     ii->i_state = 0;
2213     ii->i_cno = 0;
2214     - nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
2215     + ii->i_assoc_inode = NULL;
2216     + ii->i_bmap = &ii->i_bmap_data;
2217     return &ii->vfs_inode;
2218     }
2219    
2220     @@ -1375,8 +1376,6 @@ static void nilfs_inode_init_once(void *obj)
2221     #ifdef CONFIG_NILFS_XATTR
2222     init_rwsem(&ii->xattr_sem);
2223     #endif
2224     - address_space_init_once(&ii->i_btnode_cache);
2225     - ii->i_bmap = &ii->i_bmap_data;
2226     inode_init_once(&ii->vfs_inode);
2227     }
2228    
2229     diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
2230     index 8cc766743270f..308c2d8cdca19 100644
2231     --- a/include/linux/blkdev.h
2232     +++ b/include/linux/blkdev.h
2233     @@ -1409,6 +1409,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
2234     return offset << SECTOR_SHIFT;
2235     }
2236    
2237     +/*
2238     + * Two cases of handling DISCARD merge:
2239     + * If max_discard_segments > 1, the driver takes every bio
2240     + * as a range and send them to controller together. The ranges
2241     + * needn't to be contiguous.
2242     + * Otherwise, the bios/requests will be handled as same as
2243     + * others which should be contiguous.
2244     + */
2245     +static inline bool blk_discard_mergable(struct request *req)
2246     +{
2247     + if (req_op(req) == REQ_OP_DISCARD &&
2248     + queue_max_discard_segments(req->q) > 1)
2249     + return true;
2250     + return false;
2251     +}
2252     +
2253     static inline int bdev_discard_alignment(struct block_device *bdev)
2254     {
2255     struct request_queue *q = bdev_get_queue(bdev);
2256     diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
2257     index da90f20e11c1c..4d450672b7d66 100644
2258     --- a/include/linux/dma-mapping.h
2259     +++ b/include/linux/dma-mapping.h
2260     @@ -70,14 +70,6 @@
2261     */
2262     #define DMA_ATTR_PRIVILEGED (1UL << 9)
2263    
2264     -/*
2265     - * This is a hint to the DMA-mapping subsystem that the device is expected
2266     - * to overwrite the entire mapped size, thus the caller does not require any
2267     - * of the previous buffer contents to be preserved. This allows
2268     - * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers.
2269     - */
2270     -#define DMA_ATTR_OVERWRITE (1UL << 10)
2271     -
2272     /*
2273     * A dma_addr_t can hold any valid DMA or bus address for the platform.
2274     * It can be given to a device to use as a DMA source or target. A CPU cannot
2275     diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
2276     index 0661af17a7584..1e02058113944 100644
2277     --- a/include/linux/mc146818rtc.h
2278     +++ b/include/linux/mc146818rtc.h
2279     @@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
2280     /* 2 values for divider stage reset, others for "testing purposes only" */
2281     # define RTC_DIV_RESET1 0x60
2282     # define RTC_DIV_RESET2 0x70
2283     + /* In AMD BKDG bit 5 and 6 are reserved, bit 4 is for select dv0 bank */
2284     +# define RTC_AMD_BANK_SELECT 0x10
2285     /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
2286     # define RTC_RATE_SELECT 0x0F
2287    
2288     diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
2289     index dc60d03c4b606..0b35747c9837a 100644
2290     --- a/include/linux/stmmac.h
2291     +++ b/include/linux/stmmac.h
2292     @@ -179,5 +179,6 @@ struct plat_stmmacenet_data {
2293     int mac_port_sel_speed;
2294     bool en_tx_lpi_clockgating;
2295     int has_xgmac;
2296     + bool sph_disable;
2297     };
2298     #endif
2299     diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
2300     index a940de03808dd..46deca97e806d 100644
2301     --- a/include/linux/sunrpc/xprtsock.h
2302     +++ b/include/linux/sunrpc/xprtsock.h
2303     @@ -90,6 +90,7 @@ struct sock_xprt {
2304     #define XPRT_SOCK_WAKE_WRITE (5)
2305     #define XPRT_SOCK_WAKE_PENDING (6)
2306     #define XPRT_SOCK_WAKE_DISCONNECT (7)
2307     +#define XPRT_SOCK_CONNECT_SENT (8)
2308    
2309     #endif /* __KERNEL__ */
2310    
2311     diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
2312     index 7f30393b92c3b..f76d11725c6c6 100644
2313     --- a/include/uapi/linux/dma-buf.h
2314     +++ b/include/uapi/linux/dma-buf.h
2315     @@ -44,7 +44,7 @@ struct dma_buf_sync {
2316     * between them in actual uapi, they're just different numbers.
2317     */
2318     #define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *)
2319     -#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32)
2320     -#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64)
2321     +#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32)
2322     +#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64)
2323    
2324     #endif
2325     diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
2326     index f17b771856d1c..913cb71198af4 100644
2327     --- a/kernel/dma/swiotlb.c
2328     +++ b/kernel/dma/swiotlb.c
2329     @@ -571,11 +571,14 @@ found:
2330     */
2331     for (i = 0; i < nslots; i++)
2332     io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
2333     - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
2334     - (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE ||
2335     - dir == DMA_BIDIRECTIONAL))
2336     - swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
2337     -
2338     + /*
2339     + * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
2340     + * to the tlb buffer, if we knew for sure the device will
2341     + * overwrite the entire current content. But we don't. Thus
2342     + * unconditional bounce may prevent leaking swiotlb content (i.e.
2343     + * kernel memory) to user-space.
2344     + */
2345     + swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
2346     return tlb_addr;
2347     }
2348    
2349     diff --git a/kernel/events/core.c b/kernel/events/core.c
2350     index 52f4a9e467040..8336dcb2bd432 100644
2351     --- a/kernel/events/core.c
2352     +++ b/kernel/events/core.c
2353     @@ -11114,6 +11114,9 @@ SYSCALL_DEFINE5(perf_event_open,
2354     * Do not allow to attach to a group in a different task
2355     * or CPU context. If we're moving SW events, we'll fix
2356     * this up later, so allow that.
2357     + *
2358     + * Racy, not holding group_leader->ctx->mutex, see comment with
2359     + * perf_event_ctx_lock().
2360     */
2361     if (!move_group && group_leader->ctx != ctx)
2362     goto err_context;
2363     @@ -11181,6 +11184,7 @@ SYSCALL_DEFINE5(perf_event_open,
2364     } else {
2365     perf_event_ctx_unlock(group_leader, gctx);
2366     move_group = 0;
2367     + goto not_move_group;
2368     }
2369     }
2370    
2371     @@ -11197,7 +11201,17 @@ SYSCALL_DEFINE5(perf_event_open,
2372     }
2373     } else {
2374     mutex_lock(&ctx->mutex);
2375     +
2376     + /*
2377     + * Now that we hold ctx->lock, (re)validate group_leader->ctx == ctx,
2378     + * see the group_leader && !move_group test earlier.
2379     + */
2380     + if (group_leader && group_leader->ctx != ctx) {
2381     + err = -EINVAL;
2382     + goto err_locked;
2383     + }
2384     }
2385     +not_move_group:
2386    
2387     if (ctx->task == TASK_TOMBSTONE) {
2388     err = -ESRCH;
2389     diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
2390     index 09b1dd8cd8536..464f6a619444d 100644
2391     --- a/net/bridge/br_input.c
2392     +++ b/net/bridge/br_input.c
2393     @@ -42,6 +42,13 @@ static int br_pass_frame_up(struct sk_buff *skb)
2394     u64_stats_update_end(&brstats->syncp);
2395    
2396     vg = br_vlan_group_rcu(br);
2397     +
2398     + /* Reset the offload_fwd_mark because there could be a stacked
2399     + * bridge above, and it should not think this bridge it doing
2400     + * that bridge's work forwarding out its ports.
2401     + */
2402     + br_switchdev_frame_unmark(skb);
2403     +
2404     /* Bridge is just like any other port. Make sure the
2405     * packet is allowed except in promisc modue when someone
2406     * may be running packet capture.
2407     diff --git a/net/key/af_key.c b/net/key/af_key.c
2408     index 2ac9560020f91..f67d3ba72c496 100644
2409     --- a/net/key/af_key.c
2410     +++ b/net/key/af_key.c
2411     @@ -2830,8 +2830,10 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
2412     void *ext_hdrs[SADB_EXT_MAX];
2413     int err;
2414    
2415     - pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
2416     - BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
2417     + err = pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
2418     + BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
2419     + if (err)
2420     + return err;
2421    
2422     memset(ext_hdrs, 0, sizeof(ext_hdrs));
2423     err = parse_exthdrs(skb, hdr, ext_hdrs);
2424     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
2425     index ab91683d94596..99d5f8b58e92e 100644
2426     --- a/net/mac80211/rx.c
2427     +++ b/net/mac80211/rx.c
2428     @@ -1400,8 +1400,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
2429     goto dont_reorder;
2430    
2431     /* not part of a BA session */
2432     - if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
2433     - ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
2434     + if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
2435     goto dont_reorder;
2436    
2437     /* new, potentially un-ordered, ampdu frame - process it */
2438     diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
2439     index ce3382be937ff..b002e18f38c81 100644
2440     --- a/net/nfc/nci/data.c
2441     +++ b/net/nfc/nci/data.c
2442     @@ -118,7 +118,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
2443    
2444     skb_frag = nci_skb_alloc(ndev,
2445     (NCI_DATA_HDR_SIZE + frag_len),
2446     - GFP_KERNEL);
2447     + GFP_ATOMIC);
2448     if (skb_frag == NULL) {
2449     rc = -ENOMEM;
2450     goto free_exit;
2451     diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
2452     index 04e55ccb33836..4fe336ff2bfa1 100644
2453     --- a/net/nfc/nci/hci.c
2454     +++ b/net/nfc/nci/hci.c
2455     @@ -153,7 +153,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
2456    
2457     i = 0;
2458     skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
2459     - NCI_DATA_HDR_SIZE, GFP_KERNEL);
2460     + NCI_DATA_HDR_SIZE, GFP_ATOMIC);
2461     if (!skb)
2462     return -ENOMEM;
2463    
2464     @@ -186,7 +186,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
2465     if (i < data_len) {
2466     skb = nci_skb_alloc(ndev,
2467     conn_info->max_pkt_payload_len +
2468     - NCI_DATA_HDR_SIZE, GFP_KERNEL);
2469     + NCI_DATA_HDR_SIZE, GFP_ATOMIC);
2470     if (!skb)
2471     return -ENOMEM;
2472    
2473     diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
2474     index 305cb190e9979..f095a0fb75c6d 100644
2475     --- a/net/sched/act_pedit.c
2476     +++ b/net/sched/act_pedit.c
2477     @@ -231,6 +231,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
2478     for (i = 0; i < p->tcfp_nkeys; ++i) {
2479     u32 cur = p->tcfp_keys[i].off;
2480    
2481     + /* sanitize the shift value for any later use */
2482     + p->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1,
2483     + p->tcfp_keys[i].shift);
2484     +
2485     /* The AT option can read a single byte, we can bound the actual
2486     * value with uchar max.
2487     */
2488     diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
2489     index 8ac579778e487..94ae95c57f78a 100644
2490     --- a/net/sunrpc/xprt.c
2491     +++ b/net/sunrpc/xprt.c
2492     @@ -717,21 +717,30 @@ void xprt_disconnect_done(struct rpc_xprt *xprt)
2493     EXPORT_SYMBOL_GPL(xprt_disconnect_done);
2494    
2495     /**
2496     - * xprt_force_disconnect - force a transport to disconnect
2497     + * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
2498     * @xprt: transport to disconnect
2499     - *
2500     */
2501     -void xprt_force_disconnect(struct rpc_xprt *xprt)
2502     +static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
2503     {
2504     - /* Don't race with the test_bit() in xprt_clear_locked() */
2505     - spin_lock(&xprt->transport_lock);
2506     - set_bit(XPRT_CLOSE_WAIT, &xprt->state);
2507     - /* Try to schedule an autoclose RPC call */
2508     + if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
2509     + return;
2510     if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
2511     queue_work(xprtiod_workqueue, &xprt->task_cleanup);
2512     else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
2513     rpc_wake_up_queued_task_set_status(&xprt->pending,
2514     xprt->snd_task, -ENOTCONN);
2515     +}
2516     +
2517     +/**
2518     + * xprt_force_disconnect - force a transport to disconnect
2519     + * @xprt: transport to disconnect
2520     + *
2521     + */
2522     +void xprt_force_disconnect(struct rpc_xprt *xprt)
2523     +{
2524     + /* Don't race with the test_bit() in xprt_clear_locked() */
2525     + spin_lock(&xprt->transport_lock);
2526     + xprt_schedule_autoclose_locked(xprt);
2527     spin_unlock(&xprt->transport_lock);
2528     }
2529     EXPORT_SYMBOL_GPL(xprt_force_disconnect);
2530     @@ -771,11 +780,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
2531     goto out;
2532     if (test_bit(XPRT_CLOSING, &xprt->state))
2533     goto out;
2534     - set_bit(XPRT_CLOSE_WAIT, &xprt->state);
2535     - /* Try to schedule an autoclose RPC call */
2536     - if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
2537     - queue_work(xprtiod_workqueue, &xprt->task_cleanup);
2538     - xprt_wake_pending_tasks(xprt, -EAGAIN);
2539     + xprt_schedule_autoclose_locked(xprt);
2540     out:
2541     spin_unlock(&xprt->transport_lock);
2542     }
2543     @@ -863,10 +868,7 @@ void xprt_connect(struct rpc_task *task)
2544     if (!xprt_lock_write(xprt, task))
2545     return;
2546    
2547     - if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
2548     - xprt->ops->close(xprt);
2549     -
2550     - if (!xprt_connected(xprt)) {
2551     + if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
2552     task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
2553     rpc_sleep_on_timeout(&xprt->pending, task, NULL,
2554     xprt_request_timeout(task->tk_rqstp));
2555     diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
2556     index 43bc02dea80c8..81f0e03b71b6d 100644
2557     --- a/net/sunrpc/xprtsock.c
2558     +++ b/net/sunrpc/xprtsock.c
2559     @@ -989,7 +989,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
2560    
2561     /* Close the stream if the previous transmission was incomplete */
2562     if (xs_send_request_was_aborted(transport, req)) {
2563     - xs_close(xprt);
2564     + xprt_force_disconnect(xprt);
2565     return -ENOTCONN;
2566     }
2567    
2568     @@ -1027,7 +1027,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
2569     -status);
2570     /* fall through */
2571     case -EPIPE:
2572     - xs_close(xprt);
2573     + xprt_force_disconnect(xprt);
2574     status = -ENOTCONN;
2575     }
2576    
2577     @@ -1303,6 +1303,16 @@ static void xs_reset_transport(struct sock_xprt *transport)
2578    
2579     if (sk == NULL)
2580     return;
2581     + /*
2582     + * Make sure we're calling this in a context from which it is safe
2583     + * to call __fput_sync(). In practice that means rpciod and the
2584     + * system workqueue.
2585     + */
2586     + if (!(current->flags & PF_WQ_WORKER)) {
2587     + WARN_ON_ONCE(1);
2588     + set_bit(XPRT_CLOSE_WAIT, &xprt->state);
2589     + return;
2590     + }
2591    
2592     if (atomic_read(&transport->xprt.swapper))
2593     sk_clear_memalloc(sk);
2594     @@ -1326,7 +1336,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
2595     mutex_unlock(&transport->recv_mutex);
2596    
2597     trace_rpc_socket_close(xprt, sock);
2598     - fput(filp);
2599     + __fput_sync(filp);
2600    
2601     xprt_disconnect_done(xprt);
2602     }
2603     @@ -2384,10 +2394,14 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2604     struct rpc_xprt *xprt = &transport->xprt;
2605     int status = -EIO;
2606    
2607     - if (!sock) {
2608     - sock = xs_create_sock(xprt, transport,
2609     - xs_addr(xprt)->sa_family, SOCK_STREAM,
2610     - IPPROTO_TCP, true);
2611     + if (xprt_connected(xprt))
2612     + goto out;
2613     + if (test_and_clear_bit(XPRT_SOCK_CONNECT_SENT,
2614     + &transport->sock_state) ||
2615     + !sock) {
2616     + xs_reset_transport(transport);
2617     + sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family,
2618     + SOCK_STREAM, IPPROTO_TCP, true);
2619     if (IS_ERR(sock)) {
2620     status = PTR_ERR(sock);
2621     goto out;
2622     @@ -2418,6 +2432,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2623     break;
2624     case 0:
2625     case -EINPROGRESS:
2626     + set_bit(XPRT_SOCK_CONNECT_SENT, &transport->sock_state);
2627     + fallthrough;
2628     case -EALREADY:
2629     xprt_unlock_connect(xprt, transport);
2630     return;
2631     @@ -2471,11 +2487,7 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2632    
2633     if (transport->sock != NULL) {
2634     dprintk("RPC: xs_connect delayed xprt %p for %lu "
2635     - "seconds\n",
2636     - xprt, xprt->reestablish_timeout / HZ);
2637     -
2638     - /* Start by resetting any existing state */
2639     - xs_reset_transport(transport);
2640     + "seconds\n", xprt, xprt->reestablish_timeout / HZ);
2641    
2642     delay = xprt_reconnect_delay(xprt);
2643     xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);
2644     diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
2645     index d6420d224d097..09b368761cc00 100644
2646     --- a/sound/isa/wavefront/wavefront_synth.c
2647     +++ b/sound/isa/wavefront/wavefront_synth.c
2648     @@ -1088,7 +1088,8 @@ wavefront_send_sample (snd_wavefront_t *dev,
2649    
2650     if (dataptr < data_end) {
2651    
2652     - __get_user (sample_short, dataptr);
2653     + if (get_user(sample_short, dataptr))
2654     + return -EFAULT;
2655     dataptr += skip;
2656    
2657     if (data_is_unsigned) { /* GUS ? */
2658     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2659     index 851ea79da31cd..78b5a0f22a415 100644
2660     --- a/sound/pci/hda/patch_realtek.c
2661     +++ b/sound/pci/hda/patch_realtek.c
2662     @@ -10233,6 +10233,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
2663     SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
2664     SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
2665     SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
2666     + SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
2667     SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
2668     SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
2669     SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
2670     diff --git a/tools/objtool/check.c b/tools/objtool/check.c
2671     index 06aaf04e629c2..bae6b261481db 100644
2672     --- a/tools/objtool/check.c
2673     +++ b/tools/objtool/check.c
2674     @@ -144,6 +144,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
2675     "usercopy_abort",
2676     "machine_real_restart",
2677     "rewind_stack_do_exit",
2678     + "cpu_bringup_and_idle",
2679     };
2680    
2681     if (!func)
2682     diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
2683     index 5797253b97005..69d62e57a0c36 100644
2684     --- a/tools/perf/bench/numa.c
2685     +++ b/tools/perf/bench/numa.c
2686     @@ -1630,7 +1630,7 @@ static int __bench_numa(const char *name)
2687     "GB/sec,", "total-speed", "GB/sec total speed");
2688    
2689     if (g->p.show_details >= 2) {
2690     - char tname[14 + 2 * 10 + 1];
2691     + char tname[14 + 2 * 11 + 1];
2692     struct thread_data *td;
2693     for (p = 0; p < g->p.nr_proc; p++) {
2694     for (t = 0; t < g->p.nr_threads; t++) {
2695     diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
2696     index 157822331954d..d2ac09b35dcf5 100755
2697     --- a/tools/testing/selftests/net/fcnal-test.sh
2698     +++ b/tools/testing/selftests/net/fcnal-test.sh
2699     @@ -757,10 +757,16 @@ ipv4_ping()
2700     setup
2701     set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null
2702     ipv4_ping_novrf
2703     + setup
2704     + set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
2705     + ipv4_ping_novrf
2706    
2707     log_subsection "With VRF"
2708     setup "yes"
2709     ipv4_ping_vrf
2710     + setup "yes"
2711     + set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
2712     + ipv4_ping_vrf
2713     }
2714    
2715     ################################################################################
2716     @@ -2005,10 +2011,16 @@ ipv6_ping()
2717     log_subsection "No VRF"
2718     setup
2719     ipv6_ping_novrf
2720     + setup
2721     + set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
2722     + ipv6_ping_novrf
2723    
2724     log_subsection "With VRF"
2725     setup "yes"
2726     ipv6_ping_vrf
2727     + setup "yes"
2728     + set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
2729     + ipv6_ping_vrf
2730     }
2731    
2732     ################################################################################