Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.14/0102-4.14.3-all-fixes.patch

Revision 3030 - Wed Dec 13 13:37:53 2017 UTC by niro
File size: 277372 bytes
-linux-4.14.3
diff --git a/Makefile b/Makefile
index 75d89dc2b94a..ede4de0d8634 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 2
+SUBLEVEL = 3
EXTRAVERSION =
NAME = Petit Gorille

diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 35ff45470dbf..fc3b44028cfb 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -129,8 +129,8 @@ static const struct prot_bits section_bits[] = {
.val = PMD_SECT_USER,
.set = "USR",
}, {
- .mask = L_PMD_SECT_RDONLY,
- .val = L_PMD_SECT_RDONLY,
+ .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+ .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
.set = "ro",
.clear = "RW",
#elif __LINUX_ARM_ARCH__ >= 6
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ad80548325fe..0f6d1537f330 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -639,8 +639,8 @@ static struct section_perm ro_perms[] = {
.start = (unsigned long)_stext,
.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
- .mask = ~L_PMD_SECT_RDONLY,
- .prot = L_PMD_SECT_RDONLY,
+ .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
+ .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index d8dd3298b15c..fb8d76a17bc5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -49,6 +49,14 @@

/ {
compatible = "amlogic,meson-gxl";
+
+ reserved-memory {
+ /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
+ secmon_reserved_alt: secmon@05000000 {
+ reg = <0x0 0x05000000 0x0 0x300000>;
+ no-map;
+ };
+ };
};

&ethmac {
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b46e54c2399b..c9530b5b5ca8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -98,6 +98,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+#define pte_valid_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
@@ -107,6 +109,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_accessible(mm, pte) \
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

+/*
+ * p??_access_permitted() is true for valid user mappings (subject to the
+ * write permission check) other than user execute-only which do not have the
+ * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ */
+#define pte_access_permitted(pte, write) \
+ (pte_valid_user(pte) && (!(write) || pte_write(pte)))
+#define pmd_access_permitted(pmd, write) \
+ (pte_access_permitted(pmd_pte(pmd), (write)))
+#define pud_access_permitted(pud, write) \
+ (pte_access_permitted(pud_pte(pud), (write)))
+
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
pte_val(pte) &= ~pgprot_val(prot);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5d3284d20678..c3d798b44030 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -65,7 +65,7 @@ config MIPS
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_VIRT_CPU_ACCOUNTING_GEN
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA if MODULES && 64BIT
select MODULES_USE_ELF_REL if MODULES
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
index d4f2407a42c6..8307a8a02667 100644
--- a/arch/mips/bcm47xx/leds.c
+++ b/arch/mips/bcm47xx/leds.c
@@ -331,7 +331,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
/* Verified on: WRT54GS V1.0 */
static const struct gpio_led
bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
- BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
};
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
index 9e09cc4556b3..398994312361 100644
--- a/arch/mips/boot/dts/brcm/Makefile
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -23,7 +23,6 @@ dtb-$(CONFIG_DT_NONE) += \
bcm63268-comtrend-vr-3032u.dtb \
bcm93384wvg.dtb \
bcm93384wvg_viper.dtb \
- bcm96358nb4ser.dtb \
bcm96368mvwg.dtb \
bcm9ejtagprb.dtb \
bcm97125cbmb.dtb \
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 83054f79f72a..feb069cbf44e 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,6 +19,9 @@
#include <asm/asmmacro-64.h>
#endif

+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
/*
* Helper macros for generating raw instruction encodings.
*/
@@ -105,6 +108,7 @@
.macro fpu_save_16odd thread
.set push
.set mips64r2
+ .set fp=64
SET_HARDFLOAT
sdc1 $f1, THREAD_FPR1(\thread)
sdc1 $f3, THREAD_FPR3(\thread)
@@ -126,8 +130,8 @@
.endm

.macro fpu_save_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
sll \tmp, \status, 5
bgez \tmp, 10f
fpu_save_16odd \thread
@@ -163,6 +167,7 @@
.macro fpu_restore_16odd thread
.set push
.set mips64r2
+ .set fp=64
SET_HARDFLOAT
ldc1 $f1, THREAD_FPR1(\thread)
ldc1 $f3, THREAD_FPR3(\thread)
@@ -184,8 +189,8 @@
.endm

.macro fpu_restore_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
sll \tmp, \status, 5
bgez \tmp, 10f # 16 register mode?

@@ -234,9 +239,6 @@
.endm

#ifdef TOOLCHAIN_SUPPORTS_MSA
-/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
-#undef fp
-
.macro _cfcmsa rd, cs
.set push
.set mips32r2
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 7e25c5cc353a..89e9fb7976fe 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -204,8 +204,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#ifndef CONFIG_SMP
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif
+#endif

#undef __scbeqz

diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 1395654cfc8d..5a09c2901a76 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -618,6 +618,19 @@ static const struct user_regset_view user_mips64_view = {
.n = ARRAY_SIZE(mips64_regsets),
};

+#ifdef CONFIG_MIPS32_N32
+
+static const struct user_regset_view user_mipsn32_view = {
+ .name = "mipsn32",
+ .e_flags = EF_MIPS_ABI2,
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips64_regsets,
+ .n = ARRAY_SIZE(mips64_regsets),
+};
+
+#endif /* CONFIG_MIPS32_N32 */
+
#endif /* CONFIG_64BIT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
@@ -628,6 +641,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#ifdef CONFIG_MIPS32_O32
if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
return &user_mips_view;
+#endif
+#ifdef CONFIG_MIPS32_N32
+ if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
+ return &user_mipsn32_view;
#endif
return &user_mips64_view;
#endif
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 0a83b1708b3c..8e3a6020c613 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -40,8 +40,8 @@
*/
LEAF(_save_fp)
EXPORT_SYMBOL(_save_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
mfc0 t0, CP0_STATUS
#endif
fpu_save_double a0 t0 t1 # clobbers t1
@@ -52,8 +52,8 @@ EXPORT_SYMBOL(_save_fp)
* Restore a thread's fp context.
*/
LEAF(_restore_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
mfc0 t0, CP0_STATUS
#endif
fpu_restore_double a0 t0 t1 # clobbers t1
@@ -246,11 +246,11 @@ LEAF(_save_fp_context)
cfc1 t1, fcr31
.set pop

-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
.set push
SET_HARDFLOAT
-#ifdef CONFIG_CPU_MIPS32_R2
+#ifdef CONFIG_CPU_MIPSR2
.set mips32r2
.set fp=64
mfc0 t0, CP0_STATUS
@@ -314,11 +314,11 @@ LEAF(_save_fp_context)
LEAF(_restore_fp_context)
EX lw t1, 0(a1)

-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
.set push
SET_HARDFLOAT
-#ifdef CONFIG_CPU_MIPS32_R2
+#ifdef CONFIG_CPU_MIPSR2
.set mips32r2
.set fp=64
mfc0 t0, CP0_STATUS
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 16d9ef5a78c5..6f57212f5659 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1795,7 +1795,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(fd, MIPSInst_FD(ir));
rv.s = ieee754sp_maddf(fd, fs, ft);
- break;
+ goto copcsr;
}

case fmsubf_op: {
@@ -1809,7 +1809,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(fd, MIPSInst_FD(ir));
rv.s = ieee754sp_msubf(fd, fs, ft);
- break;
+ goto copcsr;
}

case frint_op: {
@@ -1834,7 +1834,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754sp_2008class(fs);
rfmt = w_fmt;
- break;
+ goto copcsr;
}

case fmin_op: {
@@ -1847,7 +1847,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmin(fs, ft);
- break;
+ goto copcsr;
}

case fmina_op: {
@@ -1860,7 +1860,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmina(fs, ft);
- break;
+ goto copcsr;
}

case fmax_op: {
@@ -1873,7 +1873,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmax(fs, ft);
- break;
+ goto copcsr;
}

case fmaxa_op: {
@@ -1886,7 +1886,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmaxa(fs, ft);
- break;
+ goto copcsr;
}

case fabs_op:
@@ -2165,7 +2165,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(fd, MIPSInst_FD(ir));
rv.d = ieee754dp_maddf(fd, fs, ft);
- break;
+ goto copcsr;
}

case fmsubf_op: {
@@ -2179,7 +2179,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(fd, MIPSInst_FD(ir));
rv.d = ieee754dp_msubf(fd, fs, ft);
- break;
+ goto copcsr;
}

case frint_op: {
@@ -2204,7 +2204,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
DPFROMREG(fs, MIPSInst_FS(ir));
rv.l = ieee754dp_2008class(fs);
rfmt = l_fmt;
- break;
+ goto copcsr;
}

case fmin_op: {
@@ -2217,7 +2217,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmin(fs, ft);
- break;
+ goto copcsr;
}

case fmina_op: {
@@ -2230,7 +2230,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmina(fs, ft);
- break;
+ goto copcsr;
}

case fmax_op: {
@@ -2243,7 +2243,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmax(fs, ft);
- break;
+ goto copcsr;
}

case fmaxa_op: {
@@ -2256,7 +2256,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmaxa(fs, ft);
- break;
+ goto copcsr;
}

case fabs_op:
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index 90fba9bf98da..27ac00c36bc0 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -121,7 +121,7 @@ static int wait_pciephy_busy(void)
else
break;
if (retry++ > WAITRETRY_MAX) {
- printk(KERN_WARN "PCIE-PHY retry failed.\n");
+ pr_warn("PCIE-PHY retry failed.\n");
return -1;
}
}
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 9be8b08ae46b..41b71c4352c2 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -145,8 +145,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
FUNC("i2c", 0, 4, 2),
};

-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
+static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
+static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };

diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 41e60a9c7db2..e775f80ae28c 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -690,15 +690,15 @@ cas_action:
/* ELF32 Process entry path */
lws_compare_and_swap_2:
#ifdef CONFIG_64BIT
- /* Clip the input registers */
+ /* Clip the input registers. We don't need to clip %r23 as we
+ only use it for word operations */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
- depdi 0, 31, 32, %r23
#endif

/* Check the validity of the size pointer */
- subi,>>= 4, %r23, %r0
+ subi,>>= 3, %r23, %r0
b,n lws_exit_nosys

/* Jump to the functions which will load the old and new values into
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 1c80bd292e48..06598142d755 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -542,7 +542,7 @@ EXC_COMMON_BEGIN(instruction_access_common)
RECONCILE_IRQ_STATE(r10, r11)
ld r12,_MSR(r1)
ld r3,_NIP(r1)
- andis. r4,r12,DSISR_BAD_FAULT_64S@h
+ andis. r4,r12,DSISR_SRR1_MATCH_64S@h
li r5,0x400
std r3,_DAR(r1)
std r4,_DSISR(r1)
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index e9436c5e1e09..3d7539b90010 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -103,7 +103,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
static void do_signal(struct task_struct *tsk)
{
sigset_t *oldset = sigmask_to_save();
- struct ksignal ksig;
+ struct ksignal ksig = { .sig = 0 };
int ret;
int is32 = is_32bit_task();

diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 90644db9d38e..8e0cf8f186df 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -529,6 +529,8 @@ static inline bool is_rm(void)

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
+ if (!kvmppc_xics_enabled(vcpu))
+ return H_TOO_HARD;
if (xive_enabled()) {
if (is_rm())
return xive_rm_h_xirr(vcpu);
@@ -541,6 +543,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
+ if (!kvmppc_xics_enabled(vcpu))
+ return H_TOO_HARD;
vcpu->arch.gpr[5] = get_tb();
if (xive_enabled()) {
if (is_rm())
@@ -554,6 +558,8 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
+ if (!kvmppc_xics_enabled(vcpu))
+ return H_TOO_HARD;
if (xive_enabled()) {
if (is_rm())
return xive_rm_h_ipoll(vcpu, server);
@@ -567,6 +573,8 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr)
{
+ if (!kvmppc_xics_enabled(vcpu))
+ return H_TOO_HARD;
if (xive_enabled()) {
if (is_rm())
return xive_rm_h_ipi(vcpu, server, mfrr);
@@ -579,6 +587,8 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
+ if (!kvmppc_xics_enabled(vcpu))
+ return H_TOO_HARD;
if (xive_enabled()) {
if (is_rm())
return xive_rm_h_cppr(vcpu, cppr);
@@ -591,6 +601,8 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
+ if (!kvmppc_xics_enabled(vcpu))
+ return H_TOO_HARD;
if (xive_enabled()) {
if (is_rm())
return xive_rm_h_eoi(vcpu, xirr);
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index c9de03e0c1f1..d469224c4ada 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -21,6 +21,7 @@
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
+#include <asm/setup.h>

static int __patch_instruction(unsigned int *addr, unsigned int instr)
{
@@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
* During early early boot patch_instruction is called
* when text_poke_area is not ready, but we still need
* to allow patching. We just do the plain old patching
- * We use slab_is_available and per cpu read * via this_cpu_read
- * of text_poke_area. Per-CPU areas might not be up early
- * this can create problems with just using this_cpu_read()
*/
- if (!slab_is_available() || !this_cpu_read(text_poke_area))
+ if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
return __patch_instruction(addr, instr);

local_irq_save(flags);
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 558e9d3891bf..bd022d16745c 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -49,17 +49,28 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct hstate *h = hstate_file(file);
+ int fixed = (flags & MAP_FIXED);
+ unsigned long high_limit;
struct vm_unmapped_area_info info;

- if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE))
- mm->context.addr_limit = TASK_SIZE;
+ high_limit = DEFAULT_MAP_WINDOW;
+ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+ high_limit = TASK_SIZE;

if (len & ~huge_page_mask(h))
return -EINVAL;
- if (len > mm->task_size)
+ if (len > high_limit)
return -ENOMEM;
+ if (fixed) {
+ if (addr > high_limit - len)
+ return -ENOMEM;
+ }

- if (flags & MAP_FIXED) {
+ if (unlikely(addr > mm->context.addr_limit &&
+ mm->context.addr_limit != TASK_SIZE))
+ mm->context.addr_limit = TASK_SIZE;
+
+ if (fixed) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
return addr;
@@ -68,7 +79,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
- if (mm->task_size - len >= addr &&
+ if (high_limit - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -79,12 +90,9 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
- info.high_limit = current->mm->mmap_base;
+ info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;

- if (addr > DEFAULT_MAP_WINDOW)
- info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
-
return vm_unmapped_area(&info);
}
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 5d78b193fec4..6d476a7b5611 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -106,22 +106,32 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
+ int fixed = (flags & MAP_FIXED);
+ unsigned long high_limit;
struct vm_unmapped_area_info info;

+ high_limit = DEFAULT_MAP_WINDOW;
+ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+ high_limit = TASK_SIZE;
+
+ if (len > high_limit)
+ return -ENOMEM;
+ if (fixed) {
+ if (addr > high_limit - len)
+ return -ENOMEM;
+ }
+
if (unlikely(addr > mm->context.addr_limit &&
mm->context.addr_limit != TASK_SIZE))
mm->context.addr_limit = TASK_SIZE;

- if (len > mm->task_size - mmap_min_addr)
- return -ENOMEM;
-
- if (flags & MAP_FIXED)
+ if (fixed)
return addr;

if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
- if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
+ if (high_limit - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -129,13 +139,9 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
+ info.high_limit = high_limit;
info.align_mask = 0;

- if (unlikely(addr > DEFAULT_MAP_WINDOW))
- info.high_limit = mm->context.addr_limit;
- else
- info.high_limit = DEFAULT_MAP_WINDOW;
-
return vm_unmapped_area(&info);
}

@@ -149,37 +155,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
+ int fixed = (flags & MAP_FIXED);
+ unsigned long high_limit;
struct vm_unmapped_area_info info;

+ high_limit = DEFAULT_MAP_WINDOW;
+ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+ high_limit = TASK_SIZE;
+
+ if (len > high_limit)
+ return -ENOMEM;
+ if (fixed) {
+ if (addr > high_limit - len)
+ return -ENOMEM;
+ }
+
if (unlikely(addr > mm->context.addr_limit &&
mm->context.addr_limit != TASK_SIZE))
mm->context.addr_limit = TASK_SIZE;

- /* requested length too big for entire address space */
- if (len > mm->task_size - mmap_min_addr)
- return -ENOMEM;
-
- if (flags & MAP_FIXED)
+ if (fixed)
return addr;

- /* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
- if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ if (high_limit - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}

info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
- info.high_limit = mm->mmap_base;
+ info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
info.align_mask = 0;

- if (addr > DEFAULT_MAP_WINDOW)
- info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
-
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
return addr;
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 05e15386d4cb..b94fb62e60fd 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -93,11 +93,11 @@ static int hash__init_new_context(struct mm_struct *mm)
return index;

/*
- * We do switch_slb() early in fork, even before we setup the
- * mm->context.addr_limit. Default to max task size so that we copy the
- * default values to paca which will help us to handle slb miss early.
+ * In the case of exec, use the default limit,
+ * otherwise inherit it from the mm we are duplicating.
*/
- mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
+ if (!mm->context.addr_limit)
+ mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;

/*
* The old code would re-promote on fork, we don't do that when using
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 39c252b54d16..cfbbee941a76 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -169,6 +169,16 @@ void radix__mark_rodata_ro(void)
{
unsigned long start, end;

+ /*
+ * mark_rodata_ro() will mark itself as !writable at some point.
+ * Due to DD1 workaround in radix__pte_update(), we'll end up with
+ * an invalid pte and the system will crash quite severly.
+ */
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+ pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
+ return;
+ }
+
start = (unsigned long)_stext;
end = (unsigned long)__init_begin;

diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 45f6740dd407..a4f93699194b 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
{
struct vm_area_struct *vma;

- if ((mm->task_size - len) < addr)
+ if ((mm->context.addr_limit - len) < addr)
return 0;
vma = find_vma(mm, addr);
return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -133,7 +133,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
if (!slice_low_has_vma(mm, i))
ret->low_slices |= 1u << i;

- if (mm->task_size <= SLICE_LOW_TOP)
+ if (mm->context.addr_limit <= SLICE_LOW_TOP)
return;

for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
@@ -412,25 +412,31 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
struct slice_mask compat_mask;
int fixed = (flags & MAP_FIXED);
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+ unsigned long page_size = 1UL << pshift;
struct mm_struct *mm = current->mm;
unsigned long newaddr;
unsigned long high_limit;

- /*
- * Check if we need to expland slice area.
- */
- if (unlikely(addr > mm->context.addr_limit &&
- mm->context.addr_limit != TASK_SIZE)) {
- mm->context.addr_limit = TASK_SIZE;
+ high_limit = DEFAULT_MAP_WINDOW;
+ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+ high_limit = TASK_SIZE;
+
+ if (len > high_limit)
+ return -ENOMEM;
+ if (len & (page_size - 1))
+ return -EINVAL;
+ if (fixed) {
+ if (addr & (page_size - 1))
+ return -EINVAL;
+ if (addr > high_limit - len)
+ return -ENOMEM;
+ }
+
+ if (high_limit > mm->context.addr_limit) {
+ mm->context.addr_limit = high_limit;
on_each_cpu(slice_flush_segments, mm, 1);
}
- /*
- * This mmap request can allocate upt to 512TB
- */
- if (addr > DEFAULT_MAP_WINDOW)
- high_limit = mm->context.addr_limit;
- else
- high_limit = DEFAULT_MAP_WINDOW;
+
/*
* init different masks
*/
@@ -446,27 +452,19 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,

/* Sanity checks */
BUG_ON(mm->task_size == 0);
+ BUG_ON(mm->context.addr_limit == 0);
VM_BUG_ON(radix_enabled());

slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
addr, len, flags, topdown);

- if (len > mm->task_size)
- return -ENOMEM;
- if (len & ((1ul << pshift) - 1))
- return -EINVAL;
- if (fixed && (addr & ((1ul << pshift) - 1)))
- return -EINVAL;
- if (fixed && addr > (mm->task_size - len))
- return -ENOMEM;
-
/* If hint, make sure it matches our alignment restrictions */
if (!fixed && addr) {
- addr = _ALIGN_UP(addr, 1ul << pshift);
+ addr = _ALIGN_UP(addr, page_size);
slice_dbg(" aligned addr=%lx\n", addr);
/* Ignore hint if it's too large or overlaps a VMA */
- if (addr > mm->task_size - len ||
+ if (addr > high_limit - len ||
!slice_area_is_free(mm, addr, len))
addr = 0;
}
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 36344117c680..cf64e16f92c2 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -467,7 +467,7 @@ static int nest_imc_event_init(struct perf_event *event)
* Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
* Get the base memory addresss for this cpu.
*/
- chip_id = topology_physical_package_id(event->cpu);
+ chip_id = cpu_to_chip_id(event->cpu);
pcni = pmu->mem_info;
do {
if (pcni->id == chip_id) {
@@ -524,19 +524,19 @@ static int nest_imc_event_init(struct perf_event *event)
*/
static int core_imc_mem_init(int cpu, int size)
{
- int phys_id, rc = 0, core_id = (cpu / threads_per_core);
+ int nid, rc = 0, core_id = (cpu / threads_per_core);
struct imc_mem_info *mem_info;

/*
* alloc_pages_node() will allocate memory for core in the
* local node only.
*/
- phys_id = topology_physical_package_id(cpu);
+ nid = cpu_to_node(cpu);
mem_info = &core_imc_pmu->mem_info[core_id];
mem_info->id = core_id;

/* We need only vbase for core counters */
- mem_info->vbase = page_address(alloc_pages_node(phys_id,
+ mem_info->vbase = page_address(alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size)));
if (!mem_info->vbase)
@@ -797,14 +797,14 @@ static int core_imc_event_init(struct perf_event *event)
static int thread_imc_mem_alloc(int cpu_id, int size)
{
u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
- int phys_id = topology_physical_package_id(cpu_id);
+ int nid = cpu_to_node(cpu_id);

if (!local_mem) {
/*
* This case could happen only once at start, since we dont
* free the memory in cpu offline path.
*/
- local_mem = page_address(alloc_pages_node(phys_id,
+ local_mem = page_address(alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size)));
if (!local_mem)
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index c21fe1d57c00..ec7b476c1ac5 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -37,8 +37,8 @@ static inline void restore_access_regs(unsigned int *acrs)
save_ri_cb(prev->thread.ri_cb); \
save_gs_cb(prev->thread.gs_cb); \
} \
+ update_cr_regs(next); \
if (next->mm) { \
- update_cr_regs(next); \
set_cpu_flag(CIF_FPU); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index f7e82302a71e..2394557653d5 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1548,6 +1548,7 @@ static struct s390_insn opcode_e7[] = {
{ "vfsq", 0xce, INSTR_VRR_VV000MM },
{ "vfs", 0xe2, INSTR_VRR_VVV00MM },
{ "vftci", 0x4a, INSTR_VRI_VVIMM },
+ { "", 0, INSTR_INVALID }
};

static struct s390_insn opcode_eb[] = {
@@ -1953,7 +1954,7 @@ void show_code(struct pt_regs *regs)
{
char *mode = user_mode(regs) ? "User" : "Krnl";
unsigned char code[64];
- char buffer[64], *ptr;
+ char buffer[128], *ptr;
mm_segment_t old_fs;
unsigned long addr;
int start, end, opsize, hops, i;
@@ -2016,7 +2017,7 @@ void show_code(struct pt_regs *regs)
start += opsize;
pr_cont("%s", buffer);
ptr = buffer;
- ptr += sprintf(ptr, "\n ");
+ ptr += sprintf(ptr, "\n\t ");
hops++;
}
pr_cont("\n");
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index b945448b9eae..f7b280f0ab16 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -375,8 +375,10 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(40))
S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
- if (test_facility(50) && test_facility(73))
+ if (test_facility(50) && test_facility(73)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+ __ctl_set_bit(0, 55);
+ }
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129)) {
diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c
index bff39b66c9ff..9ee794e14f33 100644
--- a/arch/s390/kernel/guarded_storage.c
+++ b/arch/s390/kernel/guarded_storage.c
@@ -14,9 +14,11 @@

void exit_thread_gs(void)
{
+ preempt_disable();
kfree(current->thread.gs_cb);
kfree(current->thread.gs_bc_cb);
current->thread.gs_cb = current->thread.gs_bc_cb = NULL;
+ preempt_enable();
}

static int gs_enable(void)
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b0ba2c26b45e..d6f7782e75c9 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -269,6 +269,7 @@ static void __do_machine_kexec(void *data)
s390_reset_system();
data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

+ __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
/* Call the moving routine */
(*data_mover)(&image->head, image->start);

diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index a4a84fb08046..203b7cd7c348 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -100,6 +100,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
+ p->thread.per_flags = 0;
/* Initialize per thread user and system timer values */
p->thread.user_timer = 0;
p->thread.guest_timer = 0;
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index ca37e5d5b40c..9c2c96da23d0 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -29,7 +29,6 @@
ENTRY(relocate_kernel)
basr %r13,0 # base address
.base:
- stnsm sys_msk-.base(%r13),0xfb # disable DAT
stctg %c0,%c15,ctlregs-.base(%r13)
stmg %r0,%r15,gprregs-.base(%r13)
lghi %r0,3
@@ -103,8 +102,6 @@ ENTRY(relocate_kernel)
.align 8
load_psw:
.long 0x00080000,0x80000000
- sys_msk:
- .quad 0
ctlregs:
.rept 16
.quad 0
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 32aefb215e59..d85c64821a6b 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -50,11 +50,13 @@ void exit_thread_runtime_instr(void)
{
struct task_struct *task = current;

+ preempt_disable();
if (!task->thread.ri_cb)
return;
disable_runtime_instr();
kfree(task->thread.ri_cb);
task->thread.ri_cb = NULL;
+ preempt_enable();
}

SYSCALL_DEFINE1(s390_runtime_instr, int, command)
@@ -65,9 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
return -EOPNOTSUPP;

if (command == S390_RUNTIME_INSTR_STOP) {
- preempt_disable();
exit_thread_runtime_instr();
- preempt_enable();
return 0;
}

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index bcfc5668dcb2..518d9286b3d1 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -51,15 +51,19 @@ ENTRY(native_usergs_sysret64)
END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

-.macro TRACE_IRQS_IRETQ
+.macro TRACE_IRQS_FLAGS flags:req
#ifdef CONFIG_TRACE_IRQFLAGS
- bt $9, EFLAGS(%rsp) /* interrupts off? */
+ bt $9, \flags /* interrupts off? */
jnc 1f
TRACE_IRQS_ON
1:
#endif
.endm

+.macro TRACE_IRQS_IRETQ
+ TRACE_IRQS_FLAGS EFLAGS(%rsp)
+.endm
+
/*
* When dynamic function tracer is enabled it will add a breakpoint
* to all locations that it is about to modify, sync CPUs, update
@@ -148,8 +152,6 @@ ENTRY(entry_SYSCALL_64)
movq %rsp, PER_CPU_VAR(rsp_scratch)
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

- TRACE_IRQS_OFF
-
/* Construct struct pt_regs on stack */
pushq $__USER_DS /* pt_regs->ss */
pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
@@ -170,6 +172,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
UNWIND_HINT_REGS extra=0

+ TRACE_IRQS_OFF
+
/*
* If we need to do entry work or if we guess we'll need to do
* exit work, go straight to the slow path.
@@ -923,11 +927,13 @@ ENTRY(native_load_gs_index)
FRAME_BEGIN
pushfq
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
+ TRACE_IRQS_OFF
SWAPGS
.Lgs_change:
movl %edi, %gs
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
SWAPGS
+ TRACE_IRQS_FLAGS (%rsp)
popfq
FRAME_END
ret
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 9fb9a1f1e47b..f94855000d4e 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3730,6 +3730,19 @@ EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");

static struct attribute *hsw_events_attrs[] = {
+ EVENT_PTR(mem_ld_hsw),
+ EVENT_PTR(mem_st_hsw),
+ EVENT_PTR(td_slots_issued),
+ EVENT_PTR(td_slots_retired),
+ EVENT_PTR(td_fetch_bubbles),
+ EVENT_PTR(td_total_slots),
+ EVENT_PTR(td_total_slots_scale),
+ EVENT_PTR(td_recovery_bubbles),
+ EVENT_PTR(td_recovery_bubbles_scale),
+ NULL
+};
+
+static struct attribute *hsw_tsx_events_attrs[] = {
EVENT_PTR(tx_start),
EVENT_PTR(tx_commit),
EVENT_PTR(tx_abort),
@@ -3742,18 +3755,16 @@ static struct attribute *hsw_events_attrs[] = {
EVENT_PTR(el_conflict),
EVENT_PTR(cycles_t),
EVENT_PTR(cycles_ct),
- EVENT_PTR(mem_ld_hsw),
- EVENT_PTR(mem_st_hsw),
- EVENT_PTR(td_slots_issued),
- EVENT_PTR(td_slots_retired),
- EVENT_PTR(td_fetch_bubbles),
- EVENT_PTR(td_total_slots),
- EVENT_PTR(td_total_slots_scale),
- EVENT_PTR(td_recovery_bubbles),
- EVENT_PTR(td_recovery_bubbles_scale),
NULL
};

+static __init struct attribute **get_hsw_events_attrs(void)
+{
+ return boot_cpu_has(X86_FEATURE_RTM) ?
+ merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
+ hsw_events_attrs;
+}
+
static ssize_t freeze_on_smi_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
@@ -4182,7 +4193,7 @@ __init int intel_pmu_init(void)

x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
- x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.cpu_events = get_hsw_events_attrs();
x86_pmu.lbr_double_abort = true;
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
@@ -4221,7 +4232,7 @@ __init int intel_pmu_init(void)

x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
- x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.cpu_events = get_hsw_events_attrs();
x86_pmu.limit_period = bdw_limit_period;
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
@@ -4279,7 +4290,7 @@ __init int intel_pmu_init(void)
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
extra_attr = merge_attr(extra_attr, skl_format_attr);
- x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.cpu_events = get_hsw_events_attrs();
intel_pmu_pebs_data_source_skl(
boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
pr_cont("Skylake events, ");
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 410c5dadcee3..3a4b12809ab5 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -431,6 +431,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
}

static unsigned long mpf_base;
+static bool mpf_found;

static unsigned long __init get_mpc_size(unsigned long physptr)
{
@@ -504,7 +505,7 @@ void __init default_get_smp_config(unsigned int early)
if (!smp_found_config)
return;

- if (!mpf_base)
+ if (!mpf_found)
return;

if (acpi_lapic && early)
@@ -593,6 +594,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
smp_found_config = 1;
#endif
mpf_base = base;
+ mpf_found = true;

pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
base, base + sizeof(*mpf) - 1, mpf);
@@ -858,7 +860,7 @@ static int __init update_mp_table(void)
if (!enable_update_mptable)
return 0;

- if (!mpf_base)
+ if (!mpf_found)
return 0;

mpf = early_memremap(mpf_base, sizeof(*mpf));
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0e68f0b3cbf7..ca209a4a7834 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3657,6 +3657,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
u32 ecx = msr->index;
u64 data = msr->data;
switch (ecx) {
+ case MSR_IA32_CR_PAT:
+ if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+ return 1;
+ vcpu->arch.pat = data;
+ svm->vmcb->save.g_pat = data;
+ mark_dirty(svm->vmcb, VMCB_NPT);
+ break;
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr);
break;
1295     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1296     index a6f4f095f8f4..21cad7068cbf 100644
1297     --- a/arch/x86/kvm/vmx.c
1298     +++ b/arch/x86/kvm/vmx.c
1299     @@ -202,6 +202,10 @@ struct loaded_vmcs {
1300     bool nmi_known_unmasked;
1301     unsigned long vmcs_host_cr3; /* May not match real cr3 */
1302     unsigned long vmcs_host_cr4; /* May not match real cr4 */
1303     + /* Support for vnmi-less CPUs */
1304     + int soft_vnmi_blocked;
1305     + ktime_t entry_time;
1306     + s64 vnmi_blocked_time;
1307     struct list_head loaded_vmcss_on_cpu_link;
1308     };
1309    
1310     @@ -1286,6 +1290,11 @@ static inline bool cpu_has_vmx_invpcid(void)
1311     SECONDARY_EXEC_ENABLE_INVPCID;
1312     }
1313    
1314     +static inline bool cpu_has_virtual_nmis(void)
1315     +{
1316     + return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1317     +}
1318     +
1319     static inline bool cpu_has_vmx_wbinvd_exit(void)
1320     {
1321     return vmcs_config.cpu_based_2nd_exec_ctrl &
1322     @@ -1343,11 +1352,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1323     (vmcs12->secondary_vm_exec_control & bit);
1324     }
1325    
1326     -static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1327     -{
1328     - return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1329     -}
1330     -
1331     static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1332     {
1333     return vmcs12->pin_based_vm_exec_control &
1334     @@ -3699,9 +3703,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
1335     &_vmexit_control) < 0)
1336     return -EIO;
1337    
1338     - min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
1339     - PIN_BASED_VIRTUAL_NMIS;
1340     - opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
1341     + min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
1342     + opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
1343     + PIN_BASED_VMX_PREEMPTION_TIMER;
1344     if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
1345     &_pin_based_exec_control) < 0)
1346     return -EIO;
1347     @@ -5667,7 +5671,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
1348    
1349     static void enable_nmi_window(struct kvm_vcpu *vcpu)
1350     {
1351     - if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
1352     + if (!cpu_has_virtual_nmis() ||
1353     + vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
1354     enable_irq_window(vcpu);
1355     return;
1356     }
1357     @@ -5707,6 +5712,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
1358     {
1359     struct vcpu_vmx *vmx = to_vmx(vcpu);
1360    
1361     + if (!cpu_has_virtual_nmis()) {
1362     + /*
1363     + * Tracking the NMI-blocked state in software is built upon
1364     + * finding the next open IRQ window. This, in turn, depends on
1365     + * well-behaving guests: They have to keep IRQs disabled at
1366     + * least as long as the NMI handler runs. Otherwise we may
1367     + * cause NMI nesting, maybe breaking the guest. But as this is
1368     + * highly unlikely, we can live with the residual risk.
1369     + */
1370     + vmx->loaded_vmcs->soft_vnmi_blocked = 1;
1371     + vmx->loaded_vmcs->vnmi_blocked_time = 0;
1372     + }
1373     +
1374     ++vcpu->stat.nmi_injections;
1375     vmx->loaded_vmcs->nmi_known_unmasked = false;
1376    
1377     @@ -5725,6 +5743,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
1378     struct vcpu_vmx *vmx = to_vmx(vcpu);
1379     bool masked;
1380    
1381     + if (!cpu_has_virtual_nmis())
1382     + return vmx->loaded_vmcs->soft_vnmi_blocked;
1383     if (vmx->loaded_vmcs->nmi_known_unmasked)
1384     return false;
1385     masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
1386     @@ -5736,13 +5756,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
1387     {
1388     struct vcpu_vmx *vmx = to_vmx(vcpu);
1389    
1390     - vmx->loaded_vmcs->nmi_known_unmasked = !masked;
1391     - if (masked)
1392     - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
1393     - GUEST_INTR_STATE_NMI);
1394     - else
1395     - vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
1396     - GUEST_INTR_STATE_NMI);
1397     + if (!cpu_has_virtual_nmis()) {
1398     + if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
1399     + vmx->loaded_vmcs->soft_vnmi_blocked = masked;
1400     + vmx->loaded_vmcs->vnmi_blocked_time = 0;
1401     + }
1402     + } else {
1403     + vmx->loaded_vmcs->nmi_known_unmasked = !masked;
1404     + if (masked)
1405     + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
1406     + GUEST_INTR_STATE_NMI);
1407     + else
1408     + vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
1409     + GUEST_INTR_STATE_NMI);
1410     + }
1411     }
1412    
1413     static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
1414     @@ -5750,6 +5777,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
1415     if (to_vmx(vcpu)->nested.nested_run_pending)
1416     return 0;
1417    
1418     + if (!cpu_has_virtual_nmis() &&
1419     + to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
1420     + return 0;
1421     +
1422     return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
1423     (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
1424     | GUEST_INTR_STATE_NMI));
1425     @@ -6478,6 +6509,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
1426     * AAK134, BY25.
1427     */
1428     if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
1429     + cpu_has_virtual_nmis() &&
1430     (exit_qualification & INTR_INFO_UNBLOCK_NMI))
1431     vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
1432    
1433     @@ -6961,7 +6993,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
1434     }
1435    
1436     /* Create a new VMCS */
1437     - item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
1438     + item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
1439     if (!item)
1440     return NULL;
1441     item->vmcs02.vmcs = alloc_vmcs();
1442     @@ -7978,6 +8010,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
1443     * "blocked by NMI" bit has to be set before next VM entry.
1444     */
1445     if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
1446     + cpu_has_virtual_nmis() &&
1447     (exit_qualification & INTR_INFO_UNBLOCK_NMI))
1448     vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
1449     GUEST_INTR_STATE_NMI);
1450     @@ -8822,6 +8855,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
1451     return 0;
1452     }
1453    
1454     + if (unlikely(!cpu_has_virtual_nmis() &&
1455     + vmx->loaded_vmcs->soft_vnmi_blocked)) {
1456     + if (vmx_interrupt_allowed(vcpu)) {
1457     + vmx->loaded_vmcs->soft_vnmi_blocked = 0;
1458     + } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
1459     + vcpu->arch.nmi_pending) {
1460     + /*
1461     + * This CPU doesn't support us in finding the end of an
1462     + * NMI-blocked window if the guest runs with IRQs
1463     + * disabled. So we pull the trigger after 1 s of
1464     + * futile waiting, but inform the user about this.
1465     + */
1466     + printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
1467     + "state on VCPU %d after 1 s timeout\n",
1468     + __func__, vcpu->vcpu_id);
1469     + vmx->loaded_vmcs->soft_vnmi_blocked = 0;
1470     + }
1471     + }
1472     +
1473     if (exit_reason < kvm_vmx_max_exit_handlers
1474     && kvm_vmx_exit_handlers[exit_reason])
1475     return kvm_vmx_exit_handlers[exit_reason](vcpu);
1476     @@ -9104,33 +9156,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
1477    
1478     idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
1479    
1480     - if (vmx->loaded_vmcs->nmi_known_unmasked)
1481     - return;
1482     - /*
1483     - * Can't use vmx->exit_intr_info since we're not sure what
1484     - * the exit reason is.
1485     - */
1486     - exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1487     - unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
1488     - vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
1489     - /*
1490     - * SDM 3: 27.7.1.2 (September 2008)
1491     - * Re-set bit "block by NMI" before VM entry if vmexit caused by
1492     - * a guest IRET fault.
1493     - * SDM 3: 23.2.2 (September 2008)
1494     - * Bit 12 is undefined in any of the following cases:
1495     - * If the VM exit sets the valid bit in the IDT-vectoring
1496     - * information field.
1497     - * If the VM exit is due to a double fault.
1498     - */
1499     - if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
1500     - vector != DF_VECTOR && !idtv_info_valid)
1501     - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
1502     - GUEST_INTR_STATE_NMI);
1503     - else
1504     - vmx->loaded_vmcs->nmi_known_unmasked =
1505     - !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
1506     - & GUEST_INTR_STATE_NMI);
1507     + if (cpu_has_virtual_nmis()) {
1508     + if (vmx->loaded_vmcs->nmi_known_unmasked)
1509     + return;
1510     + /*
1511     + * Can't use vmx->exit_intr_info since we're not sure what
1512     + * the exit reason is.
1513     + */
1514     + exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1515     + unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
1516     + vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
1517     + /*
1518     + * SDM 3: 27.7.1.2 (September 2008)
1519     + * Re-set bit "block by NMI" before VM entry if vmexit caused by
1520     + * a guest IRET fault.
1521     + * SDM 3: 23.2.2 (September 2008)
1522     + * Bit 12 is undefined in any of the following cases:
1523     + * If the VM exit sets the valid bit in the IDT-vectoring
1524     + * information field.
1525     + * If the VM exit is due to a double fault.
1526     + */
1527     + if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
1528     + vector != DF_VECTOR && !idtv_info_valid)
1529     + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
1530     + GUEST_INTR_STATE_NMI);
1531     + else
1532     + vmx->loaded_vmcs->nmi_known_unmasked =
1533     + !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
1534     + & GUEST_INTR_STATE_NMI);
1535     + } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
1536     + vmx->loaded_vmcs->vnmi_blocked_time +=
1537     + ktime_to_ns(ktime_sub(ktime_get(),
1538     + vmx->loaded_vmcs->entry_time));
1539     }
1540    
1541     static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
1542     @@ -9247,6 +9304,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
1543     struct vcpu_vmx *vmx = to_vmx(vcpu);
1544     unsigned long debugctlmsr, cr3, cr4;
1545    
1546     + /* Record the guest's net vcpu time for enforced NMI injections. */
1547     + if (unlikely(!cpu_has_virtual_nmis() &&
1548     + vmx->loaded_vmcs->soft_vnmi_blocked))
1549     + vmx->loaded_vmcs->entry_time = ktime_get();
1550     +
1551     /* Don't enter VMX if guest state is invalid, let the exit handler
1552     start emulation until we arrive back to a valid state */
1553     if (vmx->emulation_required)
1554     @@ -11325,6 +11387,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
1555     vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
1556     vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
1557     vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
1558     + vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
1559     + vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
1560    
1561     /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
1562     if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
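
Taken together, the vmx.c hunks above restore software emulation of NMI blocking for CPUs that lack the virtual-NMI VM-execution control: vmx_vcpu_run() samples an entry timestamp, vmx_recover_nmi_blocking() accumulates how long the guest has run with NMIs soft-blocked, and vmx_handle_exit() force-clears the state after one second of futile waiting. A minimal userspace sketch of that accounting, with simplified names and types (an illustration, not the kernel code itself):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the loaded_vmcs fields the patch adds. */
struct soft_vnmi {
	bool    blocked;	/* soft_vnmi_blocked */
	int64_t blocked_ns;	/* vnmi_blocked_time */
	int64_t entry_ns;	/* entry_time */
};

/* Before VM entry: remember when the blocked interval starts ticking. */
static void pre_entry(struct soft_vnmi *v, int64_t now_ns)
{
	if (v->blocked)
		v->entry_ns = now_ns;
}

/* After VM exit: account the guest time spent with NMIs blocked. */
static void post_exit(struct soft_vnmi *v, int64_t now_ns)
{
	if (v->blocked)
		v->blocked_ns += now_ns - v->entry_ns;
}

/* Exit handling: unblock on an open IRQ window, or give up after 1 s. */
static bool maybe_unblock(struct soft_vnmi *v, bool irq_window_open)
{
	if (v->blocked &&
	    (irq_window_open || v->blocked_ns > 1000000000LL))
		v->blocked = false;
	return !v->blocked;
}

int main(void)
{
	struct soft_vnmi v = { .blocked = true };

	pre_entry(&v, 0);
	post_exit(&v, 1100000000LL);	/* 1.1 s inside the guest */
	printf("unblocked: %d\n", maybe_unblock(&v, false));	/* 1 */
	return 0;
}
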
1563     diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
1564     index 12e377184ee4..c4d55919fac1 100644
1565     --- a/arch/x86/lib/x86-opcode-map.txt
1566     +++ b/arch/x86/lib/x86-opcode-map.txt
1567     @@ -896,7 +896,7 @@ EndTable
1568    
1569     GrpTable: Grp3_1
1570     0: TEST Eb,Ib
1571     -1:
1572     +1: TEST Eb,Ib
1573     2: NOT Eb
1574     3: NEG Eb
1575     4: MUL AL,Eb
1576     diff --git a/block/blk-core.c b/block/blk-core.c
1577     index 048be4aa6024..33ee583cfe45 100644
1578     --- a/block/blk-core.c
1579     +++ b/block/blk-core.c
1580     @@ -333,6 +333,7 @@ EXPORT_SYMBOL(blk_stop_queue);
1581     void blk_sync_queue(struct request_queue *q)
1582     {
1583     del_timer_sync(&q->timeout);
1584     + cancel_work_sync(&q->timeout_work);
1585    
1586     if (q->mq_ops) {
1587     struct blk_mq_hw_ctx *hctx;
1588     @@ -844,6 +845,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1589     setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
1590     laptop_mode_timer_fn, (unsigned long) q);
1591     setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
1592     + INIT_WORK(&q->timeout_work, NULL);
1593     INIT_LIST_HEAD(&q->queue_head);
1594     INIT_LIST_HEAD(&q->timeout_list);
1595     INIT_LIST_HEAD(&q->icq_list);
1596     diff --git a/block/blk-timeout.c b/block/blk-timeout.c
1597     index 17ec83bb0900..6427be7ac363 100644
1598     --- a/block/blk-timeout.c
1599     +++ b/block/blk-timeout.c
1600     @@ -134,8 +134,6 @@ void blk_timeout_work(struct work_struct *work)
1601     struct request *rq, *tmp;
1602     int next_set = 0;
1603    
1604     - if (blk_queue_enter(q, true))
1605     - return;
1606     spin_lock_irqsave(q->queue_lock, flags);
1607    
1608     list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
1609     @@ -145,7 +143,6 @@ void blk_timeout_work(struct work_struct *work)
1610     mod_timer(&q->timeout, round_jiffies_up(next));
1611    
1612     spin_unlock_irqrestore(q->queue_lock, flags);
1613     - blk_queue_exit(q);
1614     }
1615    
1616     /**
1617     diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
1618     index fbcc73f7a099..18af71057b44 100644
1619     --- a/drivers/acpi/device_pm.c
1620     +++ b/drivers/acpi/device_pm.c
1621     @@ -387,6 +387,7 @@ EXPORT_SYMBOL(acpi_bus_power_manageable);
1622    
1623     #ifdef CONFIG_PM
1624     static DEFINE_MUTEX(acpi_pm_notifier_lock);
1625     +static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
1626    
1627     void acpi_pm_wakeup_event(struct device *dev)
1628     {
1629     @@ -443,24 +444,25 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
1630     if (!dev && !func)
1631     return AE_BAD_PARAMETER;
1632    
1633     - mutex_lock(&acpi_pm_notifier_lock);
1634     + mutex_lock(&acpi_pm_notifier_install_lock);
1635    
1636     if (adev->wakeup.flags.notifier_present)
1637     goto out;
1638    
1639     - adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
1640     - adev->wakeup.context.dev = dev;
1641     - adev->wakeup.context.func = func;
1642     -
1643     status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
1644     acpi_pm_notify_handler, NULL);
1645     if (ACPI_FAILURE(status))
1646     goto out;
1647    
1648     + mutex_lock(&acpi_pm_notifier_lock);
1649     + adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
1650     + adev->wakeup.context.dev = dev;
1651     + adev->wakeup.context.func = func;
1652     adev->wakeup.flags.notifier_present = true;
1653     + mutex_unlock(&acpi_pm_notifier_lock);
1654    
1655     out:
1656     - mutex_unlock(&acpi_pm_notifier_lock);
1657     + mutex_unlock(&acpi_pm_notifier_install_lock);
1658     return status;
1659     }
1660    
1661     @@ -472,7 +474,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
1662     {
1663     acpi_status status = AE_BAD_PARAMETER;
1664    
1665     - mutex_lock(&acpi_pm_notifier_lock);
1666     + mutex_lock(&acpi_pm_notifier_install_lock);
1667    
1668     if (!adev->wakeup.flags.notifier_present)
1669     goto out;
1670     @@ -483,14 +485,15 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
1671     if (ACPI_FAILURE(status))
1672     goto out;
1673    
1674     + mutex_lock(&acpi_pm_notifier_lock);
1675     adev->wakeup.context.func = NULL;
1676     adev->wakeup.context.dev = NULL;
1677     wakeup_source_unregister(adev->wakeup.ws);
1678     -
1679     adev->wakeup.flags.notifier_present = false;
1680     + mutex_unlock(&acpi_pm_notifier_lock);
1681    
1682     out:
1683     - mutex_unlock(&acpi_pm_notifier_lock);
1684     + mutex_unlock(&acpi_pm_notifier_install_lock);
1685     return status;
1686     }
1687    
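
The device_pm.c rework splits the single acpi_pm_notifier_lock into an outer lock that serializes notifier installation/removal and an inner lock that guards only the wakeup fields the notify handler reads, so the handler never contends with the (un)install path while it is registered. A hypothetical userspace rendering of that split-lock pattern (all names invented for illustration):

#include <pthread.h>

/* Outer lock: serializes install/remove.  Inner lock: the only one the
 * possibly-concurrent notifier callback ever takes, so the low-level
 * handler can be registered before the data it consumes is published.
 */
static pthread_mutex_t install_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
static void (*callback)(void *);
static void *callback_arg;

void notifier_install(void (*cb)(void *), void *arg)
{
	pthread_mutex_lock(&install_lock);
	/* ...the acpi_install_notify_handler() step goes here; the
	 * handler may already fire, taking only data_lock... */
	pthread_mutex_lock(&data_lock);
	callback = cb;
	callback_arg = arg;
	pthread_mutex_unlock(&data_lock);
	pthread_mutex_unlock(&install_lock);
}

void notifier_fire(void)	/* the handler's side: data_lock only */
{
	pthread_mutex_lock(&data_lock);
	if (callback)
		callback(callback_arg);
	pthread_mutex_unlock(&data_lock);
}
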
1688     diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1689     index 236b14324780..82b3ce5e937e 100644
1690     --- a/drivers/acpi/ec.c
1691     +++ b/drivers/acpi/ec.c
1692     @@ -486,8 +486,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
1693     {
1694     if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
1695     ec_log_drv("event unblocked");
1696     - if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
1697     - advance_transaction(ec);
1698     + /*
1699     + * Unconditionally invoke this once after enabling the event
1700     + * handling mechanism to detect the pending events.
1701     + */
1702     + advance_transaction(ec);
1703     }
1704    
1705     static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
1706     @@ -1456,11 +1459,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
1707     if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
1708     ec->reference_count >= 1)
1709     acpi_ec_enable_gpe(ec, true);
1710     -
1711     - /* EC is fully operational, allow queries */
1712     - acpi_ec_enable_event(ec);
1713     }
1714     }
1715     + /* EC is fully operational, allow queries */
1716     + acpi_ec_enable_event(ec);
1717    
1718     return 0;
1719     }
1720     diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
1721     index e4effef0c83f..ea20e0eb4d5a 100644
1722     --- a/drivers/ata/libata-eh.c
1723     +++ b/drivers/ata/libata-eh.c
1724     @@ -2264,8 +2264,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
1725     if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
1726     eflags |= ATA_EFLAG_DUBIOUS_XFER;
1727     ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
1728     + trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
1729     }
1730     - trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
1731     DPRINTK("EXIT\n");
1732     }
1733    
1734     diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
1735     index 0b718886479b..87509cb69f79 100644
1736     --- a/drivers/base/power/opp/of.c
1737     +++ b/drivers/base/power/opp/of.c
1738     @@ -397,6 +397,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
1739     dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1740     ret);
1741     _dev_pm_opp_remove_table(opp_table, dev, false);
1742     + of_node_put(np);
1743     goto put_opp_table;
1744     }
1745     }
1746     diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1747     index 9adfb5445f8d..5f2a4240a204 100644
1748     --- a/drivers/block/nbd.c
1749     +++ b/drivers/block/nbd.c
1750     @@ -288,15 +288,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
1751     cmd->status = BLK_STS_TIMEOUT;
1752     return BLK_EH_HANDLED;
1753     }
1754     -
1755     - /* If we are waiting on our dead timer then we could get timeout
1756     - * callbacks for our request. For this we just want to reset the timer
1757     - * and let the queue side take care of everything.
1758     - */
1759     - if (!completion_done(&cmd->send_complete)) {
1760     - nbd_config_put(nbd);
1761     - return BLK_EH_RESET_TIMER;
1762     - }
1763     config = nbd->config;
1764    
1765     if (config->num_connections > 1) {
1766     @@ -723,9 +714,9 @@ static int wait_for_reconnect(struct nbd_device *nbd)
1767     return 0;
1768     if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
1769     return 0;
1770     - wait_event_interruptible_timeout(config->conn_wait,
1771     - atomic_read(&config->live_connections),
1772     - config->dead_conn_timeout);
1773     + wait_event_timeout(config->conn_wait,
1774     + atomic_read(&config->live_connections),
1775     + config->dead_conn_timeout);
1776     return atomic_read(&config->live_connections);
1777     }
1778    
1779     @@ -740,6 +731,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1780     if (!refcount_inc_not_zero(&nbd->config_refs)) {
1781     dev_err_ratelimited(disk_to_dev(nbd->disk),
1782     "Socks array is empty\n");
1783     + blk_mq_start_request(req);
1784     return -EINVAL;
1785     }
1786     config = nbd->config;
1787     @@ -748,6 +740,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1788     dev_err_ratelimited(disk_to_dev(nbd->disk),
1789     "Attempted send on invalid socket\n");
1790     nbd_config_put(nbd);
1791     + blk_mq_start_request(req);
1792     return -EINVAL;
1793     }
1794     cmd->status = BLK_STS_OK;
1795     @@ -771,6 +764,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1796     */
1797     sock_shutdown(nbd);
1798     nbd_config_put(nbd);
1799     + blk_mq_start_request(req);
1800     return -EIO;
1801     }
1802     goto again;
1803     @@ -781,6 +775,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1804     * here so that it gets put _after_ the request that is already on the
1805     * dispatch list.
1806     */
1807     + blk_mq_start_request(req);
1808     if (unlikely(nsock->pending && nsock->pending != req)) {
1809     blk_mq_requeue_request(req, true);
1810     ret = 0;
1811     @@ -793,10 +788,10 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1812     ret = nbd_send_cmd(nbd, cmd, index);
1813     if (ret == -EAGAIN) {
1814     dev_err_ratelimited(disk_to_dev(nbd->disk),
1815     - "Request send failed trying another connection\n");
1816     + "Request send failed, requeueing\n");
1817     nbd_mark_nsock_dead(nbd, nsock, 1);
1818     - mutex_unlock(&nsock->tx_lock);
1819     - goto again;
1820     + blk_mq_requeue_request(req, true);
1821     + ret = 0;
1822     }
1823     out:
1824     mutex_unlock(&nsock->tx_lock);
1825     @@ -820,7 +815,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
1826     * done sending everything over the wire.
1827     */
1828     init_completion(&cmd->send_complete);
1829     - blk_mq_start_request(bd->rq);
1830    
1831     /* We can be called directly from the user space process, which means we
1832     * could possibly have signals pending so our sendmsg will fail. In
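
The nbd hunks all serve one invariant: blk_mq_start_request() arms the block-layer timeout, so it must only be called once the command is actually heading for a socket, and a failed send is requeued instead of being retried inline on another connection. A sketch of the resulting ->queue_rq() ordering (socket_valid() and send_cmd() are hypothetical stand-ins for the driver's own checks and transmit path):

static int queue_rq_sketch(struct request *req)
{
	if (!socket_valid(req))
		return -EINVAL;		/* timeout was never armed */

	blk_mq_start_request(req);	/* timeout runs from here on */

	if (send_cmd(req) == -EAGAIN)	/* mark socket dead, retry later */
		blk_mq_requeue_request(req, true);
	return 0;
}
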
1833     diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
1834     index d00c4fdae924..bd810d01538a 100644
1835     --- a/drivers/bluetooth/btqcomsmd.c
1836     +++ b/drivers/bluetooth/btqcomsmd.c
1837     @@ -26,6 +26,7 @@
1838     struct btqcomsmd {
1839     struct hci_dev *hdev;
1840    
1841     + bdaddr_t bdaddr;
1842     struct rpmsg_endpoint *acl_channel;
1843     struct rpmsg_endpoint *cmd_channel;
1844     };
1845     @@ -100,6 +101,38 @@ static int btqcomsmd_close(struct hci_dev *hdev)
1846     return 0;
1847     }
1848    
1849     +static int btqcomsmd_setup(struct hci_dev *hdev)
1850     +{
1851     + struct btqcomsmd *btq = hci_get_drvdata(hdev);
1852     + struct sk_buff *skb;
1853     + int err;
1854     +
1855     + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
1856     + if (IS_ERR(skb))
1857     + return PTR_ERR(skb);
1858     + kfree_skb(skb);
1859     +
1860     + /* Devices do not have persistent storage for BD address. If no
1861     + * BD address has been retrieved during probe, mark the device
1862     + * as having an invalid BD address.
1863     + */
1864     + if (!bacmp(&btq->bdaddr, BDADDR_ANY)) {
1865     + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
1866     + return 0;
1867     + }
1868     +
1869     + /* When setting a configured BD address fails, mark the device
1870     + * as having an invalid BD address.
1871     + */
1872     + err = qca_set_bdaddr_rome(hdev, &btq->bdaddr);
1873     + if (err) {
1874     + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
1875     + return 0;
1876     + }
1877     +
1878     + return 0;
1879     +}
1880     +
1881     static int btqcomsmd_probe(struct platform_device *pdev)
1882     {
1883     struct btqcomsmd *btq;
1884     @@ -135,6 +168,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
1885     hdev->open = btqcomsmd_open;
1886     hdev->close = btqcomsmd_close;
1887     hdev->send = btqcomsmd_send;
1888     + hdev->setup = btqcomsmd_setup;
1889     hdev->set_bdaddr = qca_set_bdaddr_rome;
1890    
1891     ret = hci_register_dev(hdev);
1892     diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
1893     index 13eb04f72389..148815470431 100644
1894     --- a/drivers/clk/ti/clk-dra7-atl.c
1895     +++ b/drivers/clk/ti/clk-dra7-atl.c
1896     @@ -274,8 +274,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
1897    
1898     /* Get configuration for the ATL instances */
1899     snprintf(prop, sizeof(prop), "atl%u", i);
1900     - of_node_get(node);
1901     - cfg_node = of_find_node_by_name(node, prop);
1902     + cfg_node = of_get_child_by_name(node, prop);
1903     if (cfg_node) {
1904     ret = of_property_read_u32(cfg_node, "bws",
1905     &cdesc->bws);
1906     diff --git a/drivers/dax/super.c b/drivers/dax/super.c
1907     index 557b93703532..c4cd034a3820 100644
1908     --- a/drivers/dax/super.c
1909     +++ b/drivers/dax/super.c
1910     @@ -344,6 +344,9 @@ static struct inode *dax_alloc_inode(struct super_block *sb)
1911     struct inode *inode;
1912    
1913     dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
1914     + if (!dax_dev)
1915     + return NULL;
1916     +
1917     inode = &dax_dev->inode;
1918     inode->i_rdev = 0;
1919     return inode;
1920     diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
1921     index 4c4b46586af2..2af79e4f3235 100644
1922     --- a/drivers/infiniband/core/cm.c
1923     +++ b/drivers/infiniband/core/cm.c
1924     @@ -1575,7 +1575,7 @@ static void cm_format_req_event(struct cm_work *work,
1925     param->bth_pkey = cm_get_bth_pkey(work);
1926     param->port = cm_id_priv->av.port->port_num;
1927     param->primary_path = &work->path[0];
1928     - if (req_msg->alt_local_lid)
1929     + if (cm_req_has_alt_path(req_msg))
1930     param->alternate_path = &work->path[1];
1931     else
1932     param->alternate_path = NULL;
1933     @@ -1856,7 +1856,8 @@ static int cm_req_handler(struct cm_work *work)
1934     cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
1935    
1936     memset(&work->path[0], 0, sizeof(work->path[0]));
1937     - memset(&work->path[1], 0, sizeof(work->path[1]));
1938     + if (cm_req_has_alt_path(req_msg))
1939     + memset(&work->path[1], 0, sizeof(work->path[1]));
1940     grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
1941     ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
1942     work->port->port_num,
1943     @@ -3817,14 +3818,16 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
1944     struct cm_port *port = mad_agent->context;
1945     struct cm_work *work;
1946     enum ib_cm_event_type event;
1947     + bool alt_path = false;
1948     u16 attr_id;
1949     int paths = 0;
1950     int going_down = 0;
1951    
1952     switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
1953     case CM_REQ_ATTR_ID:
1954     - paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
1955     - alt_local_lid != 0);
1956     + alt_path = cm_req_has_alt_path((struct cm_req_msg *)
1957     + mad_recv_wc->recv_buf.mad);
1958     + paths = 1 + (alt_path != 0);
1959     event = IB_CM_REQ_RECEIVED;
1960     break;
1961     case CM_MRA_ATTR_ID:
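
Both cm.c hunks replace open-coded tests of req_msg->alt_local_lid with cm_req_has_alt_path(), presumably defined elsewhere in this patch. Its assumed shape (illustrative only; the real helper may also consult the alternate GID):

static inline bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
	return req_msg->alt_local_lid != 0;
}
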
1962     diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
1963     index f8f53bb90837..cb91245e9163 100644
1964     --- a/drivers/infiniband/core/mad.c
1965     +++ b/drivers/infiniband/core/mad.c
1966     @@ -1974,14 +1974,15 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1967     unsigned long flags;
1968     int ret;
1969    
1970     + INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1971     ret = ib_mad_enforce_security(mad_agent_priv,
1972     mad_recv_wc->wc->pkey_index);
1973     if (ret) {
1974     ib_free_recv_mad(mad_recv_wc);
1975     deref_mad_agent(mad_agent_priv);
1976     + return;
1977     }
1978    
1979     - INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1980     list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1981     if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1982     mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1983     diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
1984     index 88bdafb297f5..28607bb42d87 100644
1985     --- a/drivers/infiniband/core/security.c
1986     +++ b/drivers/infiniband/core/security.c
1987     @@ -87,16 +87,14 @@ static int enforce_qp_pkey_security(u16 pkey,
1988     if (ret)
1989     return ret;
1990    
1991     - if (qp_sec->qp == qp_sec->qp->real_qp) {
1992     - list_for_each_entry(shared_qp_sec,
1993     - &qp_sec->shared_qp_list,
1994     - shared_qp_list) {
1995     - ret = security_ib_pkey_access(shared_qp_sec->security,
1996     - subnet_prefix,
1997     - pkey);
1998     - if (ret)
1999     - return ret;
2000     - }
2001     + list_for_each_entry(shared_qp_sec,
2002     + &qp_sec->shared_qp_list,
2003     + shared_qp_list) {
2004     + ret = security_ib_pkey_access(shared_qp_sec->security,
2005     + subnet_prefix,
2006     + pkey);
2007     + if (ret)
2008     + return ret;
2009     }
2010     return 0;
2011     }
2012     @@ -560,15 +558,22 @@ int ib_security_modify_qp(struct ib_qp *qp,
2013     int ret = 0;
2014     struct ib_ports_pkeys *tmp_pps;
2015     struct ib_ports_pkeys *new_pps;
2016     - bool special_qp = (qp->qp_type == IB_QPT_SMI ||
2017     - qp->qp_type == IB_QPT_GSI ||
2018     - qp->qp_type >= IB_QPT_RESERVED1);
2019     + struct ib_qp *real_qp = qp->real_qp;
2020     + bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
2021     + real_qp->qp_type == IB_QPT_GSI ||
2022     + real_qp->qp_type >= IB_QPT_RESERVED1);
2023     bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
2024     (qp_attr_mask & IB_QP_ALT_PATH));
2025    
2026     + /* The port/pkey settings are maintained only for the real QP. Open
2027     + * handles on the real QP will be in the shared_qp_list. When
2028     + * enforcing security on the real QP all the shared QPs will be
2029     + * checked as well.
2030     + */
2031     +
2032     if (pps_change && !special_qp) {
2033     - mutex_lock(&qp->qp_sec->mutex);
2034     - new_pps = get_new_pps(qp,
2035     + mutex_lock(&real_qp->qp_sec->mutex);
2036     + new_pps = get_new_pps(real_qp,
2037     qp_attr,
2038     qp_attr_mask);
2039    
2040     @@ -586,14 +591,14 @@ int ib_security_modify_qp(struct ib_qp *qp,
2041    
2042     if (!ret)
2043     ret = check_qp_port_pkey_settings(new_pps,
2044     - qp->qp_sec);
2045     + real_qp->qp_sec);
2046     }
2047    
2048     if (!ret)
2049     - ret = qp->device->modify_qp(qp->real_qp,
2050     - qp_attr,
2051     - qp_attr_mask,
2052     - udata);
2053     + ret = real_qp->device->modify_qp(real_qp,
2054     + qp_attr,
2055     + qp_attr_mask,
2056     + udata);
2057    
2058     if (pps_change && !special_qp) {
2059     /* Clean up the lists and free the appropriate
2060     @@ -602,8 +607,8 @@ int ib_security_modify_qp(struct ib_qp *qp,
2061     if (ret) {
2062     tmp_pps = new_pps;
2063     } else {
2064     - tmp_pps = qp->qp_sec->ports_pkeys;
2065     - qp->qp_sec->ports_pkeys = new_pps;
2066     + tmp_pps = real_qp->qp_sec->ports_pkeys;
2067     + real_qp->qp_sec->ports_pkeys = new_pps;
2068     }
2069    
2070     if (tmp_pps) {
2071     @@ -611,7 +616,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
2072     port_pkey_list_remove(&tmp_pps->alt);
2073     }
2074     kfree(tmp_pps);
2075     - mutex_unlock(&qp->qp_sec->mutex);
2076     + mutex_unlock(&real_qp->qp_sec->mutex);
2077     }
2078     return ret;
2079     }
2080     diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
2081     index 0be42787759f..312444386f54 100644
2082     --- a/drivers/infiniband/hw/hfi1/chip.c
2083     +++ b/drivers/infiniband/hw/hfi1/chip.c
2084     @@ -13074,7 +13074,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
2085     first_sdma = last_general;
2086     last_sdma = first_sdma + dd->num_sdma;
2087     first_rx = last_sdma;
2088     - last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
2089     + last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
2090    
2091     /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
2092     dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
2093     @@ -13294,8 +13294,9 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
2094     * slow source, SDMACleanupDone)
2095     * N interrupts - one per used SDMA engine
2096     * M interrupt - one per kernel receive context
2097     + * V interrupt - one for each VNIC context
2098     */
2099     - total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
2100     + total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
2101    
2102     /* ask for MSI-X interrupts */
2103     request = request_msix(dd, total);
2104     @@ -13356,10 +13357,12 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
2105     * in array of contexts
2106     * freectxts - number of free user contexts
2107     * num_send_contexts - number of PIO send contexts being used
2108     + * num_vnic_contexts - number of contexts reserved for VNIC
2109     */
2110     static int set_up_context_variables(struct hfi1_devdata *dd)
2111     {
2112     unsigned long num_kernel_contexts;
2113     + u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
2114     int total_contexts;
2115     int ret;
2116     unsigned ngroups;
2117     @@ -13393,6 +13396,14 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
2118     num_kernel_contexts);
2119     num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
2120     }
2121     +
2122     + /* Accommodate VNIC contexts if possible */
2123     + if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
2124     + dd_dev_err(dd, "No receive contexts available for VNIC\n");
2125     + num_vnic_contexts = 0;
2126     + }
2127     + total_contexts = num_kernel_contexts + num_vnic_contexts;
2128     +
2129     /*
2130     * User contexts:
2131     * - default to 1 user context per real (non-HT) CPU core if
2132     @@ -13402,19 +13413,16 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
2133     num_user_contexts =
2134     cpumask_weight(&node_affinity.real_cpu_mask);
2135    
2136     - total_contexts = num_kernel_contexts + num_user_contexts;
2137     -
2138     /*
2139     * Adjust the counts given a global max.
2140     */
2141     - if (total_contexts > dd->chip_rcv_contexts) {
2142     + if (total_contexts + num_user_contexts > dd->chip_rcv_contexts) {
2143     dd_dev_err(dd,
2144     "Reducing # user receive contexts to: %d, from %d\n",
2145     - (int)(dd->chip_rcv_contexts - num_kernel_contexts),
2146     + (int)(dd->chip_rcv_contexts - total_contexts),
2147     (int)num_user_contexts);
2148     - num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
2149     /* recalculate */
2150     - total_contexts = num_kernel_contexts + num_user_contexts;
2151     + num_user_contexts = dd->chip_rcv_contexts - total_contexts;
2152     }
2153    
2154     /* each user context requires an entry in the RMT */
2155     @@ -13427,25 +13435,24 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
2156     user_rmt_reduced);
2157     /* recalculate */
2158     num_user_contexts = user_rmt_reduced;
2159     - total_contexts = num_kernel_contexts + num_user_contexts;
2160     }
2161    
2162     - /* Accommodate VNIC contexts */
2163     - if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
2164     - total_contexts += HFI1_NUM_VNIC_CTXT;
2165     + total_contexts += num_user_contexts;
2166    
2167     /* the first N are kernel contexts, the rest are user/vnic contexts */
2168     dd->num_rcv_contexts = total_contexts;
2169     dd->n_krcv_queues = num_kernel_contexts;
2170     dd->first_dyn_alloc_ctxt = num_kernel_contexts;
2171     + dd->num_vnic_contexts = num_vnic_contexts;
2172     dd->num_user_contexts = num_user_contexts;
2173     dd->freectxts = num_user_contexts;
2174     dd_dev_info(dd,
2175     - "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
2176     + "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
2177     (int)dd->chip_rcv_contexts,
2178     (int)dd->num_rcv_contexts,
2179     (int)dd->n_krcv_queues,
2180     - (int)dd->num_rcv_contexts - dd->n_krcv_queues);
2181     + dd->num_vnic_contexts,
2182     + dd->num_user_contexts);
2183    
2184     /*
2185     * Receive array allocation:
2186     diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
2187     index 3ac9c307a285..6ff44dc606eb 100644
2188     --- a/drivers/infiniband/hw/hfi1/hfi.h
2189     +++ b/drivers/infiniband/hw/hfi1/hfi.h
2190     @@ -1047,6 +1047,8 @@ struct hfi1_devdata {
2191     u64 z_send_schedule;
2192    
2193     u64 __percpu *send_schedule;
2194     + /* number of reserved contexts for VNIC usage */
2195     + u16 num_vnic_contexts;
2196     /* number of receive contexts in use by the driver */
2197     u32 num_rcv_contexts;
2198     /* number of pio send contexts in use by the driver */
2199     diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
2200     index 6d2702ef34ac..25e867393463 100644
2201     --- a/drivers/infiniband/hw/hfi1/sysfs.c
2202     +++ b/drivers/infiniband/hw/hfi1/sysfs.c
2203     @@ -543,7 +543,7 @@ static ssize_t show_nctxts(struct device *device,
2204     * give a more accurate picture of total contexts available.
2205     */
2206     return scnprintf(buf, PAGE_SIZE, "%u\n",
2207     - min(dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt,
2208     + min(dd->num_user_contexts,
2209     (u32)dd->sc_sizes[SC_USER].count));
2210     }
2211    
2212     diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
2213     index f419cbb05928..1a17708be46a 100644
2214     --- a/drivers/infiniband/hw/hfi1/vnic_main.c
2215     +++ b/drivers/infiniband/hw/hfi1/vnic_main.c
2216     @@ -840,6 +840,9 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
2217     struct rdma_netdev *rn;
2218     int i, size, rc;
2219    
2220     + if (!dd->num_vnic_contexts)
2221     + return ERR_PTR(-ENOMEM);
2222     +
2223     if (!port_num || (port_num > dd->num_pports))
2224     return ERR_PTR(-EINVAL);
2225    
2226     @@ -848,7 +851,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
2227    
2228     size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
2229     netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
2230     - dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT);
2231     + dd->chip_sdma_engines, dd->num_vnic_contexts);
2232     if (!netdev)
2233     return ERR_PTR(-ENOMEM);
2234    
2235     @@ -856,7 +859,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
2236     vinfo = opa_vnic_dev_priv(netdev);
2237     vinfo->dd = dd;
2238     vinfo->num_tx_q = dd->chip_sdma_engines;
2239     - vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT;
2240     + vinfo->num_rx_q = dd->num_vnic_contexts;
2241     vinfo->netdev = netdev;
2242     rn->free_rdma_netdev = hfi1_vnic_free_rn;
2243     rn->set_id = hfi1_vnic_set_vesw_id;
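
The hfi1 rework reserves kernel and VNIC receive contexts first and only then trims the user-context count to whatever the chip still has available, instead of bolting VNIC contexts on at the end. A worked example of the budgeting with made-up numbers:

#include <stdio.h>

int main(void)
{
	int chip_rcv_contexts = 160;	/* illustrative values */
	int num_kernel = 17, num_vnic = 8, num_user = 144;
	int total = num_kernel + num_vnic;	/* reserved up front */

	if (total + num_user > chip_rcv_contexts)
		num_user = chip_rcv_contexts - total;	/* trimmed to 135 */
	total += num_user;
	printf("kernel=%d vnic=%d user=%d total=%d\n",
	       num_kernel, num_vnic, num_user, total);
	return 0;
}
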
2244     diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
2245     index fa5ccdb3bb2a..60d7b493ed2d 100644
2246     --- a/drivers/infiniband/ulp/srp/ib_srp.c
2247     +++ b/drivers/infiniband/ulp/srp/ib_srp.c
2248     @@ -665,12 +665,19 @@ static void srp_path_rec_completion(int status,
2249     static int srp_lookup_path(struct srp_rdma_ch *ch)
2250     {
2251     struct srp_target_port *target = ch->target;
2252     - int ret;
2253     + int ret = -ENODEV;
2254    
2255     ch->path.numb_path = 1;
2256    
2257     init_completion(&ch->done);
2258    
2259     + /*
2260     + * Prevent the SCSI host from being removed by srp_remove_target()
2261     + * before srp_path_rec_completion() is called.
2262     + */
2263     + if (!scsi_host_get(target->scsi_host))
2264     + goto out;
2265     +
2266     ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
2267     target->srp_host->srp_dev->dev,
2268     target->srp_host->port,
2269     @@ -684,18 +691,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
2270     GFP_KERNEL,
2271     srp_path_rec_completion,
2272     ch, &ch->path_query);
2273     - if (ch->path_query_id < 0)
2274     - return ch->path_query_id;
2275     + ret = ch->path_query_id;
2276     + if (ret < 0)
2277     + goto put;
2278    
2279     ret = wait_for_completion_interruptible(&ch->done);
2280     if (ret < 0)
2281     - return ret;
2282     + goto put;
2283    
2284     - if (ch->status < 0)
2285     + ret = ch->status;
2286     + if (ret < 0)
2287     shost_printk(KERN_WARNING, target->scsi_host,
2288     PFX "Path record query failed\n");
2289    
2290     - return ch->status;
2291     +put:
2292     + scsi_host_put(target->scsi_host);
2293     +
2294     +out:
2295     + return ret;
2296     }
2297    
2298     static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
2299     diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
2300     index 9e8e9220f816..95178b4e3565 100644
2301     --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
2302     +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
2303     @@ -2777,7 +2777,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
2304     {
2305     const char *p;
2306     unsigned len, count, leading_zero_bytes;
2307     - int ret, rc;
2308     + int ret;
2309    
2310     p = name;
2311     if (strncasecmp(p, "0x", 2) == 0)
2312     @@ -2789,10 +2789,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
2313     count = min(len / 2, 16U);
2314     leading_zero_bytes = 16 - count;
2315     memset(i_port_id, 0, leading_zero_bytes);
2316     - rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
2317     - if (rc < 0)
2318     - pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
2319     - ret = 0;
2320     + ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
2321     + if (ret < 0)
2322     + pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
2323     out:
2324     return ret;
2325     }
2326     diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
2327     index b5df99c6f680..3b35271114ee 100644
2328     --- a/drivers/irqchip/irq-gic-v3.c
2329     +++ b/drivers/irqchip/irq-gic-v3.c
2330     @@ -1071,18 +1071,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
2331     int nr_parts;
2332     struct partition_affinity *parts;
2333    
2334     - parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
2335     + parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
2336     if (!parts_node)
2337     return;
2338    
2339     nr_parts = of_get_child_count(parts_node);
2340    
2341     if (!nr_parts)
2342     - return;
2343     + goto out_put_node;
2344    
2345     parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
2346     if (WARN_ON(!parts))
2347     - return;
2348     + goto out_put_node;
2349    
2350     for_each_child_of_node(parts_node, child_part) {
2351     struct partition_affinity *part;
2352     @@ -1149,6 +1149,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
2353    
2354     gic_data.ppi_descs[i] = desc;
2355     }
2356     +
2357     +out_put_node:
2358     + of_node_put(parts_node);
2359     }
2360    
2361     static void __init gic_of_setup_kvm_info(struct device_node *node)
2362     diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
2363     index ae6146311934..f052a3eb2098 100644
2364     --- a/drivers/mailbox/bcm-flexrm-mailbox.c
2365     +++ b/drivers/mailbox/bcm-flexrm-mailbox.c
2366     @@ -1365,8 +1365,8 @@ static void flexrm_shutdown(struct mbox_chan *chan)
2367     /* Disable/inactivate ring */
2368     writel_relaxed(0x0, ring->regs + RING_CONTROL);
2369    
2370     - /* Flush ring with timeout of 1s */
2371     - timeout = 1000;
2372     + /* Set ring flush state */
2373     + timeout = 1000; /* timeout of 1s */
2374     writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
2375     ring->regs + RING_CONTROL);
2376     do {
2377     @@ -1374,7 +1374,23 @@ static void flexrm_shutdown(struct mbox_chan *chan)
2378     FLUSH_DONE_MASK)
2379     break;
2380     mdelay(1);
2381     - } while (timeout--);
2382     + } while (--timeout);
2383     + if (!timeout)
2384     + dev_err(ring->mbox->dev,
2385     + "setting ring%d flush state timed out\n", ring->num);
2386     +
2387     + /* Clear ring flush state */
2388     + timeout = 1000; /* timeout of 1s */
2389     + writel_relaxed(0x0, ring->regs + RING_CONTROL);
2390     + do {
2391     + if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
2392     + FLUSH_DONE_MASK))
2393     + break;
2394     + mdelay(1);
2395     + } while (--timeout);
2396     + if (!timeout)
2397     + dev_err(ring->mbox->dev,
2398     + "clearing ring%d flush state timed out\n", ring->num);
2399    
2400     /* Abort all in-flight requests */
2401     for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
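
Besides adding the flush-clear step, the hunk switches the poll loops from 'timeout--' to '--timeout'. With post-decrement the counter ends at -1 on expiry, so the 'if (!timeout)' test after the loop can never report a timeout; with pre-decrement it ends at exactly 0. A small demonstration:

#include <stdio.h>

int main(void)
{
	int timeout;

	timeout = 3;
	while (timeout--)
		;	/* body runs to exhaustion */
	printf("post-decrement leaves timeout = %d\n", timeout);  /* -1 */

	timeout = 3;
	do {
		/* poll the hardware here */
	} while (--timeout);
	printf("pre-decrement leaves timeout = %d\n", timeout);   /*  0 */
	return 0;
}
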
2402     diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
2403     index 08035634795c..c9934139d609 100644
2404     --- a/drivers/md/bcache/alloc.c
2405     +++ b/drivers/md/bcache/alloc.c
2406     @@ -407,7 +407,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
2407    
2408     finish_wait(&ca->set->bucket_wait, &w);
2409     out:
2410     - wake_up_process(ca->alloc_thread);
2411     + if (ca->alloc_thread)
2412     + wake_up_process(ca->alloc_thread);
2413    
2414     trace_bcache_alloc(ca, reserve);
2415    
2416     diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
2417     index d2121637b4ab..cae57b5be817 100644
2418     --- a/drivers/md/bitmap.c
2419     +++ b/drivers/md/bitmap.c
2420     @@ -625,7 +625,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
2421     err = read_sb_page(bitmap->mddev,
2422     offset,
2423     sb_page,
2424     - 0, PAGE_SIZE);
2425     + 0, sizeof(bitmap_super_t));
2426     }
2427     if (err)
2428     return err;
2429     @@ -2123,7 +2123,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2430     if (store.sb_page && bitmap->storage.sb_page)
2431     memcpy(page_address(store.sb_page),
2432     page_address(bitmap->storage.sb_page),
2433     - PAGE_SIZE);
2434     + sizeof(bitmap_super_t));
2435     bitmap_file_unmap(&bitmap->storage);
2436     bitmap->storage = store;
2437    
2438     diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
2439     index d216a8f7bc22..8e3adcb46851 100644
2440     --- a/drivers/md/dm-bufio.c
2441     +++ b/drivers/md/dm-bufio.c
2442     @@ -974,7 +974,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
2443     buffers = c->minimum_buffers;
2444    
2445     *limit_buffers = buffers;
2446     - *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
2447     + *threshold_buffers = mult_frac(buffers,
2448     + DM_BUFIO_WRITEBACK_PERCENT, 100);
2449     }
2450    
2451     /*
2452     @@ -1910,19 +1911,15 @@ static int __init dm_bufio_init(void)
2453     memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
2454     memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
2455    
2456     - mem = (__u64)((totalram_pages - totalhigh_pages) *
2457     - DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
2458     + mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
2459     + DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2460    
2461     if (mem > ULONG_MAX)
2462     mem = ULONG_MAX;
2463    
2464     #ifdef CONFIG_MMU
2465     - /*
2466     - * Get the size of vmalloc space the same way as VMALLOC_TOTAL
2467     - * in fs/proc/internal.h
2468     - */
2469     - if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
2470     - mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
2471     + if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2472     + mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2473     #endif
2474    
2475     dm_bufio_default_cache_size = mem;
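
Both dm-bufio hunks replace the open-coded 'x * percent / 100' with mult_frac(), which multiplies quotient and remainder separately so the intermediate product cannot overflow. A userspace rendering of the kernel macro's logic, plus a case that goes wrong on a 32-bit build (where unsigned long is 32 bits; on 64-bit both lines print the same value):

#include <stdio.h>

#define mult_frac(x, numer, denom) ({		\
	__typeof__(x) quot = (x) / (denom);	\
	__typeof__(x) rem  = (x) % (denom);	\
	(quot * (numer)) + ((rem * (numer)) / (denom)); \
})

int main(void)
{
	unsigned long buffers = 3000000000UL;

	/* 3000000000 * 75 overflows a 32-bit unsigned long... */
	printf("naive: %lu\n", buffers * 75 / 100);
	/* ...while the split form keeps the intermediates in range here. */
	printf("safe:  %lu\n", mult_frac(buffers, 75, 100));
	return 0;
}
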
2476     diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
2477     index 8785134c9f1f..0b7edfd0b454 100644
2478     --- a/drivers/md/dm-cache-target.c
2479     +++ b/drivers/md/dm-cache-target.c
2480     @@ -1201,6 +1201,18 @@ static void background_work_end(struct cache *cache)
2481    
2482     /*----------------------------------------------------------------*/
2483    
2484     +static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
2485     +{
2486     + return (bio_data_dir(bio) == WRITE) &&
2487     + (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
2488     +}
2489     +
2490     +static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
2491     +{
2492     + return writeback_mode(&cache->features) &&
2493     + (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
2494     +}
2495     +
2496     static void quiesce(struct dm_cache_migration *mg,
2497     void (*continuation)(struct work_struct *))
2498     {
2499     @@ -1474,12 +1486,50 @@ static void mg_upgrade_lock(struct work_struct *ws)
2500     }
2501     }
2502    
2503     +static void mg_full_copy(struct work_struct *ws)
2504     +{
2505     + struct dm_cache_migration *mg = ws_to_mg(ws);
2506     + struct cache *cache = mg->cache;
2507     + struct policy_work *op = mg->op;
2508     + bool is_policy_promote = (op->op == POLICY_PROMOTE);
2509     +
2510     + if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
2511     + is_discarded_oblock(cache, op->oblock)) {
2512     + mg_upgrade_lock(ws);
2513     + return;
2514     + }
2515     +
2516     + init_continuation(&mg->k, mg_upgrade_lock);
2517     +
2518     + if (copy(mg, is_policy_promote)) {
2519     + DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
2520     + mg->k.input = BLK_STS_IOERR;
2521     + mg_complete(mg, false);
2522     + }
2523     +}
2524     +
2525     static void mg_copy(struct work_struct *ws)
2526     {
2527     - int r;
2528     struct dm_cache_migration *mg = ws_to_mg(ws);
2529    
2530     if (mg->overwrite_bio) {
2531     + /*
2532     + * No exclusive lock was held when we last checked if the bio
2533     + * was optimisable. So we have to check again in case things
2534     + * have changed (e.g., the block may no longer be discarded).
2535     + */
2536     + if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
2537     + /*
2538     + * Fall back to a real full copy after doing some tidying up.
2539     + */
2540     + bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
2541     + BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
2542     + mg->overwrite_bio = NULL;
2543     + inc_io_migrations(mg->cache);
2544     + mg_full_copy(ws);
2545     + return;
2546     + }
2547     +
2548     /*
2549     * It's safe to do this here, even though it's new data
2550     * because all IO has been locked out of the block.
2551     @@ -1489,26 +1539,8 @@ static void mg_copy(struct work_struct *ws)
2552     */
2553     overwrite(mg, mg_update_metadata_after_copy);
2554    
2555     - } else {
2556     - struct cache *cache = mg->cache;
2557     - struct policy_work *op = mg->op;
2558     - bool is_policy_promote = (op->op == POLICY_PROMOTE);
2559     -
2560     - if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
2561     - is_discarded_oblock(cache, op->oblock)) {
2562     - mg_upgrade_lock(ws);
2563     - return;
2564     - }
2565     -
2566     - init_continuation(&mg->k, mg_upgrade_lock);
2567     -
2568     - r = copy(mg, is_policy_promote);
2569     - if (r) {
2570     - DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
2571     - mg->k.input = BLK_STS_IOERR;
2572     - mg_complete(mg, false);
2573     - }
2574     - }
2575     + } else
2576     + mg_full_copy(ws);
2577     }
2578    
2579     static int mg_lock_writes(struct dm_cache_migration *mg)
2580     @@ -1748,18 +1780,6 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
2581    
2582     /*----------------------------------------------------------------*/
2583    
2584     -static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
2585     -{
2586     - return (bio_data_dir(bio) == WRITE) &&
2587     - (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
2588     -}
2589     -
2590     -static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
2591     -{
2592     - return writeback_mode(&cache->features) &&
2593     - (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
2594     -}
2595     -
2596     static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
2597     bool *commit_needed)
2598     {
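
The dm-cache restructuring exists so that mg_copy() can re-run optimisable_bio() once the migration path holds its lock; the earlier evaluation in map_bio() happened without an exclusive lock and may have gone stale. The generic check/lock/re-check shape, reduced to a compilable toy (names are illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* May change between the lockless and the locked evaluation. */
static bool still_optimisable(void) { return false; }
static void fast_path(void) { /* overwrite() in the patch */ }
static void slow_path(void) { /* mg_full_copy() in the patch */ }

void do_migration(void)
{
	/* a first, lockless check has already steered us here */
	pthread_mutex_lock(&lock);
	if (still_optimisable())	/* re-check under the lock */
		fast_path();
	else
		slow_path();
	pthread_mutex_unlock(&lock);
}
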
2599     diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
2600     index 203144762f36..6a14f945783c 100644
2601     --- a/drivers/md/dm-core.h
2602     +++ b/drivers/md/dm-core.h
2603     @@ -29,7 +29,6 @@ struct dm_kobject_holder {
2604     * DM targets must _not_ dereference a mapped_device to directly access its members!
2605     */
2606     struct mapped_device {
2607     - struct srcu_struct io_barrier;
2608     struct mutex suspend_lock;
2609    
2610     /*
2611     @@ -127,6 +126,8 @@ struct mapped_device {
2612     struct blk_mq_tag_set *tag_set;
2613     bool use_blk_mq:1;
2614     bool init_tio_pdu:1;
2615     +
2616     + struct srcu_struct io_barrier;
2617     };
2618    
2619     void dm_init_md_queue(struct mapped_device *md);
2620     diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
2621     index 96ab46512e1f..9fc12f556534 100644
2622     --- a/drivers/md/dm-crypt.c
2623     +++ b/drivers/md/dm-crypt.c
2624     @@ -1075,7 +1075,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
2625     BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
2626    
2627     /* Reject unexpected unaligned bio. */
2628     - if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
2629     + if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
2630     return -EIO;
2631    
2632     dmreq = dmreq_of_req(cc, req);
2633     @@ -1168,7 +1168,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
2634     int r = 0;
2635    
2636     /* Reject unexpected unaligned bio. */
2637     - if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
2638     + if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
2639     return -EIO;
2640    
2641     dmreq = dmreq_of_req(cc, req);
2642     diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
2643     index 096fe9b66c50..5e6737a44468 100644
2644     --- a/drivers/md/dm-integrity.c
2645     +++ b/drivers/md/dm-integrity.c
2646     @@ -1376,7 +1376,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
2647     struct bvec_iter iter;
2648     struct bio_vec bv;
2649     bio_for_each_segment(bv, bio, iter) {
2650     - if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
2651     + if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
2652     DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
2653     bv.bv_offset, bv.bv_len, ic->sectors_per_block);
2654     return DM_MAPIO_KILL;
2655     diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
2656     index 11f273d2f018..e8094d8fbe0d 100644
2657     --- a/drivers/md/dm-mpath.c
2658     +++ b/drivers/md/dm-mpath.c
2659     @@ -499,8 +499,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
2660     if (IS_ERR(clone)) {
2661     /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
2662     bool queue_dying = blk_queue_dying(q);
2663     - DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
2664     - PTR_ERR(clone), queue_dying ? " (path offline)" : "");
2665     if (queue_dying) {
2666     atomic_inc(&m->pg_init_in_progress);
2667     activate_or_offline_path(pgpath);
2668     diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
2669     index ef7b8f201f73..4287fc9f3527 100644
2670     --- a/drivers/md/dm-table.c
2671     +++ b/drivers/md/dm-table.c
2672     @@ -1758,13 +1758,12 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
2673     return true;
2674     }
2675    
2676     -
2677     -static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
2678     - sector_t start, sector_t len, void *data)
2679     +static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
2680     + sector_t start, sector_t len, void *data)
2681     {
2682     struct request_queue *q = bdev_get_queue(dev->bdev);
2683    
2684     - return q && blk_queue_discard(q);
2685     + return q && !blk_queue_discard(q);
2686     }
2687    
2688     static bool dm_table_supports_discards(struct dm_table *t)
2689     @@ -1772,28 +1771,24 @@ static bool dm_table_supports_discards(struct dm_table *t)
2690     struct dm_target *ti;
2691     unsigned i;
2692    
2693     - /*
2694     - * Unless any target used by the table set discards_supported,
2695     - * require at least one underlying device to support discards.
2696     - * t->devices includes internal dm devices such as mirror logs
2697     - * so we need to use iterate_devices here, which targets
2698     - * supporting discard selectively must provide.
2699     - */
2700     for (i = 0; i < dm_table_get_num_targets(t); i++) {
2701     ti = dm_table_get_target(t, i);
2702    
2703     if (!ti->num_discard_bios)
2704     - continue;
2705     -
2706     - if (ti->discards_supported)
2707     - return true;
2708     + return false;
2709    
2710     - if (ti->type->iterate_devices &&
2711     - ti->type->iterate_devices(ti, device_discard_capable, NULL))
2712     - return true;
2713     + /*
2714     + * Either the target provides discard support (as implied by setting
2715     + * 'discards_supported') or it relies on _all_ data devices having
2716     + * discard support.
2717     + */
2718     + if (!ti->discards_supported &&
2719     + (!ti->type->iterate_devices ||
2720     + ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
2721     + return false;
2722     }
2723    
2724     - return false;
2725     + return true;
2726     }
2727    
2728     void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
2729     diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
2730     index b87c1741da4b..6d7bda6f8190 100644
2731     --- a/drivers/md/dm-zoned-target.c
2732     +++ b/drivers/md/dm-zoned-target.c
2733     @@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
2734     struct dmz_target *dmz = ti->private;
2735     struct request_queue *q;
2736     struct dmz_dev *dev;
2737     + sector_t aligned_capacity;
2738     int ret;
2739    
2740     /* Get the target device */
2741     @@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
2742     goto err;
2743     }
2744    
2745     + q = bdev_get_queue(dev->bdev);
2746     dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
2747     - if (ti->begin || (ti->len != dev->capacity)) {
2748     + aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
2749     + if (ti->begin ||
2750     + ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
2751     ti->error = "Partial mapping not supported";
2752     ret = -EINVAL;
2753     goto err;
2754     }
2755    
2756     - q = bdev_get_queue(dev->bdev);
2757     - dev->zone_nr_sectors = q->limits.chunk_sectors;
2758     + dev->zone_nr_sectors = blk_queue_zone_sectors(q);
2759     dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
2760    
2761     dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
2762     @@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm_target *ti,
2763     iterate_devices_callout_fn fn, void *data)
2764     {
2765     struct dmz_target *dmz = ti->private;
2766     + struct dmz_dev *dev = dmz->dev;
2767     + sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
2768    
2769     - return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data);
2770     + return fn(ti, dmz->ddev, 0, capacity, data);
2771     }
2772    
2773     static struct target_type dmz_type = {
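
dm-zoned now also accepts a target whose length equals the device capacity rounded down to a zone boundary, using the usual power-of-two mask (block-layer zone sizes are powers of two). A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long capacity = 1000000;		/* sectors */
	unsigned long long zone_sectors = 524288;	/* 2^19 */
	unsigned long long aligned = capacity & ~(zone_sectors - 1);

	printf("aligned capacity: %llu\n", aligned);	/* 524288 */
	return 0;
}
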
2774     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
2775     index 4be85324f44d..804419635cc7 100644
2776     --- a/drivers/md/dm.c
2777     +++ b/drivers/md/dm.c
2778     @@ -1695,7 +1695,7 @@ static struct mapped_device *alloc_dev(int minor)
2779     struct mapped_device *md;
2780     void *old_md;
2781    
2782     - md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
2783     + md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
2784     if (!md) {
2785     DMWARN("unable to allocate device, out of memory.");
2786     return NULL;
2787     @@ -1795,7 +1795,7 @@ static struct mapped_device *alloc_dev(int minor)
2788     bad_minor:
2789     module_put(THIS_MODULE);
2790     bad_module_get:
2791     - kfree(md);
2792     + kvfree(md);
2793     return NULL;
2794     }
2795    
2796     @@ -1814,7 +1814,7 @@ static void free_dev(struct mapped_device *md)
2797     free_minor(minor);
2798    
2799     module_put(THIS_MODULE);
2800     - kfree(md);
2801     + kvfree(md);
2802     }
2803    
2804     static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2805     @@ -2709,11 +2709,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2806    
2807     md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2808    
2809     - if (test_bit(DMF_FREEING, &md->flags) ||
2810     - dm_deleting_md(md))
2811     - return NULL;
2812     -
2813     + spin_lock(&_minor_lock);
2814     + if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2815     + md = NULL;
2816     + goto out;
2817     + }
2818     dm_get(md);
2819     +out:
2820     + spin_unlock(&_minor_lock);
2821     +
2822     return md;
2823     }
2824    
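Two independent fixes in the dm.c hunks: kzalloc_node() becomes kvzalloc_node(), so both free sites must become kvfree() (the allocation may now be vmalloc-backed), and dm_get_from_kobject() takes _minor_lock so the DMF_FREEING/deleting test and the dm_get() happen atomically. A sketch of that second, check-then-get pattern under a lock (a pthread analogue with hypothetical names, not the kernel code):

        #include <pthread.h>
        #include <stdbool.h>

        struct dev {
                pthread_mutex_t lock;
                bool freeing;
                int refcount;
        };

        /* Take a reference only if the object is not being torn down.
         * Flag test and refcount bump sit under one lock, so no free
         * can slip in between them. Returns true if a ref was taken. */
        static bool dev_try_get(struct dev *d)
        {
                bool ok = false;

                pthread_mutex_lock(&d->lock);
                if (!d->freeing) {
                        d->refcount++;
                        ok = true;
                }
                pthread_mutex_unlock(&d->lock);
                return ok;
        }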
2825     diff --git a/drivers/md/md.c b/drivers/md/md.c
2826     index 0ff1bbf6c90e..e019cf8c0d13 100644
2827     --- a/drivers/md/md.c
2828     +++ b/drivers/md/md.c
2829     @@ -8039,7 +8039,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
2830     if (did_change)
2831     sysfs_notify_dirent_safe(mddev->sysfs_state);
2832     wait_event(mddev->sb_wait,
2833     - !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended);
2834     + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
2835     + mddev->suspended);
2836     if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2837     percpu_ref_put(&mddev->writes_pending);
2838     return false;
2839     @@ -8110,7 +8111,6 @@ void md_allow_write(struct mddev *mddev)
2840     sysfs_notify_dirent_safe(mddev->sysfs_state);
2841     /* wait for the dirty state to be recorded in the metadata */
2842     wait_event(mddev->sb_wait,
2843     - !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
2844     !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
2845     } else
2846     spin_unlock(&mddev->lock);
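The md_write_start() fix changes the wait to wake on either superblock-update completion or suspension, and the existing recheck then backs the write out if the update is still pending; previously a suspended array could wait forever. A condensed sketch of that wake-then-recheck shape (hypothetical names, not the md API):

        #include <stdbool.h>

        struct state { bool change_pending; bool suspended; };

        /* After the fix the wait condition is:
         *     !change_pending || suspended
         * and the caller rechecks change_pending afterwards. */
        static bool write_may_proceed(const struct state *s)
        {
                if (s->change_pending)  /* woken by suspend, not completion */
                        return false;   /* caller drops its reference and fails */
                return true;
        }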
2847     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2848     index f3f3e40dc9d8..e4e8f9e565b7 100644
2849     --- a/drivers/md/raid1.c
2850     +++ b/drivers/md/raid1.c
2851     @@ -990,14 +990,6 @@ static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
2852     _wait_barrier(conf, idx);
2853     }
2854    
2855     -static void wait_all_barriers(struct r1conf *conf)
2856     -{
2857     - int idx;
2858     -
2859     - for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
2860     - _wait_barrier(conf, idx);
2861     -}
2862     -
2863     static void _allow_barrier(struct r1conf *conf, int idx)
2864     {
2865     atomic_dec(&conf->nr_pending[idx]);
2866     @@ -1011,14 +1003,6 @@ static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
2867     _allow_barrier(conf, idx);
2868     }
2869    
2870     -static void allow_all_barriers(struct r1conf *conf)
2871     -{
2872     - int idx;
2873     -
2874     - for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
2875     - _allow_barrier(conf, idx);
2876     -}
2877     -
2878     /* conf->resync_lock should be held */
2879     static int get_unqueued_pending(struct r1conf *conf)
2880     {
2881     @@ -1654,8 +1638,12 @@ static void print_conf(struct r1conf *conf)
2882    
2883     static void close_sync(struct r1conf *conf)
2884     {
2885     - wait_all_barriers(conf);
2886     - allow_all_barriers(conf);
2887     + int idx;
2888     +
2889     + for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
2890     + _wait_barrier(conf, idx);
2891     + _allow_barrier(conf, idx);
2892     + }
2893    
2894     mempool_destroy(conf->r1buf_pool);
2895     conf->r1buf_pool = NULL;
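Inlining the two helpers changes close_sync() from "wait on all BARRIER_BUCKETS_NR buckets, then release all" to waiting on and releasing one bucket at a time, so at most one bucket's worth of writers is stalled at any moment. A schematic of the interleaving (types and the bucket count are stand-ins):

        #define BARRIER_BUCKETS_NR 64   /* illustrative value */

        struct bucket { int nr_pending; /* per-bucket barrier state */ };

        static void wait_bucket(struct bucket *b)  { (void)b; /* block until drained */ }
        static void allow_bucket(struct bucket *b) { (void)b; /* release waiters */ }

        /* One bucket in flight at a time: bucket i is released before
         * bucket i + 1 is waited on. */
        static void close_sync_sketch(struct bucket *buckets)
        {
                for (int i = 0; i < BARRIER_BUCKETS_NR; i++) {
                        wait_bucket(&buckets[i]);
                        allow_bucket(&buckets[i]);
                }
        }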
2896     diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
2897     index cba092bcb76d..a0fe80df0cbd 100644
2898     --- a/drivers/media/platform/qcom/venus/core.h
2899     +++ b/drivers/media/platform/qcom/venus/core.h
2900     @@ -194,7 +194,6 @@ struct venus_buffer {
2901     * @fh: a holder of v4l file handle structure
2902     * @streamon_cap: stream on flag for capture queue
2903     * @streamon_out: stream on flag for output queue
2904     - * @cmd_stop: a flag to signal encoder/decoder commands
2905     * @width: current capture width
2906     * @height: current capture height
2907     * @out_width: current output width
2908     @@ -258,7 +257,6 @@ struct venus_inst {
2909     } controls;
2910     struct v4l2_fh fh;
2911     unsigned int streamon_cap, streamon_out;
2912     - bool cmd_stop;
2913     u32 width;
2914     u32 height;
2915     u32 out_width;
2916     diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
2917     index 9b2a401a4891..0ce9559a2924 100644
2918     --- a/drivers/media/platform/qcom/venus/helpers.c
2919     +++ b/drivers/media/platform/qcom/venus/helpers.c
2920     @@ -623,13 +623,6 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
2921    
2922     mutex_lock(&inst->lock);
2923    
2924     - if (inst->cmd_stop) {
2925     - vbuf->flags |= V4L2_BUF_FLAG_LAST;
2926     - v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
2927     - inst->cmd_stop = false;
2928     - goto unlock;
2929     - }
2930     -
2931     v4l2_m2m_buf_queue(m2m_ctx, vbuf);
2932    
2933     if (!(inst->streamon_out & inst->streamon_cap))
2934     diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
2935     index c09490876516..ba29fd4d4984 100644
2936     --- a/drivers/media/platform/qcom/venus/hfi.c
2937     +++ b/drivers/media/platform/qcom/venus/hfi.c
2938     @@ -484,6 +484,7 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
2939    
2940     return -EINVAL;
2941     }
2942     +EXPORT_SYMBOL_GPL(hfi_session_process_buf);
2943    
2944     irqreturn_t hfi_isr_thread(int irq, void *dev_id)
2945     {
2946     diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
2947     index 1caae8feaa36..734ce11b0ed0 100644
2948     --- a/drivers/media/platform/qcom/venus/hfi_venus.c
2949     +++ b/drivers/media/platform/qcom/venus/hfi_venus.c
2950     @@ -344,7 +344,7 @@ static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
2951     desc->attrs = DMA_ATTR_WRITE_COMBINE;
2952     desc->size = ALIGN(size, SZ_4K);
2953    
2954     - desc->kva = dma_alloc_attrs(dev, size, &desc->da, GFP_KERNEL,
2955     + desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
2956     desc->attrs);
2957     if (!desc->kva)
2958     return -ENOMEM;
2959     @@ -710,10 +710,8 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
2960     if (ret)
2961     return ret;
2962    
2963     - hdev->ifaceq_table.kva = desc.kva;
2964     - hdev->ifaceq_table.da = desc.da;
2965     - hdev->ifaceq_table.size = IFACEQ_TABLE_SIZE;
2966     - offset = hdev->ifaceq_table.size;
2967     + hdev->ifaceq_table = desc;
2968     + offset = IFACEQ_TABLE_SIZE;
2969    
2970     for (i = 0; i < IFACEQ_NUM; i++) {
2971     queue = &hdev->queues[i];
2972     @@ -755,9 +753,7 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
2973     if (ret) {
2974     hdev->sfr.da = 0;
2975     } else {
2976     - hdev->sfr.da = desc.da;
2977     - hdev->sfr.kva = desc.kva;
2978     - hdev->sfr.size = ALIGNED_SFR_SIZE;
2979     + hdev->sfr = desc;
2980     sfr = hdev->sfr.kva;
2981     sfr->buf_size = ALIGNED_SFR_SIZE;
2982     }
2983     diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
2984     index da611a5eb670..c9e9576bb08a 100644
2985     --- a/drivers/media/platform/qcom/venus/vdec.c
2986     +++ b/drivers/media/platform/qcom/venus/vdec.c
2987     @@ -469,8 +469,14 @@ static int vdec_subscribe_event(struct v4l2_fh *fh,
2988     static int
2989     vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
2990     {
2991     - if (cmd->cmd != V4L2_DEC_CMD_STOP)
2992     + switch (cmd->cmd) {
2993     + case V4L2_DEC_CMD_STOP:
2994     + if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
2995     + return -EINVAL;
2996     + break;
2997     + default:
2998     return -EINVAL;
2999     + }
3000    
3001     return 0;
3002     }
3003     @@ -479,6 +485,7 @@ static int
3004     vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
3005     {
3006     struct venus_inst *inst = to_inst(file);
3007     + struct hfi_frame_data fdata = {0};
3008     int ret;
3009    
3010     ret = vdec_try_decoder_cmd(file, fh, cmd);
3011     @@ -486,12 +493,23 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
3012     return ret;
3013    
3014     mutex_lock(&inst->lock);
3015     - inst->cmd_stop = true;
3016     - mutex_unlock(&inst->lock);
3017    
3018     - hfi_session_flush(inst);
3019     + /*
3020     + * Implement V4L2_DEC_CMD_STOP by enqueuing an empty buffer on the decoder
3021     + * input to signal EOS.
3022     + */
3023     + if (!(inst->streamon_out & inst->streamon_cap))
3024     + goto unlock;
3025     +
3026     + fdata.buffer_type = HFI_BUFFER_INPUT;
3027     + fdata.flags |= HFI_BUFFERFLAG_EOS;
3028     + fdata.device_addr = 0xdeadbeef;
3029    
3030     - return 0;
3031     + ret = hfi_session_process_buf(inst, &fdata);
3032     +
3033     +unlock:
3034     + mutex_unlock(&inst->lock);
3035     + return ret;
3036     }
3037    
3038     static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
3039     @@ -718,7 +736,6 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
3040     inst->reconfig = false;
3041     inst->sequence_cap = 0;
3042     inst->sequence_out = 0;
3043     - inst->cmd_stop = false;
3044    
3045     ret = vdec_init_session(inst);
3046     if (ret)
3047     @@ -807,11 +824,6 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
3048     vb->timestamp = timestamp_us * NSEC_PER_USEC;
3049     vbuf->sequence = inst->sequence_cap++;
3050    
3051     - if (inst->cmd_stop) {
3052     - vbuf->flags |= V4L2_BUF_FLAG_LAST;
3053     - inst->cmd_stop = false;
3054     - }
3055     -
3056     if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
3057     const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
3058    
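With the cmd_stop flag gone, the decoder signals end-of-stream the way the firmware expects: a zero-length input buffer flagged HFI_BUFFERFLAG_EOS, with 0xdeadbeef as a sentinel device address since the buffer carries no data. A schematic of building such a marker (the struct layout here is a guess for illustration, not the real struct hfi_frame_data):

        #include <stdint.h>
        #include <string.h>

        #define HFI_BUFFER_INPUT        1       /* illustrative values */
        #define HFI_BUFFERFLAG_EOS      0x2

        struct eos_frame {
                uint32_t buffer_type;
                uint32_t flags;
                uint32_t device_addr;
                uint32_t filled_len;
        };

        /* An empty EOS marker: no payload, just the flag and a sentinel
         * address the firmware will not dereference. */
        static void fill_eos_frame(struct eos_frame *f)
        {
                memset(f, 0, sizeof(*f));
                f->buffer_type = HFI_BUFFER_INPUT;
                f->flags = HFI_BUFFERFLAG_EOS;
                f->device_addr = 0xdeadbeef;
        }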
3059     diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
3060     index 6f123a387cf9..3fcf0e9b7b29 100644
3061     --- a/drivers/media/platform/qcom/venus/venc.c
3062     +++ b/drivers/media/platform/qcom/venus/venc.c
3063     @@ -963,13 +963,12 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
3064     if (!vbuf)
3065     return;
3066    
3067     - vb = &vbuf->vb2_buf;
3068     - vb->planes[0].bytesused = bytesused;
3069     - vb->planes[0].data_offset = data_offset;
3070     -
3071     vbuf->flags = flags;
3072    
3073     if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
3074     + vb = &vbuf->vb2_buf;
3075     + vb2_set_plane_payload(vb, 0, bytesused + data_offset);
3076     + vb->planes[0].data_offset = data_offset;
3077     vb->timestamp = timestamp_us * NSEC_PER_USEC;
3078     vbuf->sequence = inst->sequence_cap++;
3079     } else {
3080     diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
3081     index d2223c04e9ad..4c8f456238bc 100644
3082     --- a/drivers/media/rc/ir-lirc-codec.c
3083     +++ b/drivers/media/rc/ir-lirc-codec.c
3084     @@ -298,11 +298,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
3085     if (!dev->max_timeout)
3086     return -ENOTTY;
3087    
3088     + /* Check for multiplication overflow */
3089     + if (val > U32_MAX / 1000)
3090     + return -EINVAL;
3091     +
3092     tmp = val * 1000;
3093    
3094     - if (tmp < dev->min_timeout ||
3095     - tmp > dev->max_timeout)
3096     - return -EINVAL;
3097     + if (tmp < dev->min_timeout || tmp > dev->max_timeout)
3098     + return -EINVAL;
3099    
3100     if (dev->s_timeout)
3101     ret = dev->s_timeout(dev, tmp);
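val is multiplied by 1000 before the range check, so a large value could wrap around and pass validation; the added guard rejects anything whose product would not fit in 32 bits. The same check in isolation (a minimal sketch):

        #include <stdbool.h>
        #include <stdint.h>

        /* Overflow-safe scale-by-1000: test against the quotient before
         * multiplying, exactly the shape of the ioctl fix above. */
        static bool scale_timeout(uint32_t val, uint32_t *out)
        {
                if (val > UINT32_MAX / 1000)
                        return false;   /* product would wrap */
                *out = val * 1000;
                return true;
        }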
3102     diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
3103     index 817c18f2ddd1..a95d09acc22a 100644
3104     --- a/drivers/media/rc/ir-nec-decoder.c
3105     +++ b/drivers/media/rc/ir-nec-decoder.c
3106     @@ -87,8 +87,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
3107     data->state = STATE_BIT_PULSE;
3108     return 0;
3109     } else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) {
3110     - rc_repeat(dev);
3111     - IR_dprintk(1, "Repeat last key\n");
3112     data->state = STATE_TRAILER_PULSE;
3113     return 0;
3114     }
3115     @@ -151,19 +149,26 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
3116     if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2))
3117     break;
3118    
3119     - address = bitrev8((data->bits >> 24) & 0xff);
3120     - not_address = bitrev8((data->bits >> 16) & 0xff);
3121     - command = bitrev8((data->bits >> 8) & 0xff);
3122     - not_command = bitrev8((data->bits >> 0) & 0xff);
3123     + if (data->count == NEC_NBITS) {
3124     + address = bitrev8((data->bits >> 24) & 0xff);
3125     + not_address = bitrev8((data->bits >> 16) & 0xff);
3126     + command = bitrev8((data->bits >> 8) & 0xff);
3127     + not_command = bitrev8((data->bits >> 0) & 0xff);
3128     +
3129     + scancode = ir_nec_bytes_to_scancode(address,
3130     + not_address,
3131     + command,
3132     + not_command,
3133     + &rc_proto);
3134    
3135     - scancode = ir_nec_bytes_to_scancode(address, not_address,
3136     - command, not_command,
3137     - &rc_proto);
3138     + if (data->is_nec_x)
3139     + data->necx_repeat = true;
3140    
3141     - if (data->is_nec_x)
3142     - data->necx_repeat = true;
3143     + rc_keydown(dev, rc_proto, scancode, 0);
3144     + } else {
3145     + rc_repeat(dev);
3146     + }
3147    
3148     - rc_keydown(dev, rc_proto, scancode, 0);
3149     data->state = STATE_INACTIVE;
3150     return 0;
3151     }
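A full NEC frame carries NEC_NBITS of address/command data while a repeat burst carries none, so the fix only extracts a scancode and calls rc_keydown() for the full-frame case and maps the short trailer to rc_repeat(). For reference, a sketch of the byte extraction from the 32-bit shift register (bits arrive LSB first, hence the per-byte reversal; bitrev8 here is a local reimplementation of the kernel helper):

        #include <stdint.h>

        /* Reverse the bit order of one byte. */
        static uint8_t bitrev8(uint8_t b)
        {
                b = (b & 0xF0) >> 4 | (b & 0x0F) << 4;
                b = (b & 0xCC) >> 2 | (b & 0x33) << 2;
                b = (b & 0xAA) >> 1 | (b & 0x55) << 1;
                return b;
        }

        /* Unpack address, ~address, command, ~command from the frame. */
        static void nec_unpack(uint32_t bits, uint8_t out[4])
        {
                out[0] = bitrev8(bits >> 24);   /* address */
                out[1] = bitrev8(bits >> 16);   /* not_address */
                out[2] = bitrev8(bits >> 8);    /* command */
                out[3] = bitrev8(bits >> 0);    /* not_command */
        }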
3152     diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
3153     index 5a28ce3a1d49..38dbc128340d 100644
3154     --- a/drivers/media/usb/as102/as102_fw.c
3155     +++ b/drivers/media/usb/as102/as102_fw.c
3156     @@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
3157     unsigned char *cmd,
3158     const struct firmware *firmware) {
3159    
3160     - struct as10x_fw_pkt_t fw_pkt;
3161     + struct as10x_fw_pkt_t *fw_pkt;
3162     int total_read_bytes = 0, errno = 0;
3163     unsigned char addr_has_changed = 0;
3164    
3165     + fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
3166     + if (!fw_pkt)
3167     + return -ENOMEM;
3168     +
3169     +
3170     for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
3171     int read_bytes = 0, data_len = 0;
3172    
3173     /* parse intel hex line */
3174     read_bytes = parse_hex_line(
3175     (u8 *) (firmware->data + total_read_bytes),
3176     - fw_pkt.raw.address,
3177     - fw_pkt.raw.data,
3178     + fw_pkt->raw.address,
3179     + fw_pkt->raw.data,
3180     &data_len,
3181     &addr_has_changed);
3182    
3183     @@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
3184     /* detect the end of file */
3185     total_read_bytes += read_bytes;
3186     if (total_read_bytes == firmware->size) {
3187     - fw_pkt.u.request[0] = 0x00;
3188     - fw_pkt.u.request[1] = 0x03;
3189     + fw_pkt->u.request[0] = 0x00;
3190     + fw_pkt->u.request[1] = 0x03;
3191    
3192     /* send EOF command */
3193     errno = bus_adap->ops->upload_fw_pkt(bus_adap,
3194     (uint8_t *)
3195     - &fw_pkt, 2, 0);
3196     + fw_pkt, 2, 0);
3197     if (errno < 0)
3198     goto error;
3199     } else {
3200     if (!addr_has_changed) {
3201     /* prepare command to send */
3202     - fw_pkt.u.request[0] = 0x00;
3203     - fw_pkt.u.request[1] = 0x01;
3204     + fw_pkt->u.request[0] = 0x00;
3205     + fw_pkt->u.request[1] = 0x01;
3206    
3207     - data_len += sizeof(fw_pkt.u.request);
3208     - data_len += sizeof(fw_pkt.raw.address);
3209     + data_len += sizeof(fw_pkt->u.request);
3210     + data_len += sizeof(fw_pkt->raw.address);
3211    
3212     /* send cmd to device */
3213     errno = bus_adap->ops->upload_fw_pkt(bus_adap,
3214     (uint8_t *)
3215     - &fw_pkt,
3216     + fw_pkt,
3217     data_len,
3218     0);
3219     if (errno < 0)
3220     @@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
3221     }
3222     }
3223     error:
3224     + kfree(fw_pkt);
3225     return (errno == 0) ? total_read_bytes : errno;
3226     }
3227    
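struct as10x_fw_pkt_t holds a whole firmware packet and is too large to live safely on the kernel stack, so it moves to the heap with a single kfree() on the shared exit path. The same shape in miniature (sizes and names illustrative):

        #include <stdlib.h>

        struct fw_pkt { unsigned char raw[4096]; }; /* too big for a stack frame */

        static int upload(const unsigned char *image, long size)
        {
                struct fw_pkt *pkt;
                int err = 0;

                (void)image; (void)size;
                pkt = malloc(sizeof(*pkt));     /* heap, not the stack */
                if (!pkt)
                        return -1;

                /* ... parse image into pkt and send it chunk by chunk,
                 * jumping to the cleanup below on any error ... */

                free(pkt);                      /* one exit path frees it */
                return err;
        }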
3228     diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
3229     index e0daa9b6c2a0..9b742d569fb5 100644
3230     --- a/drivers/media/usb/cx231xx/cx231xx-cards.c
3231     +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
3232     @@ -1684,7 +1684,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
3233     nr = dev->devno;
3234    
3235     assoc_desc = udev->actconfig->intf_assoc[0];
3236     - if (assoc_desc->bFirstInterface != ifnum) {
3237     + if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
3238     dev_err(d, "Not found matching IAD interface\n");
3239     retval = -ENODEV;
3240     goto err_if;
3241     diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
3242     index dd1db678718c..8033d6f73501 100644
3243     --- a/drivers/media/v4l2-core/v4l2-ctrls.c
3244     +++ b/drivers/media/v4l2-core/v4l2-ctrls.c
3245     @@ -1227,6 +1227,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
3246     }
3247     EXPORT_SYMBOL(v4l2_ctrl_fill);
3248    
3249     +static u32 user_flags(const struct v4l2_ctrl *ctrl)
3250     +{
3251     + u32 flags = ctrl->flags;
3252     +
3253     + if (ctrl->is_ptr)
3254     + flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
3255     +
3256     + return flags;
3257     +}
3258     +
3259     static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
3260     {
3261     memset(ev->reserved, 0, sizeof(ev->reserved));
3262     @@ -1234,7 +1244,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
3263     ev->id = ctrl->id;
3264     ev->u.ctrl.changes = changes;
3265     ev->u.ctrl.type = ctrl->type;
3266     - ev->u.ctrl.flags = ctrl->flags;
3267     + ev->u.ctrl.flags = user_flags(ctrl);
3268     if (ctrl->is_ptr)
3269     ev->u.ctrl.value64 = 0;
3270     else
3271     @@ -2577,10 +2587,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
3272     else
3273     qc->id = ctrl->id;
3274     strlcpy(qc->name, ctrl->name, sizeof(qc->name));
3275     - qc->flags = ctrl->flags;
3276     + qc->flags = user_flags(ctrl);
3277     qc->type = ctrl->type;
3278     - if (ctrl->is_ptr)
3279     - qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
3280     qc->elem_size = ctrl->elem_size;
3281     qc->elems = ctrl->elems;
3282     qc->nr_of_dims = ctrl->nr_of_dims;
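Factoring user_flags() out gives both user-visible paths, control queries and control-change events, one place that adds V4L2_CTRL_FLAG_HAS_PAYLOAD for pointer controls; before the fix the event path reported flags without it. The derive-don't-duplicate pattern in isolation (flag value illustrative):

        #include <stdbool.h>
        #include <stdint.h>

        #define FLAG_HAS_PAYLOAD 0x0100         /* illustrative bit */

        struct ctrl { uint32_t flags; bool is_ptr; };

        /* Single source of truth for externally visible flags. */
        static uint32_t user_flags(const struct ctrl *c)
        {
                uint32_t flags = c->flags;

                if (c->is_ptr)
                        flags |= FLAG_HAS_PAYLOAD;
                return flags;
        }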
3283     diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
3284     index 450ae36645aa..cf1120abbf52 100644
3285     --- a/drivers/mfd/lpc_ich.c
3286     +++ b/drivers/mfd/lpc_ich.c
3287     @@ -522,6 +522,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
3288     .name = "Avoton SoC",
3289     .iTCO_version = 3,
3290     .gpio_version = AVOTON_GPIO,
3291     + .spi_type = INTEL_SPI_BYT,
3292     },
3293     [LPC_BAYTRAIL] = {
3294     .name = "Bay Trail SoC",
3295     diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
3296     index 84b16133554b..0806f72102c0 100644
3297     --- a/drivers/mtd/devices/docg3.c
3298     +++ b/drivers/mtd/devices/docg3.c
3299     @@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor)
3300     struct dentry *root = floor->dbg.dfs_dir;
3301     struct docg3 *docg3 = floor->priv;
3302    
3303     - if (IS_ERR_OR_NULL(root))
3304     + if (IS_ERR_OR_NULL(root)) {
3305     + if (IS_ENABLED(CONFIG_DEBUG_FS) &&
3306     + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
3307     + dev_warn(floor->dev.parent,
3308     + "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
3309     return;
3310     + }
3311    
3312     debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3,
3313     &flashcontrol_fops);
3314     diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
3315     index f25eca79f4e5..68c9d98a3347 100644
3316     --- a/drivers/mtd/nand/atmel/nand-controller.c
3317     +++ b/drivers/mtd/nand/atmel/nand-controller.c
3318     @@ -2547,6 +2547,7 @@ static struct platform_driver atmel_nand_controller_driver = {
3319     .driver = {
3320     .name = "atmel-nand-controller",
3321     .of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
3322     + .pm = &atmel_nand_controller_pm_ops,
3323     },
3324     .probe = atmel_nand_controller_probe,
3325     .remove = atmel_nand_controller_remove,
3326     diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
3327     index 7f3b065b6b8f..c51d214d169e 100644
3328     --- a/drivers/mtd/nand/mtk_ecc.c
3329     +++ b/drivers/mtd/nand/mtk_ecc.c
3330     @@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
3331     op = ECC_DECODE;
3332     dec = readw(ecc->regs + ECC_DECDONE);
3333     if (dec & ecc->sectors) {
3334     + /*
3335     + * Clear decode IRQ status once again to ensure that
3336     + * there will be no extra IRQ.
3337     + */
3338     + readw(ecc->regs + ECC_DECIRQ_STA);
3339     ecc->sectors = 0;
3340     complete(&ecc->done);
3341     } else {
3342     @@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
3343     }
3344     }
3345    
3346     - writel(0, ecc->regs + ECC_IRQ_REG(op));
3347     -
3348     return IRQ_HANDLED;
3349     }
3350    
3351     @@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
3352    
3353     /* disable it */
3354     mtk_ecc_wait_idle(ecc, op);
3355     + if (op == ECC_DECODE)
3356     + /*
3357     + * Clear decode IRQ status in case waiting for the decode
3358     + * IRQ timed out.
3359     + */
3360     + readw(ecc->regs + ECC_DECIRQ_STA);
3361     writew(0, ecc->regs + ECC_IRQ_REG(op));
3362     writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
3363    
3364     diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
3365     index 12edaae17d81..3f1d806e590a 100644
3366     --- a/drivers/mtd/nand/nand_base.c
3367     +++ b/drivers/mtd/nand/nand_base.c
3368     @@ -1246,6 +1246,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
3369    
3370     return 0;
3371     }
3372     +EXPORT_SYMBOL_GPL(nand_reset);
3373    
3374     /**
3375     * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
3376     @@ -2799,15 +2800,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
3377     size_t *retlen, const uint8_t *buf)
3378     {
3379     struct nand_chip *chip = mtd_to_nand(mtd);
3380     + int chipnr = (int)(to >> chip->chip_shift);
3381     struct mtd_oob_ops ops;
3382     int ret;
3383    
3384     - /* Wait for the device to get ready */
3385     - panic_nand_wait(mtd, chip, 400);
3386     -
3387     /* Grab the device */
3388     panic_nand_get_device(chip, mtd, FL_WRITING);
3389    
3390     + chip->select_chip(mtd, chipnr);
3391     +
3392     + /* Wait for the device to get ready */
3393     + panic_nand_wait(mtd, chip, 400);
3394     +
3395     memset(&ops, 0, sizeof(ops));
3396     ops.len = len;
3397     ops.datbuf = (uint8_t *)buf;
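panic_nand_wait() polls whichever die is currently selected, so on a multi-die chip the wait must come after select_chip() picks the die that the write offset lands on; the reordering above does exactly that. The offset-to-die computation on its own (field names as in struct nand_chip):

        #include <stdint.h>

        /* chip_shift is log2(bytes per die), so the die index is just
         * the offset shifted down. */
        static int chipnr_for_offset(uint64_t to, unsigned int chip_shift)
        {
                return (int)(to >> chip_shift);
        }

        /* Fixed ordering: get device -> select_chip(chipnr) ->
         * wait for ready -> write. */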
3398     diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
3399     index 246b4393118e..44322a363ba5 100644
3400     --- a/drivers/mtd/nand/nandsim.c
3401     +++ b/drivers/mtd/nand/nandsim.c
3402     @@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev)
3403     struct dentry *root = nsmtd->dbg.dfs_dir;
3404     struct dentry *dent;
3405    
3406     - if (!IS_ENABLED(CONFIG_DEBUG_FS))
3407     + /*
3408     + * Just skip debugfs initialization when the debugfs directory is
3409     + * missing.
3410     + */
3411     + if (IS_ERR_OR_NULL(root)) {
3412     + if (IS_ENABLED(CONFIG_DEBUG_FS) &&
3413     + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
3414     + NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
3415     return 0;
3416     -
3417     - if (IS_ERR_OR_NULL(root))
3418     - return -1;
3419     + }
3420    
3421     dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
3422     root, dev, &dfs_fops);
3423     diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
3424     index 54540c8fa1a2..9f98f74ff221 100644
3425     --- a/drivers/mtd/nand/omap2.c
3426     +++ b/drivers/mtd/nand/omap2.c
3427     @@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
3428     0x97, 0x79, 0xe5, 0x24, 0xb5};
3429    
3430     /**
3431     - * omap_calculate_ecc_bch - Generate bytes of ECC bytes
3432     + * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
3433     * @mtd: MTD device structure
3434     * @dat: The pointer to data on which ecc is computed
3435     * @ecc_code: The ecc_code buffer
3436     + * @i: The sector number (for a multi sector page)
3437     + * @i: The sector number (for a multi-sector page)
3438     - * Support calculating of BCH4/8 ecc vectors for the page
3439     + * Support calculation of BCH4/8/16 ECC vectors for one sector
3440     + * within a page. Sector number is in @i.
3441     */
3442     -static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
3443     - const u_char *dat, u_char *ecc_calc)
3444     +static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
3445     + const u_char *dat, u_char *ecc_calc, int i)
3446     {
3447     struct omap_nand_info *info = mtd_to_omap(mtd);
3448     int eccbytes = info->nand.ecc.bytes;
3449     struct gpmc_nand_regs *gpmc_regs = &info->reg;
3450     u8 *ecc_code;
3451     - unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
3452     + unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
3453     u32 val;
3454     - int i, j;
3455     + int j;
3456     +
3457     + ecc_code = ecc_calc;
3458     + switch (info->ecc_opt) {
3459     + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
3460     + case OMAP_ECC_BCH8_CODE_HW:
3461     + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
3462     + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
3463     + bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
3464     + bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
3465     + *ecc_code++ = (bch_val4 & 0xFF);
3466     + *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
3467     + *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
3468     + *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
3469     + *ecc_code++ = (bch_val3 & 0xFF);
3470     + *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
3471     + *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
3472     + *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
3473     + *ecc_code++ = (bch_val2 & 0xFF);
3474     + *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
3475     + *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
3476     + *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
3477     + *ecc_code++ = (bch_val1 & 0xFF);
3478     + break;
3479     + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
3480     + case OMAP_ECC_BCH4_CODE_HW:
3481     + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
3482     + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
3483     + *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
3484     + *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
3485     + *ecc_code++ = ((bch_val2 & 0xF) << 4) |
3486     + ((bch_val1 >> 28) & 0xF);
3487     + *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
3488     + *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
3489     + *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
3490     + *ecc_code++ = ((bch_val1 & 0xF) << 4);
3491     + break;
3492     + case OMAP_ECC_BCH16_CODE_HW:
3493     + val = readl(gpmc_regs->gpmc_bch_result6[i]);
3494     + ecc_code[0] = ((val >> 8) & 0xFF);
3495     + ecc_code[1] = ((val >> 0) & 0xFF);
3496     + val = readl(gpmc_regs->gpmc_bch_result5[i]);
3497     + ecc_code[2] = ((val >> 24) & 0xFF);
3498     + ecc_code[3] = ((val >> 16) & 0xFF);
3499     + ecc_code[4] = ((val >> 8) & 0xFF);
3500     + ecc_code[5] = ((val >> 0) & 0xFF);
3501     + val = readl(gpmc_regs->gpmc_bch_result4[i]);
3502     + ecc_code[6] = ((val >> 24) & 0xFF);
3503     + ecc_code[7] = ((val >> 16) & 0xFF);
3504     + ecc_code[8] = ((val >> 8) & 0xFF);
3505     + ecc_code[9] = ((val >> 0) & 0xFF);
3506     + val = readl(gpmc_regs->gpmc_bch_result3[i]);
3507     + ecc_code[10] = ((val >> 24) & 0xFF);
3508     + ecc_code[11] = ((val >> 16) & 0xFF);
3509     + ecc_code[12] = ((val >> 8) & 0xFF);
3510     + ecc_code[13] = ((val >> 0) & 0xFF);
3511     + val = readl(gpmc_regs->gpmc_bch_result2[i]);
3512     + ecc_code[14] = ((val >> 24) & 0xFF);
3513     + ecc_code[15] = ((val >> 16) & 0xFF);
3514     + ecc_code[16] = ((val >> 8) & 0xFF);
3515     + ecc_code[17] = ((val >> 0) & 0xFF);
3516     + val = readl(gpmc_regs->gpmc_bch_result1[i]);
3517     + ecc_code[18] = ((val >> 24) & 0xFF);
3518     + ecc_code[19] = ((val >> 16) & 0xFF);
3519     + ecc_code[20] = ((val >> 8) & 0xFF);
3520     + ecc_code[21] = ((val >> 0) & 0xFF);
3521     + val = readl(gpmc_regs->gpmc_bch_result0[i]);
3522     + ecc_code[22] = ((val >> 24) & 0xFF);
3523     + ecc_code[23] = ((val >> 16) & 0xFF);
3524     + ecc_code[24] = ((val >> 8) & 0xFF);
3525     + ecc_code[25] = ((val >> 0) & 0xFF);
3526     + break;
3527     + default:
3528     + return -EINVAL;
3529     + }
3530     +
3531     + /* ECC scheme specific syndrome customizations */
3532     + switch (info->ecc_opt) {
3533     + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
3534     + /* Add constant polynomial to remainder, so that
3535     + * ECC of blank pages results in 0x0 on reading back
3536     + */
3537     + for (j = 0; j < eccbytes; j++)
3538     + ecc_calc[j] ^= bch4_polynomial[j];
3539     + break;
3540     + case OMAP_ECC_BCH4_CODE_HW:
3541     + /* Set 8th ECC byte as 0x0 for ROM compatibility */
3542     + ecc_calc[eccbytes - 1] = 0x0;
3543     + break;
3544     + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
3545     + /* Add constant polynomial to remainder, so that
3546     + * ECC of blank pages results in 0x0 on reading back
3547     + */
3548     + for (j = 0; j < eccbytes; j++)
3549     + ecc_calc[j] ^= bch8_polynomial[j];
3550     + break;
3551     + case OMAP_ECC_BCH8_CODE_HW:
3552     + /* Set 14th ECC byte as 0x0 for ROM compatibility */
3553     + ecc_calc[eccbytes - 1] = 0x0;
3554     + break;
3555     + case OMAP_ECC_BCH16_CODE_HW:
3556     + break;
3557     + default:
3558     + return -EINVAL;
3559     + }
3560     +
3561     + return 0;
3562     +}
3563     +
3564     +/**
3565     + * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
3566     + * @mtd: MTD device structure
3567     + * @dat: The pointer to data on which ecc is computed
3568     + * @ecc_code: The ecc_code buffer
3569     + *
3570     + * Support calculation of BCH4/8/16 ECC vectors for one sector. This is
3571     + * used when SW-based correction is required, as ECC is then needed for
3572     + * one sector at a time.
3573     + */
3574     +static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
3575     + const u_char *dat, u_char *ecc_calc)
3576     +{
3577     + return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
3578     +}
3579     +
3580     +/**
3581     + * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
3582     + * @mtd: MTD device structure
3583     + * @dat: The pointer to data on which ecc is computed
3584     + * @ecc_code: The ecc_code buffer
3585     + *
3586     + * Support calculation of BCH4/8/16 ECC vectors for the entire page in one go.
3587     + */
3588     +static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
3589     + const u_char *dat, u_char *ecc_calc)
3590     +{
3591     + struct omap_nand_info *info = mtd_to_omap(mtd);
3592     + int eccbytes = info->nand.ecc.bytes;
3593     + unsigned long nsectors;
3594     + int i, ret;
3595    
3596     nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
3597     for (i = 0; i < nsectors; i++) {
3598     - ecc_code = ecc_calc;
3599     - switch (info->ecc_opt) {
3600     - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
3601     - case OMAP_ECC_BCH8_CODE_HW:
3602     - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
3603     - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
3604     - bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
3605     - bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
3606     - *ecc_code++ = (bch_val4 & 0xFF);
3607     - *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
3608     - *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
3609     - *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
3610     - *ecc_code++ = (bch_val3 & 0xFF);
3611     - *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
3612     - *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
3613     - *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
3614     - *ecc_code++ = (bch_val2 & 0xFF);
3615     - *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
3616     - *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
3617     - *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
3618     - *ecc_code++ = (bch_val1 & 0xFF);
3619     - break;
3620     - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
3621     - case OMAP_ECC_BCH4_CODE_HW:
3622     - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
3623     - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
3624     - *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
3625     - *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
3626     - *ecc_code++ = ((bch_val2 & 0xF) << 4) |
3627     - ((bch_val1 >> 28) & 0xF);
3628     - *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
3629     - *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
3630     - *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
3631     - *ecc_code++ = ((bch_val1 & 0xF) << 4);
3632     - break;
3633     - case OMAP_ECC_BCH16_CODE_HW:
3634     - val = readl(gpmc_regs->gpmc_bch_result6[i]);
3635     - ecc_code[0] = ((val >> 8) & 0xFF);
3636     - ecc_code[1] = ((val >> 0) & 0xFF);
3637     - val = readl(gpmc_regs->gpmc_bch_result5[i]);
3638     - ecc_code[2] = ((val >> 24) & 0xFF);
3639     - ecc_code[3] = ((val >> 16) & 0xFF);
3640     - ecc_code[4] = ((val >> 8) & 0xFF);
3641     - ecc_code[5] = ((val >> 0) & 0xFF);
3642     - val = readl(gpmc_regs->gpmc_bch_result4[i]);
3643     - ecc_code[6] = ((val >> 24) & 0xFF);
3644     - ecc_code[7] = ((val >> 16) & 0xFF);
3645     - ecc_code[8] = ((val >> 8) & 0xFF);
3646     - ecc_code[9] = ((val >> 0) & 0xFF);
3647     - val = readl(gpmc_regs->gpmc_bch_result3[i]);
3648     - ecc_code[10] = ((val >> 24) & 0xFF);
3649     - ecc_code[11] = ((val >> 16) & 0xFF);
3650     - ecc_code[12] = ((val >> 8) & 0xFF);
3651     - ecc_code[13] = ((val >> 0) & 0xFF);
3652     - val = readl(gpmc_regs->gpmc_bch_result2[i]);
3653     - ecc_code[14] = ((val >> 24) & 0xFF);
3654     - ecc_code[15] = ((val >> 16) & 0xFF);
3655     - ecc_code[16] = ((val >> 8) & 0xFF);
3656     - ecc_code[17] = ((val >> 0) & 0xFF);
3657     - val = readl(gpmc_regs->gpmc_bch_result1[i]);
3658     - ecc_code[18] = ((val >> 24) & 0xFF);
3659     - ecc_code[19] = ((val >> 16) & 0xFF);
3660     - ecc_code[20] = ((val >> 8) & 0xFF);
3661     - ecc_code[21] = ((val >> 0) & 0xFF);
3662     - val = readl(gpmc_regs->gpmc_bch_result0[i]);
3663     - ecc_code[22] = ((val >> 24) & 0xFF);
3664     - ecc_code[23] = ((val >> 16) & 0xFF);
3665     - ecc_code[24] = ((val >> 8) & 0xFF);
3666     - ecc_code[25] = ((val >> 0) & 0xFF);
3667     - break;
3668     - default:
3669     - return -EINVAL;
3670     - }
3671     -
3672     - /* ECC scheme specific syndrome customizations */
3673     - switch (info->ecc_opt) {
3674     - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
3675     - /* Add constant polynomial to remainder, so that
3676     - * ECC of blank pages results in 0x0 on reading back */
3677     - for (j = 0; j < eccbytes; j++)
3678     - ecc_calc[j] ^= bch4_polynomial[j];
3679     - break;
3680     - case OMAP_ECC_BCH4_CODE_HW:
3681     - /* Set 8th ECC byte as 0x0 for ROM compatibility */
3682     - ecc_calc[eccbytes - 1] = 0x0;
3683     - break;
3684     - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
3685     - /* Add constant polynomial to remainder, so that
3686     - * ECC of blank pages results in 0x0 on reading back */
3687     - for (j = 0; j < eccbytes; j++)
3688     - ecc_calc[j] ^= bch8_polynomial[j];
3689     - break;
3690     - case OMAP_ECC_BCH8_CODE_HW:
3691     - /* Set 14th ECC byte as 0x0 for ROM compatibility */
3692     - ecc_calc[eccbytes - 1] = 0x0;
3693     - break;
3694     - case OMAP_ECC_BCH16_CODE_HW:
3695     - break;
3696     - default:
3697     - return -EINVAL;
3698     - }
3699     + ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
3700     + if (ret)
3701     + return ret;
3702    
3703     - ecc_calc += eccbytes;
3704     + ecc_calc += eccbytes;
3705     }
3706    
3707     return 0;
3708     @@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
3709     chip->write_buf(mtd, buf, mtd->writesize);
3710    
3711     /* Update ecc vector from GPMC result registers */
3712     - chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
3713     + omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
3714    
3715     ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3716     chip->ecc.total);
3717     @@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
3718     return 0;
3719     }
3720    
3721     +/**
3722     + * omap_write_subpage_bch - BCH hardware ECC based subpage write
3723     + * @mtd: mtd info structure
3724     + * @chip: nand chip info structure
3725     + * @offset: column address of subpage within the page
3726     + * @data_len: data length
3727     + * @buf: data buffer
3728     + * @oob_required: must write chip->oob_poi to OOB
3729     + * @page: page number to write
3730     + *
3731     + * OMAP optimized subpage write method.
3732     + */
3733     +static int omap_write_subpage_bch(struct mtd_info *mtd,
3734     + struct nand_chip *chip, u32 offset,
3735     + u32 data_len, const u8 *buf,
3736     + int oob_required, int page)
3737     +{
3738     + u8 *ecc_calc = chip->buffers->ecccalc;
3739     + int ecc_size = chip->ecc.size;
3740     + int ecc_bytes = chip->ecc.bytes;
3741     + int ecc_steps = chip->ecc.steps;
3742     + u32 start_step = offset / ecc_size;
3743     + u32 end_step = (offset + data_len - 1) / ecc_size;
3744     + int step, ret = 0;
3745     +
3746     + /*
3747     + * Write the entire page in one go, which is optimal since the
3748     + * ECC is calculated by hardware for the whole page anyway.
3749     + * ECC is produced for every subpage; we then keep only the
3750     + * subpages we actually wrote.
3751     + */
3752     +
3753     + /* Enable GPMC ECC engine */
3754     + chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
3755     +
3756     + /* Write data */
3757     + chip->write_buf(mtd, buf, mtd->writesize);
3758     +
3759     + for (step = 0; step < ecc_steps; step++) {
3760     + /* mask ECC of un-touched subpages by padding 0xFF */
3761     + if (step < start_step || step > end_step)
3762     + memset(ecc_calc, 0xff, ecc_bytes);
3763     + else
3764     + ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
3765     +
3766     + if (ret)
3767     + return ret;
3768     +
3769     + buf += ecc_size;
3770     + ecc_calc += ecc_bytes;
3771     + }
3772     +
3773     + /* copy the calculated ECC for the whole page to chip->buffer->oob */
3774     + /* this includes the masked value (0xFF) for unwritten subpages */
3775     + ecc_calc = chip->buffers->ecccalc;
3776     + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3777     + chip->ecc.total);
3778     + if (ret)
3779     + return ret;
3780     +
3781     + /* write OOB buffer to NAND device */
3782     + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
3783     +
3784     + return 0;
3785     +}
3786     +
3787     /**
3788     * omap_read_page_bch - BCH ecc based page read function for entire page
3789     * @mtd: mtd info structure
3790     @@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
3791     chip->ecc.total);
3792    
3793     /* Calculate ecc bytes */
3794     - chip->ecc.calculate(mtd, buf, ecc_calc);
3795     + omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
3796    
3797     ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3798     chip->ecc.total);
3799     @@ -2044,7 +2153,7 @@ static int omap_nand_probe(struct platform_device *pdev)
3800     nand_chip->ecc.strength = 4;
3801     nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
3802     nand_chip->ecc.correct = nand_bch_correct_data;
3803     - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
3804     + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
3805     mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
3806     /* Reserve one byte for the OMAP marker */
3807     oobbytes_per_step = nand_chip->ecc.bytes + 1;
3808     @@ -2066,9 +2175,9 @@ static int omap_nand_probe(struct platform_device *pdev)
3809     nand_chip->ecc.strength = 4;
3810     nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
3811     nand_chip->ecc.correct = omap_elm_correct_data;
3812     - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
3813     nand_chip->ecc.read_page = omap_read_page_bch;
3814     nand_chip->ecc.write_page = omap_write_page_bch;
3815     + nand_chip->ecc.write_subpage = omap_write_subpage_bch;
3816     mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
3817     oobbytes_per_step = nand_chip->ecc.bytes;
3818    
3819     @@ -2087,7 +2196,7 @@ static int omap_nand_probe(struct platform_device *pdev)
3820     nand_chip->ecc.strength = 8;
3821     nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
3822     nand_chip->ecc.correct = nand_bch_correct_data;
3823     - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
3824     + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
3825     mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
3826     /* Reserve one byte for the OMAP marker */
3827     oobbytes_per_step = nand_chip->ecc.bytes + 1;
3828     @@ -2109,9 +2218,9 @@ static int omap_nand_probe(struct platform_device *pdev)
3829     nand_chip->ecc.strength = 8;
3830     nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
3831     nand_chip->ecc.correct = omap_elm_correct_data;
3832     - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
3833     nand_chip->ecc.read_page = omap_read_page_bch;
3834     nand_chip->ecc.write_page = omap_write_page_bch;
3835     + nand_chip->ecc.write_subpage = omap_write_subpage_bch;
3836     mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
3837     oobbytes_per_step = nand_chip->ecc.bytes;
3838    
3839     @@ -2131,9 +2240,9 @@ static int omap_nand_probe(struct platform_device *pdev)
3840     nand_chip->ecc.strength = 16;
3841     nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
3842     nand_chip->ecc.correct = omap_elm_correct_data;
3843     - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
3844     nand_chip->ecc.read_page = omap_read_page_bch;
3845     nand_chip->ecc.write_page = omap_write_page_bch;
3846     + nand_chip->ecc.write_subpage = omap_write_subpage_bch;
3847     mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
3848     oobbytes_per_step = nand_chip->ecc.bytes;
3849    
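omap_write_subpage_bch() still writes the full page, since the GPMC engine computes ECC for every sector regardless, and then blanks the ECC of sectors outside [start_step, end_step] with 0xFF so they read back as erased and are skipped by correction. The masking loop in isolation (the ECC width is illustrative):

        #include <stdint.h>
        #include <string.h>

        #define ECC_BYTES 14    /* e.g. BCH8 plus ROM pad byte; illustrative */

        /* Keep ECC only for the sectors actually written; 0xFF-fill the
         * rest so the corrector treats them as erased on read-back. */
        static void mask_untouched(uint8_t *ecc, int steps, int first, int last)
        {
                for (int step = 0; step < steps; step++)
                        if (step < first || step > last)
                                memset(ecc + step * ECC_BYTES, 0xff, ECC_BYTES);
        }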
3850     diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
3851     index 8a596bfeddff..7802ac3ba934 100644
3852     --- a/drivers/mtd/spi-nor/intel-spi.c
3853     +++ b/drivers/mtd/spi-nor/intel-spi.c
3854     @@ -422,7 +422,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
3855     if (ret < 0)
3856     return ret;
3857    
3858     - val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
3859     + val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
3860     val |= ret << SSFSTS_CTL_COP_SHIFT;
3861     val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
3862     val |= SSFSTS_CTL_SCGO;
3863     @@ -432,7 +432,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
3864     if (ret)
3865     return ret;
3866    
3867     - status = readl(ispi->base + SSFSTS_CTL);
3868     + status = readl(ispi->sregs + SSFSTS_CTL);
3869     if (status & SSFSTS_CTL_FCERR)
3870     return -EIO;
3871     else if (status & SSFSTS_CTL_AEL)
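Two corrections in one hunk: the DBC (data byte count) field of SSFSTS_CTL encodes "bytes minus one", so a transfer of len bytes must program len - 1, and the completion status must be read back from the sregs block where the register actually lives, not from base. The N-1 encoding on its own (shift value illustrative; a k-bit field then covers 1..2^k bytes rather than 0..2^k - 1):

        #include <stdint.h>

        #define DBC_SHIFT 16    /* stand-in for SSFSTS_CTL_DBC_SHIFT */

        /* Caller guarantees 1 <= len <= field capacity. */
        static uint32_t encode_dbc(uint32_t len)
        {
                return (len - 1) << DBC_SHIFT;
        }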
3872     diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
3873     index 0641c0098738..afb7ebe20b24 100644
3874     --- a/drivers/net/ethernet/intel/e1000e/defines.h
3875     +++ b/drivers/net/ethernet/intel/e1000e/defines.h
3876     @@ -398,6 +398,7 @@
3877     #define E1000_ICR_LSC 0x00000004 /* Link Status Change */
3878     #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
3879     #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
3880     +#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */
3881     #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
3882     #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
3883     /* If this bit asserted, the driver should claim the interrupt */
3884     diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
3885     index b322011ec282..f457c5703d0c 100644
3886     --- a/drivers/net/ethernet/intel/e1000e/mac.c
3887     +++ b/drivers/net/ethernet/intel/e1000e/mac.c
3888     @@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
3889     * Checks to see of the link status of the hardware has changed. If a
3890     * change in link status has been detected, then we read the PHY registers
3891     * to get the current speed/duplex if link exists.
3892     + *
3893     + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
3894     + * up).
3895     **/
3896     s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
3897     {
3898     @@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
3899     * Change or Rx Sequence Error interrupt.
3900     */
3901     if (!mac->get_link_status)
3902     - return 0;
3903     + return 1;
3904    
3905     /* First we want to see if the MII Status Register reports
3906     * link. If so, then we want to get the current speed/duplex
3907     @@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
3908     * different link partner.
3909     */
3910     ret_val = e1000e_config_fc_after_link_up(hw);
3911     - if (ret_val)
3912     + if (ret_val) {
3913     e_dbg("Error configuring flow control\n");
3914     + return ret_val;
3915     + }
3916    
3917     - return ret_val;
3918     + return 1;
3919     }
3920    
3921     /**
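e1000e_check_for_copper_link() now has tri-state semantics: negative for an error, 0 for link down, 1 for link up, and the netdev.c hunk below switches e1000e_has_link() to test ret_val > 0 instead of inferring the state from get_link_status. Consuming such a return looks like this (the stub stands in for the real check):

        /* Stub for the tri-state check: <0 error, 0 down, 1 up. */
        static int check_link(void)
        {
                return 1;
        }

        /* Mirrors the e1000e_has_link() change: only a strictly
         * positive return means the link is up; errors and "down"
         * both count as inactive. */
        static int link_active(void)
        {
                return check_link() > 0;
        }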
3922     diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
3923     index 327dfe5bedc0..c38b00c90f48 100644
3924     --- a/drivers/net/ethernet/intel/e1000e/netdev.c
3925     +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
3926     @@ -1910,14 +1910,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
3927     struct net_device *netdev = data;
3928     struct e1000_adapter *adapter = netdev_priv(netdev);
3929     struct e1000_hw *hw = &adapter->hw;
3930     + u32 icr;
3931     + bool enable = true;
3932     +
3933     + icr = er32(ICR);
3934     + if (icr & E1000_ICR_RXO) {
3935     + ew32(ICR, E1000_ICR_RXO);
3936     + enable = false;
3937     + /* napi poll will re-enable Other, make sure it runs */
3938     + if (napi_schedule_prep(&adapter->napi)) {
3939     + adapter->total_rx_bytes = 0;
3940     + adapter->total_rx_packets = 0;
3941     + __napi_schedule(&adapter->napi);
3942     + }
3943     + }
3944     + if (icr & E1000_ICR_LSC) {
3945     + ew32(ICR, E1000_ICR_LSC);
3946     + hw->mac.get_link_status = true;
3947     + /* guard against interrupt when we're going down */
3948     + if (!test_bit(__E1000_DOWN, &adapter->state))
3949     + mod_timer(&adapter->watchdog_timer, jiffies + 1);
3950     + }
3951    
3952     - hw->mac.get_link_status = true;
3953     -
3954     - /* guard against interrupt when we're going down */
3955     - if (!test_bit(__E1000_DOWN, &adapter->state)) {
3956     - mod_timer(&adapter->watchdog_timer, jiffies + 1);
3957     + if (enable && !test_bit(__E1000_DOWN, &adapter->state))
3958     ew32(IMS, E1000_IMS_OTHER);
3959     - }
3960    
3961     return IRQ_HANDLED;
3962     }
3963     @@ -2687,7 +2703,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
3964     napi_complete_done(napi, work_done);
3965     if (!test_bit(__E1000_DOWN, &adapter->state)) {
3966     if (adapter->msix_entries)
3967     - ew32(IMS, adapter->rx_ring->ims_val);
3968     + ew32(IMS, adapter->rx_ring->ims_val |
3969     + E1000_IMS_OTHER);
3970     else
3971     e1000_irq_enable(adapter);
3972     }
3973     @@ -3004,8 +3021,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
3974    
3975     hw->mac.ops.config_collision_dist(hw);
3976    
3977     - /* SPT and CNP Si errata workaround to avoid data corruption */
3978     - if (hw->mac.type >= e1000_pch_spt) {
3979     + /* SPT and KBL Si errata workaround to avoid data corruption */
3980     + if (hw->mac.type == e1000_pch_spt) {
3981     u32 reg_val;
3982    
3983     reg_val = er32(IOSFPC);
3984     @@ -3013,7 +3030,9 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
3985     ew32(IOSFPC, reg_val);
3986    
3987     reg_val = er32(TARC(0));
3988     - reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ;
3989     + /* SPT and KBL Si errata workaround to avoid Tx hang */
3990     + reg_val &= ~BIT(28);
3991     + reg_val |= BIT(29);
3992     ew32(TARC(0), reg_val);
3993     }
3994     }
3995     @@ -4204,7 +4223,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter)
3996     struct e1000_hw *hw = &adapter->hw;
3997    
3998     if (adapter->msix_entries)
3999     - ew32(ICS, E1000_ICS_OTHER);
4000     + ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
4001     else
4002     ew32(ICS, E1000_ICS_LSC);
4003     }
4004     @@ -5081,7 +5100,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
4005     case e1000_media_type_copper:
4006     if (hw->mac.get_link_status) {
4007     ret_val = hw->mac.ops.check_for_link(hw);
4008     - link_active = !hw->mac.get_link_status;
4009     + link_active = ret_val > 0;
4010     } else {
4011     link_active = true;
4012     }
4013     @@ -5099,7 +5118,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
4014     break;
4015     }
4016    
4017     - if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4018     + if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4019     (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4020     /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4021     e_info("Gigabit has been disabled, downgrading speed\n");
4022     diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
4023     index d78d47b41a71..86ff0969efb6 100644
4024     --- a/drivers/net/ethernet/intel/e1000e/phy.c
4025     +++ b/drivers/net/ethernet/intel/e1000e/phy.c
4026     @@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
4027     s32 ret_val = 0;
4028     u16 i, phy_status;
4029    
4030     + *success = false;
4031     for (i = 0; i < iterations; i++) {
4032     /* Some PHYs require the MII_BMSR register to be read
4033     * twice due to the link bit being sticky. No harm doing
4034     @@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
4035     ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
4036     if (ret_val)
4037     break;
4038     - if (phy_status & BMSR_LSTATUS)
4039     + if (phy_status & BMSR_LSTATUS) {
4040     + *success = true;
4041     break;
4042     + }
4043     if (usec_interval >= 1000)
4044     msleep(usec_interval / 1000);
4045     else
4046     udelay(usec_interval);
4047     }
4048    
4049     - *success = (i < iterations);
4050     -
4051     return ret_val;
4052     }
4053    
4054     diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
4055     index 9dffaba85ae6..103c0a742d03 100644
4056     --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
4057     +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
4058     @@ -1229,7 +1229,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
4059     break;
4060    
4061     /* prevent any other reads prior to eop_desc */
4062     - read_barrier_depends();
4063     + smp_rmb();
4064    
4065     /* if DD is not set pending work has not been completed */
4066     if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
4067     diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
4068     index 6498da8806cb..ea20aacd5e1d 100644
4069     --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
4070     +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
4071     @@ -3760,7 +3760,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4072     break;
4073    
4074     /* prevent any other reads prior to eop_desc */
4075     - read_barrier_depends();
4076     + smp_rmb();
4077    
4078     /* if the descriptor isn't done, no work yet to do */
4079     if (!(eop_desc->cmd_type_offset_bsz &
4080     diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
4081     index 120c68f78951..3c07ff171ddc 100644
4082     --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
4083     +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
4084     @@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
4085     break;
4086    
4087     /* prevent any other reads prior to eop_desc */
4088     - read_barrier_depends();
4089     + smp_rmb();
4090    
4091     i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
4092     /* we have caught up to head, no work left to do */
4093     diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
4094     index c32c62462c84..07a4e6e13925 100644
4095     --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
4096     +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
4097     @@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
4098     break;
4099    
4100     /* prevent any other reads prior to eop_desc */
4101     - read_barrier_depends();
4102     + smp_rmb();
4103    
4104     i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
4105     /* if the descriptor isn't done, no work yet to do */
4106     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
4107     index ea69af267d63..b0031c5ff767 100644
4108     --- a/drivers/net/ethernet/intel/igb/igb_main.c
4109     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
4110     @@ -6970,7 +6970,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
4111     break;
4112    
4113     /* prevent any other reads prior to eop_desc */
4114     - read_barrier_depends();
4115     + smp_rmb();
4116    
4117     /* if DD is not set pending work has not been completed */
4118     if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
4119     diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
4120     index 1ed556911b14..6f5888bd9194 100644
4121     --- a/drivers/net/ethernet/intel/igbvf/netdev.c
4122     +++ b/drivers/net/ethernet/intel/igbvf/netdev.c
4123     @@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
4124     break;
4125    
4126     /* prevent any other reads prior to eop_desc */
4127     - read_barrier_depends();
4128     + smp_rmb();
4129    
4130     /* if DD is not set pending work has not been completed */
4131     if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
4132     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4133     index 6d5f31e94358..879a9c4cef59 100644
4134     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4135     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4136     @@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
4137     break;
4138    
4139     /* prevent any other reads prior to eop_desc */
4140     - read_barrier_depends();
4141     + smp_rmb();
4142    
4143     /* if DD is not set pending work has not been completed */
4144     if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
4145     diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
4146     index 032f8ac06357..90ecc4b06462 100644
4147     --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
4148     +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
4149     @@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
4150     break;
4151    
4152     /* prevent any other reads prior to eop_desc */
4153     - read_barrier_depends();
4154     + smp_rmb();
4155    
4156     /* if DD is not set pending work has not been completed */
4157     if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
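
[Editor's sketch] The six hunks above make the same substitution across the Intel Ethernet Tx-cleanup loops. read_barrier_depends() orders only data-dependent loads, and the later reads of the descriptor and its buffer do not data-depend on the DD/done flag that was just tested, so the weaker barrier lets them be satisfied early; smp_rmb() orders all earlier reads before all later ones. A minimal userspace model of the pattern, with C11 atomics standing in for the kernel's READ_ONCE()/smp_rmb() (the struct layout, names, and DD bit below are illustrative, not the drivers' actual definitions):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define STAT_DD 0x1u  /* "descriptor done" writeback bit (illustrative) */

    struct tx_desc {
        _Atomic uint32_t status;  /* written last by the producer side */
        uint32_t len;             /* payload, read only after DD is seen */
    };

    static bool clean_one(struct tx_desc *eop_desc, uint32_t *len_out)
    {
        /* analogue of: if (!(eop_desc->wb.status & cpu_to_le32(*_STAT_DD))) */
        if (!(atomic_load_explicit(&eop_desc->status,
                                   memory_order_relaxed) & STAT_DD))
            return false;

        /* analogue of smp_rmb(): no later read may be satisfied before the
         * status read above.  A dependency-only barrier would not give this,
         * because 'len' does not data-depend on 'status'. */
        atomic_thread_fence(memory_order_acquire);

        *len_out = eop_desc->len;
        return true;
    }

    int main(void)
    {
        struct tx_desc d = { .len = 1500 };
        uint32_t len;

        atomic_store_explicit(&d.status, STAT_DD, memory_order_release);
        if (clean_one(&d, &len))
            printf("descriptor done, len %u\n", len);
        return 0;
    }
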
4158     diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
4159     index 64a04975bcf8..bc93b69cfd1e 100644
4160     --- a/drivers/net/ethernet/marvell/mvneta.c
4161     +++ b/drivers/net/ethernet/marvell/mvneta.c
4162     @@ -816,11 +816,14 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
4163     {
4164     u32 val;
4165    
4166     - /* Only 255 descriptors can be added at once ; Assume caller
4167     - * process TX desriptors in quanta less than 256
4168     - */
4169     - val = pend_desc + txq->pending;
4170     - mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
4171     + pend_desc += txq->pending;
4172     +
4173     + /* Only 255 Tx descriptors can be added at once */
4174     + do {
4175     + val = min(pend_desc, 255);
4176     + mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
4177     + pend_desc -= val;
4178     + } while (pend_desc > 0);
4179     txq->pending = 0;
4180     }
4181    
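
[Editor's sketch] The mvneta hunk replaces a single register write, which silently truncated counts above 255 (the width of the pending-descriptor field), with a loop that drains the count in chunks. The same loop in isolation, with a printf standing in for the mvreg_write() MMIO access:

    #include <stdio.h>

    /* Stand-in for mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val). */
    static void txq_update_write(unsigned int val)
    {
        printf("write %u pending descriptors to the update register\n", val);
    }

    /* Drain pend_desc descriptors, at most 255 per register write. */
    static void txq_pend_desc_add(unsigned int pend_desc)
    {
        unsigned int val;

        do {
            val = pend_desc < 255 ? pend_desc : 255;
            txq_update_write(val);
            pend_desc -= val;
        } while (pend_desc > 0);
    }

    int main(void)
    {
        txq_pend_desc_add(600);  /* -> writes of 255, 255, 90 */
        return 0;
    }
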
4182     diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
4183     index e8b5ff42f5a8..c8e7b54a538a 100644
4184     --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
4185     +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
4186     @@ -72,18 +72,21 @@
4187     #define IWL9000_SMEM_OFFSET 0x400000
4188     #define IWL9000_SMEM_LEN 0x68000
4189    
4190     -#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
4191     +#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
4192     +#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
4193     #define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
4194     #define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
4195     #define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
4196     -#define IWL9000_MODULE_FIRMWARE(api) \
4197     - IWL9000_FW_PRE "-" __stringify(api) ".ucode"
4198     +#define IWL9000A_MODULE_FIRMWARE(api) \
4199     + IWL9000A_FW_PRE __stringify(api) ".ucode"
4200     +#define IWL9000B_MODULE_FIRMWARE(api) \
4201     + IWL9000B_FW_PRE __stringify(api) ".ucode"
4202     #define IWL9000RFB_MODULE_FIRMWARE(api) \
4203     - IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode"
4204     + IWL9000RFB_FW_PRE __stringify(api) ".ucode"
4205     #define IWL9260A_MODULE_FIRMWARE(api) \
4206     - IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
4207     + IWL9260A_FW_PRE __stringify(api) ".ucode"
4208     #define IWL9260B_MODULE_FIRMWARE(api) \
4209     - IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
4210     + IWL9260B_FW_PRE __stringify(api) ".ucode"
4211    
4212     #define NVM_HW_SECTION_NUM_FAMILY_9000 10
4213    
4214     @@ -193,7 +196,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
4215     .nvm_ver = IWL9000_NVM_VERSION,
4216     .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4217     .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4218     +};
4219     +
4220     +const struct iwl_cfg iwl9460_2ac_cfg_soc = {
4221     + .name = "Intel(R) Dual Band Wireless AC 9460",
4222     + .fw_name_pre = IWL9000A_FW_PRE,
4223     + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
4224     + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
4225     + IWL_DEVICE_9000,
4226     + .ht_params = &iwl9000_ht_params,
4227     + .nvm_ver = IWL9000_NVM_VERSION,
4228     + .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4229     + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4230     .integrated = true,
4231     + .soc_latency = 5000,
4232     +};
4233     +
4234     +const struct iwl_cfg iwl9461_2ac_cfg_soc = {
4235     + .name = "Intel(R) Dual Band Wireless AC 9461",
4236     + .fw_name_pre = IWL9000A_FW_PRE,
4237     + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
4238     + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
4239     + IWL_DEVICE_9000,
4240     + .ht_params = &iwl9000_ht_params,
4241     + .nvm_ver = IWL9000_NVM_VERSION,
4242     + .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4243     + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4244     + .integrated = true,
4245     + .soc_latency = 5000,
4246     +};
4247     +
4248     +const struct iwl_cfg iwl9462_2ac_cfg_soc = {
4249     + .name = "Intel(R) Dual Band Wireless AC 9462",
4250     + .fw_name_pre = IWL9000A_FW_PRE,
4251     + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
4252     + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
4253     + IWL_DEVICE_9000,
4254     + .ht_params = &iwl9000_ht_params,
4255     + .nvm_ver = IWL9000_NVM_VERSION,
4256     + .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4257     + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4258     + .integrated = true,
4259     + .soc_latency = 5000,
4260     };
4261    
4262     const struct iwl_cfg iwl9560_2ac_cfg = {
4263     @@ -205,10 +249,23 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
4264     .nvm_ver = IWL9000_NVM_VERSION,
4265     .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4266     .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4267     - .integrated = true,
4268     };
4269    
4270     -MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
4271     +const struct iwl_cfg iwl9560_2ac_cfg_soc = {
4272     + .name = "Intel(R) Dual Band Wireless AC 9560",
4273     + .fw_name_pre = IWL9000A_FW_PRE,
4274     + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
4275     + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
4276     + IWL_DEVICE_9000,
4277     + .ht_params = &iwl9000_ht_params,
4278     + .nvm_ver = IWL9000_NVM_VERSION,
4279     + .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
4280     + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
4281     + .integrated = true,
4282     + .soc_latency = 5000,
4283     +};
4284     +MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
4285     +MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
4286     MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
4287     MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
4288     MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
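
[Editor's sketch] Each *_FW_PRE string above already ends in a trailing '-', so the old macros, which inserted a second "-" before the API number, requested firmware names with a doubled dash (e.g. "iwlwifi-9000-pu-a0-jf-a0--34.ucode") that never matched a shipped file. The fixed macros rely on string-literal concatenation plus the kernel's two-level stringify macro, reproduced here in a standalone demo (the API number 34 is just an example value):

    #include <stdio.h>

    /* Two-level expansion so macro arguments are expanded before '#'
     * stringizes them; same shape as include/linux/stringify.h (the
     * named variadic form is a GNU extension). */
    #define __stringify_1(x...) #x
    #define __stringify(x...)   __stringify_1(x)

    #define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"

    /* new form: no extra "-", since FW_PRE already ends with one */
    #define IWL9000A_MODULE_FIRMWARE(api) \
        IWL9000A_FW_PRE __stringify(api) ".ucode"

    int main(void)
    {
        /* prints: iwlwifi-9000-pu-a0-jf-a0-34.ucode */
        puts(IWL9000A_MODULE_FIRMWARE(34));

        /* the old form produced the doubled dash: */
        puts(IWL9000A_FW_PRE "-" __stringify(34) ".ucode");
        return 0;
    }
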
4289     diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
4290     index a440140ed8dd..7eade165b747 100644
4291     --- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
4292     +++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
4293     @@ -80,15 +80,15 @@
4294     #define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
4295    
4296     #define IWL_A000_HR_MODULE_FIRMWARE(api) \
4297     - IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
4298     + IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
4299     #define IWL_A000_JF_MODULE_FIRMWARE(api) \
4300     - IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
4301     + IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
4302     #define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
4303     - IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode"
4304     + IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
4305     #define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
4306     - IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode"
4307     + IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
4308     #define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
4309     - IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode"
4310     + IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
4311    
4312     #define NVM_HW_SECTION_NUM_FAMILY_A000 10
4313    
4314     diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
4315     index 5a40092febfb..3bfc657f6b42 100644
4316     --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
4317     +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
4318     @@ -531,6 +531,8 @@ struct iwl_scan_config_v1 {
4319     } __packed; /* SCAN_CONFIG_DB_CMD_API_S */
4320    
4321     #define SCAN_TWO_LMACS 2
4322     +#define SCAN_LB_LMAC_IDX 0
4323     +#define SCAN_HB_LMAC_IDX 1
4324    
4325     struct iwl_scan_config {
4326     __le32 flags;
4327     @@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags {
4328     IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
4329     IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
4330     IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
4331     + IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13),
4332     };
4333    
4334     /**
4335     @@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail {
4336     * @uid: scan id, &enum iwl_umac_scan_uid_offsets
4337     * @ooc_priority: out of channel priority - &enum iwl_scan_priority
4338     * @general_flags: &enum iwl_umac_scan_general_flags
4339     - * @reserved2: for future use and alignment
4340     * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
4341     * @extended_dwell: dwell time for channels 1, 6 and 11
4342     * @active_dwell: dwell time for active scan
4343     * @passive_dwell: dwell time for passive scan
4344     * @fragmented_dwell: dwell time for fragmented passive scan
4345     + * @adwell_default_n_aps: for adaptive dwell the default number of APs
4346     + * per channel
4347     + * @adwell_default_n_aps_social: for adaptive dwell the default
4348     + * number of APs per social (1,6,11) channel
4349     + * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
4350     + * to total scan time
4351     * @max_out_time: max out of serving channel time, per LMAC - for CDB there
4352     * are 2 LMACs
4353     * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
4354     @@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail {
4355     * @channel_flags: &enum iwl_scan_channel_flags
4356     * @n_channels: num of channels in scan request
4357     * @reserved: for future use and alignment
4358     + * @reserved2: for future use and alignment
4359     + * @reserved3: for future use and alignment
4360     * @data: &struct iwl_scan_channel_cfg_umac and
4361     * &struct iwl_scan_req_umac_tail
4362     */
4363     @@ -651,41 +661,64 @@ struct iwl_scan_req_umac {
4364     __le32 flags;
4365     __le32 uid;
4366     __le32 ooc_priority;
4367     - /* SCAN_GENERAL_PARAMS_API_S_VER_4 */
4368     __le16 general_flags;
4369     - u8 reserved2;
4370     + u8 reserved;
4371     u8 scan_start_mac_id;
4372     - u8 extended_dwell;
4373     - u8 active_dwell;
4374     - u8 passive_dwell;
4375     - u8 fragmented_dwell;
4376     union {
4377     struct {
4378     + u8 extended_dwell;
4379     + u8 active_dwell;
4380     + u8 passive_dwell;
4381     + u8 fragmented_dwell;
4382     __le32 max_out_time;
4383     __le32 suspend_time;
4384     __le32 scan_priority;
4385     - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
4386     + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
4387     u8 channel_flags;
4388     u8 n_channels;
4389     - __le16 reserved;
4390     + __le16 reserved2;
4391     u8 data[];
4392     } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
4393     struct {
4394     + u8 extended_dwell;
4395     + u8 active_dwell;
4396     + u8 passive_dwell;
4397     + u8 fragmented_dwell;
4398     __le32 max_out_time[SCAN_TWO_LMACS];
4399     __le32 suspend_time[SCAN_TWO_LMACS];
4400     __le32 scan_priority;
4401     - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
4402     + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
4403     u8 channel_flags;
4404     u8 n_channels;
4405     - __le16 reserved;
4406     + __le16 reserved2;
4407     u8 data[];
4408     } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
4409     + struct {
4410     + u8 active_dwell;
4411     + u8 passive_dwell;
4412     + u8 fragmented_dwell;
4413     + u8 adwell_default_n_aps;
4414     + u8 adwell_default_n_aps_social;
4415     + u8 reserved3;
4416     + __le16 adwell_max_budget;
4417     + __le32 max_out_time[SCAN_TWO_LMACS];
4418     + __le32 suspend_time[SCAN_TWO_LMACS];
4419     + __le32 scan_priority;
4420     + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
4421     + u8 channel_flags;
4422     + u8 n_channels;
4423     + __le16 reserved2;
4424     + u8 data[];
4425     + } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
4426     };
4427     } __packed;
4428    
4429     -#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
4430     +#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac)
4431     +#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \
4432     + 2 * sizeof(u8) - sizeof(__le16))
4433     #define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
4434     - 2 * sizeof(__le32))
4435     + 2 * sizeof(__le32) - 2 * sizeof(u8) - \
4436     + sizeof(__le16))
4437    
4438     /**
4439     * struct iwl_umac_scan_abort
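
[Editor's sketch] Since every member of the new union ends in a flexible data[] array, sizeof(struct iwl_scan_req_umac) is set by the largest fixed part, now the v7 layout. The v6 and v1 size macros recover the older sizes by subtracting the fields those layouts lack: relative to v6, v7 gains adwell_default_n_aps, adwell_default_n_aps_social, reserved3 and adwell_max_budget while dropping extended_dwell, a net of 2 * u8 + __le16; v1 additionally lacks the second LMAC slot of max_out_time and suspend_time, another 2 * __le32. A compile-time check of that arithmetic on a simplified packed model (field names abbreviated, not the driver's structs; packed attribute in the GCC/Clang spelling):

    #include <assert.h>
    #include <stdint.h>

    /* Fixed (pre-data[]) parts of the three layouts, packed as on the wire. */
    struct v1_fixed {
        uint8_t dwell[4];                       /* ext/active/passive/frag */
        uint32_t max_out, suspend, prio;        /* single LMAC */
        uint8_t ch_flags, n_ch;
        uint16_t rsvd;
    } __attribute__((packed));

    struct v6_fixed {
        uint8_t dwell[4];
        uint32_t max_out[2], suspend[2], prio;  /* one slot per LMAC */
        uint8_t ch_flags, n_ch;
        uint16_t rsvd;
    } __attribute__((packed));

    struct v7_fixed {
        uint8_t dwell[3];                       /* no extended_dwell */
        uint8_t n_aps, n_aps_social, rsvd3;
        uint16_t max_budget;
        uint32_t max_out[2], suspend[2], prio;
        uint8_t ch_flags, n_ch;
        uint16_t rsvd;
    } __attribute__((packed));

    /* Same relations as IWL_SCAN_REQ_UMAC_SIZE_V6 / _V1 above. */
    static_assert(sizeof(struct v6_fixed) ==
                  sizeof(struct v7_fixed) - 2 * sizeof(uint8_t) -
                  sizeof(uint16_t), "v6 size arithmetic");
    static_assert(sizeof(struct v1_fixed) ==
                  sizeof(struct v7_fixed) - 2 * sizeof(uint32_t) -
                  2 * sizeof(uint8_t) - sizeof(uint16_t),
                  "v1 size arithmetic");

    int main(void) { return 0; }
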
4440     diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
4441     index 279248cd9cfb..e988e4c371c4 100644
4442     --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
4443     +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
4444     @@ -262,6 +262,7 @@ enum iwl_ucode_tlv_api {
4445     IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
4446     IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
4447     /* API Set 1 */
4448     + IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
4449     IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
4450     IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
4451     IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37,
4452     diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4453     index 71cb1ecde0f7..e226179c32fa 100644
4454     --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4455     +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4456     @@ -364,6 +364,7 @@ struct iwl_cfg {
4457     u32 dccm2_len;
4458     u32 smem_offset;
4459     u32 smem_len;
4460     + u32 soc_latency;
4461     u16 nvm_ver;
4462     u16 nvm_calib_ver;
4463     u16 rx_with_siso_diversity:1,
4464     @@ -471,6 +472,10 @@ extern const struct iwl_cfg iwl9260_2ac_cfg;
4465     extern const struct iwl_cfg iwl9270_2ac_cfg;
4466     extern const struct iwl_cfg iwl9460_2ac_cfg;
4467     extern const struct iwl_cfg iwl9560_2ac_cfg;
4468     +extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
4469     +extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
4470     +extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
4471     +extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
4472     extern const struct iwl_cfg iwla000_2ac_cfg_hr;
4473     extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
4474     extern const struct iwl_cfg iwla000_2ac_cfg_jf;
4475     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
4476     index 949e63418299..8dcdb522b846 100644
4477     --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
4478     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
4479     @@ -1124,6 +1124,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
4480     IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
4481     }
4482    
4483     +static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
4484     +{
4485     + return fw_has_api(&mvm->fw->ucode_capa,
4486     + IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
4487     +}
4488     +
4489     static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
4490     {
4491     /* For now we only use this mode to differentiate between
4492     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
4493     index 774122fed454..e4fd476e9ccb 100644
4494     --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
4495     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
4496     @@ -130,6 +130,19 @@ struct iwl_mvm_scan_params {
4497     u32 measurement_dwell;
4498     };
4499    
4500     +static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
4501     +{
4502     + struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
4503     +
4504     + if (iwl_mvm_is_adaptive_dwell_supported(mvm))
4505     + return (void *)&cmd->v7.data;
4506     +
4507     + if (iwl_mvm_has_new_tx_api(mvm))
4508     + return (void *)&cmd->v6.data;
4509     +
4510     + return (void *)&cmd->v1.data;
4511     +}
4512     +
4513     static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
4514     {
4515     if (mvm->scan_rx_ant != ANT_NONE)
4516     @@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
4517     {
4518     struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
4519    
4520     + if (iwl_mvm_is_regular_scan(params))
4521     + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
4522     + else
4523     + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
4524     +
4525     + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
4526     + if (params->measurement_dwell) {
4527     + cmd->v7.active_dwell = params->measurement_dwell;
4528     + cmd->v7.passive_dwell = params->measurement_dwell;
4529     + } else {
4530     + cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE;
4531     + cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
4532     + }
4533     + cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
4534     +
4535     + cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
4536     + cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
4537     + cpu_to_le32(timing->max_out_time);
4538     + cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
4539     + cpu_to_le32(timing->suspend_time);
4540     + if (iwl_mvm_is_cdb_supported(mvm)) {
4541     + cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
4542     + cpu_to_le32(timing->max_out_time);
4543     + cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
4544     + cpu_to_le32(timing->suspend_time);
4545     + }
4546     +
4547     + return;
4548     + }
4549     +
4550     if (params->measurement_dwell) {
4551     - cmd->active_dwell = params->measurement_dwell;
4552     - cmd->passive_dwell = params->measurement_dwell;
4553     - cmd->extended_dwell = params->measurement_dwell;
4554     + cmd->v1.active_dwell = params->measurement_dwell;
4555     + cmd->v1.passive_dwell = params->measurement_dwell;
4556     + cmd->v1.extended_dwell = params->measurement_dwell;
4557     } else {
4558     - cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
4559     - cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
4560     - cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
4561     + cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE;
4562     + cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
4563     + cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
4564     }
4565     - cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
4566     + cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
4567    
4568     if (iwl_mvm_has_new_tx_api(mvm)) {
4569     cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
4570     - cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
4571     - cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
4572     + cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
4573     + cpu_to_le32(timing->max_out_time);
4574     + cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
4575     + cpu_to_le32(timing->suspend_time);
4576     if (iwl_mvm_is_cdb_supported(mvm)) {
4577     - cmd->v6.max_out_time[1] =
4578     + cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
4579     cpu_to_le32(timing->max_out_time);
4580     - cmd->v6.suspend_time[1] =
4581     + cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
4582     cpu_to_le32(timing->suspend_time);
4583     }
4584     } else {
4585     @@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
4586     cmd->v1.scan_priority =
4587     cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
4588     }
4589     -
4590     - if (iwl_mvm_is_regular_scan(params))
4591     - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
4592     - else
4593     - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
4594     }
4595    
4596     static void
4597     @@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4598     int type)
4599     {
4600     struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
4601     - void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
4602     - (void *)&cmd->v6.data : (void *)&cmd->v1.data;
4603     + void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
4604     struct iwl_scan_req_umac_tail *sec_part = cmd_data +
4605     sizeof(struct iwl_scan_channel_cfg_umac) *
4606     mvm->fw->ucode_capa.n_scan_channels;
4607     @@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4608     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
4609     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
4610    
4611     - if (iwl_mvm_has_new_tx_api(mvm)) {
4612     + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
4613     + cmd->v7.channel_flags = channel_flags;
4614     + cmd->v7.n_channels = params->n_channels;
4615     + } else if (iwl_mvm_has_new_tx_api(mvm)) {
4616     cmd->v6.channel_flags = channel_flags;
4617     cmd->v6.n_channels = params->n_channels;
4618     } else {
4619     @@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
4620     {
4621     int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
4622    
4623     - if (iwl_mvm_has_new_tx_api(mvm))
4624     - base_size = IWL_SCAN_REQ_UMAC_SIZE;
4625     + if (iwl_mvm_is_adaptive_dwell_supported(mvm))
4626     + base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
4627     + else if (iwl_mvm_has_new_tx_api(mvm))
4628     + base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
4629    
4630     if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
4631     return base_size +
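
[Editor's sketch] On the scan.c side the same capability check drives pointer selection: iwl_mvm_get_scan_req_umac_data() returns the version-specific data[] area, and the tail section is located by skipping n_scan_channels channel-config entries from there. A stripped-down model of that dispatch and pointer arithmetic (header sizes and names are invented for illustration; a flexible array inside a union member is a GCC extension the kernel also relies on):

    #include <stdint.h>
    #include <stdio.h>

    struct chan_cfg { uint32_t flags; uint8_t channel, band; uint16_t rsvd; };

    struct scan_cmd {
        uint32_t flags;
        union {
            struct { uint8_t hdr[20]; uint8_t data[]; } v1;
            struct { uint8_t hdr[28]; uint8_t data[]; } v6;
            struct { uint8_t hdr[32]; uint8_t data[]; } v7;
        };
    };

    /* Pick the data[] offset matching what the firmware expects. */
    static void *scan_cmd_data(struct scan_cmd *cmd,
                               int adaptive_dwell, int new_tx_api)
    {
        if (adaptive_dwell)
            return cmd->v7.data;
        if (new_tx_api)
            return cmd->v6.data;
        return cmd->v1.data;
    }

    int main(void)
    {
        struct scan_cmd cmd;
        int n_channels = 4;

        uint8_t *data = scan_cmd_data(&cmd, 1, 1);
        /* the tail section sits after n_channels channel-config entries */
        uint8_t *tail = data + n_channels * sizeof(struct chan_cfg);

        printf("data at offset %ld, tail at offset %ld\n",
               (long)(data - (uint8_t *)&cmd),
               (long)(tail - (uint8_t *)&cmd));
        return 0;
    }
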
4632     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4633     index 858765fed8f8..548e1928430d 100644
4634     --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4635     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4636     @@ -465,6 +465,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4637     {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
4638     {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
4639     {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
4640     + {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
4641     + {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
4642     {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
4643     {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
4644     {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
4645     @@ -483,6 +485,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4646     {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
4647     {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
4648     {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
4649     + {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)},
4650     {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
4651     {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
4652     {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
4653     @@ -508,67 +511,143 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4654     {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
4655     {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
4656     {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
4657     + {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)},
4658     + {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
4659    
4660     /* 9000 Series */
4661     - {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
4662     - {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
4663     - {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
4664     {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
4665     {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
4666     {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
4667     - {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
4668     - {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
4669     - {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
4670     - {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
4671     - {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
4672     - {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
4673     - {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
4674     - {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
4675     - {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
4676     - {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
4677     - {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
4678     - {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
4679     - {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
4680     - {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
4681     - {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
4682     - {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
4683     - {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
4684     - {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
4685     - {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
4686     + {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
4687     + {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
4688     + {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
4689     + {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)},
4690     {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
4691     - {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
4692     {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
4693     - {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
4694     - {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
4695     - {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
4696     {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
4697     - {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
4698     - {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
4699     - {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
4700     - {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
4701     - {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
4702     - {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
4703     + {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
4704     + {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
4705     + {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
4706     {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
4707     {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
4708     {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
4709     {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
4710     - {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
4711     - {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
4712     - {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
4713     + {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
4714     + {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
4715     + {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
4716     + {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
4717     + {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
4718     {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
4719     - {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
4720     - {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)},
4721     - {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)},
4722     - {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)},
4723     - {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
4724     - {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)},
4725     - {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)},
4726     - {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)},
4727     - {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)},
4728     - {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)},
4729     - {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)},
4730     - {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)},
4731     - {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
4732     + {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
4733     + {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
4734     + {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
4735     + {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
4736     + {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
4737     + {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
4738     + {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
4739     + {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
4740     + {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
4741     + {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
4742     + {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
4743     + {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
4744     + {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
4745     + {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
4746     + {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
4747     + {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
4748     + {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
4749     + {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
4750     + {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
4751     + {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
4752     + {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
4753     + {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
4754     + {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
4755     + {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
4756     + {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
4757     + {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
4758     + {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
4759     + {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
4760     + {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
4761     + {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
4762     + {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
4763     + {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
4764     + {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
4765     + {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
4766     + {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
4767     + {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
4768     + {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
4769     + {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
4770     + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)},
4771     + {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)},
4772     + {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)},
4773     + {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)},
4774     + {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)},
4775     + {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)},
4776     + {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)},
4777     + {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)},
4778     + {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)},
4779     + {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)},
4780     + {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)},
4781     + {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)},
4782     + {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)},
4783     + {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)},
4784     + {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)},
4785     + {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)},
4786     + {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)},
4787     + {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)},
4788     + {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)},
4789     + {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
4790     + {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
4791     + {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
4792     + {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
4793     + {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
4794     + {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
4795     + {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
4796     + {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
4797     + {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
4798     + {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
4799     + {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
4800     + {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
4801     + {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
4802     + {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
4803     + {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
4804     + {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
4805     + {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
4806     + {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
4807     + {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
4808     + {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
4809     + {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
4810     + {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
4811     + {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
4812     + {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
4813     + {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
4814     + {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
4815     + {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
4816     + {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
4817     + {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
4818     + {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
4819     + {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
4820     + {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
4821     + {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
4822     + {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
4823     + {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
4824     + {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
4825     + {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
4826     + {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
4827     + {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
4828     + {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
4829     + {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
4830     + {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
4831     + {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
4832     + {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
4833     + {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
4834     + {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
4835     + {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
4836     + {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
4837     + {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
4838     + {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
4839     + {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
4840     + {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
4841     + {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
4842    
4843     /* a000 Series */
4844     {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
4845     @@ -576,8 +655,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4846     {IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)},
4847     {IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)},
4848     {IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)},
4849     - {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ax_cfg_hr)},
4850     + {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)},
4851     + {IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)},
4852     {IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)},
4853     + {IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)},
4854     + {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
4855     + {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
4856     + {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
4857     +
4858     #endif /* CONFIG_IWLMVM */
4859    
4860     {0}
4861     diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
4862     index d5a3bf91a03e..ab6d39e12069 100644
4863     --- a/drivers/net/wireless/intersil/p54/main.c
4864     +++ b/drivers/net/wireless/intersil/p54/main.c
4865     @@ -852,12 +852,11 @@ void p54_unregister_common(struct ieee80211_hw *dev)
4866     {
4867     struct p54_common *priv = dev->priv;
4868    
4869     -#ifdef CONFIG_P54_LEDS
4870     - p54_unregister_leds(priv);
4871     -#endif /* CONFIG_P54_LEDS */
4872     -
4873     if (priv->registered) {
4874     priv->registered = false;
4875     +#ifdef CONFIG_P54_LEDS
4876     + p54_unregister_leds(priv);
4877     +#endif /* CONFIG_P54_LEDS */
4878     ieee80211_unregister_hw(dev);
4879     }
4880    
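
[Editor's sketch] The p54 fix moves the LED teardown inside the 'registered' guard so that a probe which failed before registration no longer tries to unregister LEDs that were never set up. The shape of the pattern, with stand-ins for the p54 calls:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_priv { bool registered; };

    static void unregister_leds(void) { puts("leds unregistered"); }
    static void unregister_hw(void)   { puts("hw unregistered"); }

    /* Teardown must only undo what a successful registration actually did. */
    static void unregister_common(struct dev_priv *priv)
    {
        if (priv->registered) {
            priv->registered = false;
            unregister_leds();  /* LEDs exist only for registered devices */
            unregister_hw();
        }
        /* probe-failure path: nothing to undo */
    }

    int main(void)
    {
        struct dev_priv ok = { .registered = true };
        struct dev_priv failed = { .registered = false };

        unregister_common(&ok);     /* tears both down */
        unregister_common(&failed); /* no-op */
        return 0;
    }
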
4881     diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
4882     index e2f4f5778267..086aad22743d 100644
4883     --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
4884     +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
4885     @@ -57,7 +57,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
4886     if (status >= 0)
4887     return 0;
4888    
4889     - if (status == -ENODEV) {
4890     + if (status == -ENODEV || status == -ENOENT) {
4891     /* Device has disappeared. */
4892     clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
4893     break;
4894     @@ -321,7 +321,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
4895    
4896     status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
4897     if (status) {
4898     - if (status == -ENODEV)
4899     + if (status == -ENODEV || status == -ENOENT)
4900     clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
4901     set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
4902     rt2x00lib_dmadone(entry);
4903     @@ -410,7 +410,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
4904    
4905     status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
4906     if (status) {
4907     - if (status == -ENODEV)
4908     + if (status == -ENODEV || status == -ENOENT)
4909     clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
4910     set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
4911     rt2x00lib_dmadone(entry);
4912     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
4913     index 7eae27f8e173..f9563ae301ad 100644
4914     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
4915     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
4916     @@ -682,7 +682,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
4917     struct rtl_priv *rtlpriv = rtl_priv(hw);
4918     struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
4919     struct sk_buff *skb = NULL;
4920     -
4921     + bool rtstatus;
4922     u32 totalpacketlen;
4923     u8 u1rsvdpageloc[5] = { 0 };
4924     bool b_dlok = false;
4925     @@ -768,7 +768,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
4926     skb = dev_alloc_skb(totalpacketlen);
4927     skb_put_data(skb, &reserved_page_packet, totalpacketlen);
4928    
4929     - b_dlok = true;
4930     + rtstatus = rtl_cmd_send_packet(hw, skb);
4931     + if (rtstatus)
4932     + b_dlok = true;
4933    
4934     if (b_dlok) {
4935     RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
4936     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
4937     index 1d431d4bf6d2..9ac1511de7ba 100644
4938     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
4939     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
4940     @@ -1372,6 +1372,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
4941    
4942     ppsc->wakeup_reason = 0;
4943    
4944     + do_gettimeofday(&ts);
4945     rtlhal->last_suspend_sec = ts.tv_sec;
4946    
4947     switch (fw_reason) {
4948     diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
4949     index e0f0e3ce1a32..98466d762c8f 100644
4950     --- a/drivers/nvdimm/dimm.c
4951     +++ b/drivers/nvdimm/dimm.c
4952     @@ -68,6 +68,7 @@ static int nvdimm_probe(struct device *dev)
4953     rc = nd_label_reserve_dpa(ndd);
4954     if (ndd->ns_current >= 0)
4955     nvdimm_set_aliasing(dev);
4956     + nvdimm_clear_locked(dev);
4957     nvdimm_bus_unlock(dev);
4958    
4959     if (rc)
4960     diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
4961     index f0d1b7e5de01..5f1385b96b13 100644
4962     --- a/drivers/nvdimm/dimm_devs.c
4963     +++ b/drivers/nvdimm/dimm_devs.c
4964     @@ -200,6 +200,13 @@ void nvdimm_set_locked(struct device *dev)
4965     set_bit(NDD_LOCKED, &nvdimm->flags);
4966     }
4967    
4968     +void nvdimm_clear_locked(struct device *dev)
4969     +{
4970     + struct nvdimm *nvdimm = to_nvdimm(dev);
4971     +
4972     + clear_bit(NDD_LOCKED, &nvdimm->flags);
4973     +}
4974     +
4975     static void nvdimm_release(struct device *dev)
4976     {
4977     struct nvdimm *nvdimm = to_nvdimm(dev);
4978     diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
4979     index 9c5f108910e3..de66c02f6140 100644
4980     --- a/drivers/nvdimm/label.c
4981     +++ b/drivers/nvdimm/label.c
4982     @@ -1050,7 +1050,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
4983     nsindex = to_namespace_index(ndd, 0);
4984     memset(nsindex, 0, ndd->nsarea.config_size);
4985     for (i = 0; i < 2; i++) {
4986     - int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);
4987     + int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
4988    
4989     if (rc)
4990     return rc;
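
[Editor's sketch] The label.c one-liner changes the initial index-block sequence numbers from {0, 2} to {3, 2}: valid sequence numbers cycle 1 -> 2 -> 3 -> 1 and 0 marks a slot invalid, so the old initialization left slot 0 permanently invalid. A sketch of that cyclic scheme, assuming the next[] table matches the driver's nd_inc_seq() helper:

    #include <stdint.h>
    #include <stdio.h>

    /* Valid sequence numbers cycle 1 -> 2 -> 3 -> 1; 0 means "invalid". */
    static uint32_t nd_inc_seq(uint32_t seq)
    {
        static const uint32_t next[] = { 0, 2, 3, 1 };
        return next[seq & 3];
    }

    int main(void)
    {
        /* old init wrote seq {0, 2}: slot 0 invalid from the start;
         * new init writes seq {3, 2}: both valid, slot 0 is newest */
        for (uint32_t i = 0; i < 2; i++)
            printf("slot %u: old seq %u, new seq %u\n", i, i * 2, 3 - i);

        printf("successor of seq 3 is %u\n", nd_inc_seq(3)); /* wraps to 1 */
        return 0;
    }
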
4991     diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
4992     index 3e4d1e7998da..0af988739a06 100644
4993     --- a/drivers/nvdimm/namespace_devs.c
4994     +++ b/drivers/nvdimm/namespace_devs.c
4995     @@ -1620,7 +1620,7 @@ static umode_t namespace_visible(struct kobject *kobj,
4996     if (a == &dev_attr_resource.attr) {
4997     if (is_namespace_blk(dev))
4998     return 0;
4999     - return a->mode;
5000     + return 0400;
5001     }
5002    
5003     if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
5004     diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
5005     index 9c758a91372b..156be00e1f76 100644
5006     --- a/drivers/nvdimm/nd.h
5007     +++ b/drivers/nvdimm/nd.h
5008     @@ -254,6 +254,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
5009     unsigned int len);
5010     void nvdimm_set_aliasing(struct device *dev);
5011     void nvdimm_set_locked(struct device *dev);
5012     +void nvdimm_clear_locked(struct device *dev);
5013     struct nd_btt *to_nd_btt(struct device *dev);
5014    
5015     struct nd_gen_sb {
5016     diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
5017     index 9576c444f0ab..65cc171c721d 100644
5018     --- a/drivers/nvdimm/pfn_devs.c
5019     +++ b/drivers/nvdimm/pfn_devs.c
5020     @@ -282,8 +282,16 @@ static struct attribute *nd_pfn_attributes[] = {
5021     NULL,
5022     };
5023    
5024     +static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
5025     +{
5026     + if (a == &dev_attr_resource.attr)
5027     + return 0400;
5028     + return a->mode;
5029     +}
5030     +
5031     struct attribute_group nd_pfn_attribute_group = {
5032     .attrs = nd_pfn_attributes,
5033     + .is_visible = pfn_visible,
5034     };
5035    
5036     static const struct attribute_group *nd_pfn_attribute_groups[] = {
5037     diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
5038     index 829d760f651c..abaf38c61220 100644
5039     --- a/drivers/nvdimm/region_devs.c
5040     +++ b/drivers/nvdimm/region_devs.c
5041     @@ -562,8 +562,12 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
5042     if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
5043     return 0;
5044    
5045     - if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
5046     - return 0;
5047     + if (a == &dev_attr_resource.attr) {
5048     + if (is_nd_pmem(dev))
5049     + return 0400;
5050     + else
5051     + return 0;
5052     + }
5053    
5054     if (a == &dev_attr_deep_flush.attr) {
5055     int has_flush = nvdimm_has_flush(nd_region);
5056     diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
5057     index 0fe3ea164ee5..04dac6a42c9f 100644
5058     --- a/drivers/pci/host/pci-hyperv.c
5059     +++ b/drivers/pci/host/pci-hyperv.c
5060     @@ -879,7 +879,7 @@ static void hv_irq_unmask(struct irq_data *data)
5061     int cpu;
5062     u64 res;
5063    
5064     - dest = irq_data_get_affinity_mask(data);
5065     + dest = irq_data_get_effective_affinity_mask(data);
5066     pdev = msi_desc_to_pci_dev(msi_desc);
5067     pbus = pdev->bus;
5068     hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
5069     @@ -1042,6 +1042,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
5070     struct hv_pci_dev *hpdev;
5071     struct pci_bus *pbus;
5072     struct pci_dev *pdev;
5073     + struct cpumask *dest;
5074     struct compose_comp_ctxt comp;
5075     struct tran_int_desc *int_desc;
5076     struct {
5077     @@ -1056,6 +1057,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
5078     int ret;
5079    
5080     pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
5081     + dest = irq_data_get_effective_affinity_mask(data);
5082     pbus = pdev->bus;
5083     hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
5084     hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
5085     @@ -1081,14 +1083,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
5086     switch (pci_protocol_version) {
5087     case PCI_PROTOCOL_VERSION_1_1:
5088     size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
5089     - irq_data_get_affinity_mask(data),
5090     + dest,
5091     hpdev->desc.win_slot.slot,
5092     cfg->vector);
5093     break;
5094    
5095     case PCI_PROTOCOL_VERSION_1_2:
5096     size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
5097     - irq_data_get_affinity_mask(data),
5098     + dest,
5099     hpdev->desc.win_slot.slot,
5100     cfg->vector);
5101     break;
5102     diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
5103     index 83e4a892b14b..cae54f8320be 100644
5104     --- a/drivers/pci/pcie/aspm.c
5105     +++ b/drivers/pci/pcie/aspm.c
5106     @@ -453,7 +453,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
5107    
5108     /* Choose the greater of the two T_cmn_mode_rstr_time */
5109     val1 = (upreg->l1ss_cap >> 8) & 0xFF;
5110     - val2 = (upreg->l1ss_cap >> 8) & 0xFF;
5111     + val2 = (dwreg->l1ss_cap >> 8) & 0xFF;
5112     if (val1 > val2)
5113     link->l1ss.ctl1 |= val1 << 8;
5114     else
5115     @@ -658,7 +658,7 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
5116     0xFF00, link->l1ss.ctl1);
5117    
5118     /* Program LTR L1.2 threshold in both ports */
5119     - pci_clear_and_set_dword(parent, dw_cap_ptr + PCI_L1SS_CTL1,
5120     + pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
5121     0xE3FF0000, link->l1ss.ctl1);
5122     pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
5123     0xE3FF0000, link->l1ss.ctl1);
5124     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
5125     index 911b3b65c8b2..f66f9375177c 100644
5126     --- a/drivers/pci/quirks.c
5127     +++ b/drivers/pci/quirks.c
5128     @@ -4212,17 +4212,32 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
5129     #endif
5130     }
5131    
5132     +static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
5133     +{
5134     + /*
5135     + * Effectively selects all downstream ports for whole ThunderX 1
5136     + * family by 0xf800 mask (which represents 8 SoCs), while the lower
5137     + * bits of device ID are used to indicate which subdevice is used
5138     + * within the SoC.
5139     + */
5140     + return (pci_is_pcie(dev) &&
5141     + (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
5142     + ((dev->device & 0xf800) == 0xa000));
5143     +}
5144     +
5145     static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
5146     {
5147     /*
5148     - * Cavium devices matching this quirk do not perform peer-to-peer
5149     - * with other functions, allowing masking out these bits as if they
5150     - * were unimplemented in the ACS capability.
5151     + * Cavium root ports don't advertise an ACS capability. However,
5152     + * the RTL internally implements similar protection as if ACS had
5153     + * Request Redirection, Completion Redirection, Source Validation,
5154     + * and Upstream Forwarding features enabled. Assert that the
5155     + * hardware implements and enables equivalent ACS functionality for
5156     + * these flags.
5157     */
5158     - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
5159     - PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
5160     + acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
5161    
5162     - if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff)))
5163     + if (!pci_quirk_cavium_acs_match(dev))
5164     return -ENOTTY;
5165    
5166     return acs_flags ? 0 : 1;
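
[Editor's sketch] The new match helper narrows the quirk from a bare device-ID range check to "PCIe root port whose device ID lies in the 0xA000 block": masking with 0xf800 keeps the top five ID bits, so IDs 0xA000-0xA7FF all match, covering the family's 8 SoC slots of 256 subdevice IDs each. A quick demonstration of the mask arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* True for any device ID in the ThunderX root-port block 0xA000-0xA7FF. */
    static int cavium_acs_match(uint16_t device)
    {
        return (device & 0xf800) == 0xa000;
    }

    int main(void)
    {
        uint16_t ids[] = { 0xa000, 0xa02d, 0xa3ff, 0xa7ff, 0xa800, 0x9fff };

        for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
            printf("0x%04x -> %s\n", ids[i],
                   cavium_acs_match(ids[i]) ? "match" : "no match");
        return 0;
    }
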
5167     diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
5168     index c17677f494af..dc6519b2c53a 100644
5169     --- a/drivers/scsi/lpfc/lpfc_attr.c
5170     +++ b/drivers/scsi/lpfc/lpfc_attr.c
5171     @@ -3134,7 +3134,8 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
5172     struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
5173     struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
5174    
5175     - return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max);
5176     + return snprintf(buf, PAGE_SIZE, "%d\n",
5177     + pring ? pring->txq_max : 0);
5178     }
5179    
5180     static DEVICE_ATTR(txq_hw, S_IRUGO,
5181     @@ -3147,7 +3148,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
5182     struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
5183     struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
5184    
5185     - return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max);
5186     + return snprintf(buf, PAGE_SIZE, "%d\n",
5187     + pring ? pring->txcmplq_max : 0);
5188     }
5189    
5190     static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
5191     diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
5192     index fe9e1c079c20..d89816222b23 100644
5193     --- a/drivers/scsi/lpfc/lpfc_bsg.c
5194     +++ b/drivers/scsi/lpfc/lpfc_bsg.c
5195     @@ -2911,7 +2911,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
5196     }
5197     }
5198    
5199     - if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
5200     + if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
5201     ret_val = -ENOMEM;
5202     goto err_post_rxbufs_exit;
5203     }
5204     @@ -5421,6 +5421,8 @@ lpfc_bsg_timeout(struct bsg_job *job)
5205     struct lpfc_iocbq *check_iocb, *next_iocb;
5206    
5207     pring = lpfc_phba_elsring(phba);
5208     + if (unlikely(!pring))
5209     + return -EIO;
5210    
5211     /* if job's driver data is NULL, the command completed or is in the
5212     * the process of completing. In this case, return status to request
5213     diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
5214     index 468a66371de9..3ebf6ccba6e6 100644
5215     --- a/drivers/scsi/lpfc/lpfc_els.c
5216     +++ b/drivers/scsi/lpfc/lpfc_els.c
5217     @@ -7430,6 +7430,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
5218     timeout = (uint32_t)(phba->fc_ratov << 1);
5219    
5220     pring = lpfc_phba_elsring(phba);
5221     + if (unlikely(!pring))
5222     + return;
5223    
5224     if ((phba->pport->load_flag & FC_UNLOADING))
5225     return;
5226     @@ -9310,6 +9312,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
5227    
5228     pring = lpfc_phba_elsring(phba);
5229    
5230     + if (unlikely(!pring))
5231     + return;
5232     +
5233     spin_lock_irq(&phba->hbalock);
5234     list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
5235     list) {
5236     @@ -9416,7 +9421,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
5237     rxid, 1);
5238    
5239     /* Check if TXQ queue needs to be serviced */
5240     - if (!(list_empty(&pring->txq)))
5241     + if (pring && !list_empty(&pring->txq))
5242     lpfc_worker_wake_up(phba);
5243     return;
5244     }
5245     diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
5246     index 20808349a80e..499df9d17339 100644
5247     --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
5248     +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
5249     @@ -3324,7 +3324,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5250    
5251     /* Unblock ELS traffic */
5252     pring = lpfc_phba_elsring(phba);
5253     - pring->flag &= ~LPFC_STOP_IOCB_EVENT;
5254     + if (pring)
5255     + pring->flag &= ~LPFC_STOP_IOCB_EVENT;
5256    
5257     /* Check for error */
5258     if (mb->mbxStatus) {
5259     @@ -5430,6 +5431,8 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5260    
5261     psli = &phba->sli;
5262     pring = lpfc_phba_elsring(phba);
5263     + if (unlikely(!pring))
5264     + return;
5265    
5266     /* Error matching iocb on txq or txcmplq
5267     * First check the txq.
5268     diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
5269     index 100bc4c8798d..6acf1bb1d320 100644
5270     --- a/drivers/scsi/lpfc/lpfc_init.c
5271     +++ b/drivers/scsi/lpfc/lpfc_init.c
5272     @@ -11404,6 +11404,13 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
5273     /* Remove FC host and then SCSI host with the physical port */
5274     fc_remove_host(shost);
5275     scsi_remove_host(shost);
5276     + /*
5277     + * Bring down the SLI Layer. This step disables all interrupts,
5278     + * clears the rings, discards all mailbox commands, and resets
5279     + * the HBA FCoE function.
5280     + */
5281     + lpfc_debugfs_terminate(vport);
5282     + lpfc_sli4_hba_unset(phba);
5283    
5284     /* Perform ndlp cleanup on the physical port. The nvme and nvmet
5285     * localports are destroyed after to cleanup all transport memory.
5286     @@ -11412,14 +11419,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
5287     lpfc_nvmet_destroy_targetport(phba);
5288     lpfc_nvme_destroy_localport(vport);
5289    
5290     - /*
5291     - * Bring down the SLI Layer. This step disables all interrupts,
5292     - * clears the rings, discards all mailbox commands, and resets
5293     - * the HBA FCoE function.
5294     - */
5295     - lpfc_debugfs_terminate(vport);
5296     - lpfc_sli4_hba_unset(phba);
5297    
5298     + lpfc_stop_hba_timers(phba);
5299     spin_lock_irq(&phba->hbalock);
5300     list_del_init(&vport->listentry);
5301     spin_unlock_irq(&phba->hbalock);
5302     diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
5303     index f3ad7cac355d..b6957d944b9a 100644
5304     --- a/drivers/scsi/lpfc/lpfc_nportdisc.c
5305     +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
5306     @@ -216,7 +216,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5307     pring = lpfc_phba_elsring(phba);
5308    
5309     /* In case of error recovery path, we might have a NULL pring here */
5310     - if (!pring)
5311     + if (unlikely(!pring))
5312     return;
5313    
5314     /* Abort outstanding I/O on NPort <nlp_DID> */
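
[Editor's sketch] This hunk only swaps a plain NULL test for unlikely(), but it belongs to the series of lpfc_phba_elsring() guards added throughout this patch: during error recovery or teardown the ELS ring pointer can be NULL, and every caller now bails out before dereferencing it. A compact model of the guard, with unlikely() spelled out via the GCC builtin and stand-ins for the lpfc names:

    #include <stdio.h>

    #define unlikely(x) __builtin_expect(!!(x), 0)

    struct ring { int flag; };

    static struct ring *phba_elsring(int ring_present)
    {
        static struct ring els;
        return ring_present ? &els : NULL;  /* NULL during error recovery */
    }

    static void els_timeout_handler(int ring_present)
    {
        struct ring *pring = phba_elsring(ring_present);

        if (unlikely(!pring))  /* hint: the NULL case is the cold path */
            return;

        pring->flag = 0;
        puts("serviced ELS ring");
    }

    int main(void)
    {
        els_timeout_handler(1);
        els_timeout_handler(0);  /* early return, no crash */
        return 0;
    }
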
5315     diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
5316     index 0b7c1a49e203..3c5b054a56ac 100644
5317     --- a/drivers/scsi/lpfc/lpfc_nvmet.c
5318     +++ b/drivers/scsi/lpfc/lpfc_nvmet.c
5319     @@ -1138,9 +1138,14 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
5320     #endif
5321     if (error) {
5322     lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
5323     - "6025 Cannot register NVME targetport "
5324     - "x%x\n", error);
5325     + "6025 Cannot register NVME targetport x%x: "
5326     + "portnm %llx nodenm %llx segs %d qs %d\n",
5327     + error,
5328     + pinfo.port_name, pinfo.node_name,
5329     + lpfc_tgttemplate.max_sgl_segments,
5330     + lpfc_tgttemplate.max_hw_queues);
5331     phba->targetport = NULL;
5332     + phba->nvmet_support = 0;
5333    
5334     lpfc_nvmet_cleanup_io_context(phba);
5335    
5336     @@ -1152,9 +1157,11 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
5337     lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
5338     "6026 Registered NVME "
5339     "targetport: %p, private %p "
5340     - "portnm %llx nodenm %llx\n",
5341     + "portnm %llx nodenm %llx segs %d qs %d\n",
5342     phba->targetport, tgtp,
5343     - pinfo.port_name, pinfo.node_name);
5344     + pinfo.port_name, pinfo.node_name,
5345     + lpfc_tgttemplate.max_sgl_segments,
5346     + lpfc_tgttemplate.max_hw_queues);
5347    
5348     atomic_set(&tgtp->rcv_ls_req_in, 0);
5349     atomic_set(&tgtp->rcv_ls_req_out, 0);
5350     diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
5351     index 8b119f87b51d..455f3ce9fda9 100644
5352     --- a/drivers/scsi/lpfc/lpfc_sli.c
5353     +++ b/drivers/scsi/lpfc/lpfc_sli.c
5354     @@ -9396,10 +9396,13 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
5355     * for abort iocb hba_wqidx should already
5356     * be setup based on what work queue we used.
5357     */
5358     - if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
5359     + if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
5360     piocb->hba_wqidx =
5361     lpfc_sli4_scmd_to_wqidx_distr(phba,
5362     piocb->context1);
5363     + piocb->hba_wqidx = piocb->hba_wqidx %
5364     + phba->cfg_fcp_io_channel;
5365     + }
5366     return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
5367     } else {
5368     if (unlikely(!phba->sli4_hba.oas_wq))
5369     @@ -10632,6 +10635,14 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5370     (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
5371     return 0;
5372    
5373     + if (!pring) {
5374     + if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
5375     + cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
5376     + else
5377     + cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
5378     + goto abort_iotag_exit;
5379     + }
5380     +
5381     /*
5382     * If we're unloading, don't abort iocb on the ELS ring, but change
5383     * the callback so that nothing happens when it finishes.
5384     @@ -12500,6 +12511,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
5385     unsigned long iflags;
5386    
5387     pring = lpfc_phba_elsring(phba);
5388     + if (unlikely(!pring))
5389     + return NULL;
5390    
5391     wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
5392     spin_lock_irqsave(&pring->ring_lock, iflags);
5393     @@ -12507,19 +12520,21 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
5394     /* Look up the ELS command IOCB and create pseudo response IOCB */
5395     cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
5396     bf_get(lpfc_wcqe_c_request_tag, wcqe));
5397     - /* Put the iocb back on the txcmplq */
5398     - lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
5399     - spin_unlock_irqrestore(&pring->ring_lock, iflags);
5400     -
5401     if (unlikely(!cmdiocbq)) {
5402     + spin_unlock_irqrestore(&pring->ring_lock, iflags);
5403     lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5404     "0386 ELS complete with no corresponding "
5405     - "cmdiocb: iotag (%d)\n",
5406     - bf_get(lpfc_wcqe_c_request_tag, wcqe));
5407     + "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
5408     + wcqe->word0, wcqe->total_data_placed,
5409     + wcqe->parameter, wcqe->word3);
5410     lpfc_sli_release_iocbq(phba, irspiocbq);
5411     return NULL;
5412     }
5413    
5414     + /* Put the iocb back on the txcmplq */
5415     + lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
5416     + spin_unlock_irqrestore(&pring->ring_lock, iflags);
5417     +
5418     /* Fake the irspiocbq and copy necessary response information */
5419     lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
5420    
5421     @@ -17137,7 +17152,8 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
5422     if (pcmd && pcmd->virt)
5423     dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
5424     kfree(pcmd);
5425     - lpfc_sli_release_iocbq(phba, iocbq);
5426     + if (iocbq)
5427     + lpfc_sli_release_iocbq(phba, iocbq);
5428     lpfc_in_buf_free(phba, &dmabuf->dbuf);
5429     }
5430    
5431     @@ -18691,6 +18707,8 @@ lpfc_drain_txq(struct lpfc_hba *phba)
5432     uint32_t txq_cnt = 0;
5433    
5434     pring = lpfc_phba_elsring(phba);
5435     + if (unlikely(!pring))
5436     + return 0;
5437    
5438     spin_lock_irqsave(&pring->ring_lock, iflags);
5439     list_for_each_entry(piocbq, &pring->txq, list) {
5440     diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
5441     index dce42a416876..6eaaa326e508 100644
5442     --- a/drivers/scsi/qla2xxx/qla_os.c
5443     +++ b/drivers/scsi/qla2xxx/qla_os.c
5444     @@ -388,7 +388,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
5445     INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list);
5446     ha->base_qpair->enable_class_2 = ql2xenableclass2;
5447     /* init qpair to this cpu. Will adjust at run time. */
5448     - qla_cpu_update(rsp->qpair, smp_processor_id());
5449     + qla_cpu_update(rsp->qpair, raw_smp_processor_id());
5450     ha->base_qpair->pdev = ha->pdev;
5451    
5452     if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
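
The qla2xxx one-liner swaps smp_processor_id() for raw_smp_processor_id() because this init path runs in preemptible context, where the checked variant triggers a debug warning; the CPU number is only an initial hint that is adjusted at run time, so a racy read is acceptable. A minimal userspace sketch of the same "advisory CPU" idea, using glibc's sched_getcpu() as a stand-in (the helper name and scenario are illustrative, not from the driver):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/*
 * The CPU number returned here is advisory: the thread may migrate
 * immediately after the call, so callers must treat it as a hint,
 * exactly like the qpair's initial CPU binding above.
 */
static int cpu_hint(void)
{
        int cpu = sched_getcpu();       /* racy by design; a hint only */
        return cpu < 0 ? 0 : cpu;
}

int main(void)
{
        printf("initial cpu hint: %d\n", cpu_hint());
        return 0;
}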
5453     diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
5454     index 8aa54779aac1..2eb61d54bbb4 100644
5455     --- a/drivers/scsi/sd_zbc.c
5456     +++ b/drivers/scsi/sd_zbc.c
5457     @@ -375,15 +375,15 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
5458     if (sdkp->device->type != TYPE_ZBC) {
5459     /* Host-aware */
5460     sdkp->urswrz = 1;
5461     - sdkp->zones_optimal_open = get_unaligned_be64(&buf[8]);
5462     - sdkp->zones_optimal_nonseq = get_unaligned_be64(&buf[12]);
5463     + sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
5464     + sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
5465     sdkp->zones_max_open = 0;
5466     } else {
5467     /* Host-managed */
5468     sdkp->urswrz = buf[4] & 1;
5469     sdkp->zones_optimal_open = 0;
5470     sdkp->zones_optimal_nonseq = 0;
5471     - sdkp->zones_max_open = get_unaligned_be64(&buf[16]);
5472     + sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
5473     }
5474    
5475     return 0;
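
The sd_zbc hunk matters because the zoned-characteristics fields at offsets 8, 12 and 16 are 4 bytes wide: a 64-bit big-endian read there drags 4 bytes of the neighbouring field into the result. A self-contained demonstration, with simplified local helpers standing in for the kernel's get_unaligned_be32()/get_unaligned_be64():

#include <stdint.h>
#include <stdio.h>

static uint32_t be32_at(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static uint64_t be64_at(const uint8_t *p)
{
        return ((uint64_t)be32_at(p) << 32) | be32_at(p + 4);
}

int main(void)
{
        /* offset 8 holds 0x00000010; offset 12 holds 0x00000020 */
        uint8_t buf[20] = {0};
        buf[11] = 0x10;
        buf[15] = 0x20;

        /* correct: 4-byte read of the 4-byte field */
        printf("be32 @8 = 0x%x\n", be32_at(&buf[8]));          /* 0x10 */
        /* buggy: 8-byte read swallows the next field too */
        printf("be64 @8 = 0x%llx\n",
               (unsigned long long)be64_at(&buf[8]));          /* 0x1000000020 */
        return 0;
}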
5476     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
5477     index 5001261f5d69..d9ba4ee2c62b 100644
5478     --- a/drivers/target/iscsi/iscsi_target.c
5479     +++ b/drivers/target/iscsi/iscsi_target.c
5480     @@ -1960,7 +1960,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
5481     struct iscsi_tmr_req *tmr_req;
5482     struct iscsi_tm *hdr;
5483     int out_of_order_cmdsn = 0, ret;
5484     - bool sess_ref = false;
5485     u8 function, tcm_function = TMR_UNKNOWN;
5486    
5487     hdr = (struct iscsi_tm *) buf;
5488     @@ -1993,22 +1992,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
5489    
5490     cmd->data_direction = DMA_NONE;
5491     cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
5492     - if (!cmd->tmr_req)
5493     + if (!cmd->tmr_req) {
5494     return iscsit_add_reject_cmd(cmd,
5495     ISCSI_REASON_BOOKMARK_NO_RESOURCES,
5496     buf);
5497     + }
5498     +
5499     + transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
5500     + conn->sess->se_sess, 0, DMA_NONE,
5501     + TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
5502     +
5503     + target_get_sess_cmd(&cmd->se_cmd, true);
5504    
5505     /*
5506     * TASK_REASSIGN for ERL=2 / connection stays inside of
5507     * LIO-Target $FABRIC_MOD
5508     */
5509     if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
5510     - transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
5511     - conn->sess->se_sess, 0, DMA_NONE,
5512     - TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
5513     -
5514     - target_get_sess_cmd(&cmd->se_cmd, true);
5515     - sess_ref = true;
5516     tcm_function = iscsit_convert_tmf(function);
5517     if (tcm_function == TMR_UNKNOWN) {
5518     pr_err("Unknown iSCSI TMR Function:"
5519     @@ -2099,12 +2099,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
5520    
5521     if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
5522     int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
5523     - if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
5524     + if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
5525     out_of_order_cmdsn = 1;
5526     - else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
5527     + } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
5528     + target_put_sess_cmd(&cmd->se_cmd);
5529     return 0;
5530     - else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
5531     + } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
5532     return -1;
5533     + }
5534     }
5535     iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
5536    
5537     @@ -2124,12 +2126,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
5538     * For connection recovery, this is also the default action for
5539     * TMR TASK_REASSIGN.
5540     */
5541     - if (sess_ref) {
5542     - pr_debug("Handle TMR, using sess_ref=true check\n");
5543     - target_put_sess_cmd(&cmd->se_cmd);
5544     - }
5545     -
5546     iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
5547     + target_put_sess_cmd(&cmd->se_cmd);
5548     return 0;
5549     }
5550     EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
5551     diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
5552     index dd2cd8048582..9f25c9c6f67d 100644
5553     --- a/drivers/target/target_core_pr.c
5554     +++ b/drivers/target/target_core_pr.c
5555     @@ -4011,6 +4011,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
5556     * Set the ADDITIONAL DESCRIPTOR LENGTH
5557     */
5558     put_unaligned_be32(desc_len, &buf[off]);
5559     + off += 4;
5560     /*
5561     * Size of full desctipor header minus TransportID
5562     * containing $FABRIC_MOD specific) initiator device/port
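
The one-line target_core_pr fix advances the output cursor past the 4-byte ADDITIONAL DESCRIPTOR LENGTH just written; without the off += 4, the TransportID that follows lands on top of the length field. The cursor pattern in isolation (helper and buffer names are illustrative, not the kernel's):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void put_be32(uint32_t v, uint8_t *p)
{
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

/* Serialize one length-prefixed descriptor at buf[off]; returns new off. */
static size_t emit_desc(uint8_t *buf, size_t off,
                        const uint8_t *payload, uint32_t len)
{
        put_be32(len, &buf[off]);
        off += 4;                       /* the fix: step past the length */
        memcpy(&buf[off], payload, len);
        return off + len;
}

int main(void)
{
        uint8_t buf[64];
        const uint8_t tid[] = { 0xde, 0xad, 0xbe, 0xef };
        size_t end = emit_desc(buf, 0, tid, sizeof(tid));
        printf("descriptor occupies %zu bytes\n", end);        /* 8 */
        return 0;
}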
5563     diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
5564     index e22847bd79b9..9c7bc1ca341a 100644
5565     --- a/drivers/target/target_core_tmr.c
5566     +++ b/drivers/target/target_core_tmr.c
5567     @@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
5568     spin_unlock(&se_cmd->t_state_lock);
5569     return false;
5570     }
5571     + if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
5572     + if (se_cmd->scsi_status) {
5573     + pr_debug("Attempted to abort io tag: %llu early failure"
5574     + " status: 0x%02x\n", se_cmd->tag,
5575     + se_cmd->scsi_status);
5576     + spin_unlock(&se_cmd->t_state_lock);
5577     + return false;
5578     + }
5579     + }
5580     if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
5581     pr_debug("Attempted to abort io tag: %llu already shutdown,"
5582     " skipping\n", se_cmd->tag);
5583     @@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list(
5584     * LUN_RESET tmr..
5585     */
5586     spin_lock_irqsave(&dev->se_tmr_lock, flags);
5587     - list_del_init(&tmr->tmr_list);
5588     + if (tmr)
5589     + list_del_init(&tmr->tmr_list);
5590     list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
5591     cmd = tmr_p->task_cmd;
5592     if (!cmd) {
5593     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
5594     index 836d552b0385..e6d51135d105 100644
5595     --- a/drivers/target/target_core_transport.c
5596     +++ b/drivers/target/target_core_transport.c
5597     @@ -1730,9 +1730,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
5598     {
5599     int ret = 0, post_ret = 0;
5600    
5601     - if (transport_check_aborted_status(cmd, 1))
5602     - return;
5603     -
5604     pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
5605     sense_reason);
5606     target_show_cmd("-----[ ", cmd);
5607     @@ -1741,6 +1738,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
5608     * For SAM Task Attribute emulation for failed struct se_cmd
5609     */
5610     transport_complete_task_attr(cmd);
5611     +
5612     /*
5613     * Handle special case for COMPARE_AND_WRITE failure, where the
5614     * callback is expected to drop the per device ->caw_sem.
5615     @@ -1749,6 +1747,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
5616     cmd->transport_complete_callback)
5617     cmd->transport_complete_callback(cmd, false, &post_ret);
5618    
5619     + if (transport_check_aborted_status(cmd, 1))
5620     + return;
5621     +
5622     switch (sense_reason) {
5623     case TCM_NON_EXISTENT_LUN:
5624     case TCM_UNSUPPORTED_SCSI_OPCODE:
5625     @@ -1973,6 +1974,7 @@ void target_execute_cmd(struct se_cmd *cmd)
5626     }
5627    
5628     cmd->t_state = TRANSPORT_PROCESSING;
5629     + cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
5630     cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
5631     spin_unlock_irq(&cmd->t_state_lock);
5632    
5633     @@ -2010,6 +2012,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
5634     list_del(&cmd->se_delayed_node);
5635     spin_unlock(&dev->delayed_cmd_lock);
5636    
5637     + cmd->transport_state |= CMD_T_SENT;
5638     +
5639     __target_execute_cmd(cmd, true);
5640    
5641     if (cmd->sam_task_attr == TCM_ORDERED_TAG)
5642     @@ -2045,6 +2049,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
5643     pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
5644     dev->dev_cur_ordered_id);
5645     }
5646     + cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
5647     +
5648     restart:
5649     target_restart_delayed_cmds(dev);
5650     }
5651     @@ -2570,7 +2576,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
5652    
5653     static void transport_write_pending_qf(struct se_cmd *cmd)
5654     {
5655     + unsigned long flags;
5656     int ret;
5657     + bool stop;
5658     +
5659     + spin_lock_irqsave(&cmd->t_state_lock, flags);
5660     + stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
5661     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5662     +
5663     + if (stop) {
5664     + pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
5665     + __func__, __LINE__, cmd->tag);
5666     + complete_all(&cmd->t_transport_stop_comp);
5667     + return;
5668     + }
5669    
5670     ret = cmd->se_tfo->write_pending(cmd);
5671     if (ret) {
5672     @@ -2664,6 +2683,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
5673     ret = -ESHUTDOWN;
5674     goto out;
5675     }
5676     + se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
5677     list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
5678     out:
5679     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
5680     diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
5681     index c68fb3a8ea1c..97db76afced2 100644
5682     --- a/drivers/tty/serdev/core.c
5683     +++ b/drivers/tty/serdev/core.c
5684     @@ -65,21 +65,32 @@ static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env)
5685     */
5686     int serdev_device_add(struct serdev_device *serdev)
5687     {
5688     + struct serdev_controller *ctrl = serdev->ctrl;
5689     struct device *parent = serdev->dev.parent;
5690     int err;
5691    
5692     dev_set_name(&serdev->dev, "%s-%d", dev_name(parent), serdev->nr);
5693    
5694     + /* Only a single slave device is currently supported. */
5695     + if (ctrl->serdev) {
5696     + dev_err(&serdev->dev, "controller busy\n");
5697     + return -EBUSY;
5698     + }
5699     + ctrl->serdev = serdev;
5700     +
5701     err = device_add(&serdev->dev);
5702     if (err < 0) {
5703     dev_err(&serdev->dev, "Can't add %s, status %d\n",
5704     dev_name(&serdev->dev), err);
5705     - goto err_device_add;
5706     + goto err_clear_serdev;
5707     }
5708    
5709     dev_dbg(&serdev->dev, "device %s registered\n", dev_name(&serdev->dev));
5710    
5711     -err_device_add:
5712     + return 0;
5713     +
5714     +err_clear_serdev:
5715     + ctrl->serdev = NULL;
5716     return err;
5717     }
5718     EXPORT_SYMBOL_GPL(serdev_device_add);
5719     @@ -90,7 +101,10 @@ EXPORT_SYMBOL_GPL(serdev_device_add);
5720     */
5721     void serdev_device_remove(struct serdev_device *serdev)
5722     {
5723     + struct serdev_controller *ctrl = serdev->ctrl;
5724     +
5725     device_unregister(&serdev->dev);
5726     + ctrl->serdev = NULL;
5727     }
5728     EXPORT_SYMBOL_GPL(serdev_device_remove);
5729    
5730     @@ -295,7 +309,6 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *ctrl)
5731     return NULL;
5732    
5733     serdev->ctrl = ctrl;
5734     - ctrl->serdev = serdev;
5735     device_initialize(&serdev->dev);
5736     serdev->dev.parent = &ctrl->dev;
5737     serdev->dev.bus = &serdev_bus_type;
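
The serdev hunks move the single-slave claim from serdev_device_alloc() to serdev_device_add(), where a failed device_add() can roll the claim back and serdev_device_remove() releases it again. A compact sketch of the claim-then-unwind shape, with simplified stand-in types (nothing here is the real serdev API):

#include <errno.h>
#include <stddef.h>

struct ctrl { void *slave; };

/* Register dev as the controller's single slave; undo the claim on failure. */
static int slave_add(struct ctrl *c, void *dev, int (*do_add)(void *))
{
        int err;

        if (c->slave)                   /* only one slave supported */
                return -EBUSY;
        c->slave = dev;                 /* claim first ... */

        err = do_add(dev);
        if (err < 0) {
                c->slave = NULL;        /* ... and release on failure */
                return err;
        }
        return 0;
}

static int ok_add(void *dev) { (void)dev; return 0; }

int main(void)
{
        struct ctrl c = { .slave = NULL };
        int d1, d2;
        int r1 = slave_add(&c, &d1, ok_add);    /* 0 */
        int r2 = slave_add(&c, &d2, ok_add);    /* -EBUSY */
        return (r1 == 0 && r2 == -EBUSY) ? 0 : 1;
}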
5738     diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
5739     index 046f6d280af5..e47c5bc3ddca 100644
5740     --- a/drivers/vhost/scsi.c
5741     +++ b/drivers/vhost/scsi.c
5742     @@ -688,6 +688,7 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
5743     struct scatterlist *sg, int sg_count)
5744     {
5745     size_t off = iter->iov_offset;
5746     + struct scatterlist *p = sg;
5747     int i, ret;
5748    
5749     for (i = 0; i < iter->nr_segs; i++) {
5750     @@ -696,8 +697,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
5751    
5752     ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
5753     if (ret < 0) {
5754     - for (i = 0; i < sg_count; i++) {
5755     - struct page *page = sg_page(&sg[i]);
5756     + while (p < sg) {
5757     + struct page *page = sg_page(p++);
5758     if (page)
5759     put_page(page);
5760     }
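
The vhost/scsi fix releases only the scatterlist entries this call populated: p remembers the starting position and the unwind loop stops at the current sg, instead of blindly putting sg_count pages that may belong to the caller or were never filled. The bounded-unwind pattern, sketched standalone (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Pretend to "map" n resources into slot[], failing at index fail_at. */
static int map_all(void **slot, int n, int fail_at)
{
        void **p = slot;                /* remember the starting point */
        int i;

        for (i = 0; i < n; i++, slot++) {
                if (i == fail_at) {
                        /* release only the slots populated so far */
                        while (p < slot)
                                free(*p++);
                        return -1;
                }
                *slot = malloc(16);
        }
        return 0;
}

int main(void)
{
        void *slots[8] = {0};
        printf("map_all: %d\n", map_all(slots, 8, 5));  /* -1, 5 freed */
        return 0;
}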
5761     diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
5762     index 2a5de610dd8f..bdabb2765d1b 100644
5763     --- a/fs/9p/vfs_inode.c
5764     +++ b/fs/9p/vfs_inode.c
5765     @@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data)
5766    
5767     if (v9inode->qid.type != st->qid.type)
5768     return 0;
5769     +
5770     + if (v9inode->qid.path != st->qid.path)
5771     + return 0;
5772     return 1;
5773     }
5774    
5775     diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
5776     index 70f9887c59a9..7f6ae21a27b3 100644
5777     --- a/fs/9p/vfs_inode_dotl.c
5778     +++ b/fs/9p/vfs_inode_dotl.c
5779     @@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data)
5780    
5781     if (v9inode->qid.type != st->qid.type)
5782     return 0;
5783     +
5784     + if (v9inode->qid.path != st->qid.path)
5785     + return 0;
5786     return 1;
5787     }
5788    
5789     diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
5790     index 4ac49d038bf3..8fc41705c7cd 100644
5791     --- a/fs/autofs4/waitq.c
5792     +++ b/fs/autofs4/waitq.c
5793     @@ -81,7 +81,8 @@ static int autofs4_write(struct autofs_sb_info *sbi,
5794     spin_unlock_irqrestore(&current->sighand->siglock, flags);
5795     }
5796    
5797     - return (bytes > 0);
5798     + /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
5799     + return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
5800     }
5801    
5802     static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
5803     @@ -95,6 +96,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
5804     } pkt;
5805     struct file *pipe = NULL;
5806     size_t pktsz;
5807     + int ret;
5808    
5809     pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
5810     (unsigned long) wq->wait_queue_token,
5811     @@ -169,7 +171,18 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
5812     mutex_unlock(&sbi->wq_mutex);
5813    
5814     - if (autofs4_write(sbi, pipe, &pkt, pktsz))
5815     + switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
5816     + case 0:
5817     + break;
5818     + case -ENOMEM:
5819     + case -ERESTARTSYS:
5820     + /* Just fail this one */
5821     + autofs4_wait_release(sbi, wq->wait_queue_token, ret);
5822     + break;
5823     + default:
5824     autofs4_catatonic_mode(sbi);
5825     + break;
5826     + }
5827     fput(pipe);
5828     }
5829    
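With autofs4_write() now returning 0 or a negative errno rather than a boolean, the notify path can separate transient failures (-ENOMEM, -ERESTARTSYS: fail only the current wait) from a dead pipe (anything else: enter catatonic mode). The dispatch in miniature (the handler functions are stubs, not the autofs ones):

#include <errno.h>
#include <stdio.h>

static void fail_this_wait(int err) { printf("fail wait: %d\n", err); }
static void go_catatonic(void)      { printf("catatonic\n"); }

static void notify_result(int ret)
{
        switch (ret) {
        case 0:
                break;                  /* packet delivered */
        case -ENOMEM:
        case -ERESTARTSYS:
                fail_this_wait(ret);    /* transient: fail one waiter */
                break;
        default:
                go_catatonic();         /* pipe is gone for good */
                break;
        }
}

int main(void)
{
        notify_result(0);
        notify_result(-ENOMEM);
        notify_result(-EPIPE);
        return 0;
}
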
5830     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
5831     index e2d7e86b51d1..08698105fa4a 100644
5832     --- a/fs/btrfs/extent-tree.c
5833     +++ b/fs/btrfs/extent-tree.c
5834     @@ -4919,6 +4919,13 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
5835     }
5836     }
5837    
5838     +struct reserve_ticket {
5839     + u64 bytes;
5840     + int error;
5841     + struct list_head list;
5842     + wait_queue_head_t wait;
5843     +};
5844     +
5845     /**
5846     * maybe_commit_transaction - possibly commit the transaction if its ok to
5847     * @root - the root we're allocating for
5848     @@ -4930,18 +4937,29 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
5849     * will return -ENOSPC.
5850     */
5851     static int may_commit_transaction(struct btrfs_fs_info *fs_info,
5852     - struct btrfs_space_info *space_info,
5853     - u64 bytes, int force)
5854     + struct btrfs_space_info *space_info)
5855     {
5856     + struct reserve_ticket *ticket = NULL;
5857     struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
5858     struct btrfs_trans_handle *trans;
5859     + u64 bytes;
5860    
5861     trans = (struct btrfs_trans_handle *)current->journal_info;
5862     if (trans)
5863     return -EAGAIN;
5864    
5865     - if (force)
5866     - goto commit;
5867     + spin_lock(&space_info->lock);
5868     + if (!list_empty(&space_info->priority_tickets))
5869     + ticket = list_first_entry(&space_info->priority_tickets,
5870     + struct reserve_ticket, list);
5871     + else if (!list_empty(&space_info->tickets))
5872     + ticket = list_first_entry(&space_info->tickets,
5873     + struct reserve_ticket, list);
5874     + bytes = (ticket) ? ticket->bytes : 0;
5875     + spin_unlock(&space_info->lock);
5876     +
5877     + if (!bytes)
5878     + return 0;
5879    
5880     /* See if there is enough pinned space to make this reservation */
5881     if (percpu_counter_compare(&space_info->total_bytes_pinned,
5882     @@ -4956,8 +4974,12 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
5883     return -ENOSPC;
5884    
5885     spin_lock(&delayed_rsv->lock);
5886     + if (delayed_rsv->size > bytes)
5887     + bytes = 0;
5888     + else
5889     + bytes -= delayed_rsv->size;
5890     if (percpu_counter_compare(&space_info->total_bytes_pinned,
5891     - bytes - delayed_rsv->size) < 0) {
5892     + bytes) < 0) {
5893     spin_unlock(&delayed_rsv->lock);
5894     return -ENOSPC;
5895     }
5896     @@ -4971,13 +4993,6 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
5897     return btrfs_commit_transaction(trans);
5898     }
5899    
5900     -struct reserve_ticket {
5901     - u64 bytes;
5902     - int error;
5903     - struct list_head list;
5904     - wait_queue_head_t wait;
5905     -};
5906     -
5907     /*
5908     * Try to flush some data based on policy set by @state. This is only advisory
5909     * and may fail for various reasons. The caller is supposed to examine the
5910     @@ -5027,8 +5042,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
5911     ret = 0;
5912     break;
5913     case COMMIT_TRANS:
5914     - ret = may_commit_transaction(fs_info, space_info,
5915     - num_bytes, 0);
5916     + ret = may_commit_transaction(fs_info, space_info);
5917     break;
5918     default:
5919     ret = -ENOSPC;
5920     diff --git a/fs/buffer.c b/fs/buffer.c
5921     index 170df856bdb9..b96f3b98a6ef 100644
5922     --- a/fs/buffer.c
5923     +++ b/fs/buffer.c
5924     @@ -3055,8 +3055,16 @@ void guard_bio_eod(int op, struct bio *bio)
5925     sector_t maxsector;
5926     struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
5927     unsigned truncated_bytes;
5928     + struct hd_struct *part;
5929     +
5930     + rcu_read_lock();
5931     + part = __disk_get_part(bio->bi_disk, bio->bi_partno);
5932     + if (part)
5933     + maxsector = part_nr_sects_read(part);
5934     + else
5935     + maxsector = get_capacity(bio->bi_disk);
5936     + rcu_read_unlock();
5937    
5938     - maxsector = get_capacity(bio->bi_disk);
5939     if (!maxsector)
5940     return;
5941    
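guard_bio_eod() used to clamp a bio against the capacity of the whole disk even for partition I/O, so a request running past the end of a partition but still inside the disk escaped trimming; the fix consults the partition size first and falls back to get_capacity() only when no partition is found. The clamping arithmetic on its own (a simplified sketch, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Trim [start, start+len) so it does not cross maxsector; returns new len. */
static uint64_t clamp_to_dev(uint64_t start, uint64_t len, uint64_t maxsector)
{
        if (start >= maxsector)
                return 0;                       /* fully beyond the device */
        if (start + len > maxsector)
                len = maxsector - start;        /* partial: trim the tail */
        return len;
}

int main(void)
{
        /* 100-sector partition: an 8-sector bio at sector 96 is trimmed to 4 */
        printf("%llu\n", (unsigned long long)clamp_to_dev(96, 8, 100));
        return 0;
}
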
5942     diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
5943     index c7835df7e7b8..d262a93d9b31 100644
5944     --- a/fs/crypto/crypto.c
5945     +++ b/fs/crypto/crypto.c
5946     @@ -410,11 +410,8 @@ int fscrypt_initialize(unsigned int cop_flags)
5947     {
5948     int i, res = -ENOMEM;
5949    
5950     - /*
5951     - * No need to allocate a bounce page pool if there already is one or
5952     - * this FS won't use it.
5953     - */
5954     - if (cop_flags & FS_CFLG_OWN_PAGES || fscrypt_bounce_page_pool)
5955     + /* No need to allocate a bounce page pool if this FS won't use it. */
5956     + if (cop_flags & FS_CFLG_OWN_PAGES)
5957     return 0;
5958    
5959     mutex_lock(&fscrypt_init_mutex);
5960     diff --git a/fs/dax.c b/fs/dax.c
5961     index f001d8c72a06..191306cd8b6b 100644
5962     --- a/fs/dax.c
5963     +++ b/fs/dax.c
5964     @@ -1327,7 +1327,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
5965     * this is a reliable test.
5966     */
5967     pgoff = linear_page_index(vma, pmd_addr);
5968     - max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
5969     + max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
5970    
5971     trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
5972    
5973     @@ -1351,13 +1351,13 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
5974     if ((pmd_addr + PMD_SIZE) > vma->vm_end)
5975     goto fallback;
5976    
5977     - if (pgoff > max_pgoff) {
5978     + if (pgoff >= max_pgoff) {
5979     result = VM_FAULT_SIGBUS;
5980     goto out;
5981     }
5982    
5983     /* If the PMD would extend beyond the file size */
5984     - if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
5985     + if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
5986     goto fallback;
5987    
5988     /*
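Both max_pgoff forms agree for non-empty files, but the old last-index form underflows for a zero-length file, since (0 - 1) >> PAGE_SHIFT wraps to a huge value and the SIGBUS check never fires; expressing the limit as a page count avoids that. A quick check of the arithmetic (PAGE_SIZE hardcoded to 4 KiB here):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long sizes[] = { 0, 1, PAGE_SIZE, PAGE_SIZE + 1 };
        for (int i = 0; i < 4; i++) {
                unsigned long sz  = sizes[i];
                unsigned long old = (sz - 1) >> PAGE_SHIFT;     /* wraps at 0 */
                unsigned long cnt = DIV_ROUND_UP(sz, PAGE_SIZE);
                printf("size %5lu: old max_pgoff %lu, new page count %lu\n",
                       sz, old, cnt);
        }
        return 0;
}
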
5989     diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
5990     index 286f10b0363b..4f457d5c4933 100644
5991     --- a/fs/ecryptfs/messaging.c
5992     +++ b/fs/ecryptfs/messaging.c
5993     @@ -442,15 +442,16 @@ void ecryptfs_release_messaging(void)
5994     }
5995     if (ecryptfs_daemon_hash) {
5996     struct ecryptfs_daemon *daemon;
5997     + struct hlist_node *n;
5998     int i;
5999    
6000     mutex_lock(&ecryptfs_daemon_hash_mux);
6001     for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
6002     int rc;
6003    
6004     - hlist_for_each_entry(daemon,
6005     - &ecryptfs_daemon_hash[i],
6006     - euid_chain) {
6007     + hlist_for_each_entry_safe(daemon, n,
6008     + &ecryptfs_daemon_hash[i],
6009     + euid_chain) {
6010     rc = ecryptfs_exorcise_daemon(daemon);
6011     if (rc)
6012     printk(KERN_ERR "%s: Error whilst "
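
ecryptfs_exorcise_daemon() unhashes and frees the daemon being visited, so the plain hlist_for_each_entry() walked freed memory; the _safe variant caches the next pointer before the loop body runs. The same idiom in miniature, on a plain singly linked list (illustrative types, not the kernel hlist API):

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

int main(void)
{
        struct node *head = NULL, *cur, *n;

        for (int i = 0; i < 3; i++) {
                cur = malloc(sizeof(*cur));
                cur->v = i;
                cur->next = head;
                head = cur;
        }

        /* Safe traversal: grab 'next' before the body may free 'cur'. */
        for (cur = head; cur && (n = cur->next, 1); cur = n) {
                printf("freeing %d\n", cur->v);
                free(cur);              /* cur is dead; n keeps us going */
        }
        return 0;
}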
6013     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
6014     index 97f0fd06728d..07bca11749d4 100644
6015     --- a/fs/ext4/extents.c
6016     +++ b/fs/ext4/extents.c
6017     @@ -4794,7 +4794,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
6018     }
6019    
6020     if (!(mode & FALLOC_FL_KEEP_SIZE) &&
6021     - offset + len > i_size_read(inode)) {
6022     + (offset + len > i_size_read(inode) ||
6023     + offset + len > EXT4_I(inode)->i_disksize)) {
6024     new_size = offset + len;
6025     ret = inode_newsize_ok(inode, new_size);
6026     if (ret)
6027     @@ -4965,7 +4966,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
6028     }
6029    
6030     if (!(mode & FALLOC_FL_KEEP_SIZE) &&
6031     - offset + len > i_size_read(inode)) {
6032     + (offset + len > i_size_read(inode) ||
6033     + offset + len > EXT4_I(inode)->i_disksize)) {
6034     new_size = offset + len;
6035     ret = inode_newsize_ok(inode, new_size);
6036     if (ret)
6037     diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
6038     index 28c5c3abddb3..fd9501977f1c 100644
6039     --- a/fs/ext4/inline.c
6040     +++ b/fs/ext4/inline.c
6041     @@ -302,11 +302,6 @@ static int ext4_create_inline_data(handle_t *handle,
6042     EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE;
6043     ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
6044     ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA);
6045     - /*
6046     - * Propagate changes to inode->i_flags as well - e.g. S_DAX may
6047     - * get cleared
6048     - */
6049     - ext4_set_inode_flags(inode);
6050     get_bh(is.iloc.bh);
6051     error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
6052    
6053     @@ -451,11 +446,6 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
6054     }
6055     }
6056     ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);
6057     - /*
6058     - * Propagate changes to inode->i_flags as well - e.g. S_DAX may
6059     - * get set.
6060     - */
6061     - ext4_set_inode_flags(inode);
6062    
6063     get_bh(is.iloc.bh);
6064     error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
6065     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
6066     index 90afeb7293a6..38eb621edd80 100644
6067     --- a/fs/ext4/inode.c
6068     +++ b/fs/ext4/inode.c
6069     @@ -5967,11 +5967,6 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
6070     ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6071     }
6072     ext4_set_aops(inode);
6073     - /*
6074     - * Update inode->i_flags after EXT4_INODE_JOURNAL_DATA was updated.
6075     - * E.g. S_DAX may get cleared / set.
6076     - */
6077     - ext4_set_inode_flags(inode);
6078    
6079     jbd2_journal_unlock_updates(journal);
6080     percpu_up_write(&sbi->s_journal_flag_rwsem);
6081     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
6082     index 75d83471f65c..d97f40396765 100644
6083     --- a/fs/ext4/ioctl.c
6084     +++ b/fs/ext4/ioctl.c
6085     @@ -291,10 +291,20 @@ static int ext4_ioctl_setflags(struct inode *inode,
6086     if (err)
6087     goto flags_out;
6088    
6089     - if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL))
6090     + if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
6091     + /*
6092     + * Changes to the journaling mode can cause unsafe changes to
6093     + * S_DAX if we are using the DAX mount option.
6094     + */
6095     + if (test_opt(inode->i_sb, DAX)) {
6096     + err = -EBUSY;
6097     + goto flags_out;
6098     + }
6099     +
6100     err = ext4_change_inode_journal_flag(inode, jflag);
6101     - if (err)
6102     - goto flags_out;
6103     + if (err)
6104     + goto flags_out;
6105     + }
6106     if (migrate) {
6107     if (flags & EXT4_EXTENTS_FL)
6108     err = ext4_ext_migrate(inode);
6109     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
6110     index b0915b734a38..f29351c66610 100644
6111     --- a/fs/ext4/super.c
6112     +++ b/fs/ext4/super.c
6113     @@ -3708,6 +3708,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
6114     }
6115    
6116     if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
6117     + if (ext4_has_feature_inline_data(sb)) {
6118     + ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
6119     + " that may contain inline data");
6120     + goto failed_mount;
6121     + }
6122     err = bdev_dax_supported(sb, blocksize);
6123     if (err)
6124     goto failed_mount;
6125     diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
6126     index 517e112c8a9a..6ce467872376 100644
6127     --- a/fs/f2fs/file.c
6128     +++ b/fs/f2fs/file.c
6129     @@ -683,6 +683,12 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
6130     STATX_ATTR_NODUMP);
6131    
6132     generic_fillattr(inode, stat);
6133     +
6134     + /* we need to show initial sectors used for inline_data/dentries */
6135     + if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
6136     + f2fs_has_inline_dentry(inode))
6137     + stat->blocks += (stat->size + 511) >> 9;
6138     +
6139     return 0;
6140     }
6141    
6142     diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
6143     index 57d4c3e2e94a..8e42b4fbefdc 100644
6144     --- a/fs/isofs/isofs.h
6145     +++ b/fs/isofs/isofs.h
6146     @@ -107,7 +107,7 @@ static inline unsigned int isonum_733(char *p)
6147     /* Ignore bigendian datum due to broken mastering programs */
6148     return get_unaligned_le32(p);
6149     }
6150     -extern int iso_date(char *, int);
6151     +extern int iso_date(u8 *, int);
6152    
6153     struct inode; /* To make gcc happy */
6154    
6155     diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
6156     index ef03625431bb..ac5cc587d718 100644
6157     --- a/fs/isofs/rock.h
6158     +++ b/fs/isofs/rock.h
6159     @@ -66,7 +66,7 @@ struct RR_PL_s {
6160     };
6161    
6162     struct stamp {
6163     - char time[7];
6164     + __u8 time[7]; /* actually 6 unsigned, 1 signed */
6165     } __attribute__ ((packed));
6166    
6167     struct RR_TF_s {
6168     diff --git a/fs/isofs/util.c b/fs/isofs/util.c
6169     index 42544bf0e222..e88dba721661 100644
6170     --- a/fs/isofs/util.c
6171     +++ b/fs/isofs/util.c
6172     @@ -16,7 +16,7 @@
6173     * to GMT. Thus we should always be correct.
6174     */
6175    
6176     -int iso_date(char * p, int flag)
6177     +int iso_date(u8 *p, int flag)
6178     {
6179     int year, month, day, hour, minute, second, tz;
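
The char-to-u8 change in iso_date() matters on ABIs where plain char is signed: the first byte holds years since 1900, so values above 127, i.e. 2028 onward, sign-extend to negative years. A two-line demonstration:

#include <stdio.h>

int main(void)
{
        unsigned char raw = 128;        /* years since 1900 => 2028 */

        signed char   s = (signed char)raw;
        unsigned char u = raw;

        /* With signed char, 128 wraps to -128 and the date goes backwards. */
        printf("signed:   year %d\n", 1900 + s);        /* 1772 */
        printf("unsigned: year %d\n", 1900 + u);        /* 2028 */
        return 0;
}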
6180     int crtime;
6181     diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
6182     index b995bdc13976..f04ecfc7ece0 100644
6183     --- a/fs/lockd/svc.c
6184     +++ b/fs/lockd/svc.c
6185     @@ -369,6 +369,7 @@ static int lockd_start_svc(struct svc_serv *serv)
6186     printk(KERN_WARNING
6187     "lockd_up: svc_rqst allocation failed, error=%d\n",
6188     error);
6189     + lockd_unregister_notifiers();
6190     goto out_rqst;
6191     }
6192    
6193     @@ -459,13 +460,16 @@ int lockd_up(struct net *net)
6194     }
6195    
6196     error = lockd_up_net(serv, net);
6197     - if (error < 0)
6198     - goto err_net;
6199     + if (error < 0) {
6200     + lockd_unregister_notifiers();
6201     + goto err_put;
6202     + }
6203    
6204     error = lockd_start_svc(serv);
6205     - if (error < 0)
6206     - goto err_start;
6207     -
6208     + if (error < 0) {
6209     + lockd_down_net(serv, net);
6210     + goto err_put;
6211     + }
6212     nlmsvc_users++;
6213     /*
6214     * Note: svc_serv structures have an initial use count of 1,
6215     @@ -476,12 +480,6 @@ int lockd_up(struct net *net)
6216     err_create:
6217     mutex_unlock(&nlmsvc_mutex);
6218     return error;
6219     -
6220     -err_start:
6221     - lockd_down_net(serv, net);
6222     -err_net:
6223     - lockd_unregister_notifiers();
6224     - goto err_put;
6225     }
6226     EXPORT_SYMBOL_GPL(lockd_up);
6227    
6228     diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
6229     index 5ceaeb1f6fb6..b03b3bc05f96 100644
6230     --- a/fs/nfs/dir.c
6231     +++ b/fs/nfs/dir.c
6232     @@ -1241,8 +1241,7 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
6233     return 0;
6234     }
6235    
6236     - if (nfs_mapping_need_revalidate_inode(inode))
6237     - error = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
6238     + error = nfs_lookup_verify_inode(inode, flags);
6239     dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
6240     __func__, inode->i_ino, error ? "invalid" : "valid");
6241     return !error;
6242     @@ -1393,6 +1392,7 @@ static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
6243    
6244     const struct dentry_operations nfs4_dentry_operations = {
6245     .d_revalidate = nfs4_lookup_revalidate,
6246     + .d_weak_revalidate = nfs_weak_revalidate,
6247     .d_delete = nfs_dentry_delete,
6248     .d_iput = nfs_dentry_iput,
6249     .d_automount = nfs_d_automount,
6250     diff --git a/fs/nfs/file.c b/fs/nfs/file.c
6251     index 0214dd1e1060..81cca49a8375 100644
6252     --- a/fs/nfs/file.c
6253     +++ b/fs/nfs/file.c
6254     @@ -829,23 +829,9 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
6255     if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
6256     is_local = 1;
6257    
6258     - /*
6259     - * VFS doesn't require the open mode to match a flock() lock's type.
6260     - * NFS, however, may simulate flock() locking with posix locking which
6261     - * requires the open mode to match the lock type.
6262     - */
6263     - switch (fl->fl_type) {
6264     - case F_UNLCK:
6265     + /* We're simulating flock() locks using posix locks on the server */
6266     + if (fl->fl_type == F_UNLCK)
6267     return do_unlk(filp, cmd, fl, is_local);
6268     - case F_RDLCK:
6269     - if (!(filp->f_mode & FMODE_READ))
6270     - return -EBADF;
6271     - break;
6272     - case F_WRLCK:
6273     - if (!(filp->f_mode & FMODE_WRITE))
6274     - return -EBADF;
6275     - }
6276     -
6277     return do_setlk(filp, cmd, fl, is_local);
6278     }
6279     EXPORT_SYMBOL_GPL(nfs_flock);
6280     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
6281     index f90090e8c959..2241d52710f7 100644
6282     --- a/fs/nfs/nfs4proc.c
6283     +++ b/fs/nfs/nfs4proc.c
6284     @@ -254,15 +254,12 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
6285     };
6286    
6287     const u32 nfs4_fs_locations_bitmap[3] = {
6288     - FATTR4_WORD0_TYPE
6289     - | FATTR4_WORD0_CHANGE
6290     + FATTR4_WORD0_CHANGE
6291     | FATTR4_WORD0_SIZE
6292     | FATTR4_WORD0_FSID
6293     | FATTR4_WORD0_FILEID
6294     | FATTR4_WORD0_FS_LOCATIONS,
6295     - FATTR4_WORD1_MODE
6296     - | FATTR4_WORD1_NUMLINKS
6297     - | FATTR4_WORD1_OWNER
6298     + FATTR4_WORD1_OWNER
6299     | FATTR4_WORD1_OWNER_GROUP
6300     | FATTR4_WORD1_RAWDEV
6301     | FATTR4_WORD1_SPACE_USED
6302     @@ -6568,6 +6565,20 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6303     !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6304     return -ENOLCK;
6305    
6306     + /*
6307     + * Don't rely on the VFS having checked the file open mode,
6308     + * since it won't do this for flock() locks.
6309     + */
6310     + switch (request->fl_type) {
6311     + case F_RDLCK:
6312     + if (!(filp->f_mode & FMODE_READ))
6313     + return -EBADF;
6314     + break;
6315     + case F_WRLCK:
6316     + if (!(filp->f_mode & FMODE_WRITE))
6317     + return -EBADF;
6318     + }
6319     +
6320     status = nfs4_set_lock_state(state, request);
6321     if (status != 0)
6322     return status;
6323     @@ -6763,9 +6774,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6324     struct page *page)
6325     {
6326     struct nfs_server *server = NFS_SERVER(dir);
6327     - u32 bitmask[3] = {
6328     - [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6329     - };
6330     + u32 bitmask[3];
6331     struct nfs4_fs_locations_arg args = {
6332     .dir_fh = NFS_FH(dir),
6333     .name = name,
6334     @@ -6784,12 +6793,15 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6335    
6336     dprintk("%s: start\n", __func__);
6337    
6338     + bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
6339     + bitmask[1] = nfs4_fattr_bitmap[1];
6340     +
6341     /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6342     * is not supported */
6343     if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6344     - bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6345     + bitmask[0] &= ~FATTR4_WORD0_FILEID;
6346     else
6347     - bitmask[0] |= FATTR4_WORD0_FILEID;
6348     + bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
6349    
6350     nfs_fattr_init(&fs_locations->fattr);
6351     fs_locations->server = server;
6352     diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
6353     index e7c6275519b0..71d2ca04a9f8 100644
6354     --- a/fs/nfs/nfs4trace.h
6355     +++ b/fs/nfs/nfs4trace.h
6356     @@ -202,17 +202,13 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
6357     TP_ARGS(clp, error),
6358    
6359     TP_STRUCT__entry(
6360     - __string(dstaddr,
6361     - rpc_peeraddr2str(clp->cl_rpcclient,
6362     - RPC_DISPLAY_ADDR))
6363     + __string(dstaddr, clp->cl_hostname)
6364     __field(int, error)
6365     ),
6366    
6367     TP_fast_assign(
6368     __entry->error = error;
6369     - __assign_str(dstaddr,
6370     - rpc_peeraddr2str(clp->cl_rpcclient,
6371     - RPC_DISPLAY_ADDR));
6372     + __assign_str(dstaddr, clp->cl_hostname);
6373     ),
6374    
6375     TP_printk(
6376     @@ -1133,9 +1129,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
6377     __field(dev_t, dev)
6378     __field(u32, fhandle)
6379     __field(u64, fileid)
6380     - __string(dstaddr, clp ?
6381     - rpc_peeraddr2str(clp->cl_rpcclient,
6382     - RPC_DISPLAY_ADDR) : "unknown")
6383     + __string(dstaddr, clp ? clp->cl_hostname : "unknown")
6384     ),
6385    
6386     TP_fast_assign(
6387     @@ -1148,9 +1142,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
6388     __entry->fileid = 0;
6389     __entry->dev = 0;
6390     }
6391     - __assign_str(dstaddr, clp ?
6392     - rpc_peeraddr2str(clp->cl_rpcclient,
6393     - RPC_DISPLAY_ADDR) : "unknown")
6394     + __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
6395     ),
6396    
6397     TP_printk(
6398     @@ -1192,9 +1184,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
6399     __field(dev_t, dev)
6400     __field(u32, fhandle)
6401     __field(u64, fileid)
6402     - __string(dstaddr, clp ?
6403     - rpc_peeraddr2str(clp->cl_rpcclient,
6404     - RPC_DISPLAY_ADDR) : "unknown")
6405     + __string(dstaddr, clp ? clp->cl_hostname : "unknown")
6406     __field(int, stateid_seq)
6407     __field(u32, stateid_hash)
6408     ),
6409     @@ -1209,9 +1199,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
6410     __entry->fileid = 0;
6411     __entry->dev = 0;
6412     }
6413     - __assign_str(dstaddr, clp ?
6414     - rpc_peeraddr2str(clp->cl_rpcclient,
6415     - RPC_DISPLAY_ADDR) : "unknown")
6416     + __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
6417     __entry->stateid_seq =
6418     be32_to_cpu(stateid->seqid);
6419     __entry->stateid_hash =
6420     diff --git a/fs/nfs/super.c b/fs/nfs/super.c
6421     index c9d24bae3025..216f67d628b3 100644
6422     --- a/fs/nfs/super.c
6423     +++ b/fs/nfs/super.c
6424     @@ -1332,7 +1332,7 @@ static int nfs_parse_mount_options(char *raw,
6425     mnt->options |= NFS_OPTION_MIGRATION;
6426     break;
6427     case Opt_nomigration:
6428     - mnt->options &= NFS_OPTION_MIGRATION;
6429     + mnt->options &= ~NFS_OPTION_MIGRATION;
6430     break;
6431    
6432     /*
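
The one-character nfs/super.c fix is the classic clear-a-flag bug: options &= NFS_OPTION_MIGRATION keeps only the migration bit, wiping every other mount option, whereas clearing the bit needs the complement. Spelled out with toy flag values:

#include <assert.h>

#define OPT_A          0x1
#define OPT_B          0x2
#define OPT_MIGRATION  0x4

int main(void)
{
        unsigned int opts = OPT_A | OPT_B | OPT_MIGRATION;

        unsigned int buggy = opts;
        buggy &= OPT_MIGRATION;         /* keeps ONLY the migration bit */
        assert(buggy == OPT_MIGRATION);

        opts &= ~OPT_MIGRATION;         /* the fix: clear just that bit */
        assert(opts == (OPT_A | OPT_B));
        return 0;
}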
6433     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
6434     index 0c04f81aa63b..d386d569edbc 100644
6435     --- a/fs/nfsd/nfs4state.c
6436     +++ b/fs/nfsd/nfs4state.c
6437     @@ -3966,7 +3966,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei
6438     {
6439     struct nfs4_stid *ret;
6440    
6441     - ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
6442     + ret = find_stateid_by_type(cl, s,
6443     + NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
6444     if (!ret)
6445     return NULL;
6446     return delegstateid(ret);
6447     @@ -3989,6 +3990,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
6448     deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
6449     if (deleg == NULL)
6450     goto out;
6451     + if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
6452     + nfs4_put_stid(&deleg->dl_stid);
6453     + if (cl->cl_minorversion)
6454     + status = nfserr_deleg_revoked;
6455     + goto out;
6456     + }
6457     flags = share_access_to_flags(open->op_share_access);
6458     status = nfs4_check_delegmode(deleg, flags);
6459     if (status) {
6460     @@ -4858,6 +4865,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6461     struct nfs4_stid **s, struct nfsd_net *nn)
6462     {
6463     __be32 status;
6464     + bool return_revoked = false;
6465     +
6466     + /*
6467     + * only return revoked delegations if explicitly asked.
6468     + * otherwise we report revoked or bad_stateid status.
6469     + */
6470     + if (typemask & NFS4_REVOKED_DELEG_STID)
6471     + return_revoked = true;
6472     + else if (typemask & NFS4_DELEG_STID)
6473     + typemask |= NFS4_REVOKED_DELEG_STID;
6474    
6475     if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
6476     return nfserr_bad_stateid;
6477     @@ -4872,6 +4889,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6478     *s = find_stateid_by_type(cstate->clp, stateid, typemask);
6479     if (!*s)
6480     return nfserr_bad_stateid;
6481     + if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
6482     + nfs4_put_stid(*s);
6483     + if (cstate->minorversion)
6484     + return nfserr_deleg_revoked;
6485     + return nfserr_bad_stateid;
6486     + }
6487     return nfs_ok;
6488     }
6489    
6490     diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
6491     index 70ded52dc1dd..50e12956c737 100644
6492     --- a/fs/nilfs2/segment.c
6493     +++ b/fs/nilfs2/segment.c
6494     @@ -1958,8 +1958,6 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
6495     err, ii->vfs_inode.i_ino);
6496     return err;
6497     }
6498     - mark_buffer_dirty(ibh);
6499     - nilfs_mdt_mark_dirty(ifile);
6500     spin_lock(&nilfs->ns_inode_lock);
6501     if (likely(!ii->i_bh))
6502     ii->i_bh = ibh;
6503     @@ -1968,6 +1966,10 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
6504     goto retry;
6505     }
6506    
6507     + // Always redirty the buffer to avoid race condition
6508     + mark_buffer_dirty(ii->i_bh);
6509     + nilfs_mdt_mark_dirty(ifile);
6510     +
6511     clear_bit(NILFS_I_QUEUED, &ii->i_state);
6512     set_bit(NILFS_I_BUSY, &ii->i_state);
6513     list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
6514     diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
6515     index 09640b546363..3c7053207297 100644
6516     --- a/fs/notify/fanotify/fanotify.c
6517     +++ b/fs/notify/fanotify/fanotify.c
6518     @@ -65,19 +65,8 @@ static int fanotify_get_response(struct fsnotify_group *group,
6519    
6520     pr_debug("%s: group=%p event=%p\n", __func__, group, event);
6521    
6522     - /*
6523     - * fsnotify_prepare_user_wait() fails if we race with mark deletion.
6524     - * Just let the operation pass in that case.
6525     - */
6526     - if (!fsnotify_prepare_user_wait(iter_info)) {
6527     - event->response = FAN_ALLOW;
6528     - goto out;
6529     - }
6530     -
6531     wait_event(group->fanotify_data.access_waitq, event->response);
6532    
6533     - fsnotify_finish_user_wait(iter_info);
6534     -out:
6535     /* userspace responded, convert to something usable */
6536     switch (event->response) {
6537     case FAN_ALLOW:
6538     @@ -212,9 +201,21 @@ static int fanotify_handle_event(struct fsnotify_group *group,
6539     pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
6540     mask);
6541    
6542     +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
6543     + if (mask & FAN_ALL_PERM_EVENTS) {
6544     + /*
6545     + * fsnotify_prepare_user_wait() fails if we race with mark
6546     + * deletion. Just let the operation pass in that case.
6547     + */
6548     + if (!fsnotify_prepare_user_wait(iter_info))
6549     + return 0;
6550     + }
6551     +#endif
6552     +
6553     event = fanotify_alloc_event(inode, mask, data);
6554     + ret = -ENOMEM;
6555     if (unlikely(!event))
6556     - return -ENOMEM;
6557     + goto finish;
6558    
6559     fsn_event = &event->fse;
6560     ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
6561     @@ -224,7 +225,8 @@ static int fanotify_handle_event(struct fsnotify_group *group,
6562     /* Our event wasn't used in the end. Free it. */
6563     fsnotify_destroy_event(group, fsn_event);
6564    
6565     - return 0;
6566     + ret = 0;
6567     + goto finish;
6568     }
6569    
6570     #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
6571     @@ -233,6 +235,11 @@ static int fanotify_handle_event(struct fsnotify_group *group,
6572     iter_info);
6573     fsnotify_destroy_event(group, fsn_event);
6574     }
6575     +finish:
6576     + if (mask & FAN_ALL_PERM_EVENTS)
6577     + fsnotify_finish_user_wait(iter_info);
6578     +#else
6579     +finish:
6580     #endif
6581     return ret;
6582     }
6583     diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
6584     index 0c4583b61717..074716293829 100644
6585     --- a/fs/notify/fsnotify.c
6586     +++ b/fs/notify/fsnotify.c
6587     @@ -335,6 +335,13 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
6588     struct fsnotify_mark, obj_list);
6589     vfsmount_group = vfsmount_mark->group;
6590     }
6591     + /*
6592     + * Need to protect both marks against freeing so that we can
6593     + * continue iteration from this place, regardless of which mark
6594     + * we actually happen to send an event for.
6595     + */
6596     + iter_info.inode_mark = inode_mark;
6597     + iter_info.vfsmount_mark = vfsmount_mark;
6598    
6599     if (inode_group && vfsmount_group) {
6600     int cmp = fsnotify_compare_groups(inode_group,
6601     @@ -348,9 +355,6 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
6602     }
6603     }
6604    
6605     - iter_info.inode_mark = inode_mark;
6606     - iter_info.vfsmount_mark = vfsmount_mark;
6607     -
6608     ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask,
6609     data, data_is, cookie, file_name,
6610     &iter_info);
6611     diff --git a/fs/notify/mark.c b/fs/notify/mark.c
6612     index 9991f8826734..258d99087183 100644
6613     --- a/fs/notify/mark.c
6614     +++ b/fs/notify/mark.c
6615     @@ -109,16 +109,6 @@ void fsnotify_get_mark(struct fsnotify_mark *mark)
6616     atomic_inc(&mark->refcnt);
6617     }
6618    
6619     -/*
6620     - * Get mark reference when we found the mark via lockless traversal of object
6621     - * list. Mark can be already removed from the list by now and on its way to be
6622     - * destroyed once SRCU period ends.
6623     - */
6624     -static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
6625     -{
6626     - return atomic_inc_not_zero(&mark->refcnt);
6627     -}
6628     -
6629     static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
6630     {
6631     u32 new_mask = 0;
6632     @@ -256,32 +246,60 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
6633     FSNOTIFY_REAPER_DELAY);
6634     }
6635    
6636     -bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
6637     +/*
6638     + * Get mark reference when we found the mark via lockless traversal of object
6639     + * list. Mark can be already removed from the list by now and on its way to be
6640     + * destroyed once SRCU period ends.
6641     + *
6642     + * Also pin the group so it doesn't disappear under us.
6643     + */
6644     +static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
6645     {
6646     - struct fsnotify_group *group;
6647     -
6648     - if (WARN_ON_ONCE(!iter_info->inode_mark && !iter_info->vfsmount_mark))
6649     - return false;
6650     -
6651     - if (iter_info->inode_mark)
6652     - group = iter_info->inode_mark->group;
6653     - else
6654     - group = iter_info->vfsmount_mark->group;
6655     + if (!mark)
6656     + return true;
6657     +
6658     + if (atomic_inc_not_zero(&mark->refcnt)) {
6659     + spin_lock(&mark->lock);
6660     + if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
6661     + /* mark is attached, group is still alive then */
6662     + atomic_inc(&mark->group->user_waits);
6663     + spin_unlock(&mark->lock);
6664     + return true;
6665     + }
6666     + spin_unlock(&mark->lock);
6667     + fsnotify_put_mark(mark);
6668     + }
6669     + return false;
6670     +}
6671    
6672     - /*
6673     - * Since acquisition of mark reference is an atomic op as well, we can
6674     - * be sure this inc is seen before any effect of refcount increment.
6675     - */
6676     - atomic_inc(&group->user_waits);
6677     +/*
6678     + * Puts marks and wakes up group destruction if necessary.
6679     + *
6680     + * Pairs with fsnotify_get_mark_safe()
6681     + */
6682     +static void fsnotify_put_mark_wake(struct fsnotify_mark *mark)
6683     +{
6684     + if (mark) {
6685     + struct fsnotify_group *group = mark->group;
6686    
6687     - if (iter_info->inode_mark) {
6688     - /* This can fail if mark is being removed */
6689     - if (!fsnotify_get_mark_safe(iter_info->inode_mark))
6690     - goto out_wait;
6691     + fsnotify_put_mark(mark);
6692     + /*
6693     + * We abuse notification_waitq on group shutdown for waiting for
6694     + * all marks pinned when waiting for userspace.
6695     + */
6696     + if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
6697     + wake_up(&group->notification_waitq);
6698     }
6699     - if (iter_info->vfsmount_mark) {
6700     - if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark))
6701     - goto out_inode;
6702     +}
6703     +
6704     +bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
6705     +{
6706     + /* This can fail if mark is being removed */
6707     + if (!fsnotify_get_mark_safe(iter_info->inode_mark))
6708     + return false;
6709     + if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark)) {
6710     + fsnotify_put_mark_wake(iter_info->inode_mark);
6711     + return false;
6712     }
6713    
6714     /*
6715     @@ -292,34 +310,13 @@ bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
6716     srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx);
6717    
6718     return true;
6719     -out_inode:
6720     - if (iter_info->inode_mark)
6721     - fsnotify_put_mark(iter_info->inode_mark);
6722     -out_wait:
6723     - if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
6724     - wake_up(&group->notification_waitq);
6725     - return false;
6726     }
6727    
6728     void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
6729     {
6730     - struct fsnotify_group *group = NULL;
6731     -
6732     iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
6733     - if (iter_info->inode_mark) {
6734     - group = iter_info->inode_mark->group;
6735     - fsnotify_put_mark(iter_info->inode_mark);
6736     - }
6737     - if (iter_info->vfsmount_mark) {
6738     - group = iter_info->vfsmount_mark->group;
6739     - fsnotify_put_mark(iter_info->vfsmount_mark);
6740     - }
6741     - /*
6742     - * We abuse notification_waitq on group shutdown for waiting for all
6743     - * marks pinned when waiting for userspace.
6744     - */
6745     - if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
6746     - wake_up(&group->notification_waitq);
6747     + fsnotify_put_mark_wake(iter_info->inode_mark);
6748     + fsnotify_put_mark_wake(iter_info->vfsmount_mark);
6749     }
6750    
6751     /*
6752     diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
6753     index a12dc10bf726..bc6d5c5a3443 100644
6754     --- a/fs/overlayfs/namei.c
6755     +++ b/fs/overlayfs/namei.c
6756     @@ -630,7 +630,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
6757     err = ovl_check_origin(upperdentry, roe->lowerstack,
6758     roe->numlower, &stack, &ctr);
6759     if (err)
6760     - goto out;
6761     + goto out_put_upper;
6762     }
6763    
6764     if (d.redirect) {
6765     diff --git a/include/linux/genhd.h b/include/linux/genhd.h
6766     index 44790523057f..5ade8f2a6987 100644
6767     --- a/include/linux/genhd.h
6768     +++ b/include/linux/genhd.h
6769     @@ -243,6 +243,7 @@ static inline dev_t part_devt(struct hd_struct *part)
6770     return part_to_dev(part)->devt;
6771     }
6772    
6773     +extern struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
6774     extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
6775    
6776     static inline void disk_put_part(struct hd_struct *part)
6777     diff --git a/include/linux/irq.h b/include/linux/irq.h
6778     index 4536286cc4d2..0d53626405bf 100644
6779     --- a/include/linux/irq.h
6780     +++ b/include/linux/irq.h
6781     @@ -211,6 +211,7 @@ struct irq_data {
6782     * IRQD_MANAGED_SHUTDOWN - Interrupt was shutdown due to empty affinity
6783     * mask. Applies only to affinity managed irqs.
6784     * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
6785     + * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
6786     */
6787     enum {
6788     IRQD_TRIGGER_MASK = 0xf,
6789     @@ -231,6 +232,7 @@ enum {
6790     IRQD_IRQ_STARTED = (1 << 22),
6791     IRQD_MANAGED_SHUTDOWN = (1 << 23),
6792     IRQD_SINGLE_TARGET = (1 << 24),
6793     + IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
6794     };
6795    
6796     #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
6797     @@ -260,18 +262,25 @@ static inline void irqd_mark_affinity_was_set(struct irq_data *d)
6798     __irqd_to_state(d) |= IRQD_AFFINITY_SET;
6799     }
6800    
6801     +static inline bool irqd_trigger_type_was_set(struct irq_data *d)
6802     +{
6803     + return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
6804     +}
6805     +
6806     static inline u32 irqd_get_trigger_type(struct irq_data *d)
6807     {
6808     return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
6809     }
6810    
6811     /*
6812     - * Must only be called inside irq_chip.irq_set_type() functions.
6813     + * Must only be called inside irq_chip.irq_set_type() functions or
6814     + * from the DT/ACPI setup code.
6815     */
6816     static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
6817     {
6818     __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
6819     __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
6820     + __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
6821     }
6822    
6823     static inline bool irqd_is_level_type(struct irq_data *d)
6824     diff --git a/include/net/tls.h b/include/net/tls.h
6825     index b89d397dd62f..c06db1eadac2 100644
6826     --- a/include/net/tls.h
6827     +++ b/include/net/tls.h
6828     @@ -35,6 +35,10 @@
6829     #define _TLS_OFFLOAD_H
6830    
6831     #include <linux/types.h>
6832     +#include <asm/byteorder.h>
6833     +#include <linux/socket.h>
6834     +#include <linux/tcp.h>
6835     +#include <net/tcp.h>
6836    
6837     #include <uapi/linux/tls.h>
6838    
6839     diff --git a/include/sound/control.h b/include/sound/control.h
6840     index a1f1152bc687..ca13a44ae9d4 100644
6841     --- a/include/sound/control.h
6842     +++ b/include/sound/control.h
6843     @@ -249,7 +249,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
6844     void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
6845     #define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true)
6846     int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
6847     - int (*func)(struct snd_kcontrol *, void *),
6848     + int (*func)(struct snd_kcontrol *vslave,
6849     + struct snd_kcontrol *slave,
6850     + void *arg),
6851     void *arg);
6852    
6853     /*
6854     diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
6855     index f5db145e68ec..0d924e968c94 100644
6856     --- a/include/target/target_core_base.h
6857     +++ b/include/target/target_core_base.h
6858     @@ -490,6 +490,7 @@ struct se_cmd {
6859     #define CMD_T_STOP (1 << 5)
6860     #define CMD_T_TAS (1 << 10)
6861     #define CMD_T_FABRIC_STOP (1 << 11)
6862     +#define CMD_T_PRE_EXECUTE (1 << 12)
6863     spinlock_t t_state_lock;
6864     struct kref cmd_kref;
6865     struct completion t_transport_stop_comp;
6866     diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
6867     index 25a7739514cd..3868b4752324 100644
6868     --- a/include/trace/events/sunrpc.h
6869     +++ b/include/trace/events/sunrpc.h
6870     @@ -456,20 +456,22 @@ TRACE_EVENT(svc_recv,
6871     TP_ARGS(rqst, status),
6872    
6873     TP_STRUCT__entry(
6874     - __field(struct sockaddr *, addr)
6875     __field(__be32, xid)
6876     __field(int, status)
6877     __field(unsigned long, flags)
6878     + __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
6879     ),
6880    
6881     TP_fast_assign(
6882     - __entry->addr = (struct sockaddr *)&rqst->rq_addr;
6883     __entry->xid = status > 0 ? rqst->rq_xid : 0;
6884     __entry->status = status;
6885     __entry->flags = rqst->rq_flags;
6886     + memcpy(__get_dynamic_array(addr),
6887     + &rqst->rq_addr, rqst->rq_addrlen);
6888     ),
6889    
6890     - TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
6891     + TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s",
6892     + (struct sockaddr *)__get_dynamic_array(addr),
6893     be32_to_cpu(__entry->xid), __entry->status,
6894     show_rqstp_flags(__entry->flags))
6895     );
6896     @@ -514,22 +516,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
6897     TP_ARGS(rqst, status),
6898    
6899     TP_STRUCT__entry(
6900     - __field(struct sockaddr *, addr)
6901     __field(__be32, xid)
6902     - __field(int, dropme)
6903     __field(int, status)
6904     __field(unsigned long, flags)
6905     + __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
6906     ),
6907    
6908     TP_fast_assign(
6909     - __entry->addr = (struct sockaddr *)&rqst->rq_addr;
6910     __entry->xid = rqst->rq_xid;
6911     __entry->status = status;
6912     __entry->flags = rqst->rq_flags;
6913     + memcpy(__get_dynamic_array(addr),
6914     + &rqst->rq_addr, rqst->rq_addrlen);
6915     ),
6916    
6917     TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
6918     - __entry->addr, be32_to_cpu(__entry->xid),
6919     + (struct sockaddr *)__get_dynamic_array(addr),
6920     + be32_to_cpu(__entry->xid),
6921     __entry->status, show_rqstp_flags(__entry->flags))
6922     );
6923    
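
Why the two trace events above switched from a pointer field to a __dynamic_array: a trace entry is only formatted when the ring buffer is read, long after TP_fast_assign() ran, so a stored pointer into the svc_rqst could reference freed memory at print time. Copying the raw sockaddr bytes makes each entry self-contained; the essential pattern, lifted from the hunks:

        __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
        ...
        memcpy(__get_dynamic_array(addr), &rqst->rq_addr, rqst->rq_addrlen);
        ...
        TP_printk("addr=%pIScp ...",
                  (struct sockaddr *)__get_dynamic_array(addr), ...)
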
6924     diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h
6925     index 9656aad8f8f7..9d4afea308a4 100644
6926     --- a/include/uapi/linux/rxrpc.h
6927     +++ b/include/uapi/linux/rxrpc.h
6928     @@ -20,12 +20,12 @@
6929     * RxRPC socket address
6930     */
6931     struct sockaddr_rxrpc {
6932     - sa_family_t srx_family; /* address family */
6933     - u16 srx_service; /* service desired */
6934     - u16 transport_type; /* type of transport socket (SOCK_DGRAM) */
6935     - u16 transport_len; /* length of transport address */
6936     + __kernel_sa_family_t srx_family; /* address family */
6937     + __u16 srx_service; /* service desired */
6938     + __u16 transport_type; /* type of transport socket (SOCK_DGRAM) */
6939     + __u16 transport_len; /* length of transport address */
6940     union {
6941     - sa_family_t family; /* transport address family */
6942     + __kernel_sa_family_t family; /* transport address family */
6943     struct sockaddr_in sin; /* IPv4 transport address */
6944     struct sockaddr_in6 sin6; /* IPv6 transport address */
6945     } transport;
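
The sockaddr_rxrpc change follows the general rule for exported headers: uapi code cannot use kernel-internal or libc-owned type names such as u16 or sa_family_t, only the double-underscore aliases. A minimal conforming declaration (the struct name here is invented for illustration):

        #include <linux/types.h>        /* __u16 */
        #include <linux/socket.h>       /* __kernel_sa_family_t */

        struct example_uapi_addr {
                __kernel_sa_family_t family;    /* not sa_family_t */
                __u16 service;                  /* not u16 */
        };
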
6946     diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
6947     index d5e0682ab837..293b2cdad88d 100644
6948     --- a/include/uapi/linux/tls.h
6949     +++ b/include/uapi/linux/tls.h
6950     @@ -35,10 +35,6 @@
6951     #define _UAPI_LINUX_TLS_H
6952    
6953     #include <linux/types.h>
6954     -#include <asm/byteorder.h>
6955     -#include <linux/socket.h>
6956     -#include <linux/tcp.h>
6957     -#include <net/tcp.h>
6958    
6959     /* TLS socket options */
6960     #define TLS_TX 1 /* Set transmit parameters */
6961     diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
6962     index 4bff6a10ae8e..b02caa442776 100644
6963     --- a/kernel/irq/manage.c
6964     +++ b/kernel/irq/manage.c
6965     @@ -1245,7 +1245,18 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
6966     * set the trigger type must match. Also all must
6967     * agree on ONESHOT.
6968     */
6969     - unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);
6970     + unsigned int oldtype;
6971     +
6972     + /*
6973     + * If nobody set the configuration before, inherit
6974     + * the one provided by the requester.
6975     + */
6976     + if (irqd_trigger_type_was_set(&desc->irq_data)) {
6977     + oldtype = irqd_get_trigger_type(&desc->irq_data);
6978     + } else {
6979     + oldtype = new->flags & IRQF_TRIGGER_MASK;
6980     + irqd_set_trigger_type(&desc->irq_data, oldtype);
6981     + }
6982    
6983     if (!((old->flags & new->flags) & IRQF_SHARED) ||
6984     (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
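
Restated outside __setup_irq()'s locking, the inheritance rule added above reads as sketched below; the helper name is invented for illustration:

        static unsigned int shared_irq_trigger(struct irq_data *d,
                                               unsigned long requester_flags)
        {
                /* Firmware/DT already set a trigger: that one wins. */
                if (irqd_trigger_type_was_set(d))
                        return irqd_get_trigger_type(d);

                /* Nothing set yet: adopt and record the requester's type. */
                irqd_set_trigger_type(d, requester_flags & IRQF_TRIGGER_MASK);
                return requester_flags & IRQF_TRIGGER_MASK;
        }
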
6985     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
6986     index d17c5da523a0..8fa7b6f9e19b 100644
6987     --- a/kernel/sched/core.c
6988     +++ b/kernel/sched/core.c
6989     @@ -505,8 +505,7 @@ void resched_cpu(int cpu)
6990     struct rq *rq = cpu_rq(cpu);
6991     unsigned long flags;
6992    
6993     - if (!raw_spin_trylock_irqsave(&rq->lock, flags))
6994     - return;
6995     + raw_spin_lock_irqsave(&rq->lock, flags);
6996     resched_curr(rq);
6997     raw_spin_unlock_irqrestore(&rq->lock, flags);
6998     }
6999     diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
7000     index ba0da243fdd8..2f52ec0f1539 100644
7001     --- a/kernel/sched/cpufreq_schedutil.c
7002     +++ b/kernel/sched/cpufreq_schedutil.c
7003     @@ -282,8 +282,12 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
7004     * Do not reduce the frequency if the CPU has not been idle
7005     * recently, as the reduction is likely to be premature then.
7006     */
7007     - if (busy && next_f < sg_policy->next_freq)
7008     + if (busy && next_f < sg_policy->next_freq) {
7009     next_f = sg_policy->next_freq;
7010     +
7011     + /* Reset cached freq as next_freq has changed */
7012     + sg_policy->cached_raw_freq = 0;
7013     + }
7014     }
7015     sugov_update_commit(sg_policy, time, next_f);
7016     }
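
The reset above matters because schedutil skips a cpufreq update when the newly computed raw frequency matches cached_raw_freq, on the assumption that next_freq was derived from it. Once next_f is forced back to the previous next_freq, that assumption no longer holds, so the cache is zeroed; otherwise a later tick computing the same raw value would be treated as "no change" even though the governor never actually committed it. The consumer side looks roughly like this (paraphrased, not from the patch):

        /* A repeated raw value short-circuits to the last resolved freq,
         * which is only correct while the cache and next_freq agree. */
        if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
                return sg_policy->next_freq;
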
7017     diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
7018     index 3c96c80e0992..d8c43d73e078 100644
7019     --- a/kernel/sched/rt.c
7020     +++ b/kernel/sched/rt.c
7021     @@ -74,10 +74,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
7022     raw_spin_unlock(&rt_b->rt_runtime_lock);
7023     }
7024    
7025     -#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
7026     -static void push_irq_work_func(struct irq_work *work);
7027     -#endif
7028     -
7029     void init_rt_rq(struct rt_rq *rt_rq)
7030     {
7031     struct rt_prio_array *array;
7032     @@ -97,13 +93,6 @@ void init_rt_rq(struct rt_rq *rt_rq)
7033     rt_rq->rt_nr_migratory = 0;
7034     rt_rq->overloaded = 0;
7035     plist_head_init(&rt_rq->pushable_tasks);
7036     -
7037     -#ifdef HAVE_RT_PUSH_IPI
7038     - rt_rq->push_flags = 0;
7039     - rt_rq->push_cpu = nr_cpu_ids;
7040     - raw_spin_lock_init(&rt_rq->push_lock);
7041     - init_irq_work(&rt_rq->push_work, push_irq_work_func);
7042     -#endif
7043     #endif /* CONFIG_SMP */
7044     /* We start in dequeued state, because no RT tasks are queued */
7045     rt_rq->rt_queued = 0;
7046     @@ -1876,241 +1865,166 @@ static void push_rt_tasks(struct rq *rq)
7047     }
7048    
7049     #ifdef HAVE_RT_PUSH_IPI
7050     +
7051     /*
7052     - * The search for the next cpu always starts at rq->cpu and ends
7053     - * when we reach rq->cpu again. It will never return rq->cpu.
7054     - * This returns the next cpu to check, or nr_cpu_ids if the loop
7055     - * is complete.
7056     + * When a high priority task schedules out from a CPU and a lower priority
7057     + * task is scheduled in, a check is made to see if there are any RT tasks
7058     + * on other CPUs that are waiting to run because a higher priority RT task
7059     + * is currently running on its CPU. In this case, the CPU with multiple RT
7060     + * tasks queued on it (overloaded) needs to be notified that a CPU has opened
7061     + * up that may be able to run one of its non-running queued RT tasks.
7062     + *
7063     + * All CPUs with overloaded RT tasks need to be notified as there is currently
7064     + * no way to know which of these CPUs has the highest priority task waiting
7065     + * to run. Instead of trying to take a spinlock on each of these CPUs,
7066     + * which has been shown to cause large latency when done on machines with
7067     + * many CPUs, an IPI is sent to the CPUs to have them push off the
7068     + * overloaded RT tasks waiting to run.
7069     + *
7070     + * Just sending an IPI to each of the CPUs is also an issue, as on large
7071     + * count CPU machines, this can cause an IPI storm on a CPU, especially
7072     + * if it's the only CPU with multiple RT tasks queued, and a large number
7073     + * of CPUs scheduling a lower priority task at the same time.
7074     + *
7075     + * Each root domain has its own irq work function that can iterate over
7076     + * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
7077     + * tasks must be checked whenever one or many CPUs are lowering
7078     + * their priority, there's a single irq work iterator that will try to
7079     + * push off RT tasks that are waiting to run.
7080     + *
7081     + * When a CPU schedules a lower priority task, it will kick off the
7082     + * irq work iterator that will jump to each CPU with overloaded RT tasks.
7083     + * As it only takes the first CPU that schedules a lower priority task
7084     + * to start the process, the rto_loop_start variable is atomically
7085     + * claimed, and only the CPU making the 0 -> 1 transition takes the rto_lock.
7086     + * This prevents high contention on the lock as the process handles all
7087     + * CPUs scheduling lower priority tasks.
7088     + *
7089     + * All CPUs that are scheduling a lower priority task will increment the
7090     + * rto_loop_next variable. This will make sure that the irq work iterator
7091     + * checks all RT overloaded CPUs whenever a CPU schedules a new lower
7092     + * priority task, even if the iterator is in the middle of a scan. Incrementing
7093     + * the rto_loop_next will cause the iterator to perform another scan.
7094     *
7095     - * rq->rt.push_cpu holds the last cpu returned by this function,
7096     - * or if this is the first instance, it must hold rq->cpu.
7097     */
7098     static int rto_next_cpu(struct rq *rq)
7099     {
7100     - int prev_cpu = rq->rt.push_cpu;
7101     + struct root_domain *rd = rq->rd;
7102     + int next;
7103     int cpu;
7104    
7105     - cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
7106     -
7107     /*
7108     - * If the previous cpu is less than the rq's CPU, then it already
7109     - * passed the end of the mask, and has started from the beginning.
7110     - * We end if the next CPU is greater or equal to rq's CPU.
7111     + * When starting the IPI RT pushing, the rto_cpu is set to -1, so
7112     + * rto_next_cpu() will simply return the first CPU found in
7113     + * the rto_mask.
7114     + *
7115     + * If rto_next_cpu() is called while rto_cpu is a valid cpu, it
7116     + * will return the next CPU found in the rto_mask.
7117     + *
7118     + * If there are no more CPUs left in the rto_mask, then a check is made
7119     + * against rto_loop and rto_loop_next. rto_loop is only updated with
7120     + * the rto_lock held, but any CPU may increment the rto_loop_next
7121     + * without any locking.
7122     */
7123     - if (prev_cpu < rq->cpu) {
7124     - if (cpu >= rq->cpu)
7125     - return nr_cpu_ids;
7126     + for (;;) {
7127    
7128     - } else if (cpu >= nr_cpu_ids) {
7129     - /*
7130     - * We passed the end of the mask, start at the beginning.
7131     - * If the result is greater or equal to the rq's CPU, then
7132     - * the loop is finished.
7133     - */
7134     - cpu = cpumask_first(rq->rd->rto_mask);
7135     - if (cpu >= rq->cpu)
7136     - return nr_cpu_ids;
7137     - }
7138     - rq->rt.push_cpu = cpu;
7139     + /* When rto_cpu is -1 this acts like cpumask_first() */
7140     + cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
7141    
7142     - /* Return cpu to let the caller know if the loop is finished or not */
7143     - return cpu;
7144     -}
7145     + rd->rto_cpu = cpu;
7146    
7147     -static int find_next_push_cpu(struct rq *rq)
7148     -{
7149     - struct rq *next_rq;
7150     - int cpu;
7151     + if (cpu < nr_cpu_ids)
7152     + return cpu;
7153    
7154     - while (1) {
7155     - cpu = rto_next_cpu(rq);
7156     - if (cpu >= nr_cpu_ids)
7157     - break;
7158     - next_rq = cpu_rq(cpu);
7159     + rd->rto_cpu = -1;
7160     +
7161     + /*
7162     + * ACQUIRE ensures we see the @rto_mask changes
7163     + * made prior to the @next value being observed.
7164     + *
7165     + * Matches WMB in rt_set_overload().
7166     + */
7167     + next = atomic_read_acquire(&rd->rto_loop_next);
7168    
7169     - /* Make sure the next rq can push to this rq */
7170     - if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
7171     + if (rd->rto_loop == next)
7172     break;
7173     +
7174     + rd->rto_loop = next;
7175     }
7176    
7177     - return cpu;
7178     + return -1;
7179     }
7180    
7181     -#define RT_PUSH_IPI_EXECUTING 1
7182     -#define RT_PUSH_IPI_RESTART 2
7183     +static inline bool rto_start_trylock(atomic_t *v)
7184     +{
7185     + return !atomic_cmpxchg_acquire(v, 0, 1);
7186     +}
7187    
7188     -/*
7189     - * When a high priority task schedules out from a CPU and a lower priority
7190     - * task is scheduled in, a check is made to see if there's any RT tasks
7191     - * on other CPUs that are waiting to run because a higher priority RT task
7192     - * is currently running on its CPU. In this case, the CPU with multiple RT
7193     - * tasks queued on it (overloaded) needs to be notified that a CPU has opened
7194     - * up that may be able to run one of its non-running queued RT tasks.
7195     - *
7196     - * On large CPU boxes, there's the case that several CPUs could schedule
7197     - * a lower priority task at the same time, in which case it will look for
7198     - * any overloaded CPUs that it could pull a task from. To do this, the runqueue
7199     - * lock must be taken from that overloaded CPU. Having 10s of CPUs all fighting
7200     - * for a single overloaded CPU's runqueue lock can produce a large latency.
7201     - * (This has actually been observed on large boxes running cyclictest).
7202     - * Instead of taking the runqueue lock of the overloaded CPU, each of the
7203     - * CPUs that scheduled a lower priority task simply sends an IPI to the
7204     - * overloaded CPU. An IPI is much cheaper than taking an runqueue lock with
7205     - * lots of contention. The overloaded CPU will look to push its non-running
7206     - * RT task off, and if it does, it can then ignore the other IPIs coming
7207     - * in, and just pass those IPIs off to any other overloaded CPU.
7208     - *
7209     - * When a CPU schedules a lower priority task, it only sends an IPI to
7210     - * the "next" CPU that has overloaded RT tasks. This prevents IPI storms,
7211     - * as having 10 CPUs scheduling lower priority tasks and 10 CPUs with
7212     - * RT overloaded tasks, would cause 100 IPIs to go out at once.
7213     - *
7214     - * The overloaded RT CPU, when receiving an IPI, will try to push off its
7215     - * overloaded RT tasks and then send an IPI to the next CPU that has
7216     - * overloaded RT tasks. This stops when all CPUs with overloaded RT tasks
7217     - * have completed. Just because a CPU may have pushed off its own overloaded
7218     - * RT task does not mean it should stop sending the IPI around to other
7219     - * overloaded CPUs. There may be another RT task waiting to run on one of
7220     - * those CPUs that are of higher priority than the one that was just
7221     - * pushed.
7222     - *
7223     - * An optimization that could possibly be made is to make a CPU array similar
7224     - * to the cpupri array mask of all running RT tasks, but for the overloaded
7225     - * case, then the IPI could be sent to only the CPU with the highest priority
7226     - * RT task waiting, and that CPU could send off further IPIs to the CPU with
7227     - * the next highest waiting task. Since the overloaded case is much less likely
7228     - * to happen, the complexity of this implementation may not be worth it.
7229     - * Instead, just send an IPI around to all overloaded CPUs.
7230     - *
7231     - * The rq->rt.push_flags holds the status of the IPI that is going around.
7232     - * A run queue can only send out a single IPI at a time. The possible flags
7233     - * for rq->rt.push_flags are:
7234     - *
7235     - * (None or zero): No IPI is going around for the current rq
7236     - * RT_PUSH_IPI_EXECUTING: An IPI for the rq is being passed around
7237     - * RT_PUSH_IPI_RESTART: The priority of the running task for the rq
7238     - * has changed, and the IPI should restart
7239     - * circulating the overloaded CPUs again.
7240     - *
7241     - * rq->rt.push_cpu contains the CPU that is being sent the IPI. It is updated
7242     - * before sending to the next CPU.
7243     - *
7244     - * Instead of having all CPUs that schedule a lower priority task send
7245     - * an IPI to the same "first" CPU in the RT overload mask, they send it
7246     - * to the next overloaded CPU after their own CPU. This helps distribute
7247     - * the work when there's more than one overloaded CPU and multiple CPUs
7248     - * scheduling in lower priority tasks.
7249     - *
7250     - * When a rq schedules a lower priority task than what was currently
7251     - * running, the next CPU with overloaded RT tasks is examined first.
7252     - * That is, if CPU 1 and 5 are overloaded, and CPU 3 schedules a lower
7253     - * priority task, it will send an IPI first to CPU 5, then CPU 5 will
7254     - * send to CPU 1 if it is still overloaded. CPU 1 will clear the
7255     - * rq->rt.push_flags if RT_PUSH_IPI_RESTART is not set.
7256     - *
7257     - * The first CPU to notice IPI_RESTART is set, will clear that flag and then
7258     - * send an IPI to the next overloaded CPU after the rq->cpu and not the next
7259     - * CPU after push_cpu. That is, if CPU 1, 4 and 5 are overloaded when CPU 3
7260     - * schedules a lower priority task, and the IPI_RESTART gets set while the
7261     - * handling is being done on CPU 5, it will clear the flag and send it back to
7262     - * CPU 4 instead of CPU 1.
7263     - *
7264     - * Note, the above logic can be disabled by turning off the sched_feature
7265     - * RT_PUSH_IPI. Then the rq lock of the overloaded CPU will simply be
7266     - * taken by the CPU requesting a pull and the waiting RT task will be pulled
7267     - * by that CPU. This may be fine for machines with few CPUs.
7268     - */
7269     -static void tell_cpu_to_push(struct rq *rq)
7270     +static inline void rto_start_unlock(atomic_t *v)
7271     {
7272     - int cpu;
7273     + atomic_set_release(v, 0);
7274     +}
7275    
7276     - if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
7277     - raw_spin_lock(&rq->rt.push_lock);
7278     - /* Make sure it's still executing */
7279     - if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
7280     - /*
7281     - * Tell the IPI to restart the loop as things have
7282     - * changed since it started.
7283     - */
7284     - rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
7285     - raw_spin_unlock(&rq->rt.push_lock);
7286     - return;
7287     - }
7288     - raw_spin_unlock(&rq->rt.push_lock);
7289     - }
7290     +static void tell_cpu_to_push(struct rq *rq)
7291     +{
7292     + int cpu = -1;
7293    
7294     - /* When here, there's no IPI going around */
7295     + /* Keep the loop going if the IPI is currently active */
7296     + atomic_inc(&rq->rd->rto_loop_next);
7297    
7298     - rq->rt.push_cpu = rq->cpu;
7299     - cpu = find_next_push_cpu(rq);
7300     - if (cpu >= nr_cpu_ids)
7301     + /* Only one CPU can initiate a loop at a time */
7302     + if (!rto_start_trylock(&rq->rd->rto_loop_start))
7303     return;
7304    
7305     - rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
7306     + raw_spin_lock(&rq->rd->rto_lock);
7307     +
7308     + /*
7309     + * The rto_cpu is updated under the lock; if it holds a valid cpu
7310     + * then the IPI is still running and will continue due to the
7311     + * update to loop_next, and nothing needs to be done here.
7312     + * Otherwise it is finishing up and an IPI needs to be sent.
7313     + */
7314     + if (rq->rd->rto_cpu < 0)
7315     + cpu = rto_next_cpu(rq);
7316    
7317     - irq_work_queue_on(&rq->rt.push_work, cpu);
7318     + raw_spin_unlock(&rq->rd->rto_lock);
7319     +
7320     + rto_start_unlock(&rq->rd->rto_loop_start);
7321     +
7322     + if (cpu >= 0)
7323     + irq_work_queue_on(&rq->rd->rto_push_work, cpu);
7324     }
7325    
7326     /* Called from hardirq context */
7327     -static void try_to_push_tasks(void *arg)
7328     +void rto_push_irq_work_func(struct irq_work *work)
7329     {
7330     - struct rt_rq *rt_rq = arg;
7331     - struct rq *rq, *src_rq;
7332     - int this_cpu;
7333     + struct rq *rq;
7334     int cpu;
7335    
7336     - this_cpu = rt_rq->push_cpu;
7337     + rq = this_rq();
7338    
7339     - /* Paranoid check */
7340     - BUG_ON(this_cpu != smp_processor_id());
7341     -
7342     - rq = cpu_rq(this_cpu);
7343     - src_rq = rq_of_rt_rq(rt_rq);
7344     -
7345     -again:
7346     + /*
7347     + * We do not need to grab the lock to check for has_pushable_tasks.
7348     + * When it gets updated, a check is made whether a push is possible.
7349     + */
7350     if (has_pushable_tasks(rq)) {
7351     raw_spin_lock(&rq->lock);
7352     - push_rt_task(rq);
7353     + push_rt_tasks(rq);
7354     raw_spin_unlock(&rq->lock);
7355     }
7356    
7357     - /* Pass the IPI to the next rt overloaded queue */
7358     - raw_spin_lock(&rt_rq->push_lock);
7359     - /*
7360     - * If the source queue changed since the IPI went out,
7361     - * we need to restart the search from that CPU again.
7362     - */
7363     - if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
7364     - rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
7365     - rt_rq->push_cpu = src_rq->cpu;
7366     - }
7367     + raw_spin_lock(&rq->rd->rto_lock);
7368    
7369     - cpu = find_next_push_cpu(src_rq);
7370     + /* Pass the IPI to the next rt overloaded queue */
7371     + cpu = rto_next_cpu(rq);
7372    
7373     - if (cpu >= nr_cpu_ids)
7374     - rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
7375     - raw_spin_unlock(&rt_rq->push_lock);
7376     + raw_spin_unlock(&rq->rd->rto_lock);
7377    
7378     - if (cpu >= nr_cpu_ids)
7379     + if (cpu < 0)
7380     return;
7381    
7382     - /*
7383     - * It is possible that a restart caused this CPU to be
7384     - * chosen again. Don't bother with an IPI, just see if we
7385     - * have more to push.
7386     - */
7387     - if (unlikely(cpu == rq->cpu))
7388     - goto again;
7389     -
7390     /* Try the next RT overloaded CPU */
7391     - irq_work_queue_on(&rt_rq->push_work, cpu);
7392     -}
7393     -
7394     -static void push_irq_work_func(struct irq_work *work)
7395     -{
7396     - struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
7397     -
7398     - try_to_push_tasks(rt_rq);
7399     + irq_work_queue_on(&rq->rd->rto_push_work, cpu);
7400     }
7401     #endif /* HAVE_RT_PUSH_IPI */
7402    
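
A worked pass through the new machinery, under an assumed configuration of rto_mask = {1, 5} with rto_cpu == -1 and rto_loop == rto_loop_next == 0:

        /*
         * CPU 3 schedules a lower-priority task:
         *   tell_cpu_to_push(): rto_loop_next -> 1, wins rto_loop_start;
         *     rto_cpu < 0, so rto_next_cpu() -> CPU 1; irq work queued there.
         *   CPU 1 (irq work): push_rt_tasks(); rto_next_cpu() -> CPU 5.
         *   CPU 5 (irq work): push_rt_tasks(); rto_next_cpu() runs off the
         *     mask, sets rto_cpu = -1, then sees rto_loop_next (1) !=
         *     rto_loop (0), so it sets rto_loop = 1 and rescans from the
         *     first masked CPU.
         *   The second sweep ends with rto_loop == rto_loop_next, so
         *     rto_next_cpu() returns -1 and no further IPI is queued.
         */
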
7403     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
7404     index 3b448ba82225..b732e779fe7d 100644
7405     --- a/kernel/sched/sched.h
7406     +++ b/kernel/sched/sched.h
7407     @@ -502,7 +502,7 @@ static inline int rt_bandwidth_enabled(void)
7408     }
7409    
7410     /* RT IPI pull logic requires IRQ_WORK */
7411     -#ifdef CONFIG_IRQ_WORK
7412     +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
7413     # define HAVE_RT_PUSH_IPI
7414     #endif
7415    
7416     @@ -524,12 +524,6 @@ struct rt_rq {
7417     unsigned long rt_nr_total;
7418     int overloaded;
7419     struct plist_head pushable_tasks;
7420     -#ifdef HAVE_RT_PUSH_IPI
7421     - int push_flags;
7422     - int push_cpu;
7423     - struct irq_work push_work;
7424     - raw_spinlock_t push_lock;
7425     -#endif
7426     #endif /* CONFIG_SMP */
7427     int rt_queued;
7428    
7429     @@ -638,6 +632,19 @@ struct root_domain {
7430     struct dl_bw dl_bw;
7431     struct cpudl cpudl;
7432    
7433     +#ifdef HAVE_RT_PUSH_IPI
7434     + /*
7435     + * For IPI pull requests, loop across the rto_mask.
7436     + */
7437     + struct irq_work rto_push_work;
7438     + raw_spinlock_t rto_lock;
7439     + /* These are only updated and read within rto_lock */
7440     + int rto_loop;
7441     + int rto_cpu;
7442     + /* These atomics are updated outside of a lock */
7443     + atomic_t rto_loop_next;
7444     + atomic_t rto_loop_start;
7445     +#endif
7446     /*
7447     * The "RT overload" flag: it gets set if a CPU has more than
7448     * one runnable RT task.
7449     @@ -655,6 +662,9 @@ extern void init_defrootdomain(void);
7450     extern int sched_init_domains(const struct cpumask *cpu_map);
7451     extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
7452    
7453     +#ifdef HAVE_RT_PUSH_IPI
7454     +extern void rto_push_irq_work_func(struct irq_work *work);
7455     +#endif
7456     #endif /* CONFIG_SMP */
7457    
7458     /*
7459     diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
7460     index 6798276d29af..093f2ceba2e2 100644
7461     --- a/kernel/sched/topology.c
7462     +++ b/kernel/sched/topology.c
7463     @@ -269,6 +269,12 @@ static int init_rootdomain(struct root_domain *rd)
7464     if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
7465     goto free_dlo_mask;
7466    
7467     +#ifdef HAVE_RT_PUSH_IPI
7468     + rd->rto_cpu = -1;
7469     + raw_spin_lock_init(&rd->rto_lock);
7470     + init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
7471     +#endif
7472     +
7473     init_dl_bw(&rd->dl_bw);
7474     if (cpudl_init(&rd->cpudl) != 0)
7475     goto free_rto_mask;
7476     diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
7477     index e24388a863a7..468fb7cd1221 100644
7478     --- a/lib/mpi/mpi-pow.c
7479     +++ b/lib/mpi/mpi-pow.c
7480     @@ -26,6 +26,7 @@
7481     * however I decided to publish this code under the plain GPL.
7482     */
7483    
7484     +#include <linux/sched.h>
7485     #include <linux/string.h>
7486     #include "mpi-internal.h"
7487     #include "longlong.h"
7488     @@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
7489     }
7490     e <<= 1;
7491     c--;
7492     + cond_resched();
7493     }
7494    
7495     i--;
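
The cond_resched() added above is the stock remedy for a CPU-bound, process-context kernel loop whose iteration count scales with untrusted input (here, the bit length of the exponent): yield regularly so enormous operands cannot trigger softlockup or RCU-stall warnings. The shape of the fix, with illustrative names:

        /* Process context only: cond_resched() may sleep. */
        while (bits_remaining(exp)) {           /* hypothetical condition */
                process_one_exponent_bit();     /* hypothetical body */
                cond_resched();                 /* yield if a resched is due */
        }
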
7496     diff --git a/mm/z3fold.c b/mm/z3fold.c
7497     index b2ba2ba585f3..39e19125d6a0 100644
7498     --- a/mm/z3fold.c
7499     +++ b/mm/z3fold.c
7500     @@ -404,8 +404,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
7501     WARN_ON(z3fold_page_trylock(zhdr));
7502     else
7503     z3fold_page_lock(zhdr);
7504     - if (test_bit(PAGE_STALE, &page->private) ||
7505     - !test_and_clear_bit(NEEDS_COMPACTING, &page->private)) {
7506     + if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
7507     z3fold_page_unlock(zhdr);
7508     return;
7509     }
7510     @@ -413,6 +412,11 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
7511     list_del_init(&zhdr->buddy);
7512     spin_unlock(&pool->lock);
7513    
7514     + if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
7515     + atomic64_dec(&pool->pages_nr);
7516     + return;
7517     + }
7518     +
7519     z3fold_compact_page(zhdr);
7520     unbuddied = get_cpu_ptr(pool->unbuddied);
7521     fchunks = num_free_chunks(zhdr);
7522     @@ -753,9 +757,11 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
7523     list_del_init(&zhdr->buddy);
7524     spin_unlock(&pool->lock);
7525     zhdr->cpu = -1;
7526     + kref_get(&zhdr->refcount);
7527     do_compact_page(zhdr, true);
7528     return;
7529     }
7530     + kref_get(&zhdr->refcount);
7531     queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
7532     z3fold_page_unlock(zhdr);
7533     }
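
The kref changes above enforce one lifetime rule: whoever schedules asynchronous compaction holds a reference across it, and do_compact_page() drops that reference first, bailing out entirely if it was the last one. A sketch of the queueing side, with an invented wrapper name:

        static void queue_compact_work(struct z3fold_pool *pool,
                                       struct z3fold_header *zhdr)
        {
                kref_get(&zhdr->refcount);      /* pin across the async work */
                queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
        }
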
7534     diff --git a/net/9p/client.c b/net/9p/client.c
7535     index 4674235b0d9b..b433aff5ff13 100644
7536     --- a/net/9p/client.c
7537     +++ b/net/9p/client.c
7538     @@ -82,7 +82,7 @@ int p9_show_client_options(struct seq_file *m, struct p9_client *clnt)
7539     {
7540     if (clnt->msize != 8192)
7541     seq_printf(m, ",msize=%u", clnt->msize);
7542     - seq_printf(m, "trans=%s", clnt->trans_mod->name);
7543     + seq_printf(m, ",trans=%s", clnt->trans_mod->name);
7544    
7545     switch (clnt->proto_version) {
7546     case p9_proto_legacy:
7547     @@ -773,8 +773,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
7548     }
7549     again:
7550     /* Wait for the response */
7551     - err = wait_event_interruptible(*req->wq,
7552     - req->status >= REQ_STATUS_RCVD);
7553     + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
7554    
7555     /*
7556     * Make sure our req is coherent with regard to updates in other
7557     diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
7558     index 903a190319b9..985046ae4231 100644
7559     --- a/net/9p/trans_fd.c
7560     +++ b/net/9p/trans_fd.c
7561     @@ -724,12 +724,12 @@ static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt)
7562     {
7563     if (clnt->trans_mod == &p9_tcp_trans) {
7564     if (clnt->trans_opts.tcp.port != P9_PORT)
7565     - seq_printf(m, "port=%u", clnt->trans_opts.tcp.port);
7566     + seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port);
7567     } else if (clnt->trans_mod == &p9_fd_trans) {
7568     if (clnt->trans_opts.fd.rfd != ~0)
7569     - seq_printf(m, "rfd=%u", clnt->trans_opts.fd.rfd);
7570     + seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd);
7571     if (clnt->trans_opts.fd.wfd != ~0)
7572     - seq_printf(m, "wfd=%u", clnt->trans_opts.fd.wfd);
7573     + seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd);
7574     }
7575     return 0;
7576     }
7577     diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
7578     index f24b25c25106..f3a4efcf1456 100644
7579     --- a/net/9p/trans_virtio.c
7580     +++ b/net/9p/trans_virtio.c
7581     @@ -286,8 +286,8 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
7582     if (err == -ENOSPC) {
7583     chan->ring_bufs_avail = 0;
7584     spin_unlock_irqrestore(&chan->lock, flags);
7585     - err = wait_event_interruptible(*chan->vc_wq,
7586     - chan->ring_bufs_avail);
7587     + err = wait_event_killable(*chan->vc_wq,
7588     + chan->ring_bufs_avail);
7589     if (err == -ERESTARTSYS)
7590     return err;
7591    
7592     @@ -327,7 +327,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
7593     * Other zc request to finish here
7594     */
7595     if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
7596     - err = wait_event_interruptible(vp_wq,
7597     + err = wait_event_killable(vp_wq,
7598     (atomic_read(&vp_pinned) < chan->p9_max_pages));
7599     if (err == -ERESTARTSYS)
7600     return err;
7601     @@ -471,8 +471,8 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
7602     if (err == -ENOSPC) {
7603     chan->ring_bufs_avail = 0;
7604     spin_unlock_irqrestore(&chan->lock, flags);
7605     - err = wait_event_interruptible(*chan->vc_wq,
7606     - chan->ring_bufs_avail);
7607     + err = wait_event_killable(*chan->vc_wq,
7608     + chan->ring_bufs_avail);
7609     if (err == -ERESTARTSYS)
7610     goto err_out;
7611    
7612     @@ -489,8 +489,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
7613     virtqueue_kick(chan->vq);
7614     spin_unlock_irqrestore(&chan->lock, flags);
7615     p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
7616     - err = wait_event_interruptible(*req->wq,
7617     - req->status >= REQ_STATUS_RCVD);
7618     + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
7619     /*
7620     * Non kernel buffers are pinned, unpin them
7621     */
7622     diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
7623     index 6ad3e043c617..325c56043007 100644
7624     --- a/net/9p/trans_xen.c
7625     +++ b/net/9p/trans_xen.c
7626     @@ -156,8 +156,8 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
7627     ring = &priv->rings[num];
7628    
7629     again:
7630     - while (wait_event_interruptible(ring->wq,
7631     - p9_xen_write_todo(ring, size)) != 0)
7632     + while (wait_event_killable(ring->wq,
7633     + p9_xen_write_todo(ring, size)) != 0)
7634     ;
7635    
7636     spin_lock_irqsave(&ring->lock, flags);
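
All of the 9p conversions above share one motivation: wait_event_interruptible() aborts with -ERESTARTSYS on any signal, which 9p cannot honor once a request is already on the wire, while wait_event_killable() sleeps through ordinary signals and bails out only for fatal ones. Side by side:

        /* Before: any signal (e.g. SIGINT) interrupts an in-flight RPC. */
        err = wait_event_interruptible(*req->wq, req->status >= REQ_STATUS_RCVD);

        /* After: only a fatal signal (e.g. SIGKILL) breaks the wait. */
        err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
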
7637     diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
7638     index 489610ac1cdd..bf9d079cbafd 100644
7639     --- a/net/ceph/crypto.c
7640     +++ b/net/ceph/crypto.c
7641     @@ -37,7 +37,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
7642     return -ENOTSUPP;
7643     }
7644    
7645     - WARN_ON(!key->len);
7646     + if (!key->len)
7647     + return -EINVAL;
7648     +
7649     key->key = kmemdup(buf, key->len, GFP_NOIO);
7650     if (!key->key) {
7651     ret = -ENOMEM;
7652     diff --git a/net/nfc/core.c b/net/nfc/core.c
7653     index 5cf33df888c3..c699d64a0753 100644
7654     --- a/net/nfc/core.c
7655     +++ b/net/nfc/core.c
7656     @@ -1106,7 +1106,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
7657     err_free_dev:
7658     kfree(dev);
7659    
7660     - return ERR_PTR(rc);
7661     + return NULL;
7662     }
7663     EXPORT_SYMBOL(nfc_allocate_device);
7664    
7665     diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
7666     index 992594b7cc6b..af7893501e40 100644
7667     --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
7668     +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
7669     @@ -133,6 +133,10 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
7670     if (ret)
7671     goto out_err;
7672    
7673     + /* Bump page refcnt so Send completion doesn't release
7674     + * the rq_buffer before all retransmits are complete.
7675     + */
7676     + get_page(virt_to_page(rqst->rq_buffer));
7677     ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0);
7678     if (ret)
7679     goto out_unmap;
7680     @@ -165,7 +169,6 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
7681     return -EINVAL;
7682     }
7683    
7684     - /* svc_rdma_sendto releases this page */
7685     page = alloc_page(RPCRDMA_DEF_GFP);
7686     if (!page)
7687     return -ENOMEM;
7688     @@ -184,6 +187,7 @@ xprt_rdma_bc_free(struct rpc_task *task)
7689     {
7690     struct rpc_rqst *rqst = task->tk_rqstp;
7691    
7692     + put_page(virt_to_page(rqst->rq_buffer));
7693     kfree(rqst->rq_rbuffer);
7694     }
7695    
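
The get_page()/put_page() pair above splits ownership of the backchannel send buffer so that the Send completion's implicit page put can no longer free rq_buffer while retransmits may still need it. Reduced to its skeleton (function names illustrative):

        static int bc_sendto(struct rpc_rqst *rqst)
        {
                /* Extra ref: the Send completion performs one put_page(). */
                get_page(virt_to_page(rqst->rq_buffer));
                return post_send(rqst);         /* hypothetical */
        }

        static void bc_free(struct rpc_rqst *rqst)
        {
                put_page(virt_to_page(rqst->rq_buffer));        /* our ref */
                kfree(rqst->rq_rbuffer);
        }
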
7696     diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
7697     index a93a4235a332..10e7ef7a8804 100644
7698     --- a/sound/core/pcm_lib.c
7699     +++ b/sound/core/pcm_lib.c
7700     @@ -248,8 +248,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream,
7701     runtime->rate);
7702     *audio_tstamp = ns_to_timespec(audio_nsecs);
7703     }
7704     - runtime->status->audio_tstamp = *audio_tstamp;
7705     - runtime->status->tstamp = *curr_tstamp;
7706     + if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
7707     + runtime->status->audio_tstamp = *audio_tstamp;
7708     + runtime->status->tstamp = *curr_tstamp;
7709     + }
7710    
7711     /*
7712     * re-take a driver timestamp to let apps detect if the reference tstamp
7713     diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
7714     index 59127b6ef39e..e00f7e399e46 100644
7715     --- a/sound/core/timer_compat.c
7716     +++ b/sound/core/timer_compat.c
7717     @@ -66,11 +66,11 @@ static int snd_timer_user_info_compat(struct file *file,
7718     struct snd_timer *t;
7719    
7720     tu = file->private_data;
7721     - if (snd_BUG_ON(!tu->timeri))
7722     - return -ENXIO;
7723     + if (!tu->timeri)
7724     + return -EBADFD;
7725     t = tu->timeri->timer;
7726     - if (snd_BUG_ON(!t))
7727     - return -ENXIO;
7728     + if (!t)
7729     + return -EBADFD;
7730     memset(&info, 0, sizeof(info));
7731     info.card = t->card ? t->card->number : -1;
7732     if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
7733     @@ -99,8 +99,8 @@ static int snd_timer_user_status_compat(struct file *file,
7734     struct snd_timer_status32 status;
7735    
7736     tu = file->private_data;
7737     - if (snd_BUG_ON(!tu->timeri))
7738     - return -ENXIO;
7739     + if (!tu->timeri)
7740     + return -EBADFD;
7741     memset(&status, 0, sizeof(status));
7742     status.tstamp.tv_sec = tu->tstamp.tv_sec;
7743     status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
7744     diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
7745     index e43af18d4383..8632301489fa 100644
7746     --- a/sound/core/vmaster.c
7747     +++ b/sound/core/vmaster.c
7748     @@ -495,7 +495,9 @@ EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
7749     * Returns 0 if successful, or a negative error code.
7750     */
7751     int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
7752     - int (*func)(struct snd_kcontrol *, void *),
7753     + int (*func)(struct snd_kcontrol *vslave,
7754     + struct snd_kcontrol *slave,
7755     + void *arg),
7756     void *arg)
7757     {
7758     struct link_master *master;
7759     @@ -507,7 +509,7 @@ int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
7760     if (err < 0)
7761     return err;
7762     list_for_each_entry(slave, &master->slaves, list) {
7763     - err = func(&slave->slave, arg);
7764     + err = func(slave->kctl, &slave->slave, arg);
7765     if (err < 0)
7766     return err;
7767     }
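
With the widened signature above, a callback sees both handles at once: vslave, the virtual-master wrapper carrying the cached value, and slave, the codec's own kcontrol that writes must target. A minimal conforming callback, mirroring the unmute case in the hda hunks further below (the put helper is assumed to write a value to a kcontrol):

        static int force_unmute(struct snd_kcontrol *vslave,
                                struct snd_kcontrol *slave,
                                void *arg)
        {
                /* Read cached state via vslave, apply values via slave. */
                return put_kctl_with_value(slave, 1);
        }
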
7768     diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
7769     index 81acc20c2535..f21633cd9b38 100644
7770     --- a/sound/hda/hdmi_chmap.c
7771     +++ b/sound/hda/hdmi_chmap.c
7772     @@ -746,7 +746,7 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
7773     memset(pcm_chmap, 0, sizeof(pcm_chmap));
7774     chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap);
7775    
7776     - for (i = 0; i < sizeof(chmap); i++)
7777     + for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++)
7778     ucontrol->value.integer.value[i] = pcm_chmap[i];
7779    
7780     return 0;
7781     diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
7782     index a0989d231fd0..417abbb1f72c 100644
7783     --- a/sound/pci/hda/hda_codec.c
7784     +++ b/sound/pci/hda/hda_codec.c
7785     @@ -1823,7 +1823,9 @@ struct slave_init_arg {
7786     };
7787    
7788     /* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
7789     -static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
7790     +static int init_slave_0dB(struct snd_kcontrol *slave,
7791     + struct snd_kcontrol *kctl,
7792     + void *_arg)
7793     {
7794     struct slave_init_arg *arg = _arg;
7795     int _tlv[4];
7796     @@ -1860,7 +1862,7 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
7797     arg->step = step;
7798     val = -tlv[2] / step;
7799     if (val > 0) {
7800     - put_kctl_with_value(kctl, val);
7801     + put_kctl_with_value(slave, val);
7802     return val;
7803     }
7804    
7805     @@ -1868,7 +1870,9 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
7806     }
7807    
7808     /* unmute the slave via snd_ctl_apply_vmaster_slaves() */
7809     -static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
7810     +static int init_slave_unmute(struct snd_kcontrol *slave,
7811     + struct snd_kcontrol *kctl,
7812     + void *_arg)
7813     {
7814     return put_kctl_with_value(slave, 1);
7815     }
7816     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
7817     index f958d8d54d15..c71dcacea807 100644
7818     --- a/sound/pci/hda/hda_intel.c
7819     +++ b/sound/pci/hda/hda_intel.c
7820     @@ -2463,6 +2463,9 @@ static const struct pci_device_id azx_ids[] = {
7821     /* AMD Hudson */
7822     { PCI_DEVICE(0x1022, 0x780d),
7823     .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
7824     + /* AMD Raven */
7825     + { PCI_DEVICE(0x1022, 0x15e3),
7826     + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
7827     /* ATI HDMI */
7828     { PCI_DEVICE(0x1002, 0x0002),
7829     .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
7830     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7831     index dce0682c5001..7c39114d124f 100644
7832     --- a/sound/pci/hda/patch_realtek.c
7833     +++ b/sound/pci/hda/patch_realtek.c
7834     @@ -341,6 +341,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
7835     case 0x10ec0299:
7836     alc_update_coef_idx(codec, 0x10, 1<<9, 0);
7837     break;
7838     + case 0x10ec0275:
7839     + alc_update_coef_idx(codec, 0xe, 0, 1<<0);
7840     + break;
7841     case 0x10ec0293:
7842     alc_update_coef_idx(codec, 0xa, 1<<13, 0);
7843     break;
7844     @@ -6863,7 +6866,7 @@ static int patch_alc269(struct hda_codec *codec)
7845     case 0x10ec0703:
7846     spec->codec_variant = ALC269_TYPE_ALC700;
7847     spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
7848     - alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
7849     + alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
7850     break;
7851    
7852     }
7853     diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
7854     index abfb710df7cb..7a312168f864 100644
7855     --- a/sound/soc/sunxi/sun8i-codec.c
7856     +++ b/sound/soc/sunxi/sun8i-codec.c
7857     @@ -73,6 +73,7 @@
7858     #define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK GENMASK(11, 8)
7859     #define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK GENMASK(5, 4)
7860     #define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK GENMASK(8, 6)
7861     +#define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9)
7862    
7863     struct sun8i_codec {
7864     struct device *dev;
7865     @@ -170,11 +171,11 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7866    
7867     /* clock masters */
7868     switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
7869     - case SND_SOC_DAIFMT_CBS_CFS: /* DAI Slave */
7870     - value = 0x0; /* Codec Master */
7871     + case SND_SOC_DAIFMT_CBS_CFS: /* Codec slave, DAI master */
7872     + value = 0x1;
7873     break;
7874     - case SND_SOC_DAIFMT_CBM_CFM: /* DAI Master */
7875     - value = 0x1; /* Codec Slave */
7876     + case SND_SOC_DAIFMT_CBM_CFM: /* Codec master, DAI slave */
7877     + value = 0x0;
7878     break;
7879     default:
7880     return -EINVAL;
7881     @@ -199,7 +200,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7882     value << SUN8I_AIF1CLK_CTRL_AIF1_BCLK_INV);
7883     regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
7884     BIT(SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV),
7885     - value << SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV);
7886     + !value << SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV);
7887    
7888     /* DAI format */
7889     switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
7890     @@ -226,12 +227,57 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7891     return 0;
7892     }
7893    
7894     +struct sun8i_codec_clk_div {
7895     + u8 div;
7896     + u8 val;
7897     +};
7898     +
7899     +static const struct sun8i_codec_clk_div sun8i_codec_bclk_div[] = {
7900     + { .div = 1, .val = 0 },
7901     + { .div = 2, .val = 1 },
7902     + { .div = 4, .val = 2 },
7903     + { .div = 6, .val = 3 },
7904     + { .div = 8, .val = 4 },
7905     + { .div = 12, .val = 5 },
7906     + { .div = 16, .val = 6 },
7907     + { .div = 24, .val = 7 },
7908     + { .div = 32, .val = 8 },
7909     + { .div = 48, .val = 9 },
7910     + { .div = 64, .val = 10 },
7911     + { .div = 96, .val = 11 },
7912     + { .div = 128, .val = 12 },
7913     + { .div = 192, .val = 13 },
7914     +};
7915     +
7916     +static u8 sun8i_codec_get_bclk_div(struct sun8i_codec *scodec,
7917     + unsigned int rate,
7918     + unsigned int word_size)
7919     +{
7920     + unsigned long clk_rate = clk_get_rate(scodec->clk_module);
7921     + unsigned int div = clk_rate / rate / word_size / 2;
7922     + unsigned int best_val = 0, best_diff = ~0;
7923     + int i;
7924     +
7925     + for (i = 0; i < ARRAY_SIZE(sun8i_codec_bclk_div); i++) {
7926     + const struct sun8i_codec_clk_div *bdiv = &sun8i_codec_bclk_div[i];
7927     + unsigned int diff = abs(bdiv->div - div);
7928     +
7929     + if (diff < best_diff) {
7930     + best_diff = diff;
7931     + best_val = bdiv->val;
7932     + }
7933     + }
7934     +
7935     + return best_val;
7936     +}
7937     +
7938     static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
7939     struct snd_pcm_hw_params *params,
7940     struct snd_soc_dai *dai)
7941     {
7942     struct sun8i_codec *scodec = snd_soc_codec_get_drvdata(dai->codec);
7943     int sample_rate;
7944     + u8 bclk_div;
7945    
7946     /*
7947     * The CPU DAI handles only a sample of 16 bits. Configure the
7948     @@ -241,6 +287,11 @@ static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
7949     SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK,
7950     SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_16);
7951    
7952     + bclk_div = sun8i_codec_get_bclk_div(scodec, params_rate(params), 16);
7953     + regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
7954     + SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK,
7955     + bclk_div << SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV);
7956     +
7957     regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
7958     SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK,
7959     SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_16);
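
A worked example for sun8i_codec_get_bclk_div(), assuming (for illustration only) a 24.576 MHz module clock and a 48 kHz, 16-bit stream:

        div = clk_rate / rate / word_size / 2
            = 24576000 / 48000 / 16 / 2
            = 16

The nearest table entry is { .div = 16, .val = 6 }, so 6 is programmed into the AIF1_BCLK_DIV field; a rate whose exact divider is absent from the table picks the numerically closest .div.
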
7960     diff --git a/sound/usb/clock.c b/sound/usb/clock.c
7961     index 26dd5f20f149..eb3396ffba4c 100644
7962     --- a/sound/usb/clock.c
7963     +++ b/sound/usb/clock.c
7964     @@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor *
7965     while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
7966     ctrl_iface->extralen,
7967     cs, UAC2_CLOCK_SOURCE))) {
7968     - if (cs->bClockID == clock_id)
7969     + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
7970     return cs;
7971     }
7972    
7973     @@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor *
7974     while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
7975     ctrl_iface->extralen,
7976     cs, UAC2_CLOCK_SELECTOR))) {
7977     - if (cs->bClockID == clock_id)
7978     + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) {
7979     + if (cs->bLength < 5 + cs->bNrInPins)
7980     + return NULL;
7981     return cs;
7982     + }
7983     }
7984    
7985     return NULL;
7986     @@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor *
7987     while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
7988     ctrl_iface->extralen,
7989     cs, UAC2_CLOCK_MULTIPLIER))) {
7990     - if (cs->bClockID == clock_id)
7991     + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
7992     return cs;
7993     }
7994    
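
The three clock lookups above apply the standard defense for device-supplied USB descriptors, and the mixer.c hunks that follow do the same for feature and selector units: never trust bLength. The fixed-size portion must be present before any field past the header is read, and for variable-length descriptors the advertised array must fit too. Distilled into its two checks:

        if (cs->bLength < sizeof(*cs))          /* fixed part truncated */
                return NULL;
        if (cs->bLength < 5 + cs->bNrInPins)    /* pin array overruns */
                return NULL;
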
7995     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
7996     index 91bc8f18791e..2b835cca41b1 100644
7997     --- a/sound/usb/mixer.c
7998     +++ b/sound/usb/mixer.c
7999     @@ -1469,6 +1469,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
8000     __u8 *bmaControls;
8001    
8002     if (state->mixer->protocol == UAC_VERSION_1) {
8003     + if (hdr->bLength < 7) {
8004     + usb_audio_err(state->chip,
8005     + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
8006     + unitid);
8007     + return -EINVAL;
8008     + }
8009     csize = hdr->bControlSize;
8010     if (!csize) {
8011     usb_audio_dbg(state->chip,
8012     @@ -1486,6 +1492,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
8013     }
8014     } else {
8015     struct uac2_feature_unit_descriptor *ftr = _ftr;
8016     + if (hdr->bLength < 6) {
8017     + usb_audio_err(state->chip,
8018     + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
8019     + unitid);
8020     + return -EINVAL;
8021     + }
8022     csize = 4;
8023     channels = (hdr->bLength - 6) / 4 - 1;
8024     bmaControls = ftr->bmaControls;
8025     @@ -2086,7 +2098,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
8026     const struct usbmix_name_map *map;
8027     char **namelist;
8028    
8029     - if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
8030     + if (desc->bLength < 5 || !desc->bNrInPins ||
8031     + desc->bLength < 5 + desc->bNrInPins) {
8032     usb_audio_err(state->chip,
8033     "invalid SELECTOR UNIT descriptor %d\n", unitid);
8034     return -EINVAL;